swift – AVAssetWriter continuous segments


Overview: I want to record a series of segments that play back seamlessly when joined together, either in a video player or via ffmpeg -f concat.

In either case, I get a very noticeable audio hiccup at every segment join point.

My current strategy is to maintain two AVAssetWriter instances. At each cut-over point I start a new writer, wait until it is ready, and then start giving it samples. Once the video and audio samples up to that point in time are done, I close the previous writer.

How can I modify this to get a continuous recording across clips, and what is the root cause of the problem?

import Foundation
import UIKit
import AVFoundation

class StreamController: UIViewController, AVCaptureAudioDataOutputSampleBufferDelegate, AVCaptureVideoDataOutputSampleBufferDelegate {
    @IBOutlet weak var previewView: UIView!

    var closingVideoInput: AVAssetWriterInput?
    var closingAudioInput: AVAssetWriterInput?
    var closingAssetWriter: AVAssetWriter?

    var currentVideoInput: AVAssetWriterInput?
    var currentAudioInput: AVAssetWriterInput?
    var currentAssetWriter: AVAssetWriter?

    var nextVideoInput: AVAssetWriterInput?
    var nextAudioInput: AVAssetWriterInput?
    var nextAssetWriter: AVAssetWriter?

    var previewLayer: AVCaptureVideoPreviewLayer?
    var videoHelper: VideoHelper?

    var startTime: NSTimeInterval = 0

    override func viewDidLoad() {
        super.viewDidLoad()
        startTime = NSDate().timeIntervalSince1970
        createSegmentWriter()
        videoHelper = VideoHelper()
        videoHelper!.delegate = self
        videoHelper!.startSession()
        NSTimer.scheduledTimerWithTimeInterval(5, target: self, selector: "createSegmentWriter", userInfo: nil, repeats: true)
    }

    func createSegmentWriter() {
        print("Creating segment writer at t=\(NSDate().timeIntervalSince1970 - self.startTime)")
        nextAssetWriter = try! AVAssetWriter(URL: NSURL(fileURLWithPath: OutputFilenameHelper.instance.pathForOutput()), fileType: AVFileTypeMPEG4)
        nextAssetWriter!.shouldOptimizeForNetworkUse = true

        let videoSettings: [String: AnyObject] = [AVVideoCodecKey: AVVideoCodecH264, AVVideoWidthKey: 960, AVVideoHeightKey: 540]
        nextVideoInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: videoSettings)
        nextVideoInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextVideoInput!)

        let audioSettings: [String: AnyObject] = [
            AVFormatIDKey: NSNumber(unsignedInt: kAudioFormatMPEG4AAC),
            AVSampleRateKey: 44100.0,
            AVNumberOfChannelsKey: 2,
        ]
        nextAudioInput = AVAssetWriterInput(mediaType: AVMediaTypeAudio, outputSettings: audioSettings)
        nextAudioInput!.expectsMediaDataInRealTime = true
        nextAssetWriter?.addInput(nextAudioInput!)

        nextAssetWriter!.startWriting()
    }

    override func viewDidAppear(animated: Bool) {
        super.viewDidAppear(animated)
        previewLayer = AVCaptureVideoPreviewLayer(session: videoHelper!.captureSession)
        previewLayer!.frame = self.previewView.bounds
        previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill
        if ((previewLayer?.connection?.supportsVideoOrientation) != nil) {
            previewLayer?.connection?.videoOrientation = AVCaptureVideoOrientation.LandscapeRight
        }
        self.previewView.layer.addSublayer(previewLayer!)
    }

    func closeWriter() {
        if videoFinished && audioFinished {
            let outputFile = closingAssetWriter?.outputURL.pathComponents?.last
            closingAssetWriter?.finishWritingWithCompletionHandler() {
                let delta = NSDate().timeIntervalSince1970 - self.startTime
                print("segment \(outputFile) finished at t=\(delta)")
            }
            self.closingAudioInput = nil
            self.closingVideoInput = nil
            self.closingAssetWriter = nil
            audioFinished = false
            videoFinished = false
        }
    }

    func closingVideoFinished() {
        if closingVideoInput != nil {
            videoFinished = true
            closeWriter()
        }
    }

    func closingAudioFinished() {
        if closingAudioInput != nil {
            audioFinished = true
            closeWriter()
        }
    }

    var closingTime: CMTime = kCMTimeZero
    var audioFinished = false
    var videoFinished = false

    func captureOutput(captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBufferRef, fromConnection connection: AVCaptureConnection!) {
        let sampleTime: CMTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)

        if let nextWriter = nextAssetWriter {
            if nextWriter.status.rawValue != 0 {
                print("Switching asset writers at t=\(NSDate().timeIntervalSince1970 - self.startTime)")

                closingAssetWriter = currentAssetWriter
                closingVideoInput = currentVideoInput
                closingAudioInput = currentAudioInput

                currentAssetWriter = nextAssetWriter
                currentVideoInput = nextVideoInput
                currentAudioInput = nextAudioInput

                nextAssetWriter = nil
                nextVideoInput = nil
                nextAudioInput = nil

                closingTime = sampleTime
                currentAssetWriter!.startSessionAtSourceTime(sampleTime)
            }
        }

        if currentAssetWriter != nil {
            if let _ = captureOutput as? AVCaptureVideoDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if closingVideoInput?.readyForMoreMediaData == true {
                        closingVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingVideoFinished()
                    if currentVideoInput?.readyForMoreMediaData == true {
                        currentVideoInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            } else if let _ = captureOutput as? AVCaptureAudioDataOutput {
                if (CMTimeCompare(sampleTime, closingTime) < 0) {
                    if currentAudioInput?.readyForMoreMediaData == true {
                        currentAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                } else {
                    closingAudioFinished()
                    if currentAudioInput?.readyForMoreMediaData == true {
                        currentAudioInput?.appendSampleBuffer(sampleBuffer)
                    }
                }
            }
        }
    }

    override func shouldAutorotate() -> Bool {
        return true
    }

    override func supportedInterfaceOrientations() -> UIInterfaceOrientationMask {
        return [UIInterfaceOrientationMask.LandscapeRight]
    }
}
Answer: I think the root cause is that the video and audio CMSampleBuffers cover different time intervals. You need to split and join the audio CMSampleBuffers so that they slot seamlessly into the AVAssetWriter's timeline, which is presumably driven by the video presentation timestamps.
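To make that concrete, here is a rough, untested sketch (not from the original answer) of how the audio branch of the capture callback could handle a buffer that straddles the segment boundary: the part before closingTime goes to the closing writer, and the remainder goes to the current one. splitAudioBuffer(_:atTime:) is a hypothetical helper; one possible implementation is sketched after the postscript below.

    // Sketch only: audio handling around the segment boundary, assuming a
    // hypothetical splitAudioBuffer(_:atTime:) helper (see the sketch further below).
    if let _ = captureOutput as? AVCaptureAudioDataOutput {
        let duration = CMSampleBufferGetDuration(sampleBuffer)
        let endTime = CMTimeAdd(sampleTime, duration)
        if CMTimeCompare(endTime, closingTime) <= 0 {
            // Entirely before the cut: belongs to the closing segment.
            if closingAudioInput?.readyForMoreMediaData == true {
                closingAudioInput?.appendSampleBuffer(sampleBuffer)
            }
        } else if CMTimeCompare(sampleTime, closingTime) >= 0 {
            // Entirely after the cut: belongs to the current segment.
            closingAudioFinished()
            if currentAudioInput?.readyForMoreMediaData == true {
                currentAudioInput?.appendSampleBuffer(sampleBuffer)
            }
        } else {
            // Straddles the cut: split it so neither segment loses or gains audio.
            let (head, tail) = splitAudioBuffer(sampleBuffer, atTime: closingTime)
            if let head = head where closingAudioInput?.readyForMoreMediaData == true {
                closingAudioInput?.appendSampleBuffer(head)
            }
            closingAudioFinished()
            if let tail = tail where currentAudioInput?.readyForMoreMediaData == true {
                currentAudioInput?.appendSampleBuffer(tail)
            }
        }
    }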

Why does the audio have to change rather than the video? It seems asymmetric, but I suppose it is because audio has the higher sample rate.
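A rough back-of-the-envelope example: at 30 fps each video frame covers about 33 ms, while a typical 1024-sample audio buffer at 44.1 kHz covers only about 23 ms, so a cut placed on a video frame boundary almost always lands in the middle of an audio buffer. Whichever writer that whole buffer is sent to, one segment ends up with a small gap or overlap in its audio, which is audible as a hiccup at the join.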

P.S. Actually creating the new split sample buffers looks daunting: CMSampleBufferCreate takes a ton of arguments. CMSampleBufferCopySampleBufferForRange may be easier to use, and more efficient.
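Along those lines, here is a minimal, untested sketch of such a split helper, written in the same Swift 2 era style as the question's code. The function name, the tuple result, and the hard-coded 44.1 kHz sample rate are assumptions for illustration; real code should read the rate from the buffer's format description and check the returned OSStatus values.

    // Hypothetical helper, not from the original post: copy the samples before and
    // after cutTime into two new buffers using CMSampleBufferCopySampleBufferForRange.
    // Assumes every sample in the buffer has the same duration, as is the case for capture audio.
    func splitAudioBuffer(buffer: CMSampleBuffer, atTime cutTime: CMTime) -> (head: CMSampleBuffer?, tail: CMSampleBuffer?) {
        let pts = CMSampleBufferGetPresentationTimeStamp(buffer)
        let total = CMSampleBufferGetNumSamples(buffer)
        // Seconds between the start of this buffer and the cut point.
        let offset = CMTimeGetSeconds(CMTimeSubtract(cutTime, pts))
        let sampleRate = 44100.0   // assumption; read it from the format description in real code
        let samplesBefore = max(0, min(total, Int(offset * sampleRate + 0.5)))

        var head: CMSampleBuffer? = nil
        var tail: CMSampleBuffer? = nil
        if samplesBefore > 0 {
            CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault, buffer, CFRangeMake(0, samplesBefore), &head)
        }
        if samplesBefore < total {
            CMSampleBufferCopySampleBufferForRange(kCFAllocatorDefault, buffer, CFRangeMake(samplesBefore, total - samplesBefore), &tail)
        }
        return (head, tail)
    }

Because CMSampleBufferCopySampleBufferForRange preserves the timing of the copied range, the tail buffer's presentation timestamp should already line up with the new writer's session start time.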
