Here are the render callbacks for recording and playback:
static OSStatus recordingCallback(void                       *inRefCon,
                                  AudioUnitRenderActionFlags *ioActionFlags,
                                  const AudioTimeStamp       *inTimeStamp,
                                  UInt32                      inBusNumber,
                                  UInt32                      inNumberFrames,
                                  AudioBufferList            *ioData) {
    IosAudioController *microphone = (__bridge IosAudioController *)inRefCon;

    // Render the microphone audio into our own buffer list
    OSStatus result = AudioUnitRender(microphone.audioUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      inBusNumber,
                                      inNumberFrames,
                                      microphone.tempBuffer);
    checkStatus(result); // kAudioUnitErr_InvalidPropertyValue

    // Notify the delegate of the new buffer list to process
    if ([microphone.dataSource respondsToSelector:@selector(microphone:hasBufferList:withBufferSize:withNumberOfChannels:)]) {
        [microphone.dataSource microphone:microphone
                            hasBufferList:microphone.tempBuffer
                           withBufferSize:inNumberFrames
                     withNumberOfChannels:microphone.destinationFormat.mChannelsPerFrame];
    }
    return result;
}

/**
 This callback is called when the audio unit needs new data to play through
 the speakers. If you don't have any, just don't write anything in the buffers.
 */
static OSStatus playbackCallback(void                       *inRefCon,
                                 AudioUnitRenderActionFlags *ioActionFlags,
                                 const AudioTimeStamp       *inTimeStamp,
                                 UInt32                      inBusNumber,
                                 UInt32                      inNumberFrames,
                                 AudioBufferList            *ioData) {
    IosAudioController *output = (__bridge IosAudioController *)inRefCon;

    // Try to ask the data source for audio data to fill the output's buffer list
    if ([output.dataSource respondsToSelector:@selector(outputShouldUseCircularBuffer:)]) {
        TPCircularBuffer *circularBuffer = [output.dataSource outputShouldUseCircularBuffer:output];
        if (!circularBuffer) {
            *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
            return noErr;
        }

        /**
         Thank you Michael Tyson (A Tasty Pixel) for writing the
         TPCircularBuffer, you are amazing!
         */

        // Get the available bytes in the circular buffer
        int32_t availableBytes;
        void *buffer = TPCircularBufferTail(circularBuffer, &availableBytes);
        int32_t amount = 0;

        for (int i = 0; i < ioData->mNumberBuffers; i++) {
            AudioBuffer *abuffer = &ioData->mBuffers[i];
            // Ideally we'd have all the bytes to be copied, but compare against
            // the available bytes (take the minimum)
            amount = MIN(abuffer->mDataByteSize, availableBytes);
            // Copy into the audio buffer, which gets played after this function returns
            memcpy(abuffer->mData, buffer, amount);
            // Set the data size through the pointer so the buffer list is updated
            abuffer->mDataByteSize = amount;
        }

        // Consume those bytes (this internally pushes the head of the circular buffer)
        TPCircularBufferConsume(circularBuffer, amount);
    } else {
        // Silence if there is nothing to output
        *ioActionFlags |= kAudioUnitRenderAction_OutputIsSilence;
    }
    return noErr;
}
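The code above only shows the consumer side of the TPCircularBuffer. For context, here is a minimal sketch of what the producer side might look like, feeding microphone data from the recording delegate into the buffer that playbackCallback drains. The ivar _circularBuffer and the assumption of interleaved data in mBuffers[0] are mine, not the asker's:

// Hypothetical data source implementation (not from the question).
// Assumes _circularBuffer was set up once with TPCircularBufferInit.
- (void)microphone:(IosAudioController *)microphone
     hasBufferList:(AudioBufferList *)bufferList
    withBufferSize:(UInt32)bufferSize
withNumberOfChannels:(UInt32)numberOfChannels {
    // Produce the rendered bytes; if the ring is full, the excess is dropped
    AudioBuffer buffer = bufferList->mBuffers[0];
    TPCircularBufferProduceBytes(&_circularBuffer, buffer.mData, buffer.mDataByteSize);
}

- (TPCircularBuffer *)outputShouldUseCircularBuffer:(IosAudioController *)output {
    return &_circularBuffer;
}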
_tempBuffer is configured for 4096 frames.
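The question doesn't show configureMicrophoneBufferList (it is called in the setup code further below), so here is a hedged sketch of an allocation consistent with the 4096-frame description, assuming an interleaved destination format (a single AudioBuffer carrying all channels):

// Hypothetical allocation for the 4096-frame tempBuffer (an assumption, not the
// asker's code). A non-interleaved format would need one AudioBuffer per channel.
- (void)configureMicrophoneBufferList {
    UInt32 maxFrames     = 4096;
    UInt32 bytesPerFrame = self.destinationFormat.mBytesPerFrame;
    _tempBuffer = (AudioBufferList *)malloc(sizeof(AudioBufferList));
    _tempBuffer->mNumberBuffers = 1;
    _tempBuffer->mBuffers[0].mNumberChannels = self.destinationFormat.mChannelsPerFrame;
    _tempBuffer->mBuffers[0].mDataByteSize   = maxFrames * bytesPerFrame;
    _tempBuffer->mBuffers[0].mData           = malloc(maxFrames * bytesPerFrame);
}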
Here is how I deallocate the audioUnit. Note that I have to dispose of it and initialize it again every time, because of a bug where a VoiceProcessingIO unit may not work correctly if it is started, stopped, and started again. It's a known issue that was posted somewhere, but I no longer remember the link.
if (_tempBuffer != NULL) {
    for (unsigned i = 0; i < _tempBuffer->mNumberBuffers; i++) {
        free(_tempBuffer->mBuffers[i].mData);
    }
    free(_tempBuffer);
}
AudioComponentInstanceDispose(_audioUnit);
This setup works fine on the iPhone 6, 6 Plus, and earlier devices. But on the 6s (and possibly the 6s Plus) a problem appears. Sometimes (these intermittent bugs are really annoying. I hate it. For me it happens in about 6-7 out of 20 test runs), data still flows in and out of the IO unit, but there is no sound at all.
It never seems to happen on the first test run, so my guess is that it may be a memory issue with the IO unit, but I still have no idea how to fix it.
Any suggestions would be greatly appreciated.
UPDATE
I forgot to show how I configure the AudioUnit:
// Describe audio component
AudioComponentDescription desc;
desc.componentType         = kAudioUnitType_Output;
desc.componentSubType      = kAudioUnitSubType_VoiceProcessingIO;
desc.componentFlags        = 0;
desc.componentFlagsMask    = 0;
desc.componentManufacturer = kAudioUnitManufacturer_Apple;

// Get component
AudioComponent inputComponent = AudioComponentFindNext(NULL, &desc);

// Get audio unit
status = AudioComponentInstanceNew(inputComponent, &_audioUnit);
checkStatus(status);

// Enable IO for recording
UInt32 flag = 1;
status = AudioUnitSetProperty(_audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Input,
                              kInputBus,
                              &flag,
                              sizeof(flag));
checkStatus(status);

// Enable IO for playback
status = AudioUnitSetProperty(_audioUnit,
                              kAudioOutputUnitProperty_EnableIO,
                              kAudioUnitScope_Output,
                              kOutputBus,
                              &flag,
                              sizeof(flag));
checkStatus(status);

// Apply format
status = AudioUnitSetProperty(_audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &_destinationFormat,
                              sizeof(self.destinationFormat));
checkStatus(status);
status = AudioUnitSetProperty(_audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              kOutputBus,
                              &_destinationFormat,
                              sizeof(self.destinationFormat));
checkStatus(status);

// Set input callback
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc       = recordingCallback;
callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
status = AudioUnitSetProperty(_audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              kAudioUnitScope_Global,
                              kInputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));
checkStatus(status);

// Set output callback
callbackStruct.inputProc       = playbackCallback;
callbackStruct.inputProcRefCon = (__bridge void * _Nullable)(self);
status = AudioUnitSetProperty(_audioUnit,
                              kAudioUnitProperty_SetRenderCallback,
                              kAudioUnitScope_Global,
                              kOutputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));
checkStatus(status);

// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(_audioUnit,
                              kAudioUnitProperty_ShouldAllocateBuffer,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &flag,
                              sizeof(flag));
checkStatus(status);

[self configureMicrophoneBufferList];

// Initialize
status = AudioUnitInitialize(_audioUnit);
checkStatus(status);

Answer: three things could be the problem.
For silence (during underflow), you may want to try filling the buffers with inNumberFrames of zeros rather than returning them unmodified (see the zero-fill sketch below).
Apple DTS recommends against using any Objective-C messaging inside an audio callback (your respondsToSelector: calls); see the cached-check sketch below.
You should not free the buffers or call AudioComponentInstanceDispose until audio processing has actually stopped. Because the audio units run in another real-time thread, they don't really stop (acquiring thread or CPU time) until some time after your app makes the stop-audio call. I would wait a few seconds, and certainly not call (re)initialize or (re)start until after that delay. A sketch of a deferred teardown follows below.
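On the first point, here is a sketch of what the copy loop in playbackCallback could look like with zero-filling on underflow (same variable names as the callback above):

// Fill what we have, then write silence for the remainder instead of
// shrinking mDataByteSize and leaving stale samples in the buffer
for (int i = 0; i < ioData->mNumberBuffers; i++) {
    AudioBuffer *abuffer = &ioData->mBuffers[i];
    int32_t amount = MIN(abuffer->mDataByteSize, availableBytes);
    memcpy(abuffer->mData, buffer, amount);
    if (amount < abuffer->mDataByteSize) {
        memset((char *)abuffer->mData + amount, 0,
               abuffer->mDataByteSize - amount);
    }
}
TPCircularBufferConsume(circularBuffer, amount);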
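On the second point, one common workaround (an assumption on my part, not something from the question) is to resolve the delegate checks once on the main thread and keep plain C state that the callback can read without any Objective-C messaging:

// At setup time, outside the real-time callback:
_circularBufferPtr =
    [self.dataSource respondsToSelector:@selector(outputShouldUseCircularBuffer:)]
        ? [self.dataSource outputShouldUseCircularBuffer:self]
        : NULL;

// Inside playbackCallback, only a plain pointer read remains:
TPCircularBuffer *circularBuffer = output->_circularBufferPtr;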
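On the third point, here is a sketch of deferring the teardown until the render thread has had time to wind down. The two-second delay is an arbitrary assumption; the point is only that the free/dispose happens well after AudioOutputUnitStop:

// Stop first, then dispose and free on a delay so the real-time thread is
// no longer touching the unit or the buffers when they go away
checkStatus(AudioOutputUnitStop(_audioUnit));
AudioUnit unitToDispose    = _audioUnit;
AudioBufferList *oldBuffer = _tempBuffer;
_audioUnit  = NULL;
_tempBuffer = NULL;
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, (int64_t)(2.0 * NSEC_PER_SEC)),
               dispatch_get_main_queue(), ^{
    AudioUnitUninitialize(unitToDispose);
    AudioComponentInstanceDispose(unitToDispose);
    if (oldBuffer != NULL) {
        for (unsigned i = 0; i < oldBuffer->mNumberBuffers; i++) {
            free(oldBuffer->mBuffers[i].mData);
        }
        free(oldBuffer);
    }
});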