iOS – Real-time audio processing with no output


I'm looking at this example: http://teragonaudio.com/article/How-to-do-realtime-recording-with-effect-processing-on-iOS.html

I want to turn off my output. I tried changing kAudioSessionCategory_PlayAndRecord to kAudioSessionCategory_RecordAudio, but that didn't work. I also tried to get rid of:

if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat,
                        kAudioUnitScope_Output, 1, &streamDescription,
                        sizeof(streamDescription)) != noErr) {
    return 1;
}

Because I want to get sound from the microphone but not play it back. But when the audio reaches my renderCallback method, I get a -50 error (paramErr) no matter what I do. When the audio is played automatically on the output, everything works fine...
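For context, the usual way to ask for record-only behaviour with this (now deprecated) C API is to combine the RecordAudio session category with disabling RemoteIO's output element (bus 0). A minimal sketch, assuming the same audioUnit pointer that the code below sets up; this is not part of the original question:

// Sketch only - record-only session category:
UInt32 category = kAudioSessionCategory_RecordAudio;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                        sizeof(category), &category);

// Enable input on element 1 (the microphone side of RemoteIO)...
UInt32 one = 1, zero = 0;
AudioUnitSetProperty(*audioUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Input, 1, &one, sizeof(one));
// ...and disable output on element 0 (the speaker side), so nothing is rendered to hardware.
AudioUnitSetProperty(*audioUnit, kAudioOutputUnitProperty_EnableIO,
                     kAudioUnitScope_Output, 0, &zero, sizeof(zero));

Both EnableIO calls have to be made before AudioUnitInitialize is called on the unit.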

Update with my code:

using namespace std;

AudioUnit *audioUnit = NULL;
float *convertedSampleBuffer = NULL;

int initAudioSession() {
    audioUnit = (AudioUnit*)malloc(sizeof(AudioUnit));

    if(AudioSessionInitialize(NULL, NULL, NULL, NULL) != noErr) {
        return 1;
    }

    if(AudioSessionSetActive(true) != noErr) {
        return 1;
    }

    UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
    if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                               sizeof(UInt32), &sessionCategory) != noErr) {
        return 1;
    }

    Float32 bufferSizeInSec = 0.02f;
    if(AudioSessionSetProperty(kAudioSessionProperty_PreferredHardwareIOBufferDuration,
                               sizeof(Float32), &bufferSizeInSec) != noErr) {
        return 1;
    }

    UInt32 overrideCategory = 1;
    if(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
                               sizeof(UInt32), &overrideCategory) != noErr) {
        return 1;
    }

    // There are many properties you might want to provide callback functions for:
    // kAudioSessionProperty_AudioRouteChange
    // kAudioSessionProperty_OverrideCategoryEnableBluetoothInput
    // etc.

    return 0;
}

// Note: fftlength, x_inbuf, x_phase, x_alignment, x_netscore, baseTime and
// QRealTimer are defined elsewhere in my project.
OSStatus renderCallback(void *userData, AudioUnitRenderActionFlags *actionFlags,
                        const AudioTimeStamp *audioTimeStamp, UInt32 busNumber,
                        UInt32 numFrames, AudioBufferList *buffers) {
    // Pull the recorded samples from the input bus (1) of the RemoteIO unit
    OSStatus status = AudioUnitRender(*audioUnit, actionFlags, audioTimeStamp,
                                      1, numFrames, buffers);
    int doOutput = 0;

    if(status != noErr) {
        return status;
    }

    if(convertedSampleBuffer == NULL) {
        // Lazy initialization of this buffer is necessary because we don't
        // know the frame count until the first callback
        convertedSampleBuffer = (float*)malloc(sizeof(float) * numFrames);
        baseTime = (float)QRealTimer::getUptimeInMilliseconds();
    }

    SInt16 *inputFrames = (SInt16*)(buffers->mBuffers->mData);

    // If your DSP code can use integers, then don't bother converting to
    // floats here, as it just wastes CPU. However, most DSP algorithms rely
    // on floating point, and this is especially true if you are porting a
    // VST/AU to iOS.
    int i;
    for(i = numFrames; i < fftlength; i++) {
        // Shifting buffer
        x_inbuf[i - numFrames] = x_inbuf[i];
    }
    for(i = 0; i < numFrames; i++) {
        x_inbuf[i + x_phase] = (float)inputFrames[i] / (float)32768;
    }

    if(x_phase + numFrames == fftlength) {
        x_alignment.SigProc_frontend(x_inbuf);  // Signal processing front-end (FFT!)
        doOutput = x_alignment.Align();

        // Output as text! In the real-time version, this is where we update
        // visualisation callbacks and launch other services
        if((doOutput) && (x_netscore.isEvent(x_alignment.position()))
           && (x_alignment.lastAction() < x_alignment.position())) {
            // here I want to do something with my input!
        }
    }
    else {
        x_phase += numFrames;
    }

    return noErr;
}

int initAudioStreams(AudioUnit *audioUnit) {
    UInt32 audioCategory = kAudioSessionCategory_PlayAndRecord;
    if(AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                               sizeof(UInt32), &audioCategory) != noErr) {
        return 1;
    }

    UInt32 overrideCategory = 1;
    if(AudioSessionSetProperty(kAudioSessionProperty_OverrideCategoryDefaultToSpeaker,
                               sizeof(UInt32), &overrideCategory) != noErr) {
        // Less serious error, but you may want to handle it and bail here
    }

    AudioComponentDescription componentDescription;
    componentDescription.componentType = kAudioUnitType_Output;
    componentDescription.componentSubType = kAudioUnitSubType_RemoteIO;
    componentDescription.componentManufacturer = kAudioUnitManufacturer_Apple;
    componentDescription.componentFlags = 0;
    componentDescription.componentFlagsMask = 0;
    AudioComponent component = AudioComponentFindNext(NULL, &componentDescription);
    if(AudioComponentInstanceNew(component, audioUnit) != noErr) {
        return 1;
    }

    UInt32 enable = 1;
    if(AudioUnitSetProperty(*audioUnit, kAudioOutputUnitProperty_EnableIO,
                            kAudioUnitScope_Input, 1, &enable, sizeof(UInt32)) != noErr) {
        return 1;
    }

    AURenderCallbackStruct callbackStruct;
    callbackStruct.inputProc = renderCallback; // Render function
    callbackStruct.inputProcRefCon = NULL;
    if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_SetRenderCallback,
                            kAudioUnitScope_Input, 0, &callbackStruct,
                            sizeof(AURenderCallbackStruct)) != noErr) {
        return 1;
    }

    AudioStreamBasicDescription streamDescription;
    // You might want to replace this with a different value, but keep in mind that the
    // iPhone does not support all sample rates. 8kHz, 22kHz, and 44.1kHz should all work.
    streamDescription.mSampleRate = 44100;
    // Yes, I know you probably want floating point samples, but the iPhone isn't going
    // to give you floating point data. You'll need to make the conversion by hand from
    // linear PCM <-> float.
    streamDescription.mFormatID = kAudioFormatLinearPCM;
    // This part is important!
    streamDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger |
                                     kAudioFormatFlagsNativeEndian |
                                     kAudioFormatFlagIsPacked;
    streamDescription.mBitsPerChannel = 16;
    // 1 sample per frame, will always be 2 as long as 16-bit samples are being used
    streamDescription.mBytesPerFrame = 2;
    streamDescription.mChannelsPerFrame = 1;
    streamDescription.mBytesPerPacket = streamDescription.mBytesPerFrame *
                                        streamDescription.mChannelsPerFrame;
    // Always should be set to 1
    streamDescription.mFramesPerPacket = 1;
    // Always set to 0, just to be sure
    streamDescription.mReserved = 0;

    // Set up input stream with above properties
    if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat,
                            kAudioUnitScope_Output, 1, &streamDescription,
                            sizeof(streamDescription)) != noErr) {
        return 1;
    }

    // Ditto for the output stream, which we will be sending the processed audio to
    if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_StreamFormat,
                            kAudioUnitScope_Input, 0, &streamDescription,
                            sizeof(streamDescription)) != noErr) {
        return 1;
    }

    return 0;
}

int startAudioUnit(AudioUnit *audioUnit) {
    if(AudioUnitInitialize(*audioUnit) != noErr) {
        return 1;
    }

    if(AudioOutputUnitStart(*audioUnit) != noErr) {
        return 1;
    }

    return 0;
}

Calling it from my VC:

initAudioSession();
initAudioStreams(audioUnit);
startAudioUnit(audioUnit);
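The post has no teardown; a matching sketch (my assumption, using the same global audioUnit pointer and the same deprecated C APIs) would look like this:

void stopAudio() {
    if(audioUnit != NULL) {
        AudioOutputUnitStop(*audioUnit);            // stop the I/O callbacks
        AudioUnitUninitialize(*audioUnit);          // undo AudioUnitInitialize
        AudioComponentInstanceDispose(*audioUnit);  // release the RemoteIO instance
        free(audioUnit);
        audioUnit = NULL;
    }
    AudioSessionSetActive(false);                   // give up the audio session
}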
Solution: If you only want to record, not play back, simply comment out the lines that set up renderCallback:
AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = renderCallback; // Render function
callbackStruct.inputProcRefCon = NULL;
if(AudioUnitSetProperty(*audioUnit, kAudioUnitProperty_SetRenderCallback,
                        kAudioUnitScope_Input, 0, &callbackStruct,
                        sizeof(AURenderCallbackStruct)) != noErr) {
    return 1;
}
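Alternatively, if you want to keep the render callback (so the input still reaches your processing code) but hear nothing, a common pattern is to zero the output buffers and mark them silent. A sketch based on the callback above; this variant is not from the original answer:

OSStatus renderCallback(void *userData, AudioUnitRenderActionFlags *actionFlags,
                        const AudioTimeStamp *audioTimeStamp, UInt32 busNumber,
                        UInt32 numFrames, AudioBufferList *buffers) {
    // Pull the microphone samples from input bus 1, as before
    OSStatus status = AudioUnitRender(*audioUnit, actionFlags, audioTimeStamp,
                                      1, numFrames, buffers);
    if(status != noErr) {
        return status;
    }

    // ... analyse/process the recorded samples here ...

    // Overwrite the buffers with silence so nothing reaches the speaker
    // (memset comes from <string.h>)
    for(UInt32 b = 0; b < buffers->mNumberBuffers; b++) {
        memset(buffers->mBuffers[b].mData, 0, buffers->mBuffers[b].mDataByteSize);
    }
    *actionFlags |= kAudioUnitRenderAction_OutputIsSilence;
    return noErr;
}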

Update after seeing the code:

As I suspected, you're missing the input callback. Add these lines:

// at top:
#define kInputBus 1

AURenderCallbackStruct callbackStruct;
callbackStruct.inputProc = &ALAudioUnit::recordingCallback;
callbackStruct.inputProcRefCon = this;
status = AudioUnitSetProperty(audioUnit,
                              kAudioOutputUnitProperty_SetInputCallback,
                              kAudioUnitScope_Global,
                              kInputBus,
                              &callbackStruct,
                              sizeof(callbackStruct));

Now, in your recording callback:

OSStatus ALAudioUnit::recordingCallback(void *inRefCon,
                                        AudioUnitRenderActionFlags *ioActionFlags,
                                        const AudioTimeStamp *inTimeStamp,
                                        UInt32 inBusNumber,
                                        UInt32 inNumberFrames,
                                        AudioBufferList *ioData)
{
    // TODO: Use inRefCon to access our interface object to do stuff
    // Then, use inNumberFrames to figure out how much data is available, and make
    // that much space available in buffers in an AudioBufferList.

    // Obtain recorded samples
    OSStatus status;

    ALAudioUnit *pThis = reinterpret_cast<ALAudioUnit*>(inRefCon);
    if (!pThis)
        return noErr;

    //assert (pThis->m_nMaxSliceFrames >= inNumberFrames);

    pThis->recorderBufferList->GetBufferList().mBuffers[0].mDataByteSize =
        inNumberFrames * pThis->m_recorderSBD.mBytesPerFrame;

    status = AudioUnitRender(pThis->audioUnit,
                             ioActionFlags,
                             inTimeStamp,
                             inBusNumber,
                             inNumberFrames,
                             &pThis->recorderBufferList->GetBufferList());
    THROW_EXCEPTION_IF_ERROR(status, "error rendering audio unit");

    // If we're not playing, I don't care about the data, simply discard it
    if (!pThis->playbackState || pThis->isSeeking) return noErr;

    // Now we have the samples we just read sitting in the recorder buffer list
    pThis->DoStuffWithTheRecordedAudio(inNumberFrames, pThis->recorderBufferList, inTimeStamp);

    return noErr;
}

By the way, I'm allocating my own buffers instead of using the ones provided by the AudioUnit. If you want to use the AudioUnit-allocated buffers, you may need to change those parts.
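For completeness, a sketch of the AudioUnit-allocated variant (my addition, not from the original answer): leave kAudioUnitProperty_ShouldAllocateBuffer at its default and hand AudioUnitRender a buffer list whose mData is NULL, so the unit supplies its own storage:

// Inside the input callback, assuming the 16-bit mono format configured earlier:
AudioBuffer buffer;
buffer.mNumberChannels = 1;
buffer.mDataByteSize   = inNumberFrames * sizeof(SInt16);
buffer.mData           = NULL;   // NULL asks the AudioUnit to use its own buffer

AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0]    = buffer;

OSStatus status = AudioUnitRender(pThis->audioUnit, ioActionFlags, inTimeStamp,
                                  inBusNumber, inNumberFrames, &bufferList);
// On success, bufferList.mBuffers[0].mData points at the recorded samples.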

Update:

How to allocate your own buffers:

recorderBufferList = new AUBufferList();
recorderBufferList->Allocate(m_recorderSBD, m_nMaxSliceFrames);
recorderBufferList->PrepareBuffer(m_recorderSBD, m_nMaxSliceFrames);

Also, if you do this, tell the AudioUnit not to allocate buffers:

// Disable buffer allocation for the recorder (optional - do this if we want to pass in our own)
flag = 0;
status = AudioUnitSetProperty(audioUnit,
                              kAudioUnitProperty_ShouldAllocateBuffer,
                              kAudioUnitScope_Output,
                              kInputBus,
                              &flag,
                              sizeof(flag));

You'll need to include the CoreAudio utility classes.
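Concretely, AUBufferList comes from AUBuffer.h / AUBuffer.cpp in Apple's Core Audio Utility Classes (the PublicUtility sources); assuming those files are added to the project, the recording code above needs roughly:

// From Apple's Core Audio Utility Classes (PublicUtility), added to the project
#include "AUBuffer.h"   // declares AUBufferList used for recorderBufferList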
