Android 9 Audio System Notes: AudioFlinger Audio Stream Processing Flow

I haven't written anything in a while. Today a friend happened to ask me about the audio stream path, and I realized I'd half forgotten it, so I'm writing it down while it's fresh. That way I won't have to dig through the code again later; as the saying goes, the palest ink beats the best memory.

So this post covers how data written through AudioTrack reaches AudioFlinger, and how AudioFlinger writes it on to the HAL layer. The focus is on AudioFlinger's processing flow, plus a few details along the way.

Obtaining the Audio Stream

1. The client writes data:

After the app client creates an AudioTrack, it keeps calling AudioTrack's write method during playback, continuously pushing data to AudioFlinger.

//frameworks\av\media\libaudioclient\AudioTrack.cpp
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }

    if (isDirect()) {
        AutoMutex lock(mLock);
        int32_t flags = android_atomic_and(
                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
                            &mCblk->mFlags);
        if (flags & CBLK_INVALID) {
            return DEAD_OBJECT;
        }
    }
    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }
    size_t written = 0;
    Buffer audioBuffer;
    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;
        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            if (err == TIMED_OUT || err == -EINTR) {
                err = WOULD_BLOCK;
            }
            return ssize_t(err);
        }
        size_t toWrite = audioBuffer.size;
        memcpy(audioBuffer.i8, buffer, toWrite);
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;
        releaseBuffer(&audioBuffer);
    }
    if (written > 0) {
        mFramesWritten += written / mFrameSize;
    }
    return written;
}
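
For context, here is a minimal client-side sketch of how an app's native code typically drives this path (not from the original post; the construction arguments are abbreviated and the PCM source is a hypothetical helper). With no callback and no shared buffer the track ends up in TRANSFER_SYNC mode, which is exactly the write() path shown above.

//illustrative client-side sketch only; parameters are arbitrary
#include <cstdint>
#include <media/AudioTrack.h>

using namespace android;

ssize_t fillPcm(uint8_t* buf, size_t size);      // hypothetical PCM source

void playPcmSketch() {
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,
            44100,                               // sample rate
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO);           // no callback/shared buffer => TRANSFER_SYNC
    if (track->initCheck() != NO_ERROR) {
        return;                                  // could not attach to an output
    }
    track->start();
    uint8_t buf[4096];
    ssize_t got;
    while ((got = fillPcm(buf, sizeof(buf))) > 0) {
        ssize_t written = track->write(buf, got, true /* blocking */);
        if (written < 0) {
            break;                               // e.g. DEAD_OBJECT after the output was invalidated
        }
    }
    track->stop();
}
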
2. Writing on the service side

As the service side, AudioFlinger keeps receiving and processing the data written by the client.

//frameworks/av/services/audioflinger/Tracks.cpp
bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
{
    Buffer *pInBuffer;
    Buffer inBuffer;
    bool outputBufferFull = false;
    inBuffer.frameCount = frames;
    inBuffer.raw = data;
    //handle the data written by the client; the ring buffer mechanics are not covered in this post, read the code yourself if interested
}
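
What actually shuttles the samples between the two processes is the shared-memory control block (audio_track_cblk_t) managed by the ClientProxy/ServerProxy pair: the client's obtainBuffer()/releaseBuffer() advance the write position, and the server's getNextBuffer()/releaseBuffer() advance the read position. As a rough illustration of the underlying single-producer/single-consumer ring-buffer idea (this is not the AOSP implementation):

//conceptual sketch only, NOT the AOSP implementation
#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdint>

struct RingBuffer {
    static constexpr size_t kSize = 4096;        // must be a power of two
    uint8_t buf[kSize];
    std::atomic<uint32_t> front{0};              // read position (server advances)
    std::atomic<uint32_t> rear{0};               // write position (client advances)

    // client side: copy up to 'len' bytes in, return how many were accepted
    size_t write(const uint8_t* data, size_t len) {
        uint32_t r = rear.load(std::memory_order_relaxed);
        uint32_t f = front.load(std::memory_order_acquire);
        size_t space = kSize - (r - f);
        size_t n = std::min(len, space);
        for (size_t i = 0; i < n; i++) buf[(r + i) & (kSize - 1)] = data[i];
        rear.store(r + n, std::memory_order_release);
        return n;
    }

    // server side: copy up to 'len' bytes out, return how many were available
    size_t read(uint8_t* out, size_t len) {
        uint32_t f = front.load(std::memory_order_relaxed);
        uint32_t r = rear.load(std::memory_order_acquire);
        size_t avail = r - f;
        size_t n = std::min(len, avail);
        for (size_t i = 0; i < n; i++) out[i] = buf[(f + i) & (kSize - 1)];
        front.store(f + n, std::memory_order_release);
        return n;
    }
};
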
AudioFlinger Audio Stream Processing

The main processing steps are:
1. Track pre-processing
2. Mixing
3. Writing data to the HAL

//frameworks/av/services/audioflinger/Threads.cpp
bool AudioFlinger::PlaybackThread::threadLoop()
{
    // 1. Track pre-processing
    mMixerStatus = prepareTracks_l(&tracksToRemove);
    // 2. Mixing
    threadLoop_mix();
    if (mMixerBufferValid) {
        void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
        audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
        // mono blend occurs for mixer threads only (not direct or offloaded)
        // and is handled here if we're going directly to the sink.
        if (requireMonoBlend() && !mEffectBufferValid) {
            mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount, mNormalFrameCount,
                       true /*limit*/);
        }
        // convert/copy mMixerBuffer into the HAL-side format; the data ultimately lands in mSinkBuffer
        memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
                mNormalFrameCount * mChannelCount);
    }
    // 3. Write data to the HAL
    ret = threadLoop_write();
}
Track Pre-processing

This step updates the track objects held by mAudioMixer, among other operations.

//frameworks/av/services/audioflinger/Threads.cpp (prepareTracks_l)
AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
        Vector< sp<Track> > *tracksToRemove)
{
	.....
    for (size_t i=0 ; i<count ; i++) {
        const sp<Track> t = mActiveTracks[i];
        ....
		// if an active track doesn't exist in the AudioMixer, create it.
        if (!mAudioMixer->exists(name)) {
            status_t status = mAudioMixer->create(
                    name,
                    track->mChannelMask,
                    track->mFormat,
                    track->mSessionId);
            if (status != OK) {
                ALOGW("%s: cannot create track name"
                        " %d, mask %#x, format %#x, sessionId %d in AudioMixer",
                        __func__, name, track->mChannelMask, track->mFormat, track->mSessionId);
                tracksToRemove->add(track);
                track->invalidate(); // consider it dead.
                continue;
            }
        }
        ....
	}
	.....
}
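
Besides creating the AudioMixer entry, prepareTracks_l also wires each active, ready track into the mixer for the upcoming pass. A rough, abridged sketch of the other calls involved (not verbatim AOSP code, parameter handling trimmed):

//abridged sketch, not verbatim AOSP code
mAudioMixer->setBufferProvider(name, track);   // the Track itself acts as the AudioBufferProvider
mAudioMixer->setParameter(name, AudioMixer::TRACK,
        AudioMixer::FORMAT, (void *)track->format());
mAudioMixer->setParameter(name, AudioMixer::TRACK,
        AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);   // see the comment inside the mixing code below
mAudioMixer->enable(name);                     // include this track in the next mix pass
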
Mixing

void AudioFlinger::MixerThread::threadLoop_mix()
{
    // mix buffers...
    mAudioMixer->process();
    mCurrentWriteLength = mSinkBufferSize;
    // increase sleep time progressively when application underrun condition clears.
    // Only increase sleep time if the mixer is ready for two consecutive times to avoid
    // that a steady state of alternating ready/not ready conditions keeps the sleep time
    // such that we would underrun the audio HAL.
    if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
        sleepTimeShift--;
    }
    mSleepTimeUs = 0;
    mStandbyTimeNs = systemTime() + mStandbyDelayNs;
    //TODO: delay standby when effects have a tail
}
//frameworks/av/media/libaudioprocessing/include/media/AudioMixerBase.h
    void process() {
        preProcess();
        // this is where the data is actually processed
        (this->*mHook)();
        postProcess();
    }

Data fetching & mixing: (this->*mHook)()
Taking process__genericResampling as the mHook example:

//frameworks/av/media/libaudioprocessing/AudioMixerBase.cpp
// generic code with resampling
void AudioMixerBase::process__genericResampling()
{
    ALOGVV("process__genericResampling\n");
    int32_t * const outTemp = mOutputTemp.get(); // naked ptr
    size_t numFrames = mFrameCount;
    for (const auto &pair : mGroups) {
        const auto &group = pair.second;
        const std::shared_ptr<TrackBase> &t1 = mTracks[group[0]];
        // clear temp buffer
        memset(outTemp, 0, sizeof(*outTemp) * t1->mMixerChannelCount * mFrameCount);
        for (const int name : group) {
            const std::shared_ptr<TrackBase> &t = mTracks[name];
            int32_t *aux = NULL;
            if (CC_UNLIKELY(t->needs & NEEDS_AUX)) {
                aux = t->auxBuffer;
            }
            // this is a little goofy, on the resampling case we don't
            // acquire/release the buffers because it's done by
            // the resampler.
            if (t->needs & NEEDS_RESAMPLE) {
                (t.get()->*t->hook)(outTemp, numFrames, mResampleTemp.get() /* naked ptr */, aux);
            } else {
                size_t outFrames = 0;
                while (outFrames < numFrames) {
                    t->buffer.frameCount = numFrames - outFrames;
                    // fetch the data written by the client
                    t->bufferProvider->getNextBuffer(&t->buffer);
                    t->mIn = t->buffer.raw;
                    // t->mIn == nullptr can happen if the track was flushed just after having
                    // been enabled for mixing.
                    if (t->mIn == nullptr) break;
                    (t.get()->*t->hook)(
                            outTemp + outFrames * t->mMixerChannelCount, t->buffer.frameCount,
                            mResampleTemp.get() /* naked ptr */,
                            aux != nullptr ? aux + outFrames : nullptr);
                    outFrames += t->buffer.frameCount;
                    // release the buffer, letting the client know there is space to write again
                    t->bufferProvider->releaseBuffer(&t->buffer);
                }
            }
        }
        /**
          This mainBuffer points at mMixerBuffer (see the setup below), whose contents are later
          converted into mSinkBuffer and written out via mOutput->write((char *)mSinkBuffer, 0).
          The relevant setup code:

          //frameworks\av\services\audioflinger\Threads.cpp
          AudioFlinger::PlaybackThread::mixer_state AudioFlinger::MixerThread::prepareTracks_l(
                  Vector< sp<Track> > *tracksToRemove)
          {
          ........
                  mAudioMixer->setParameter(
                          name,
                          AudioMixer::TRACK,
                          AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
                  mAudioMixer->setParameter(
                          name,
                          AudioMixer::TRACK,
                          AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
          ........
          }
          */

        // copy outTemp into mainBuffer, converted to the final mixer output format
        convertMixerFormat(t1->mainBuffer, t1->mMixerFormat,
                outTemp, t1->mMixerInFormat, numFrames * t1->mMixerChannelCount);
    }
}
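
Conceptually, each per-track hook applies the track volume and accumulates the samples into the 32-bit outTemp buffer, and convertMixerFormat then narrows that accumulator into the mixer output format that mainBuffer holds. A simplified, self-contained illustration of the idea (assuming int16 input and output; this is not the AOSP hook itself):

//conceptual illustration only, not the AOSP mixing hook
#include <cstddef>
#include <cstdint>

// accumulate one track into the 32-bit temp buffer, with its volume applied
void mixInto(int32_t* outTemp, const int16_t* trackIn, size_t samples, float volume) {
    for (size_t i = 0; i < samples; i++) {
        outTemp[i] += static_cast<int32_t>(trackIn[i] * volume);
    }
}

// narrow the accumulator into the 16-bit output buffer, clamping to avoid wrap-around
void convertToI16(int16_t* mainBuffer, const int32_t* outTemp, size_t samples) {
    for (size_t i = 0; i < samples; i++) {
        int32_t v = outTemp[i];
        if (v >  32767) v =  32767;
        if (v < -32768) v = -32768;
        mainBuffer[i] = static_cast<int16_t>(v);
    }
}
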
Writing Data to the HAL

// frameworks\av\services\audioflinger\Threads.cpp
ssize_t AudioFlinger::MixerThread::threadLoop_write()
{
    // FIXME we should only do one push per cycle; confirm this is true
    // Start the fast mixer if it's not already running
    if (mFastMixer != 0) {
        FastMixerStateQueue *sq = mFastMixer->sq();
        FastMixerState *state = sq->begin();
        if (state->mCommand != FastMixerState::MIX_WRITE &&
                (kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
            if (state->mCommand == FastMixerState::COLD_IDLE) {
                // FIXME workaround for first HAL write being CPU bound on some devices
                ATRACE_BEGIN("write");
                 //AudioStreamOut *mOutput;
                 //At this point the AudioFlinger write flow is essentially done; what remains is the write
                 //into the audio HAL, which then pushes the data to the driver via tinyalsa,
                 //completing the output path of the audio stream.
                mOutput->write((char *)mSinkBuffer, 0);
                ATRACE_END();
                int32_t old = android_atomic_inc(&mFastMixerFutex);
                if (old == -1) {
                    (void) syscall(__NR_futex, &mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
                }
#ifdef AUDIO_WATCHDOG
                if (mAudioWatchdog != 0) {
                    mAudioWatchdog->resume();
                }
#endif
            }
            state->mCommand = FastMixerState::MIX_WRITE;
#ifdef FAST_THREAD_STATISTICS
            mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
                FastThreadDumpState::kSamplingNforLowRamDevice : FastThreadDumpState::kSamplingN);
#endif
            sq->end();
            sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
            if (kUseFastMixer == FastMixer_Dynamic) {
                mNormalSink = mPipeSink;
            }
        } else {
            sq->end(false /*didModify*/);
        }
    }
    return PlaybackThread::threadLoop_write();
}
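
The MixerThread version above mostly manages the FastMixer; the actual push of mSinkBuffer happens in the base class it falls through to. An abridged sketch (not verbatim AOSP code) of what PlaybackThread::threadLoop_write() does with it:

//abridged sketch, not the verbatim AOSP implementation
ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
{
    ssize_t bytesWritten;
    const size_t offset = mCurrentWriteLength - mBytesRemaining;
    if (mNormalSink != 0) {
        // normal mixer path: write through the NBAIO sink (e.g. the pipe in front of the FastMixer)
        const size_t count = mBytesRemaining / mFrameSize;
        ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
        bytesWritten = framesWritten > 0 ? framesWritten * mFrameSize : framesWritten;
    } else {
        // direct/offload path: hand the buffer straight to AudioStreamOut, i.e. the audio HAL
        bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
    }
    return bytesWritten;
}
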
Additional Notes

Whenever a client creates an AudioTrack, the corresponding server-side track is created through AudioFlinger. AudioFlinger hands the creation off to the playback thread in Threads.cpp, and the new track is stored in the thread's mTracks collection. On every loop iteration, prepareTracks_l runs first to prepare the data; at that point mTracks is synchronized into the AudioMixer. The Track object is not handed to the AudioMixer directly: the AudioMixer creates its own AudioMixer::Track entry corresponding to each Track in mTracks, and that entry in turn references the Track, so the client's data is ultimately fetched through this Track.

sp<IAudioTrack> AudioFlinger::createTrack(const CreateTrackInput& input,
                                          CreateTrackOutput& output,
                                          status_t *status)
{
......
        track = thread->createTrack_l(client, streamType, input.attr, &output.sampleRate,
                                      input.config.format, input.config.channel_mask,
                                      &output.frameCount, &output.notificationFrameCount,
                                      input.notificationsPerBuffer, input.speed,
                                      input.sharedBuffer, sessionId, &output.flags,
                                      input.clientInfo.clientTid, clientUid, &lStatus, portId);
......
}


sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
        const sp<AudioFlinger::Client>& client,
        audio_stream_type_t streamType,
        const audio_attributes_t& attr,
        uint32_t *pSampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t *pFrameCount,
        size_t *pNotificationFrameCount,
        uint32_t notificationsPerBuffer,
        float speed,
        const sp<IMemory>& sharedBuffer,
        audio_session_t sessionId,
        audio_output_flags_t *flags,
        pid_t tid,
        uid_t uid,
        status_t *status,
        audio_port_handle_t portId)
{
......
        track = new Track(this, client, streamType, attr, sampleRate, format,
                          channelMask, frameCount,
                          nullptr /* buffer */, (size_t)0 /* bufferSize */, sharedBuffer,
                          sessionId, uid, *flags, TrackBase::TYPE_DEFAULT, portId);

        lStatus = track != 0 ? track->initCheck() : (status_t) NO_MEMORY;
        if (lStatus != NO_ERROR) {
            ALOGE("createTrack_l() initCheck failed %d; no control block?", lStatus);
            // track must be cleared from the caller as the caller has the AF lock
            goto Exit;
        }
        mTracks.add(track);
        sp<EffectChain> chain = getEffectChain_l(sessionId);
        if (chain != 0) {
            ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
            track->setMainBuffer(chain->inBuffer());
            chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
            chain->incTrackCnt();
        }
......
}
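
To make the relationship concrete: the mixer-side track only holds an AudioBufferProvider pointer, and PlaybackThread::Track implements that interface over the server half of the shared control block, so the getNextBuffer()/releaseBuffer() calls in the mixing loop above end up reading exactly what the client's AudioTrack::write() deposited. A stripped-down sketch of that contract (names simplified, not the AOSP header):

//conceptual sketch only, simplified from the AudioBufferProvider idea, not the AOSP header
#include <cstddef>

struct MixBuffer {
    size_t frameCount;   // in: frames requested, out: frames actually granted
    void*  raw;          // points into the shared ring buffer filled by the client
};

class BufferProviderSketch {
public:
    virtual ~BufferProviderSketch() = default;
    // called from the mix loop, cf. t->bufferProvider->getNextBuffer(&t->buffer)
    virtual int  getNextBuffer(MixBuffer* buffer) = 0;
    // returns the frames so the client-side proxy can reuse the space
    virtual void releaseBuffer(MixBuffer* buffer) = 0;
};
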

Finally

Reading the code in Source Insight, some symbol jumps don't resolve, which is a bit annoying.
