Android Audio System Analysis (1): Overview

This analysis walks the playback flow starting from the MP3StreamPlayer project; yes, the app demo was simply downloaded as the starting point. If you have access to the source, grab a copy and try it out yourself~~

Initialization

App code

audioTrack = new AudioTrack(
        AudioManager.STREAM_MUSIC,
        sampleRate,
        AudioFormat.CHANNEL_OUT_STEREO,
        AudioFormat.ENCODING_PCM_16BIT,
        AudioTrack.getMinBufferSize(
                sampleRate,
                AudioFormat.CHANNEL_OUT_STEREO,
                AudioFormat.ENCODING_PCM_16BIT
        ),
        AudioTrack.MODE_STREAM
);
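As an aside, on newer SDKs (API 23+) the same track can be built with AudioTrack.Builder; below is a rough equivalent of the constructor call above (my own illustration, not code from the demo project):

AudioTrack track = new AudioTrack.Builder()
        .setAudioAttributes(new AudioAttributes.Builder()
                .setUsage(AudioAttributes.USAGE_MEDIA)
                .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                .build())
        .setAudioFormat(new AudioFormat.Builder()
                .setSampleRate(sampleRate)
                .setChannelMask(AudioFormat.CHANNEL_OUT_STEREO)
                .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                .build())
        .setBufferSizeInBytes(AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT))
        .setTransferMode(AudioTrack.MODE_STREAM)
        .build();

The trace below follows the classic constructor, which ends up in the AudioAttributes-based constructor quoted next anyway.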

frameworks/base/media/java/android/media/AudioTrack.java

public AudioTrack(AudioAttributes attributes, AudioFormat format, int bufferSizeInBytes,
        int mode, int sessionId)
        throws IllegalArgumentException {
    IBinder b = ServiceManager.getService(Context.APP_OPS_SERVICE);
    mAppOps = IAppOpsService.Stub.asInterface(b);

    mAttributes = (new AudioAttributes.Builder(attributes).build());

    // native initialization: this sets up the native side, including the native AudioTrack
    int initResult = native_setup(new WeakReference<AudioTrack>(this), mAttributes,
            mSampleRate, mChannels, mAudioFormat,
            mNativeBufferSizeInBytes, mDataLoadMode, session);
}

frameworks/base/core/jni/android_media_AudioTrack.cpp

static jint
android_media_AudioTrack_setup(JNIEnv *env, jobject thiz, jobject weak_this,
        jobject jaa,
        jint sampleRateInHertz, jint javaChannelMask,
        jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession) {

    // create the native AudioTrack object
    sp<AudioTrack> lpTrack = new AudioTrack();

    AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();

    switch (memoryMode) {
    // initialize lpTrack (MODE_STATIC branch shown)
    case MODE_STATIC:
        // AudioTrack is using shared memory

        if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
            ALOGE("Error creating AudioTrack in static mode: error creating mem heap base");
            goto native_init_failure;
        }

        status = lpTrack->set(
                AUDIO_STREAM_DEFAULT,// stream type, but more info conveyed in paa (last argument)
                sampleRateInHertz,
                format,// word length, PCM
                nativeChannelMask,
                frameCount,
                AUDIO_OUTPUT_FLAG_NONE,
                audioCallback, &(lpJniStorage->mCallbackData),// callback, callback data (user)
                0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
                lpJniStorage->mMemBase,// shared mem
                true,// thread can call Java
                sessionId,// audio session ID
                AudioTrack::TRANSFER_SHARED,
                NULL, // default offloadInfo
                -1, -1, // default uid, pid values
                paa);
        break;
    }

    // stash lpTrack in the Java object's "nativeTrackInJavaObj" field
    setAudioTrack(env, thiz, lpTrack);

}

So far, then, the app layer has created an audio object whose Java instance ultimately points at a native AudioTrack (the one implemented in AudioTrack.cpp); everything the track needs to do from here on is forwarded to that native object.
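To keep the full picture in mind before diving deeper, here is a minimal end-to-end sketch of the Java-side lifecycle; readPcm() is a hypothetical stand-in for the demo's MP3 decoder output:

int minBuf = AudioTrack.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
AudioTrack at = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
        minBuf, AudioTrack.MODE_STREAM);
at.play();                        // start draining the track
byte[] pcm = new byte[minBuf];
int n;
while ((n = readPcm(pcm)) > 0) {  // hypothetical decoder; returns bytes filled
    at.write(pcm, 0, n);          // blocking write, see the next section
}
at.stop();
at.release();                     // tears down the native AudioTrack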

Writing data

App code

audioTrack.write(chunk, 0, chunk.length);

frameworks/base/media/java/android/media/AudioTrack.java

public int write(byte[] audioData, int offsetInBytes, int sizeInBytes) {
    int ret = native_write_byte(audioData, offsetInBytes, sizeInBytes, mAudioFormat,
            true /*isBlocking*/);
}

frameworks/base/core/jni/android_media_AudioTrack.cpp

static jint android_media_AudioTrack_write_byte(JNIEnv *env,  jobject thiz,
        jbyteArray javaAudioData,
        jint offsetInBytes, jint sizeInBytes,
        jint javaAudioFormat,
        jboolean isWriteBlocking) {
    sp<AudioTrack> lpTrack = getAudioTrack(env, thiz);

    jint written = writeToTrack(lpTrack, javaAudioFormat, cAudioData, offsetInBytes, sizeInBytes,
            isWriteBlocking == JNI_TRUE /* blocking */);
}

// Reads the "nativeTrackInJavaObj" field of the Java AudioTrack (the native AudioTrack
// was stored there by the setup function above in this same file) and casts it straight
// back to a native AudioTrack pointer.
// The framework sources use this stash-a-pointer-in-a-long-field pattern in many places.
static sp<AudioTrack> getAudioTrack(JNIEnv* env, jobject thiz)
{
    Mutex::Autolock l(sLock);
    AudioTrack* const at =
            (AudioTrack*)env->GetLongField(thiz, javaAudioTrackFields.nativeTrackInJavaObj);
    return sp<AudioTrack>(at);
}

jint writeToTrack(const sp<AudioTrack>& track, jint audioFormat, const jbyte* data,
        jint offsetInBytes, jint sizeInBytes, bool blocking = true) {
    switch (format) {
    case AUDIO_FORMAT_PCM_16_BIT: {
        memcpy(track->sharedBuffer()->pointer(), data + offsetInBytes, sizeInBytes);
        written = sizeInBytes;
    } break;
    }

As writeToTrack shows, the data is copied straight into the native AudioTrack's shared memory. (This memcpy branch runs when the track owns a shared buffer; a plain streaming track without one goes through track->write() instead.)
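Note that the shared-buffer path corresponds to the MODE_STATIC allocation we saw in the setup function. From the app side, that mode looks roughly like the sketch below, where clip is an assumed byte[] holding a complete PCM16 clip:

AudioTrack at = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
        clip.length, AudioTrack.MODE_STATIC);
at.write(clip, 0, clip.length);   // lands in the shared IMemory via the memcpy above
at.play();                        // playback then reads straight out of that buffer
// ...
at.stop();
at.reloadStaticData();            // rewind the static buffer so play() can run it again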

So what is track->sharedBuffer()->pointer()?

frameworks/av/include/media/AudioTrack.h

track is the native AudioTrack; its sharedBuffer() accessor simply returns the member:

sp<IMemory> sharedBuffer() const { return mSharedBuffer; }

frameworks/native/libs/binder/IMemory.cpp

And pointer() is implemented as:

void* IMemory::pointer() const {
    ssize_t offset;
    sp<IMemoryHeap> heap = getMemory(&offset);
    void* const base = heap!=0 ? heap->base() : MAP_FAILED;
    if (base == MAP_FAILED)
        return 0;
    return static_cast<char*>(base) + offset;
}

So the buffer turns out to be shared memory wrapped in a Binder interface (IMemory), which is reassuring: nothing mysterious is going on here. There are plenty of articles about Binder online if you want the background.
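If you want to play with the same idea from an app, android.os.SharedMemory (API 27+) exposes ashmem over Binder in much the same spirit. This is purely an analogy; AudioTrack's internal path uses the native IMemory/MemoryHeapBase classes instead:

// Inside a method that declares `throws ErrnoException`
SharedMemory shm = SharedMemory.create("pcm-demo", 4096); // ashmem region
ByteBuffer buf = shm.mapReadWrite();   // the app-side equivalent of pointer()
buf.putShort((short) 0);               // write a PCM sample into shared memory
SharedMemory.unmap(buf);
shm.close();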

Tracking mSharedBuffer further, we find it is assigned while the native AudioTrack is being set up:

frameworks/av/media/libmedia/AudioTrack.cpp

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    mSharedBuffer = sharedBuffer;
}

Does this set() look familiar? It is exactly what android_media_AudioTrack_setup in android_media_AudioTrack.cpp called during initialization, which means the shared memory is wired up at construction time. In our case its size is the value the app passed in, i.e. mSharedBuffer is AudioTrack.getMinBufferSize() bytes; since the app supplies this number when constructing the AudioTrack, the shared memory size is effectively chosen per track.
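To make that size concrete, a quick back-of-the-envelope in Java (PCM16 stereo means 2 channels × 2 bytes = 4 bytes per frame; the numbers are illustrative):

int minBuf = AudioTrack.getMinBufferSize(44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
int bytesPerFrame = 2 /* channels */ * 2 /* bytes per 16-bit sample */;
int frames = minBuf / bytesPerFrame;        // capacity of the shared buffer in frames
double bufferMs = 1000.0 * frames / 44100;  // roughly how much audio it can hold
Log.d("AudioDemo", minBuf + " bytes = " + frames + " frames = " + bufferMs + " ms");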

Let's keep following mSharedBuffer:

frameworks/av/media/libmedia/AudioTrack.cpp

status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();

    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
            mSampleRate,
            // AudioFlinger only sees 16-bit PCM
            mFormat == AUDIO_FORMAT_PCM_8_BIT &&
                    !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
                    AUDIO_FORMAT_PCM_16_BIT : mFormat,
            mChannelMask,
            &temp,
            &trackFlags,
            mSharedBuffer,
            output,
            tid,
            &mSessionId,
            mClientUid,
            &status);
}

frameworks/av/media/libmedia/AudioSystem.cpp

const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
{
    sp<IAudioFlinger> af;
    sp<AudioFlingerClient> afc;
    {
        Mutex::Autolock _l(gLock);
        if (gAudioFlinger == 0) {
            sp<IServiceManager> sm = defaultServiceManager();
            sp<IBinder> binder;
            do {
                binder = sm->getService(String16("media.audio_flinger"));
                if (binder != 0)
                    break;
                ALOGW("AudioFlinger not published, waiting...");
                usleep(500000); // 0.5 s
            } while (true);
        }

frameworks/av/services/audioflinger/AudioFlinger.cpp

sp<IAudioTrack> AudioFlinger::createTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t *frameCount,
        IAudioFlinger::track_flags_t *flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        pid_t tid,
        int *sessionId,
        int clientUid,
        status_t *status)
{
    sp<PlaybackThread::Track> track;
    PlaybackThread *thread = checkPlaybackThread_l(output);
    track = thread->createTrack_l(client, streamType, sampleRate, format,
            channelMask, frameCount, sharedBuffer, lSessionId, flags, tid, clientUid, &lStatus);
}

frameworks/av/services/audioflinger/Threads.cpp

sp<AudioFlinger::PlaybackThread::Track> AudioFlinger::PlaybackThread::createTrack_l(
        const sp<AudioFlinger::Client>& client,
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t *pFrameCount,
        const sp<IMemory>& sharedBuffer,
        int sessionId,
        IAudioFlinger::track_flags_t *flags,
        pid_t tid,
        int uid,
        status_t *status)
{
    if (!isTimed) {
        track = new Track(this, client, streamType, sampleRate, format,
                channelMask, frameCount, NULL, sharedBuffer,
                sessionId, uid, *flags, TrackBase::TYPE_DEFAULT);
    } else {
        track = TimedTrack::create(this, client, streamType, sampleRate, format,
                channelMask, frameCount, sharedBuffer, sessionId, uid);
    }
    mTracks.add(track);
}

frameworks/av/services/audioflinger/Tracks.cpp

AudioFlinger::PlaybackThread::Track::Track(
        PlaybackThread *thread,
        const sp<Client>& client,
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        void *buffer,
        const sp<IMemory>& sharedBuffer,
        int sessionId,
        int uid,
        IAudioFlinger::track_flags_t flags,
        track_type type)
    : TrackBase(thread, client, sampleRate, format, channelMask, frameCount,
            (sharedBuffer != 0) ? sharedBuffer->pointer() : buffer,
            sessionId, uid, flags, true /*isOut*/,
            (type == TYPE_PATCH) ? ( buffer == NULL ? ALLOC_LOCAL : ALLOC_NONE) : ALLOC_CBLK,
            type),
    mSharedBuffer(sharedBuffer)
{
}

Here we can see that mSharedBuffer has made its way into AudioFlinger: the PlaybackThread created a Track around it via createTrack_l and added it to mTracks.

How To Play

Having traced the whole story of how mSharedBuffer gets initialized, let's look at how the data actually gets played.

frameworks/av/services/audioflinger/Threads.cpp

AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
        audio_io_handle_t id, audio_devices_t device, type_t type)
    : PlaybackThread(audioFlinger, output, id, device, type)
{
    mOutputSink = new AudioStreamOutSink(output->stream);
    if (initFastMixer) {
        const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
        (void)posix_memalign(&mSinkBuffer, 32, sinkBufferSize);
    }
    switch (kUseFastMixer) {
    case FastMixer_Static:
        mNormalSink = initFastMixer ? mPipeSink : mOutputSink;
        break;
    }
}

bool AudioFlinger::PlaybackThread::threadLoop()
{
    if (mMixerBufferValid) {
        void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
        audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;

        memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
                mNormalFrameCount * mChannelCount);
    }

    if (mBytesRemaining) {
        ssize_t ret = threadLoop_write();
        if (ret < 0) {
            mBytesRemaining = 0;
        } else {
            mBytesWritten += ret;
            mBytesRemaining -= ret;
        }
    }
}

ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
{
    ssize_t bytesWritten;
    const size_t offset = mCurrentWriteLength - mBytesRemaining;

    // If an NBAIO sink is present, use it to write the normal mixer's submix
    if (mNormalSink != 0) {
        ssize_t framesWritten = mNormalSink->write((char *)mSinkBuffer + offset, count);
    }
}

Here the MixerThread constructor creates an AudioStreamOutSink and assigns it to mNormalSink; the thread then spins in threadLoop(), calling threadLoop_write() to push the mixed data out.
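Conceptually, each pass of the mixer sums all active tracks into one sink buffer and pushes it to the output. A toy Java illustration of that mix-and-write cycle (the real work is done natively by the mixer, not in Java):

// Toy model only: sum each sample across tracks, clamp to 16-bit, write out.
static void mixAndWrite(short[][] tracks, short[] sink, AudioTrack out) {
    for (int i = 0; i < sink.length; i++) {
        int acc = 0;
        for (short[] t : tracks) acc += t[i];       // sum active tracks
        sink[i] = (short) Math.max(Short.MIN_VALUE, // clamp instead of wrapping
                Math.min(Short.MAX_VALUE, acc));
    }
    out.write(sink, 0, sink.length);                // threadLoop_write() analogue
}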

A new class shows up above: AudioStreamOutSink. It is the piece that connects to the HAL-layer audio for actual playback; its initialization is shown below.

frameworks/av/services/audioflinger/AudioFlinger.cpp

sp<AudioFlinger::PlaybackThread> AudioFlinger::openOutput_l(audio_module_handle_t module,
        audio_io_handle_t *output,
        audio_config_t *config,
        audio_devices_t devices,
        const String8& address,
        audio_output_flags_t flags)
{
    AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);

    status_t status = hwDevHal->open_output_stream(hwDevHal,
            *output,
            devices,
            flags,
            config,
            &outStream,
            address.string());

    if (status == NO_ERROR && outStream != NULL) {
        AudioStreamOut *outputStream = new AudioStreamOut(outHwDev, outStream, flags);

        thread = new MixerThread(this, outputStream, *output, devices);

        mPlaybackThreads.add(*output, thread);
        return thread;
    }
}

The findSuitableHwDev_l call is what matches the hardware audio module (HAL loading is analyzed in the next article). The opened stream is then wrapped in an AudioStreamOut object and handed to the MixerThread.

You may be wondering why this playback part starts straight from AudioFlinger: I couldn't find the code path that hooks the two halves together, and digging through the sources left me just as puzzled. frameworks/av/services/audioflinger/Tracks.cpp, frameworks/av/services/audioflinger/Threads.cpp and friends are the places to look; if you figure it out and feel like sharing, I'd be very grateful!

Author: 二十I邊界
Link: https://xuie0000.com/post/2016-10-24-2019/Android-Audio系统分析(一)概览.html
Copyright: Unless otherwise stated, all articles on this blog are licensed under CC BY-NC-SA 4.0. Please credit 二十I邊界 when reposting.