Integrating FFMPEG into the Android 11 HAL Layer

1. Integration directory:

android/vendor/noch/common/external/NoboMediaCodec

2. Folder layout

3. Android.mk implementation


# Copyright
#

LOCAL_PATH := $(call my-dir)

SF_COMMON_MK := $(LOCAL_PATH)/common.mk

include $(call first-makefiles-under,$(LOCAL_PATH))

4. common.mk implementation

#
# Copyright
#

include $(CLEAR_VARS)

# Include base FFMPEG definitions.
FFMPEG_ARCH := $(TARGET_ARCH)

FFMPEG_2ND_ARCH := false
ifneq ($(TARGET_2ND_ARCH_VARIANT),)
   ifeq ($(FFMPEG_MULTILIB),32)
      FFMPEG_2ND_ARCH := true
   endif
endif

ifeq ($(FFMPEG_2ND_ARCH), true)
    FFMPEG_ARCH := $(TARGET_2ND_ARCH)
endif

ifeq ($(FFMPEG_ARCH),arm64)
    FFMPEG_ARCH := aarch64
endif

FFMPEG_ARCH_VARIANT := $(TARGET_ARCH_VARIANT)
ifeq ($(FFMPEG_2ND_ARCH), true)
   FFMPEG_ARCH_VARIANT := $(TARGET_2ND_ARCH_VARIANT)
endif

ifneq ($(filter x86 x86_64, $(FFMPEG_ARCH)),)
    TARGET_CONFIG := config-$(FFMPEG_ARCH)-$(FFMPEG_ARCH_VARIANT).h
    TARGET_CONFIG_ASM := config-$(FFMPEG_ARCH).asm
else
    TARGET_CONFIG := config-$(FFMPEG_ARCH_VARIANT).h
    TARGET_CONFIG_ASM := config-$(FFMPEG_ARCH_VARIANT).asm
endif
LOCAL_CFLAGS := \
        -DANDROID_SDK_VERSION=$(PLATFORM_SDK_VERSION) \
        -DTARGET_CONFIG=\"$(TARGET_CONFIG)\" \
        -DTARGET_CONFIG_ASM=$(TARGET_CONFIG_ASM) \
        -DHAVE_AV_CONFIG_H -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -DPIC \

LOCAL_ASFLAGS := $(LOCAL_CFLAGS)

# Some flags to work with FFMPEG headers.
LOCAL_CFLAGS += -D__STDC_CONSTANT_MACROS=1 -D__STDINT_LIMITS=1

# All modules are installed on /vendor and optional.
#LOCAL_PROPRIETARY_MODULE := true
LOCAL_MODULE_TAGS := optional

5. The codec2 directory

(1) Android.mk under codec2

#
# Copyright
#
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
include $(SF_COMMON_MK)
ifneq ( ,$(filter 11 R ,$(PLATFORM_VERSION)))

LOCAL_CFLAGS += -DANDROID11
LOCAL_MODULE := android.hardware.media.c2@1.1-ffmpeg-service
endif
ifneq ( ,$(filter 12 S ,$(PLATFORM_VERSION)))

LOCAL_CFLAGS += -DANDROID12
LOCAL_MODULE := android.hardware.media.c2@1.2-ffmpeg-service
endif
#LOCAL_VENDOR_MODULE := true
LOCAL_SYSTEM_EXT_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := hw
# LOCAL_VINTF_FRAGMENTS := manifest_media_c2_V1_1_ffmpeg.xml
LOCAL_PRELINK_MODULE = false
LOCAL_REQUIRED_MODULES := \
        media_codecs_ffmpeg_c2.xml
LOCAL_SRC_FILES := \
        C2FFMPEGAudioDecodeComponent.cpp \
        C2FFMPEGAudioDecodeInterface.cpp \
        C2FFMPEGVideoDecodeComponent.cpp \
        C2FFMPEGVideoDecodeInterface.cpp \
        service.cpp
LOCAL_SHARED_LIBRARIES := \
        libavcodec \
        libavutil \
        libavservices_minijail \
        libbase \
        libbinder \
        libcodec2_soft_common \
        libcodec2_vndk \
        libffmpeg_utils \
        libhidlbase \
        liblog \
        libstagefright_foundation \
        libswresample \
        libswscale \
        libutils
ifneq ( ,$(filter 11 R ,$(PLATFORM_VERSION)))

LOCAL_INIT_RC := android.hardware.media.c2@1.1-ffmpeg-service.rc
LOCAL_SHARED_LIBRARIES += \
        android.hardware.media.c2@1.1 \
        libcodec2_hidl@1.1
LOCAL_REQUIRED_MODULES += \
        android.hardware.media.c2@1.1-ffmpeg.policy
endif
ifneq ( ,$(filter 12 S ,$(PLATFORM_VERSION)))

LOCAL_REQUIRED_MODULES += \
        android.hardware.media.c2@1.2-ffmpeg.policy
LOCAL_INIT_RC := android.hardware.media.c2@1.2-ffmpeg-service.rc
LOCAL_SHARED_LIBRARIES += \
        android.hardware.media.c2@1.2 \
        libcodec2_hidl@1.2
endif
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_SYSTEM_EXT_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := seccomp_policy
ifneq ( ,$(filter 11 R ,$(PLATFORM_VERSION)))

LOCAL_MODULE := android.hardware.media.c2@1.1-ffmpeg.policy
LOCAL_SRC_FILES_arm := seccomp_policy/android.hardware.media.c2@1.1-ffmpeg-arm.policy
LOCAL_SRC_FILES_arm64 := seccomp_policy/android.hardware.media.c2@1.1-ffmpeg-arm64.policy
endif
ifneq ( ,$(filter 12 S ,$(PLATFORM_VERSION)))

LOCAL_MODULE := android.hardware.media.c2@1.2-ffmpeg.policy
LOCAL_SRC_FILES_arm := seccomp_policy/android.hardware.media.c2@1.2-ffmpeg-arm.policy
LOCAL_SRC_FILES_arm64 := seccomp_policy/android.hardware.media.c2@1.2-ffmpeg-arm64.policy
endif
include $(BUILD_PREBUILT)
include $(CLEAR_VARS)
LOCAL_MODULE := media_codecs_ffmpeg_c2.xml
LOCAL_SYSTEM_EXT_MODULE := true
#LOCAL_VENDOR_MODULE := true
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_SRC_FILES := media_codecs_ffmpeg_c2.xml
include $(BUILD_PREBUILT)

(2) Contents of android.hardware.media.c2@1.1-ffmpeg-service.rc

#
# Copyright
#

service android-hardware-media-c2-hal-1-1 /system_ext/bin/hw/android.hardware.media.c2@1.1-ffmpeg-service
    class main
    user mediacodec
    group camera drmrpc mediadrm
    ioprio rt 4
    writepid /dev/cpuset/foreground/tasks

(3) Contents of media_codecs_ffmpeg_c2.xml

<?xml version="1.0" encoding="utf-8" ?>
<!-- Copyright
-->
<MediaCodecs>
    <Decoders>
        <!-- audio codecs -->
        <MediaCodec name="c2.ffmpeg.ape.decoder"    type="audio/x-ape" >
            <Alias name="OMX.ffmpeg.ape.decoder" />
            <Limit name="channel-count" max="8" />
            <Limit name="sample-rate" ranges="8000-192000" />
        </MediaCodec>
        <MediaCodec name="c2.ffmpeg.wma.decoder"    type="audio/x-ms-wma" >
            <Alias name="OMX.ffmpeg.wma.decoder" />
            <Limit name="channel-count" max="8" />
            <Limit name="sample-rate" ranges="8000-192000" />
        </MediaCodec>
        <MediaCodec name="c2.ffmpeg.mp3.decoder"    type="audio/mpeg" >
            <Alias name="OMX.ffmpeg.mp3.decoder" />
            <Limit name="channel-count" max="8" />
            <Limit name="sample-rate" ranges="8000-192000" />
        </MediaCodec>
        <!-- video codecs -->
        <MediaCodec name="c2.ffmpeg.xvid.decoder"   type="video/xvid">
            <Alias name="OMX.ffmpeg.xvid.decoder" />
            <Limit name="size" min="2x2" max="2048x2048" />
            <Limit name="alignment" value="2x2" />
            <Feature name="adaptive-playback" />
        </MediaCodec>
        <MediaCodec name="c2.ffmpeg.vc1.decoder"    type="video/vc1">
            <Alias name="OMX.ffmpeg.vc1.decoder" />
            <Limit name="size" min="2x2" max="2048x2048" />
            <Limit name="alignment" value="2x2" />
            <Feature name="adaptive-playback" />
        </MediaCodec>
        <MediaCodec name="c2.ffmpeg.wmv.decoder"    type="video/x-ms-wmv">
            <Alias name="OMX.ffmpeg.wmv.decoder" />
            <Limit name="size" min="2x2" max="2048x2048" />
            <Limit name="alignment" value="2x2" />
            <Feature name="adaptive-playback" />
        </MediaCodec>
    </Decoders>
</MediaCodecs>

(4) Contents of manifest_media_c2_V1_1_ffmpeg.xml

<manifest version="1.0" type="framework">
    <hal format="hidl">
        <name>android.hardware.media.c2</name>
        <transport>hwbinder</transport>
        <version>1.1</version>
        <interface>
            <name>IComponentStore</name>
            <instance>ffmpeg</instance>
        </interface>
    </hal>
</manifest>
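
With this fragment merged and the service registered (see service.cpp below), the component store becomes discoverable under the instance name "ffmpeg". The following is a minimal client-side sketch, not part of the original integration, assuming the HIDL-generated android.hardware.media.c2@1.1 headers are available:

// Illustration only: check that the "ffmpeg" IComponentStore instance
// declared above is registered with hwservicemanager.
#include <android/hardware/media/c2/1.1/IComponentStore.h>
#include <utils/StrongPointer.h>

using ::android::sp;
using ::android::hardware::media::c2::V1_1::IComponentStore;

bool isFFmpegC2StoreUp() {
    sp<IComponentStore> store = IComponentStore::getService("ffmpeg");
    return store != nullptr;
}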

(5) Contents of service.cpp

#define LOG_NDEBUG 0
#define SERVICE_NAME "ffmpeg"

#include <android-base/logging.h>
#include <android-base/properties.h>
#include <binder/ProcessState.h>
#ifdef ANDROID12
#include <codec2/hidl/1.2/ComponentStore.h>
#define LOG_TAG "android.hardware.media.c2@1.2-ffmpeg-service"
#elif defined(ANDROID11)
#include <codec2/hidl/1.1/ComponentStore.h>
#define LOG_TAG "android.hardware.media.c2@1.1-ffmpeg-service"
#else
#include <codec2/hidl/1.0/ComponentStore.h>
#define LOG_TAG "android.hardware.media.c2@1.0-ffmpeg-service"
#endif
#include <hidl/HidlTransportSupport.h>
#include <minijail.h>
#include <signal.h>

#include <util/C2InterfaceHelper.h>
#include <C2Component.h>
#include <C2Config.h>

#include "C2FFMPEGCommon.h"
#include "C2FFMPEGAudioDecodeComponent.h"
#include "C2FFMPEGAudioDecodeInterface.h"
#include "C2FFMPEGVideoDecodeComponent.h"
#include "C2FFMPEGVideoDecodeInterface.h"

namespace android {

// This is the absolute on-device path of the seccomp policy module
// built as a prebuilt by the Android.mk above.
static constexpr char kBaseSeccompPolicyPath[] =
        "/system_ext/etc/seccomp_policy/"
        #ifdef ANDROID12
        "android.hardware.media.c2@1.2-ffmpeg.policy";
        #elif defined(ANDROID11)
        "android.hardware.media.c2@1.1-ffmpeg.policy";
        #else
        "android.hardware.media.c2@1.1-ffmpeg.policy";
        #endif
// Additional seccomp permissions can be added in this file.
// This file does not exist by default.
static constexpr char kExtSeccompPolicyPath[] =
        "/system_ext/etc/seccomp_policy/"
        "android.hardware.media.c2@1.1-ffmpeg-extended.policy";

static const C2FFMPEGComponentInfo kFFMPEGVideoComponents[] = {
    { "c2.ffmpeg.xvid.decoder"  , MEDIA_MIMETYPE_VIDEO_XVID  , AV_CODEC_ID_MPEG4      },
    { "c2.ffmpeg.vc1.decoder"   , MEDIA_MIMETYPE_VIDEO_VC1   , AV_CODEC_ID_VC1        },
    { "c2.ffmpeg.vtrial.decoder", MEDIA_MIMETYPE_VIDEO_FFMPEG, AV_CODEC_ID_NONE       },
    { "c2.ffmpeg.wmv.decoder"   , MEDIA_MIMETYPE_VIDEO_WMV   , AV_CODEC_ID_WMV2       },
};
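
// NOTE: The "*.trial" components are registered with AV_CODEC_ID_NONE: for these
// catch-all entries the actual FFmpeg codec id is expected to come from the
// extractor through the raw codec data parameter (see getCodecInfo()).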

static const size_t kNumVideoComponents =
    (sizeof(kFFMPEGVideoComponents) / sizeof(kFFMPEGVideoComponents[0]));

static const C2FFMPEGComponentInfo kFFMPEGAudioComponents[] = {
    { "c2.ffmpeg.mp3.decoder"   , MEDIA_MIMETYPE_AUDIO_MPEG         , AV_CODEC_ID_MP3    },
    { "c2.ffmpeg.ape.decoder"   , MEDIA_MIMETYPE_AUDIO_APE          , AV_CODEC_ID_APE    },
    { "c2.ffmpeg.atrial.decoder", MEDIA_MIMETYPE_AUDIO_FFMPEG       , AV_CODEC_ID_NONE   },
    { "c2.ffmpeg.wma.decoder"   , MEDIA_MIMETYPE_AUDIO_WMA          , AV_CODEC_ID_WMAV2  },
};

static const size_t kNumAudioComponents =
    (sizeof(kFFMPEGAudioComponents) / sizeof(kFFMPEGAudioComponents[0]));

class StoreImpl : public C2ComponentStore {
public:
    StoreImpl()
        : mReflectorHelper(std::make_shared<C2ReflectorHelper>()),
          mInterface(mReflectorHelper) {
	ALOGD("StoreImpl::StoreImpl");
    }

    virtual ~StoreImpl() override = default;

    virtual C2String getName() const override {
        ALOGD("StoreImpl::getName");
        return SERVICE_NAME;
    }

    virtual c2_status_t createComponent(
            C2String name,
            std::shared_ptr<C2Component>* const component) override {
        ALOGD("createComponent: %s", name.c_str());
        for (int i = 0; i < kNumAudioComponents; i++) {
            auto info = &kFFMPEGAudioComponents[i];
			ALOGD("createComponent: %s, info->name:%s", name.c_str(), info->name);
            if (name == info->name) {
                component->reset();
                *component = std::shared_ptr<C2Component>(
                        new C2FFMPEGAudioDecodeComponent(
                                info, std::make_shared<C2FFMPEGAudioDecodeInterface>(info, mReflectorHelper)));
                return C2_OK;
            }
        }
        for (int i = 0; i < kNumVideoComponents; i++) {
            auto info = &kFFMPEGVideoComponents[i];
            if (name == info->name) {
                component->reset();
                *component = std::shared_ptr<C2Component>(
                        new C2FFMPEGVideoDecodeComponent(
                                info, std::make_shared<C2FFMPEGVideoDecodeInterface>(info, mReflectorHelper)));
                return C2_OK;
            }
        }
        return C2_NOT_FOUND;
    }

    virtual c2_status_t createInterface(
            C2String name,
            std::shared_ptr<C2ComponentInterface>* const interface) override {
        ALOGD("createInterface: %s", name.c_str());
        for (int i = 0; i < kNumAudioComponents; i++) {
            auto info = &kFFMPEGAudioComponents[i];
			ALOGD("createInterface: %s, info->name:%s", name.c_str(), info->name);
            if (name == info->name) {
                interface->reset();
                *interface = std::shared_ptr<C2ComponentInterface>(
                        new SimpleInterface<C2FFMPEGAudioDecodeInterface>(
                                info->name, 0, std::make_shared<C2FFMPEGAudioDecodeInterface>(info, mReflectorHelper)));
                return C2_OK;
            }
        }
        for (int i = 0; i < kNumVideoComponents; i++) {
            auto info = &kFFMPEGVideoComponents[i];
            if (name == info->name) {
                interface->reset();
                *interface = std::shared_ptr<C2ComponentInterface>(
                        new SimpleInterface<C2FFMPEGVideoDecodeInterface>(
                                info->name, 0, std::make_shared<C2FFMPEGVideoDecodeInterface>(info, mReflectorHelper)));
                return C2_OK;
            }
        }
        ALOGE("createInterface: unknown component = %s", name.c_str());
        return C2_NOT_FOUND;
    }

    virtual std::vector<std::shared_ptr<const C2Component::Traits>>
            listComponents() override {
        std::vector<std::shared_ptr<const C2Component::Traits>> ret;
        uint32_t defaultRank = ::android::base::GetUintProperty("debug.ffmpeg-codec2.rank", 0x110u);
        uint32_t defaultRankAudio = ::android::base::GetUintProperty("debug.ffmpeg-codec2.rank.audio", defaultRank);
        uint32_t defaultRankVideo = ::android::base::GetUintProperty("debug.ffmpeg-codec2.rank.video", defaultRank);
        ALOGD("listComponents: defaultRank=%x, defaultRankAudio=%x, defaultRankVideo=%x",
              defaultRank, defaultRankAudio, defaultRankVideo);
#define RANK_DISABLED 0xFFFFFFFF
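        // In Codec2, a component with a lower rank value is generally preferred
        // when several components support the same media type. The defaults above
        // can be overridden through the debug.ffmpeg-codec2.rank[.audio|.video]
        // properties; a rank of RANK_DISABLED hides the corresponding components.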
        if (defaultRank != RANK_DISABLED) {
            if (defaultRankAudio != RANK_DISABLED) {
                for (int i = 0; i < kNumAudioComponents; i++) {
                    auto traits = std::make_shared<C2Component::Traits>();
                    traits->name = kFFMPEGAudioComponents[i].name;
                    traits->domain = C2Component::DOMAIN_AUDIO;
                    traits->kind = C2Component::KIND_DECODER;
                    traits->mediaType = kFFMPEGAudioComponents[i].mediaType;
                    traits->rank = defaultRankAudio;
                    ret.push_back(traits);
                }
            }
            if (defaultRankVideo != RANK_DISABLED) {
                for (int i = 0; i < kNumVideoComponents; i++) {
                    auto traits = std::make_shared<C2Component::Traits>();
                    traits->name = kFFMPEGVideoComponents[i].name;
                    traits->domain = C2Component::DOMAIN_VIDEO;
                    traits->kind = C2Component::KIND_DECODER;
                    traits->mediaType = kFFMPEGVideoComponents[i].mediaType;
                    traits->rank = defaultRankVideo;
                    ret.push_back(traits);
                }
            }
        }
        return ret;
    }

    virtual c2_status_t copyBuffer(
            std::shared_ptr<C2GraphicBuffer> /* src */,
            std::shared_ptr<C2GraphicBuffer> /* dst */) override {
        return C2_OMITTED;
    }

    virtual c2_status_t query_sm(
        const std::vector<C2Param*>& stackParams,
        const std::vector<C2Param::Index>& heapParamIndices,
        std::vector<std::unique_ptr<C2Param>>* const heapParams) const override {
        return mInterface.query(stackParams, heapParamIndices, C2_MAY_BLOCK, heapParams);
    }

    virtual c2_status_t config_sm(
            const std::vector<C2Param*>& params,
            std::vector<std::unique_ptr<C2SettingResult>>* const failures) override {
        return mInterface.config(params, C2_MAY_BLOCK, failures);
    }

    virtual std::shared_ptr<C2ParamReflector> getParamReflector() const override {
        return mReflectorHelper;
    }

    virtual c2_status_t querySupportedParams_nb(
            std::vector<std::shared_ptr<C2ParamDescriptor>>* const params) const override {
        return mInterface.querySupportedParams(params);
    }

    virtual c2_status_t querySupportedValues_sm(
            std::vector<C2FieldSupportedValuesQuery>& fields) const override {
        return mInterface.querySupportedValues(fields, C2_MAY_BLOCK);
    }

private:
    class Interface : public C2InterfaceHelper {
    public:
        Interface(const std::shared_ptr<C2ReflectorHelper> &helper)
            : C2InterfaceHelper(helper) {
            setDerivedInstance(this);

            addParameter(
                DefineParam(mIonUsageInfo, "ion-usage")
                .withDefault(new C2StoreIonUsageInfo())
                .withFields({
                    C2F(mIonUsageInfo, usage).flags(
                            {C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE}),
                    C2F(mIonUsageInfo, capacity).inRange(0, UINT32_MAX, 1024),
                    C2F(mIonUsageInfo, heapMask).any(),
                    C2F(mIonUsageInfo, allocFlags).flags({}),
                    C2F(mIonUsageInfo, minAlignment).equalTo(0)
                })
                .withSetter(SetIonUsage)
                .build());

//            addParameter(
//                DefineParam(mDmaBufUsageInfo, "dmabuf-usage")
//                .withDefault(C2StoreDmaBufUsageInfo::AllocUnique(0))
//                .withFields({
//                    C2F(mDmaBufUsageInfo, m.usage).flags({C2MemoryUsage::CPU_READ | C2MemoryUsage::CPU_WRITE}),
//                    C2F(mDmaBufUsageInfo, m.capacity).inRange(0, UINT32_MAX, 1024),
//                    C2F(mDmaBufUsageInfo, m.heapName).any(),
//                    C2F(mDmaBufUsageInfo, m.allocFlags).flags({}),
//                })
//                .withSetter(SetDmaBufUsage)
//                .build());
        }

        virtual ~Interface() = default;

    private:
        static C2R SetIonUsage(bool /* mayBlock */, C2P<C2StoreIonUsageInfo> &me) {
            // Vendor's TODO: put appropriate mapping logic
            me.set().heapMask = ~0;
            me.set().allocFlags = 0;
            me.set().minAlignment = 0;
            return C2R::Ok();
        }

//        static C2R SetDmaBufUsage(bool /* mayBlock */, C2P<C2StoreDmaBufUsageInfo> &me) {
//            // Vendor's TODO: put appropriate mapping logic
//            strncpy(me.set().m.heapName, "system", me.v.flexCount());
//            me.set().m.allocFlags = 0;
//            return C2R::Ok();
//        }


        std::shared_ptr<C2StoreIonUsageInfo> mIonUsageInfo;
//        std::shared_ptr<C2StoreDmaBufUsageInfo> mDmaBufUsageInfo;
    };
    std::shared_ptr<C2ReflectorHelper> mReflectorHelper;
    Interface mInterface;
};

} // namespace android

int main(int /* argc */, char** /* argv */) {
    using namespace ::android;

#ifdef ANDROID12
    LOG(DEBUG) << "android.hardware.media.c2@1.2-service starting...";
#elif defined(ANDROID11)
    LOG(DEBUG) << "android.hardware.media.c2@1.1-service starting...";
#else
    LOG(DEBUG) << "android.hardware.media.c2@1.0-service starting...";
#endif


    // Set up minijail to limit system calls.
    signal(SIGPIPE, SIG_IGN);
    SetUpMinijail(kBaseSeccompPolicyPath, kExtSeccompPolicyPath);

    ProcessState::self()->startThreadPool();
    // Extra threads may be needed to handle a stacked IPC sequence that
    // contains alternating binder and hwbinder calls. (See b/35283480.)
    hardware::configureRpcThreadpool(8, true /* callerWillJoin */);
    {
#ifdef ANDROID12
        using namespace ::android::hardware::media::c2::V1_2;
#elif defined(ANDROID11)
        using namespace ::android::hardware::media::c2::V1_1;
#else
        using namespace ::android::hardware::media::c2::V1_0;
#endif
        sp<IComponentStore> componentstore =
                new utils::ComponentStore(std::make_shared<StoreImpl>());
        if (componentstore == nullptr) {
            LOG(ERROR) << "Cannot create Codec2's IComponentStore service.";
        } else {
            constexpr char const* serviceName = SERVICE_NAME;
            if (componentstore->registerAsService(serviceName) != OK) {
                LOG(ERROR) << "Cannot register Codec2's IComponentStore service"
                              " with instance name << \""
                           << serviceName << "\".";
            } else {
                LOG(DEBUG) << "Codec2's IComponentStore service registered. "
                              "Instance name: \"" << serviceName << "\".";
            }
        }
    }

    hardware::joinRpcThreadpool();
    return 0;
}

(6) Contents of C2FFMPEGAudioDecodeComponent.h

/*
 * Copyright
 */

#ifndef C2_FFMPEG_AUDIO_DECODE_COMPONENT_H
#define C2_FFMPEG_AUDIO_DECODE_COMPONENT_H

#include <SimpleC2Component.h>
#include "C2FFMPEGCommon.h"
#include "C2FFMPEGAudioDecodeInterface.h"

namespace android {

struct CodecHelper;

class C2FFMPEGAudioDecodeComponent : public SimpleC2Component {
public:
    /**
     * @brief Constructor of C2FFMPEGAudioDecodeComponent.
    */
    explicit C2FFMPEGAudioDecodeComponent(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2FFMPEGAudioDecodeInterface>& intf);
    /**
     * @brief Destructor of C2FFMPEGAudioDecodeComponent.
    */
    virtual ~C2FFMPEGAudioDecodeComponent();

protected:
    /**
     * @brief Log the init call and initialize the decoder via initDecoder().
     *
     * @return C2_OK on success
    */
    c2_status_t onInit() override;
    /**
     * @brief Log the stop call.
     *
     * @return C2_OK on success
    */
    c2_status_t onStop() override;
    /**
     * @brief Log the reset call, then call deInitDecoder() followed by initDecoder().
    */
    void onReset() override;
    /**
     * @brief Log the release call and free the decoder resources.
    */
    void onRelease() override;
    /**
     * @brief Flush the decoder state and log the operation.
     *
     * @return C2_OK when the flush succeeded
    */
    c2_status_t onFlush_sm() override;
    /**
     * @brief Decode audio.
    */
    void process(
        const std::unique_ptr<C2Work> &work,
        const std::shared_ptr<C2BlockPool> &pool) override;
    /**
     * @brief Drain the audio decoder.
     *
     * @return the resulting status
    */
    c2_status_t drain(
        uint32_t drainMode,
        const std::shared_ptr<C2BlockPool> &pool) override;

private:
    /**
     * @brief Initialize the decoder.
     *
     * @return C2_OK on success, another status code on failure
    */
    c2_status_t initDecoder();
    /**
     * @brief Open the decoder. If it is already open, return OK; otherwise find and
     *        initialize the requested codec, configure its parameters, and open it.
     *
     * @return C2_OK          the decoder was opened successfully
     *         C2_NOT_FOUND   the requested codec was not found
     *         C2_NO_INIT     the decoder failed to initialize
     *         C2_NO_MEMORY   memory allocation failed
    */
    c2_status_t openDecoder();
    /**
     * @brief De-initialize the decoder, releasing its resources and closing it.
    */
    void deInitDecoder();
    /**
     * @brief Process codec configuration data.
     *
     * @param inBuffer buffer holding the codec configuration data
     * @return C2_OK on success
    */
    c2_status_t processCodecConfig(C2ReadView* inBuffer);
    /**
     * @brief Send input data to the decoder.
     *
     * @param inBuffer input buffer holding the audio data to decode
     * @param timestamp timestamp of the input data
     * @return C2_OK on success
    */
    c2_status_t sendInputBuffer(C2ReadView* inBuffer, int64_t timestamp);
    /**
     * @brief Receive a decoded audio frame from the decoder.
     *
     * @param hasFrame set to indicate whether a frame was received
     * @return C2_OK on success
    */
    c2_status_t receiveFrame(bool* hasFrame);
    /**
     * @brief Fetch the output buffer and perform the audio conversion.
     *
     * @param outBuffer pointer to the output buffer
     * @return C2_OK on success
    */
    c2_status_t getOutputBuffer(C2WriteView* outBuffer);
    /**
     * @brief Fetch the codec info, sample rate and channel count, and update the
     *        target audio parameters.
    */
    void updateAudioParameters();

private:
    const C2FFMPEGComponentInfo* mInfo;
    std::shared_ptr<C2FFMPEGAudioDecodeInterface> mIntf;
    enum AVCodecID mCodecID;
    AVCodecContext* mCtx;
    AVFrame* mFrame;
    AVPacket* mPacket;
    bool mFFMPEGInitialized;
    bool mCodecAlreadyOpened;
    bool mEOSSignalled;
    // Audio resampling
    struct SwrContext* mSwrCtx;
    enum AVSampleFormat mTargetSampleFormat;
    int mTargetSampleRate;
    int mTargetChannels;
    // Misc
    CodecHelper* mCodecHelper;
};

} // namespace android

#endif // C2_FFMPEG_AUDIO_DECODE_COMPONENT_H

(7) Contents of C2FFMPEGAudioDecodeComponent.cpp

/*
 * Copyright
 */

#define LOG_NDEBUG 0
#define LOG_TAG "C2FFMPEGAudioDecodeComponent"
#include <inttypes.h>

#include <android-base/stringprintf.h>
#include <log/log.h>

#include <SimpleC2Interface.h>
#include "C2FFMPEGAudioDecodeComponent.h"
#include <libswresample/swresample_internal.h>

#define DEBUG_FRAMES 0
#define DEBUG_EXTRADATA 0

namespace android {

static enum AVSampleFormat convertFormatToFFMPEG(C2Config::pcm_encoding_t encoding) {
    switch (encoding) {
        case C2Config::PCM_8: return AV_SAMPLE_FMT_U8;
        case C2Config::PCM_16: return AV_SAMPLE_FMT_S16;
        case C2Config::PCM_FLOAT: return AV_SAMPLE_FMT_FLT;
        default: return AV_SAMPLE_FMT_NONE;
    }
}

__unused
static C2Config::pcm_encoding_t convertFormatToC2(enum AVSampleFormat format) {
    switch (format) {
        case AV_SAMPLE_FMT_U8: return C2Config::PCM_8;
        case AV_SAMPLE_FMT_S16: return C2Config::PCM_16;
        case AV_SAMPLE_FMT_FLT: return C2Config::PCM_FLOAT;
        default: return C2Config::PCM_16;
    }
}

// Helper structures to encapsulate the specific codec behaviors.
// Currently only used to process extradata.

struct CodecHelper {
    virtual ~CodecHelper() {}
    virtual c2_status_t onCodecConfig(AVCodecContext* mCtx, C2ReadView* inBuffer);
    virtual c2_status_t onOpen(AVCodecContext* mCtx);
    virtual c2_status_t onOpened(AVCodecContext* mCtx);
};

c2_status_t CodecHelper::onCodecConfig(AVCodecContext* mCtx, C2ReadView* inBuffer) {
    int orig_extradata_size = mCtx->extradata_size;
    int add_extradata_size = inBuffer->capacity();

#if DEBUG_EXTRADATA
    ALOGD("CodecHelper::onCodecConfig: add = %u, current = %d", add_extradata_size, orig_extradata_size);
#endif
    mCtx->extradata_size += add_extradata_size;
    mCtx->extradata = (uint8_t *) realloc(mCtx->extradata, mCtx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
    if (! mCtx->extradata) {
        ALOGE("CodecHelper::onCodecConfig: ffmpeg audio decoder failed to alloc extradata memory.");
        return C2_NO_MEMORY;
    }
    memcpy(mCtx->extradata + orig_extradata_size, inBuffer->data(), add_extradata_size);
    memset(mCtx->extradata + mCtx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

    return C2_OK;
}

c2_status_t CodecHelper::onOpen(AVCodecContext* mCtx) {
#if DEBUG_EXTRADATA
    ALOGD("CodecHelper::onOpen: extradata = %d", mCtx->extradata_size);
#else
    // Silence compilation warning.
    (void)mCtx;
#endif
    return C2_OK;
}

c2_status_t CodecHelper::onOpened(AVCodecContext* mCtx) {
    (void)mCtx;
    return C2_OK;
}

CodecHelper* createCodecHelper(enum AVCodecID codec_id) {
    (void)codec_id; // Only the generic helper is needed for the supported codecs.
    return new CodecHelper();
}

C2FFMPEGAudioDecodeComponent::C2FFMPEGAudioDecodeComponent(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2FFMPEGAudioDecodeInterface>& intf)
    : SimpleC2Component(std::make_shared<SimpleInterface<C2FFMPEGAudioDecodeInterface>>(componentInfo->name, 0, intf)),
      mInfo(componentInfo),
      mIntf(intf),
      mCodecID(componentInfo->codecID),
      mCtx(NULL),
      mFrame(NULL),
      mPacket(NULL),
      mFFMPEGInitialized(false),
      mCodecAlreadyOpened(false),
      mEOSSignalled(false),
      mSwrCtx(NULL),
      mTargetSampleFormat(AV_SAMPLE_FMT_NONE),
      mTargetSampleRate(44100),
      mTargetChannels(1),
      mCodecHelper(NULL) {
    ALOGD("C2FFMPEGAudioDecodeComponent::C2FFMPEGAudioDecodeComponent mediaType = %s", componentInfo->mediaType);
}

C2FFMPEGAudioDecodeComponent::~C2FFMPEGAudioDecodeComponent() {
    ALOGD("~C2FFMPEGAudioDecodeComponent: mCtx = %p", mCtx);
    onRelease();
}

c2_status_t C2FFMPEGAudioDecodeComponent::initDecoder() {
    if (! mFFMPEGInitialized) {
        if (initFFmpeg() != C2_OK) {
            ALOGE("initDecoder: FFMPEG initialization failed.");
            return C2_NO_INIT;
        }
        mFFMPEGInitialized = true;
    }

    mCtx = avcodec_alloc_context3(NULL);
    if (! mCtx) {
        ALOGE("initDecoder: avcodec_alloc_context failed.");
        return C2_NO_MEMORY;
    }

    mCtx->codec_type = AVMEDIA_TYPE_AUDIO;
    mCtx->codec_id = mCodecID;

    updateAudioParameters();

    av_channel_layout_default(&mCtx->ch_layout, mTargetChannels);
    mCtx->sample_rate = mTargetSampleRate;
    mCtx->bit_rate = 0;
    mCtx->sample_fmt = mTargetSampleFormat;

    // Avoid resampling if possible, ask the codec for the target format.
    mCtx->request_sample_fmt = mCtx->sample_fmt;

    const FFMPEGAudioCodecInfo* codecInfo = mIntf->getCodecInfo();

    if (codecInfo) {
        ALOGD("initDecoder: use codec info from extractor");
        mCtx->codec_id = (enum AVCodecID)codecInfo->codec_id;
        mCtx->bit_rate = mIntf->getBitrate(); // The extractor always sets bitrate
        mCtx->bits_per_coded_sample = codecInfo->bits_per_coded_sample;
        mCtx->block_align = codecInfo->block_align;
        // FIXME: Is more needed...?
    }

    mCodecHelper = createCodecHelper(mCtx->codec_id);

    ALOGD("initDecoder: %p [%s], %s - sr=%d, ch=%d, fmt=%s",
          mCtx, avcodec_get_name(mCtx->codec_id), mInfo->mediaType,
          mCtx->sample_rate, mCtx->ch_layout.nb_channels, av_get_sample_fmt_name(mCtx->sample_fmt));

    return C2_OK;
}

c2_status_t C2FFMPEGAudioDecodeComponent::openDecoder() {
    if (mCodecAlreadyOpened) {
        return C2_OK;
    }

    mCodecHelper->onOpen(mCtx);

    // Find decoder
    mCtx->codec = avcodec_find_decoder(mCtx->codec_id);
    if (! mCtx->codec) {
        ALOGE("openDecoder: ffmpeg audio decoder failed to find codec %d", mCtx->codec_id);
        return C2_NOT_FOUND;
    }

    // Configure decoder
    mCtx->workaround_bugs   = 1;
    mCtx->idct_algo         = 0;
    mCtx->skip_frame        = AVDISCARD_DEFAULT;
    mCtx->skip_idct         = AVDISCARD_DEFAULT;
    mCtx->skip_loop_filter  = AVDISCARD_DEFAULT;
    mCtx->error_concealment = 3;

    mCtx->flags |= AV_CODEC_FLAG_BITEXACT;

    ALOGD("openDecoder: begin to open ffmpeg audio decoder(%s), mCtx sample_rate: %d, channels: %d",
           avcodec_get_name(mCtx->codec_id), mCtx->sample_rate, mCtx->ch_layout.nb_channels);

    int err = avcodec_open2(mCtx, mCtx->codec, NULL);
    if (err < 0) {
        ALOGE("openDecoder: ffmpeg audio decoder failed to initialize.(%s)", av_err2str(err));
        return C2_NO_INIT;
    }
    mCodecAlreadyOpened = true;

    mCodecHelper->onOpened(mCtx);

    ALOGD("openDecoder: open ffmpeg audio decoder(%s) success, mCtx sample_rate: %d, "
          "channels: %d, sample_fmt: %s, bits_per_coded_sample: %d, bits_per_raw_sample: %d",
          avcodec_get_name(mCtx->codec_id),
          mCtx->sample_rate, mCtx->ch_layout.nb_channels,
          av_get_sample_fmt_name(mCtx->sample_fmt),
          mCtx->bits_per_coded_sample, mCtx->bits_per_raw_sample);

    mFrame = av_frame_alloc();
    if (! mFrame) {
        ALOGE("openDecoder: oom for audio frame");
        return C2_NO_MEMORY;
    }

    return C2_OK;
}

void C2FFMPEGAudioDecodeComponent::deInitDecoder() {
    ALOGD("deInitDecoder: %p", mCtx);
    if (mCtx) {
        if (avcodec_is_open(mCtx)) {
            avcodec_flush_buffers(mCtx);
        }
        if (mCtx->extradata) {
            av_free(mCtx->extradata);
            mCtx->extradata = NULL;
            mCtx->extradata_size = 0;
        }
        if (mCodecAlreadyOpened) {
            avcodec_close(mCtx);
            mCodecAlreadyOpened = false;
        }
        av_freep(&mCtx);
    }
    if (mFrame) {
        av_frame_free(&mFrame);
        mFrame = NULL;
    }
    if (mPacket) {
        av_packet_free(&mPacket);
        mPacket = NULL;
    }
    if (mSwrCtx) {
        swr_free(&mSwrCtx);
    }
    if (mCodecHelper) {
        delete mCodecHelper;
        mCodecHelper = NULL;
    }
    mEOSSignalled = false;
}

c2_status_t C2FFMPEGAudioDecodeComponent::processCodecConfig(C2ReadView* inBuffer) {
#if DEBUG_EXTRADATA
    ALOGD("processCodecConfig: inBuffer = %d", inBuffer->capacity());
#endif
    if (! mCodecAlreadyOpened) {
        return mCodecHelper->onCodecConfig(mCtx, inBuffer);
    } else {
        ALOGW("processCodecConfig: decoder is already opened, ignoring %d bytes", inBuffer->capacity());
    }

    return C2_OK;
}

c2_status_t C2FFMPEGAudioDecodeComponent::sendInputBuffer(
        C2ReadView *inBuffer, int64_t timestamp) {
    if (!mPacket) {
        mPacket = av_packet_alloc();
        if (!mPacket) {
            ALOGE("sendInputBuffer: oom for audio packet");
            return C2_NO_MEMORY;
        }
    }

    mPacket->data = inBuffer ? const_cast<uint8_t *>(inBuffer->data()) : NULL;
    mPacket->size = inBuffer ? inBuffer->capacity() : 0;
    mPacket->pts = timestamp;
    mPacket->dts = timestamp;

    int err = avcodec_send_packet(mCtx, mPacket);
    av_packet_unref(mPacket);

    if (err < 0) {
        ALOGE("sendInputBuffer: failed to send data to decoder err = %d", err);
        // Don't report error to client.
    }

    return C2_OK;
}

c2_status_t C2FFMPEGAudioDecodeComponent::receiveFrame(bool* hasFrame) {
    int err = avcodec_receive_frame(mCtx, mFrame);

    if (err == 0) {
        *hasFrame = true;
    } else if (err == AVERROR(EAGAIN) || err == AVERROR_EOF) {
        *hasFrame = false;
    } else {
        ALOGE("receiveFrame: failed to receive frame from decoder err = %d", err);
        // Don't report error to client.
    }

    return C2_OK;
}

c2_status_t C2FFMPEGAudioDecodeComponent::getOutputBuffer(C2WriteView* outBuffer) {
    if (! mSwrCtx ||
        mSwrCtx->in_sample_fmt != mFrame->format ||
        mSwrCtx->in_sample_rate != mFrame->sample_rate ||
        av_channel_layout_compare(&mSwrCtx->in_ch_layout, &mFrame->ch_layout) != 0 ||
        mSwrCtx->out_sample_fmt != mTargetSampleFormat ||
        mSwrCtx->out_sample_rate != mTargetSampleRate ||
        mSwrCtx->out_ch_layout.nb_channels != mTargetChannels) {
        if (mSwrCtx) {
            swr_free(&mSwrCtx);
        }

        AVChannelLayout newLayout;

        av_channel_layout_default(&newLayout, mTargetChannels);
        swr_alloc_set_opts2(&mSwrCtx,
                            &newLayout, mTargetSampleFormat, mTargetSampleRate,
                            &mFrame->ch_layout, (enum AVSampleFormat)mFrame->format, mFrame->sample_rate,
                            0, NULL);
        av_channel_layout_uninit(&newLayout);
        if (! mSwrCtx || swr_init(mSwrCtx) < 0) {
            ALOGE("getOutputBuffer: cannot create audio converter - sr=%d, ch=%d, fmt=%s => sr=%d, ch=%d, fmt=%s",
                  mFrame->sample_rate, mFrame->ch_layout.nb_channels, av_get_sample_fmt_name((enum AVSampleFormat)mFrame->format),
                  mTargetSampleRate, mTargetChannels, av_get_sample_fmt_name(mTargetSampleFormat));
            if (mSwrCtx) {
                swr_free(&mSwrCtx);
            }
            return C2_NO_MEMORY;
        }

        ALOGD("getOutputBuffer: created audio converter - sr=%d, ch=%d, fmt=%s => sr=%d, ch=%d, fmt=%s",
              mFrame->sample_rate, mFrame->ch_layout.nb_channels, av_get_sample_fmt_name((enum AVSampleFormat)mFrame->format),
              mTargetSampleRate, mTargetChannels, av_get_sample_fmt_name(mTargetSampleFormat));
    }

    uint8_t* out[1] = { outBuffer->data() };
    int ret = swr_convert(mSwrCtx, out, mFrame->nb_samples, (const uint8_t**)mFrame->extended_data, mFrame->nb_samples);

    if (ret < 0) {
        ALOGE("getOutputBuffer: audio conversion failed");
        return C2_CORRUPTED;
    } else if (ret != mFrame->nb_samples) {
        ALOGW("getOutputBuffer: audio conversion truncated!");
    }

#if DEBUG_FRAMES
    ALOGD("getOutputBuffer: audio converted - sr=%d, ch=%d, fmt=%s, #=%d => sr=%d, ch=%d, fmt=%s, #=%d(%d)",
          mFrame->sample_rate, mFrame->ch_layout.nb_channels, av_get_sample_fmt_name((enum AVSampleFormat)mFrame->format), mFrame->nb_samples,
          mTargetSampleRate, mTargetChannels, av_get_sample_fmt_name(mTargetSampleFormat), mFrame->nb_samples, outBuffer->capacity());
#endif

    return C2_OK;
}

void C2FFMPEGAudioDecodeComponent::updateAudioParameters() {
    mTargetSampleFormat = convertFormatToFFMPEG(mIntf->getPcmEncodingInfo());
    mTargetSampleRate = mIntf->getSampleRate();
    mTargetChannels = mIntf->getChannelCount();
}

c2_status_t C2FFMPEGAudioDecodeComponent::onInit() {
    ALOGD("onInit");
    return initDecoder();
}

c2_status_t C2FFMPEGAudioDecodeComponent::onStop() {
    ALOGD("onStop");
    return C2_OK;
}

void C2FFMPEGAudioDecodeComponent::onReset() {
    ALOGD("onReset");
    deInitDecoder();
    initDecoder();
}

void C2FFMPEGAudioDecodeComponent::onRelease() {
    ALOGD("onRelease");
    deInitDecoder();
    if (mFFMPEGInitialized) {
        deInitFFmpeg();
        mFFMPEGInitialized = false;
    }
}

c2_status_t C2FFMPEGAudioDecodeComponent::onFlush_sm() {
    ALOGD("onFlush_sm");
    if (mCtx && avcodec_is_open(mCtx)) {
        // Make sure that the next buffer output does not still
        // depend on fragments from the last one decoded.
        avcodec_flush_buffers(mCtx);
        mEOSSignalled = false;
    }
    return C2_OK;
}

void C2FFMPEGAudioDecodeComponent::process(
    const std::unique_ptr<C2Work> &work,
    const std::shared_ptr<C2BlockPool>& pool
) {
    size_t inSize = 0u;
    bool eos = (work->input.flags & C2FrameData::FLAG_END_OF_STREAM);
    C2ReadView rView = mDummyReadView;
    bool hasInputBuffer = false;
    bool hasFrame = false;

    if (! work->input.buffers.empty()) {
        rView = work->input.buffers[0]->data().linearBlocks().front().map().get();
        inSize = rView.capacity();
        hasInputBuffer = true;
    }

#if DEBUG_FRAMES
    ALOGD("process: input flags=%08x ts=%lu idx=%lu #buf=%lu[%lu] #conf=%lu #info=%lu",
          work->input.flags, work->input.ordinal.timestamp.peeku(), work->input.ordinal.frameIndex.peeku(),
          work->input.buffers.size(), inSize, work->input.configUpdate.size(), work->input.infoBuffers.size());
#endif

    if (mEOSSignalled) {
        ALOGE("process: ignoring work while EOS reached");
        work->workletsProcessed = 0u;
        work->result = C2_BAD_VALUE;
        return;
    }

    if (hasInputBuffer && rView.error()) {
        ALOGE("process: read view map failed err = %d", rView.error());
        work->workletsProcessed = 0u;
        work->result = rView.error();
        return;
    }

    // In all cases the work is marked as completed.
    // NOTE: This has an impact on the drain operation.

    work->result = C2_OK;
    work->worklets.front()->output.flags = (C2FrameData::flags_t)0;
    work->worklets.front()->output.buffers.clear();
    work->worklets.front()->output.ordinal = work->input.ordinal;
    work->workletsProcessed = 1u;

    if (inSize || (eos && mCodecAlreadyOpened)) {
        c2_status_t err = C2_OK;

        if (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) {
            work->result = processCodecConfig(&rView);
            return;
        }

        if (! mCodecAlreadyOpened) {
            err = openDecoder();
            if (err != C2_OK) {
                work->result = err;
                return;
            }
        }

        err = sendInputBuffer(&rView, work->input.ordinal.timestamp.peekll());
        if (err != C2_OK) {
            work->result = err;
            return;
        }

        while (true) {
            hasFrame = false;
            err = receiveFrame(&hasFrame);
            if (err != C2_OK) {
                work->result = err;
                return;
            }

            if (! hasFrame) {
                break;
            }
            // For some WMA files, an invalid packet is received at EOS.
            if (inSize == 0 && eos && mFrame->best_effort_timestamp < 0) {
                ALOGW("process: received an invalid packet at EOS, dropping it.");
                break;
            }

#if DEBUG_FRAMES
            ALOGD("process: got frame pts=%" PRId64 " dts=%" PRId64 " ts=%" PRId64 " - sr=%d, ch=%d, fmt=%s, #=%d",
                  mFrame->pts, mFrame->pkt_dts, mFrame->best_effort_timestamp,
                  mFrame->sample_rate, mFrame->ch_layout.nb_channels, av_get_sample_fmt_name((enum AVSampleFormat)mFrame->format),
                  mFrame->nb_samples);
#endif
            // Always target the sample format on output port. Even if we can trigger a config update
            // for the sample format, Android does not support planar formats, so if the codec uses
            // such a format (e.g. AC3), conversion is needed. Technically we can limit the conversion to
            // planar->packed, but that means Android would also do its own conversion to the wanted
            // format on output port. To avoid double conversion, target directly the wanted format.

            bool needConfigUpdate = (mFrame->sample_rate != mTargetSampleRate ||
                                     mFrame->ch_layout.nb_channels != mTargetChannels);
            bool needResampling = (needConfigUpdate ||
                                   mFrame->format != mTargetSampleFormat ||
                                   // We only support sending audio data to Android in native order.
                                   mFrame->ch_layout.order != AV_CHANNEL_ORDER_NATIVE);

            if (needConfigUpdate) {
                ALOGD("process: audio params changed - sr=%d, ch=%d, fmt=%s => sr=%d, ch=%d, fmt=%s",
                      mTargetSampleRate, mTargetChannels, av_get_sample_fmt_name(mTargetSampleFormat),
                      mFrame->sample_rate, mFrame->ch_layout.nb_channels, av_get_sample_fmt_name(mTargetSampleFormat));

                if (work->worklets.front()->output.buffers.size() > 0) {
                    // Not sure if this would ever happen, nor how to handle it...
                    ALOGW("process: audio params changed with non empty output buffers pending");
                }

                C2StreamSampleRateInfo::output sampleRate(0u, mFrame->sample_rate);
                C2StreamChannelCountInfo::output channelCount(0u, mFrame->ch_layout.nb_channels);
                std::vector<std::unique_ptr<C2SettingResult>> failures;

                err = mIntf->config({ &sampleRate, &channelCount }, C2_MAY_BLOCK, &failures);
                if (err == C2_OK) {
                    work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(sampleRate));
                    work->worklets.front()->output.configUpdate.push_back(C2Param::Copy(channelCount));
                    updateAudioParameters();
                } else {
                    ALOGE("process: config update failed err = %d", err);
                    work->result = C2_CORRUPTED;
                    return;
                }
            }

            std::shared_ptr<C2LinearBlock> block;
            int len = av_samples_get_buffer_size(NULL, mTargetChannels, mFrame->nb_samples, mTargetSampleFormat, 0);

            err = pool->fetchLinearBlock(len, { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE }, &block);
            if (err != C2_OK) {
                ALOGE("process: failed to fetch linear block for #=%d err = %d",
                      mFrame->nb_samples, err);
                work->result = C2_CORRUPTED;
                return;
            }

            C2WriteView wView = block->map().get();

            err = wView.error();
            if (err != C2_OK) {
                ALOGE("process: write view map failed err = %d", err);
                work->result = C2_CORRUPTED;
                return;
            }

            if (needResampling) {
                err = getOutputBuffer(&wView);
                if (err != C2_OK) {
                    work->result = err;
                    return;
                }
            }
            else {
#if DEBUG_FRAMES
                ALOGD("process: no audio conversion needed");
#endif
                memcpy(wView.data(), mFrame->data[0], mFrame->linesize[0]);
            }

            std::shared_ptr<C2Buffer> buffer = createLinearBuffer(std::move(block), 0, len);

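            // Codecs flagged with AV_CODEC_CAP_SUBFRAMES may produce several frames
            // from a single input packet; each decoded frame is therefore delivered
            // in a cloned work item marked FLAG_INCOMPLETE instead of in the
            // original work.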
            if (mCtx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES) {
                auto fillWork = [buffer, &work, this](const std::unique_ptr<C2Work>& clone) {
                    clone->worklets.front()->output.configUpdate = std::move(work->worklets.front()->output.configUpdate);
                    clone->worklets.front()->output.buffers.clear();
                    clone->worklets.front()->output.buffers.push_back(buffer);
                    clone->worklets.front()->output.ordinal = clone->input.ordinal;
                    if (mFrame->best_effort_timestamp != AV_NOPTS_VALUE) {
                        clone->worklets.front()->output.ordinal.timestamp = mFrame->best_effort_timestamp;
                    }
                    clone->worklets.front()->output.flags = C2FrameData::FLAG_INCOMPLETE;
                    clone->workletsProcessed = 1u;
                    clone->result = C2_OK;
                };

#if DEBUG_FRAMES
                ALOGD("process: send subframe buffer ts=%" PRIu64 " idx=%" PRIu64,
                      work->input.ordinal.timestamp.peeku(), work->input.ordinal.frameIndex.peeku());
#endif
                cloneAndSend(work->input.ordinal.frameIndex.peeku(), work, fillWork);
            }
            else {
                work->worklets.front()->output.buffers.push_back(buffer);
                if (mFrame->best_effort_timestamp != AV_NOPTS_VALUE) {
                    work->worklets.front()->output.ordinal.timestamp = mFrame->best_effort_timestamp;
                }
                break;
            }
        }
    }
#if DEBUG_FRAMES
    else {
        ALOGW("process: ignoring empty work");
    }
#endif

    if (eos) {
        mEOSSignalled = true;
        work->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
    }
}

c2_status_t C2FFMPEGAudioDecodeComponent::drain(
    uint32_t drainMode,
    const std::shared_ptr<C2BlockPool>& /* pool */
) {
    ALOGD("drain: mode = %u", drainMode);

    if (drainMode == NO_DRAIN) {
        ALOGW("drain: NO_DRAIN is no-op");
        return C2_OK;
    }
    if (drainMode == DRAIN_CHAIN) {
        ALOGW("drain: DRAIN_CHAIN not supported");
        return C2_OMITTED;
    }
    if (! mCodecAlreadyOpened) {
        ALOGW("drain: codec not opened yet");
        return C2_OK;
    }

    bool hasFrame = false;
    c2_status_t err = C2_OK;

    while (err == C2_OK) {
        hasFrame = false;
        err = sendInputBuffer(NULL, 0);
        if (err == C2_OK) {
            err = receiveFrame(&hasFrame);
            if (hasFrame) {
                ALOGW("drain: skip frame pts=%" PRId64 " dts=%" PRId64 " ts=%" PRId64 " - sr=%d, ch=%d, fmt=%s, #=%d",
                      mFrame->pts, mFrame->pkt_dts, mFrame->best_effort_timestamp,
                      mFrame->sample_rate, mFrame->ch_layout.nb_channels, av_get_sample_fmt_name((enum AVSampleFormat)mFrame->format),
                      mFrame->nb_samples);
            } else {
                err = C2_NOT_FOUND;
            }
        }
    }

    return C2_OK;
}

} // namespace android

(8) Contents of C2FFMPEGAudioDecodeInterface.h

/*
 * Copyright
 */

#ifndef C2_FFMPEG_AUDIO_DECODE_INTERFACE_H
#define C2_FFMPEG_AUDIO_DECODE_INTERFACE_H

#include <SimpleC2Interface.h>
#include "C2FFMPEGCommon.h"
#include "codec_utils.h"

namespace android {

class C2FFMPEGAudioDecodeInterface : public SimpleInterface<void>::BaseParams {
public:
    /**
     * @brief Constructor of C2FFMPEGAudioDecodeInterface.
     *
     * @param componentInfo pointer to the C2FFMPEGComponentInfo describing the component
     * @param helper shared pointer to the parameter reflector helper
    */
    explicit C2FFMPEGAudioDecodeInterface(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2ReflectorHelper>& helper);

    uint32_t getSampleRate() const { return mSampleRate->value; }
    uint32_t getChannelCount() const { return mChannelCount->value; }
    uint32_t getBitrate() const { return mBitrate->value; }
    C2Config::pcm_encoding_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
    /**
     * @brief Get the codec info passed through from the extractor.
     *
     * @return the codec info as a FFMPEGAudioCodecInfo*, or nullptr if not set
    */
    const FFMPEGAudioCodecInfo* getCodecInfo() const;

private:
    /**
     * @brief Setter for the raw codec data parameter.
     *
     * @param mayBlock whether the call may block
     * @param me C2P input parameter carrying the raw codec data
     * @return C2R::Ok()
    */
    static C2R CodecSetter(
        bool mayBlock, C2P<C2StreamRawCodecDataInfo::input>& me);

private:
    std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
    std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
    std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
    std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
    std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
    std::shared_ptr<C2StreamRawCodecDataInfo::input> mRawCodecData;
};

} // namespace android

#endif // C2_FFMPEG_AUDIO_DECODE_INTERFACE_H

(9) Contents of C2FFMPEGAudioDecodeInterface.cpp

/*
 * Copyright
 */

#define LOG_NDEBUG 0
#define LOG_TAG "C2FFMPEGAudioDecodeInterface"
#include <log/log.h>

#include <media/stagefright/foundation/MediaDefs.h>
#include "C2FFMPEGAudioDecodeInterface.h"

#define MAX_CHANNEL_COUNT 8
#define INPUT_MAX_BUFFER_SIZE_WMA (32*1024)
#define INPUT_MAX_BUFFER_SIZE_APE (4*1024*1024)

namespace android {

constexpr size_t kDefaultOutputPortDelay = 2;
constexpr size_t kMaxOutputPortDelay = 16;

C2FFMPEGAudioDecodeInterface::C2FFMPEGAudioDecodeInterface(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2ReflectorHelper>& helper)
    : SimpleInterface<void>::BaseParams(
        helper,
        componentInfo->name,
        C2Component::KIND_DECODER,
        C2Component::DOMAIN_AUDIO,
        componentInfo->mediaType) {
    noPrivateBuffers();
    noInputReferences();
    noOutputReferences();
    noInputLatency();
    noTimeStretch();
    setDerivedInstance(this);

    ALOGD("C2FFMPEGAudioDecodeInterface::C2FFMPEGAudioDecodeInterface");

    addParameter(
            DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
            .withDefault(new C2PortActualDelayTuning::output(kDefaultOutputPortDelay))
            .withFields({C2F(mActualOutputDelay, value).inRange(0, kMaxOutputPortDelay)})
            .withSetter(Setter<decltype(*mActualOutputDelay)>::StrictValueWithNoDeps)
            .build());

    addParameter(
            DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
            .withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
            .withFields({C2F(mSampleRate, value).oneOf({
                7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000,
                44100, 48000, 64000, 88200, 96000, 192000
            })})
            .withSetter(Setter<decltype(*mSampleRate)>::NonStrictValueWithNoDeps)
            .build());

    addParameter(
            DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
            .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
            .withFields({C2F(mBitrate, value).inRange(8000, 320000)})
            .withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
            .build());

    addParameter(
            DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
            .withDefault(new C2StreamChannelCountInfo::output(0u, 2))
            .withFields({C2F(mChannelCount, value).inRange(1, MAX_CHANNEL_COUNT)})
            .withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
            .build());

    addParameter(
            DefineParam(mPcmEncodingInfo, C2_PARAMKEY_PCM_ENCODING)
            .withDefault(new C2StreamPcmEncodingInfo::output(0u, C2Config::PCM_16))
            .withFields({C2F(mPcmEncodingInfo, value).oneOf({
                 C2Config::PCM_16,
                 C2Config::PCM_8,
                 C2Config::PCM_FLOAT,
                 })
            })
            .withSetter((Setter<decltype(*mPcmEncodingInfo)>::StrictValueWithNoDeps))
            .build());

    if (strcasecmp(componentInfo->mediaType, MEDIA_MIMETYPE_AUDIO_WMA) == 0) {
        addParameter(
                DefineParam(mInputMaxBufSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
                .withConstValue(new C2StreamMaxBufferSizeInfo::input(0u, INPUT_MAX_BUFFER_SIZE_WMA))
                .build());
    } else if (strcasecmp(componentInfo->mediaType, MEDIA_MIMETYPE_AUDIO_APE) == 0) {
        addParameter(
                DefineParam(mInputMaxBufSize, C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE)
                .withConstValue(new C2StreamMaxBufferSizeInfo::input(0u, INPUT_MAX_BUFFER_SIZE_APE))
                .build());
    }

    addParameter(
            DefineParam(mRawCodecData, C2_PARAMKEY_RAW_CODEC_DATA)
            .withDefault(C2StreamRawCodecDataInfo::input::AllocShared(0, 0u))
            .withFields({C2F(mRawCodecData, m.value)})
            .withSetter(CodecSetter)
            .build());
}

C2R C2FFMPEGAudioDecodeInterface::CodecSetter(
        bool mayBlock __unused, C2P<C2StreamRawCodecDataInfo::input>& me __unused) {
    return C2R::Ok();
}

const FFMPEGAudioCodecInfo* C2FFMPEGAudioDecodeInterface::getCodecInfo() const {
    if (mRawCodecData->flexCount() == sizeof(FFMPEGAudioCodecInfo)) {
        return (const FFMPEGAudioCodecInfo*)mRawCodecData->m.value;
    }
    return nullptr;
}

} // namespace android

(10) Contents of C2FFMPEGCommon.h

/*
 * Copyright
 */

#ifndef C2_FFMPEG_COMPONENT_COMMON_H
#define C2_FFMPEG_COMPONENT_COMMON_H

#include <media/stagefright/foundation/MediaDefs.h>
#include "ffmpeg_utils.h"

namespace android {

typedef struct {
    const char* name;
    const char* mediaType;
    enum AVCodecID codecID;
} C2FFMPEGComponentInfo;

typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexRawCodecData> C2StreamRawCodecDataInfo;
constexpr char C2_PARAMKEY_RAW_CODEC_DATA[] = "coded.raw-codec-data";

} // namespace android

#endif // C2_FFMPEG_COMPONENT_COMMON_H
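
ffmpeg_utils.h is not reproduced in this article. Judging from the fields read in C2FFMPEGAudioDecodeComponent::initDecoder() and returned by getCodecInfo(), the audio codec-info struct presumably carries at least the following; this is a sketch reconstructed from usage, not the actual definition:

// Hypothetical sketch of FFMPEGAudioCodecInfo, reconstructed from the fields
// the decoder reads; the authoritative definition lives in ffmpeg_utils.h.
typedef struct {
    int codec_id;              // cast to enum AVCodecID before use
    int bits_per_coded_sample; // copied to AVCodecContext::bits_per_coded_sample
    int block_align;           // copied to AVCodecContext::block_align
} FFMPEGAudioCodecInfo;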

(11) Contents of C2FFMPEGVideoDecodeComponent.h

/*
 * Copyright
 */

#ifndef C2_FFMPEG_VIDEO_DECODE_COMPONENT_H
#define C2_FFMPEG_VIDEO_DECODE_COMPONENT_H

#include <deque>
#include <utility>
#include <SimpleC2Component.h>
#include "C2FFMPEGCommon.h"
#include "C2FFMPEGVideoDecodeInterface.h"

namespace android {

typedef std::pair<uint64_t, uint64_t> PendingWork;

class C2FFMPEGVideoDecodeComponent : public SimpleC2Component {
public:
    /**
     * @brief Constructor of C2FFMPEGVideoDecodeComponent.
     */
    explicit C2FFMPEGVideoDecodeComponent(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2FFMPEGVideoDecodeInterface>& intf);
    /**
     * @brief Destructor of C2FFMPEGVideoDecodeComponent.
     */
    virtual ~C2FFMPEGVideoDecodeComponent();

protected:
    /**
     * @brief Log the init call and initialize the decoder via initDecoder().
     *
     * @return C2_OK on success
    */
    c2_status_t onInit() override;
    /**
     * @brief Log the stop call.
     *
     * @return C2_OK on success
    */
    c2_status_t onStop() override;
    /**
     * @brief Log the reset call, then call deInitDecoder() followed by initDecoder().
    */
    void onReset() override;
    /**
     * @brief Log the release call and free the decoder resources.
    */
    void onRelease() override;
    /**
     * @brief Flush the decoder state and log the operation.
     *
     * @return C2_OK when the flush succeeded
    */
    c2_status_t onFlush_sm() override;
    /**
     * @brief Decode video.
    */
    void process(
        const std::unique_ptr<C2Work> &work,
        const std::shared_ptr<C2BlockPool> &pool) override;
    /**
     * @brief Drain the video decoder.
     *
     * @return the resulting status
    */
    c2_status_t drain(
        uint32_t drainMode,
        const std::shared_ptr<C2BlockPool> &pool) override;

private:
    /**
     * @brief 初始化解码器
     *
     * @return 成功返回 C2_OK
    */
    c2_status_t initDecoder();
    /**
     * @brief 打开解码器,如果解码器已经打开,则return OK,否则,查找并初始化指定编解码器,配置解码器参数,打开解码器
     *
     * @return C2_OK   打开解码器成功
     *         C2_NOT_FOUND   未找到指定编解码器
     *         C2_NO_INIT   解码器初始化失败
     *         C2_NO_MEMORY   内存分配失败
    */
    c2_status_t openDecoder();
    /**
     * @brief 反初始化解码器,释放资源并关闭解码器
    */
    void deInitDecoder();
    /**
     * @brief 处理解码器配置信息
     *
     * @param inBuffer 包含解码器配置信息的数据缓冲区
     * @return 处理成功返回C2_OK
    */
    c2_status_t processCodecConfig(C2ReadView* inBuffer);
    /**
     * @brief 发送输入数据给解码器进行解码
     *
     * @param inBuffer 输入数据缓冲区,包含待解码的音频数据
     * @param timestamp 输入数据的时间戳
     * @return 成功返回 C2_OK
    */
    c2_status_t sendInputBuffer(C2ReadView* inBuffer, int64_t timestamp);
    /**
     * @brief 接收解码器输出的音频帧
     *
     * @param hasFrame 指示是否成功接收到音频帧的标志
     * @return 成功返回 C2_OK
    */
    c2_status_t receiveFrame(bool* hasPicture);
    /**
     * @brief 获取输出缓冲区并执行音频转换
     *
     * @param outBuffer 指向输出缓冲区的指针
     * @return 成功返回 C2_OK
    */
    c2_status_t getOutputBuffer(C2GraphicView* outBuffer);
    /**
     * @brief 输出视频帧并处理相关逻辑
     *
     * @param work C2Work 指针
     * @param pool C2BlockPool 指针
     * @return 返回处理结果状态
    */
    c2_status_t outputFrame(
        const std::unique_ptr<C2Work> &work,
        const std::shared_ptr<C2BlockPool> &pool);
    /**
     * @brief Adds a work item to the pending-work queue.
     *
     * @param work pointer to the work item
     */
    void pushPendingWork(const std::unique_ptr<C2Work>& work);
    /**
     * @brief Removes a work item from the pending-work queue.
     *
     * @param work pointer to the work item
     */
    void popPendingWork(const std::unique_ptr<C2Work>& work);
    /**
     * @brief Removes all queued work items whose timestamp is earlier than
     *        that of the given work item.
     *
     * @param work pointer to the work item
     */
    void prunePendingWorksUntil(const std::unique_ptr<C2Work>& work);

private:
    const C2FFMPEGComponentInfo* mInfo;
    std::shared_ptr<C2FFMPEGVideoDecodeInterface> mIntf;
    enum AVCodecID mCodecID;
    AVCodecContext* mCtx;
    struct SwsContext *mImgConvertCtx;
    AVFrame* mFrame;
    AVPacket* mPacket;
    bool mFFMPEGInitialized;
    bool mCodecAlreadyOpened;
    bool mExtradataReady;
    bool mEOSSignalled;
    std::deque<PendingWork> mPendingWorkQueue;
};

} // namespace android

#endif // C2_FFMPEG_VIDEO_DECODE_COMPONENT_H

(12) C2FFMPEGVideoDecodeComponent.cpp contents

/*
 * Copyright
 */

#define LOG_NDEBUG 0
#define LOG_TAG "C2FFMPEGVideoDecodeComponent"
#include <android-base/properties.h>
#include <log/log.h>
#include <algorithm>

#include <SimpleC2Interface.h>
#include "C2FFMPEGVideoDecodeComponent.h"

#define DEBUG_FRAMES 0
#define DEBUG_WORKQUEUE 0
#define DEBUG_EXTRADATA 0

namespace android {

C2FFMPEGVideoDecodeComponent::C2FFMPEGVideoDecodeComponent(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2FFMPEGVideoDecodeInterface>& intf)
    : SimpleC2Component(std::make_shared<SimpleInterface<C2FFMPEGVideoDecodeInterface>>(componentInfo->name, 0, intf)),
      mInfo(componentInfo),
      mIntf(intf),
      mCodecID(componentInfo->codecID),
      mCtx(NULL),
      mImgConvertCtx(NULL),
      mFrame(NULL),
      mPacket(NULL),
      mFFMPEGInitialized(false),
      mCodecAlreadyOpened(false),
      mExtradataReady(false),
      mEOSSignalled(false) {
    ALOGD("C2FFMPEGVideoDecodeComponent::C2FFMPEGVideoDecodeComponent mediaType = %s", componentInfo->mediaType);
}

C2FFMPEGVideoDecodeComponent::~C2FFMPEGVideoDecodeComponent() {
    ALOGD("~C2FFMPEGVideoDecodeComponent: mCtx = %p", mCtx);
    onRelease();
}

c2_status_t C2FFMPEGVideoDecodeComponent::initDecoder() {
    if (! mFFMPEGInitialized) {
        if (initFFmpeg() != C2_OK) {
            ALOGE("initDecoder: FFMPEG initialization failed.");
            return C2_NO_INIT;
        }
        mFFMPEGInitialized = true;
    }

    mCtx = avcodec_alloc_context3(NULL);
    if (! mCtx) {
        ALOGE("initDecoder: avcodec_alloc_context failed.");
        return C2_NO_MEMORY;
    }

    C2StreamPictureSizeInfo::output size(0u, 320, 240);
    c2_status_t err = mIntf->query({ &size }, {}, C2_DONT_BLOCK, nullptr);
    if (err != C2_OK) {
        ALOGE("initDecoder: cannot query picture size, err = %d", err);
    }

    mCtx->codec_type = AVMEDIA_TYPE_VIDEO;
    mCtx->codec_id = mCodecID;
    mCtx->extradata_size = 0;
    mCtx->extradata = NULL;
    mCtx->width = size.width;
    mCtx->height = size.height;

    const FFMPEGVideoCodecInfo* codecInfo = mIntf->getCodecInfo();

    if (codecInfo) {
        ALOGD("initDecoder: use codec info from extractor");
        mCtx->codec_id = (enum AVCodecID)codecInfo->codec_id;
    }

    ALOGD("initDecoder: %p [%s], %d x %d, %s",
          mCtx, avcodec_get_name(mCtx->codec_id), size.width, size.height, mInfo->mediaType);

    return C2_OK;
}

c2_status_t C2FFMPEGVideoDecodeComponent::openDecoder() {
    if (mCodecAlreadyOpened) {
        return C2_OK;
    }

    // Can't change extradata after opening the decoder.
#if DEBUG_EXTRADATA
    ALOGD("openDecoder: extradata_size = %d", mCtx->extradata_size);
#endif
    mExtradataReady = true;

    // Find decoder again as codec_id may have changed.
    mCtx->codec = avcodec_find_decoder(mCtx->codec_id);
    if (! mCtx->codec) {
        ALOGE("openDecoder: ffmpeg video decoder failed to find codec %d", mCtx->codec_id);
        return C2_NOT_FOUND;
    }

    // Configure decoder.
    mCtx->workaround_bugs   = 1;
    mCtx->idct_algo         = 0;
    mCtx->skip_frame        = AVDISCARD_DEFAULT;
    mCtx->skip_idct         = AVDISCARD_DEFAULT;
    mCtx->skip_loop_filter  = AVDISCARD_DEFAULT;
    mCtx->error_concealment = 3;
    mCtx->thread_count      = base::GetIntProperty("debug.ffmpeg-codec2.threads", VIDEO_DECODE_THREAD_COUNT);

    if (base::GetBoolProperty("debug.ffmpeg-codec2.fast", false)) {
        mCtx->flags2 |= AV_CODEC_FLAG2_FAST;
    }

    ALOGD("openDecoder: opening ffmpeg decoder(%s): threads = %d, hw = %s",
          avcodec_get_name(mCtx->codec_id), mCtx->thread_count, mCtx->hw_device_ctx ? "yes" : "no");

    int err = avcodec_open2(mCtx, mCtx->codec, NULL);
    if (err < 0) {
        ALOGE("openDecoder: ffmpeg video decoder failed to initialize. (%s)", av_err2str(err));
        return C2_NO_INIT;
    }
    mCodecAlreadyOpened = true;

    ALOGD("openDecoder: open ffmpeg video decoder(%s) success, caps = %08x",
          avcodec_get_name(mCtx->codec_id), mCtx->codec->capabilities);

    mFrame = av_frame_alloc();
    if (! mFrame) {
        ALOGE("openDecoder: oom for video frame");
        return C2_NO_MEMORY;
    }

    return C2_OK;
}

void C2FFMPEGVideoDecodeComponent::deInitDecoder() {
    ALOGD("%p deInitDecoder: %p", this, mCtx);
    if (mCtx) {
        if (avcodec_is_open(mCtx)) {
            avcodec_flush_buffers(mCtx);
        }
        if (mCtx->extradata) {
            av_free(mCtx->extradata);
            mCtx->extradata = NULL;
            mCtx->extradata_size = 0;
        }
        if (mCodecAlreadyOpened) {
            avcodec_close(mCtx);
            mCodecAlreadyOpened = false;
        }
        av_freep(&mCtx);
    }
    if (mFrame) {
        av_frame_free(&mFrame);
        mFrame = NULL;
    }
    if (mPacket) {
        av_packet_free(&mPacket);
        mPacket = NULL;
    }
    if (mImgConvertCtx) {
        sws_freeContext(mImgConvertCtx);
        mImgConvertCtx = NULL;
    }
    mEOSSignalled = false;
    mExtradataReady = false;
    mPendingWorkQueue.clear();
}

c2_status_t C2FFMPEGVideoDecodeComponent::processCodecConfig(C2ReadView* inBuffer) {
    int orig_extradata_size = mCtx->extradata_size;
    int add_extradata_size = inBuffer->capacity();

#if DEBUG_EXTRADATA
    ALOGD("processCodecConfig: add = %u, current = %d", add_extradata_size, orig_extradata_size);
#endif
    if (! mExtradataReady) {
        mCtx->extradata_size += add_extradata_size;
        mCtx->extradata = (uint8_t *) realloc(mCtx->extradata, mCtx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (! mCtx->extradata) {
            ALOGE("processCodecConfig: ffmpeg video decoder failed to alloc extradata memory.");
            return C2_NO_MEMORY;
        }
        memcpy(mCtx->extradata + orig_extradata_size, inBuffer->data(), add_extradata_size);
        memset(mCtx->extradata + mCtx->extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    }
    else {
        ALOGW("processCodecConfig: decoder is already opened, ignoring...");
    }

    return C2_OK;
}

c2_status_t C2FFMPEGVideoDecodeComponent::sendInputBuffer(
        C2ReadView *inBuffer, int64_t timestamp) {
    if (!mPacket) {
        mPacket = av_packet_alloc();
        if (!mPacket) {
            ALOGE("sendInputBuffer: oom for video packet");
            return C2_NO_MEMORY;
        }
    }

    mPacket->data = inBuffer ? const_cast<uint8_t *>(inBuffer->data()) : NULL;
    mPacket->size = inBuffer ? inBuffer->capacity() : 0;
    mPacket->pts = timestamp;
    mPacket->dts = AV_NOPTS_VALUE;

    int err = avcodec_send_packet(mCtx, mPacket);
    av_packet_unref(mPacket);

    if (err < 0) {
        ALOGE("sendInputBuffer: failed to send data (%d) to decoder: %s (%08x)",
              inBuffer ? inBuffer->capacity() : 0, av_err2str(err), err);
        if (err == AVERROR(EAGAIN)) {
            // Frames must be read first, notify main decoding loop.
            ALOGD("sendInputBuffer: returning C2_BAD_STATE");
            return C2_BAD_STATE;
        }
        // Otherwise don't send error to client.
    }

    return C2_OK;
}

c2_status_t C2FFMPEGVideoDecodeComponent::receiveFrame(bool* hasPicture) {
    int err = avcodec_receive_frame(mCtx, mFrame);

    *hasPicture = false;
    if (err == 0) {
        *hasPicture = true;
    } else if (err != AVERROR(EAGAIN) && err != AVERROR_EOF) {
        ALOGE("receiveFrame: failed to receive frame from decoder err = %d", err);
        // Don't report error to client.
    }

    return C2_OK;
}

c2_status_t C2FFMPEGVideoDecodeComponent::getOutputBuffer(C2GraphicView* outBuffer) {
    uint8_t* data[4];
    int linesize[4];
    C2PlanarLayout layout = outBuffer->layout();
    struct SwsContext* currentImgConvertCtx = mImgConvertCtx;

    data[0] = outBuffer->data()[C2PlanarLayout::PLANE_Y];
    data[1] = outBuffer->data()[C2PlanarLayout::PLANE_U];
    data[2] = outBuffer->data()[C2PlanarLayout::PLANE_V];
    linesize[0] = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
    linesize[1] = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
    linesize[2] = layout.planes[C2PlanarLayout::PLANE_V].rowInc;

    mImgConvertCtx = sws_getCachedContext(currentImgConvertCtx,
           mFrame->width, mFrame->height, (AVPixelFormat)mFrame->format,
           mFrame->width, mFrame->height, AV_PIX_FMT_YUV420P,
           SWS_BICUBIC, NULL, NULL, NULL);
    if (mImgConvertCtx && mImgConvertCtx != currentImgConvertCtx) {
        ALOGD("getOutputBuffer: created video converter - %s => %s",
              av_get_pix_fmt_name((AVPixelFormat)mFrame->format), av_get_pix_fmt_name(AV_PIX_FMT_YUV420P));

    } else if (! mImgConvertCtx) {
        ALOGE("getOutputBuffer: cannot initialize the conversion context");
        return C2_NO_MEMORY;
    }

    sws_scale(mImgConvertCtx, mFrame->data, mFrame->linesize,
              0, mFrame->height, data, linesize);

    return C2_OK;
}

static void fillEmptyWork(const std::unique_ptr<C2Work>& work) {
    work->worklets.front()->output.flags =
        (C2FrameData::flags_t)(work->input.flags & C2FrameData::FLAG_END_OF_STREAM);
    work->worklets.front()->output.buffers.clear();
    work->worklets.front()->output.ordinal = work->input.ordinal;
    work->workletsProcessed = 1u;
    work->result = C2_OK;
#if DEBUG_WORKQUEUE
    ALOGD("WorkQueue: drop idx=%" PRIu64 ", ts=%" PRIu64,
          work->input.ordinal.frameIndex.peeku(), work->input.ordinal.timestamp.peeku());
#endif
}

static bool comparePendingWork(const PendingWork& w1, const PendingWork& w2) {
    return w1.second < w2.second;
}

void C2FFMPEGVideoDecodeComponent::pushPendingWork(const std::unique_ptr<C2Work>& work) {
    uint32_t outputDelay = mIntf->getOutputDelay();

    if (mPendingWorkQueue.size() >= outputDelay) {
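        // NOTE: newOutputDelay is initialized to outputDelay, so the config-update
        // branch below never fires as written; it is kept as a hook for growing the
        // output delay when the queue overflows. Either way the oldest pending work
        // is finished empty to make room.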
        uint32_t newOutputDelay = outputDelay;
        std::vector<std::unique_ptr<C2Param>> configUpdate;

        if (newOutputDelay != outputDelay) {
            C2PortActualDelayTuning::output delay(newOutputDelay);
            std::vector<std::unique_ptr<C2SettingResult>> failures;
            int err;

            err = mIntf->config({ &delay }, C2_MAY_BLOCK, &failures);
            if (err == C2_OK) {
                ALOGD("WorkQueue: queue full, output delay set to %u", newOutputDelay);
                configUpdate.push_back(C2Param::Copy(delay));
            } else {
                ALOGE("WorkQueue: output delay update to %u failed err = %d",
                      newOutputDelay, err);
            }
        }

        auto fillEmptyWorkWithConfigUpdate = [&configUpdate](const std::unique_ptr<C2Work>& work) {
            fillEmptyWork(work);
            work->worklets.front()->output.configUpdate = std::move(configUpdate);
        };

        finish(mPendingWorkQueue.front().first, fillEmptyWorkWithConfigUpdate);
        mPendingWorkQueue.pop_front();
    }
#if DEBUG_WORKQUEUE
    ALOGD("WorkQueue: push idx=%" PRIu64 ", ts=%" PRIu64,
          work->input.ordinal.frameIndex.peeku(), work->input.ordinal.timestamp.peeku());
#endif
    mPendingWorkQueue.push_back(PendingWork(work->input.ordinal.frameIndex.peeku(),
                                            work->input.ordinal.timestamp.peeku()));
    std::sort(mPendingWorkQueue.begin(), mPendingWorkQueue.end(), comparePendingWork);
}

void C2FFMPEGVideoDecodeComponent::popPendingWork(const std::unique_ptr<C2Work>& work) {
    uint64_t index = work->input.ordinal.frameIndex.peeku();
    auto it = std::find_if(mPendingWorkQueue.begin(), mPendingWorkQueue.end(),
                           [index](const PendingWork& pWork) { return index == pWork.first; });

#if DEBUG_WORKQUEUE
    ALOGD("WorkQueue: pop idx=%" PRIu64 ", ts=%" PRIu64,
          work->input.ordinal.frameIndex.peeku(), work->input.ordinal.timestamp.peeku());
#endif

    if (it != mPendingWorkQueue.end()) {
        mPendingWorkQueue.erase(it);
    }
#if DEBUG_WORKQUEUE
    else {
        ALOGD("WorkQueue: pop work not found idx=%" PRIu64 ", ts=%" PRIu64,
              work->input.ordinal.frameIndex.peeku(), work->input.ordinal.timestamp.peeku());
    }
#endif
    prunePendingWorksUntil(work);
}

void C2FFMPEGVideoDecodeComponent::prunePendingWorksUntil(const std::unique_ptr<C2Work>& work) {
#if DEBUG_WORKQUEUE
    ALOGD("WorkQueue: prune until idx=%" PRIu64 ", ts=%" PRIu64,
          work->input.ordinal.frameIndex.peeku(), work->input.ordinal.timestamp.peeku());
#endif
    // Drop all works with a PTS earlier than provided argument.
    while (mPendingWorkQueue.size() > 0 &&
           mPendingWorkQueue.front().second < work->input.ordinal.timestamp.peeku()) {
        finish(mPendingWorkQueue.front().first, fillEmptyWork);
        mPendingWorkQueue.pop_front();
    }
}

c2_status_t C2FFMPEGVideoDecodeComponent::onInit() {
    ALOGD("onInit");
    return initDecoder();
}

c2_status_t C2FFMPEGVideoDecodeComponent::onStop() {
    ALOGD("onStop");
    return C2_OK;
}

void C2FFMPEGVideoDecodeComponent::onReset() {
    ALOGD("onReset");
    deInitDecoder();
    initDecoder();
}

void C2FFMPEGVideoDecodeComponent::onRelease() {
    ALOGD("onRelease");
    deInitDecoder();
    if (mFFMPEGInitialized) {
        deInitFFmpeg();
        mFFMPEGInitialized = false;
    }
}

c2_status_t C2FFMPEGVideoDecodeComponent::onFlush_sm() {
    ALOGD("onFlush_sm");
    if (mCtx && avcodec_is_open(mCtx)) {
        // Make sure that the next buffer output does not still
        // depend on fragments from the last one decoded.
        avcodec_flush_buffers(mCtx);
        mEOSSignalled = false;
    }
    return C2_OK;
}

c2_status_t C2FFMPEGVideoDecodeComponent::outputFrame(
    const std::unique_ptr<C2Work>& work,
    const std::shared_ptr<C2BlockPool> &pool
) {
    c2_status_t err;
    std::vector<std::unique_ptr<C2Param>> configUpdate;

#if DEBUG_FRAMES
    ALOGD("outputFrame: pts=%" PRId64 " dts=%" PRId64 " ts=%" PRId64 " - %d x %d (%x)",
          mFrame->pts, mFrame->pkt_dts, mFrame->best_effort_timestamp, mFrame->width, mFrame->height, mFrame->format);
#endif

    if (mFrame->width != mIntf->getWidth() || mFrame->height != mIntf->getHeight()) {
        ALOGD("outputFrame: video params changed - %d x %d (%x)", mFrame->width, mFrame->height, mFrame->format);

        C2StreamPictureSizeInfo::output size(0u, mFrame->width, mFrame->height);
        std::vector<std::unique_ptr<C2SettingResult>> failures;

        err = mIntf->config({ &size }, C2_MAY_BLOCK, &failures);
        if (err == C2_OK) {
            configUpdate.push_back(C2Param::Copy(size));
            mCtx->width = mFrame->width;
            mCtx->height = mFrame->height;
        } else {
            ALOGE("outputFrame: config update failed err = %d", err);
            return C2_CORRUPTED;
        }
    }

    std::shared_ptr<C2GraphicBlock> block;

    err = pool->fetchGraphicBlock(mFrame->width, mFrame->height, HAL_PIXEL_FORMAT_YV12,
                                  { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE }, &block);

    if (err != C2_OK) {
        ALOGE("outputFrame: failed to fetch graphic block %d x %d (%x) err = %d",
              mFrame->width, mFrame->height, HAL_PIXEL_FORMAT_YV12, err);
        return C2_CORRUPTED;
    }

    C2GraphicView wView = block->map().get();

    err = wView.error();
    if (err != C2_OK) {
        ALOGE("outputFrame: graphic view map failed err = %d", err);
        return C2_CORRUPTED;
    }

    err = getOutputBuffer(&wView);
    if (err == C2_OK) {
        std::shared_ptr<C2Buffer> buffer = createGraphicBuffer(std::move(block), C2Rect(mFrame->width, mFrame->height));

        buffer->setInfo(mIntf->getPixelFormatInfo());

        if (work && c2_cntr64_t(mFrame->best_effort_timestamp) == work->input.ordinal.frameIndex) {
            prunePendingWorksUntil(work);
            work->worklets.front()->output.configUpdate = std::move(configUpdate);
            work->worklets.front()->output.buffers.clear();
            work->worklets.front()->output.buffers.push_back(buffer);
            work->worklets.front()->output.ordinal = work->input.ordinal;
            work->workletsProcessed = 1u;
            work->result = C2_OK;
        } else {
            auto fillWork = [buffer, &configUpdate, this](const std::unique_ptr<C2Work>& work) {
                popPendingWork(work);
                work->worklets.front()->output.configUpdate = std::move(configUpdate);
                work->worklets.front()->output.flags = (C2FrameData::flags_t)0;
                work->worklets.front()->output.buffers.clear();
                work->worklets.front()->output.buffers.push_back(buffer);
                work->worklets.front()->output.ordinal = work->input.ordinal;
                work->workletsProcessed = 1u;
                work->result = C2_OK;
#if DEBUG_FRAMES
                ALOGD("outputFrame: work(finish) idx=%" PRIu64 ", processed=%u, result=%d",
                      work->input.ordinal.frameIndex.peeku(), work->workletsProcessed, work->result);
#endif
            };

            finish(mFrame->best_effort_timestamp, fillWork);
        }
    } else {
        return err;
    }

    return C2_OK;
}

void C2FFMPEGVideoDecodeComponent::process(
    const std::unique_ptr<C2Work> &work,
    const std::shared_ptr<C2BlockPool> &pool
) {
    size_t inSize = 0u;
    bool eos = (work->input.flags & C2FrameData::FLAG_END_OF_STREAM);
    C2ReadView rView = mDummyReadView;
    bool hasInputBuffer = false;

    if (! work->input.buffers.empty()) {
        rView = work->input.buffers[0]->data().linearBlocks().front().map().get();
        inSize = rView.capacity();
        hasInputBuffer = true;
    }

#if DEBUG_FRAMES
    ALOGD("process: input flags=%08x ts=%" PRIu64 " idx=%" PRIu64 " #buf=%zu[%zu] #conf=%zu #info=%zu",
          work->input.flags, work->input.ordinal.timestamp.peeku(), work->input.ordinal.frameIndex.peeku(),
          work->input.buffers.size(), inSize, work->input.configUpdate.size(), work->input.infoBuffers.size());
#endif

    if (mEOSSignalled) {
        ALOGE("process: ignoring work while EOS reached");
        work->workletsProcessed = 0u;
        work->result = C2_BAD_VALUE;
        return;
    }

    if (hasInputBuffer && rView.error()) {
        ALOGE("process: read view map failed err = %d", rView.error());
        work->workletsProcessed = 0u;
        work->result = rView.error();
        return;
    }

    // In all cases the work is marked as completed.
    //
    // There is not always a 1:1 mapping between input and output frames, in particular for
    // interlaced content. Keeping the corresponding worklets in the queue quickly fills it
    // in and stalls the decoder. But there's no obvious mechanism to determine, from
    // FFMPEG API, whether a given packet will produce an output frame and the worklet should
    // be kept around so it can be completed when the frame is produced.
    //
    // NOTE: This has an impact on the drain operation.

    work->result = C2_OK;
    work->worklets.front()->output.flags = (C2FrameData::flags_t)0;
    work->workletsProcessed = 0u;

    if (inSize || (eos && mCodecAlreadyOpened)) {
        c2_status_t err = C2_OK;

        if (work->input.flags & C2FrameData::FLAG_CODEC_CONFIG) {
            work->workletsProcessed = 1u;
            work->result = processCodecConfig(&rView);
            return;
        }

        if (! mCodecAlreadyOpened) {
            err = openDecoder();
            if (err != C2_OK) {
                work->workletsProcessed = 1u;
                work->result = err;
                return;
            }
        }

        bool inputConsumed = false;
        bool outputAvailable = true;
        bool hasPicture = false;
#if DEBUG_FRAMES
        int outputFrameCount = 0;
#endif

        while (!inputConsumed || outputAvailable) {
            if (!inputConsumed) {
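                // NOTE: the frame index (not the presentation time) is passed as the
                // packet pts, so decoded frames can be matched back to their
                // originating work in outputFrame() via best_effort_timestamp.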
                err = sendInputBuffer(&rView, work->input.ordinal.frameIndex.peekll());
                if (err == C2_OK) {
                    inputConsumed = true;
                    outputAvailable = true;
                    work->input.buffers.clear();
                } else if (err != C2_BAD_STATE) {
                    work->workletsProcessed = 1u;
                    work->result = err;
                    return;
                }
            }

            if (outputAvailable) {
                hasPicture = false;
                err = receiveFrame(&hasPicture);
                if (err != C2_OK) {
                    work->workletsProcessed = 1u;
                    work->result = err;
                    return;
                }

                if (hasPicture) {
                    err = outputFrame(work, pool);
                    if (err != C2_OK) {
                        work->workletsProcessed = 1u;
                        work->result = err;
                        return;
                    }
#if DEBUG_FRAMES
                    else {
                        outputFrameCount++;
                    }
#endif
                }
                else {
#if DEBUG_FRAMES
                    if (!outputFrameCount) {
                        ALOGD("process: no frame");
                    }
#endif
                    outputAvailable = false;
                }
            }
        }
    }
#if DEBUG_FRAMES
    else {
        ALOGD("process: empty work");
    }
#endif

    if (eos) {
        mEOSSignalled = true;
        work->worklets.front()->output.flags = C2FrameData::FLAG_END_OF_STREAM;
        work->workletsProcessed = 1u;
    }

    if (work->workletsProcessed == 0u) {
        pushPendingWork(work);
    }

#if DEBUG_FRAMES
    ALOGD("process: work(end) idx=%" PRIu64 ", processed=%u, result=%d",
          work->input.ordinal.frameIndex.peeku(), work->workletsProcessed, work->result);
#endif
}

c2_status_t C2FFMPEGVideoDecodeComponent::drain(
    uint32_t drainMode,
    const std::shared_ptr<C2BlockPool>& pool
) {
    ALOGD("drain: mode = %u", drainMode);

    if (drainMode == NO_DRAIN) {
        ALOGW("drain: NO_DRAIN is no-op");
        return C2_OK;
    }
    if (drainMode == DRAIN_CHAIN) {
        ALOGW("drain: DRAIN_CHAIN not supported");
        return C2_OMITTED;
    }
    if (! mCodecAlreadyOpened) {
        ALOGW("drain: codec not opened yet");
        return C2_OK;
    }

    bool hasPicture = false;
    c2_status_t err = C2_OK;

    err = sendInputBuffer(NULL, 0);
    while (err == C2_OK) {
        hasPicture = false;
        err = receiveFrame(&hasPicture);
        if (hasPicture) {
            // Ignore errors at this point, just drain the decoder.
            outputFrame(nullptr, pool);
        } else {
            err = C2_NOT_FOUND;
        }
    }

    return C2_OK;
}

} // namespace android
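
As the long comment in process() explains, a packet does not always yield exactly one frame, so the component keeps calling avcodec_receive_frame() until the decoder asks for more input before the next packet is sent. A stripped-down sketch of that send/receive pattern using plain FFmpeg calls (no Codec2 plumbing; error handling reduced to the cases the component distinguishes):

extern "C" {
#include <libavcodec/avcodec.h>
}

// pkt == NULL acts as a flush request, mirroring sendInputBuffer(NULL, 0) in drain().
static void decodeOnePacket(AVCodecContext* ctx, AVPacket* pkt, AVFrame* frame) {
    int err = avcodec_send_packet(ctx, pkt);
    if (err < 0 && err != AVERROR(EAGAIN)) {
        return; // hard error; EAGAIN instead means "drain frames, then resend"
    }
    // Drain every frame this packet makes available: EAGAIN here means "feed
    // more input", AVERROR_EOF means the flush has completed (end of drain()).
    while (avcodec_receive_frame(ctx, frame) == 0) {
        /* convert with sws_scale() and hand the frame off, as outputFrame() does */
    }
}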

(13) C2FFMPEGVideoDecodeInterface.h contents:

/*
 * Copyright
 */

#ifndef C2_FFMPEG_VIDEO_DECODE_INTERFACE_H
#define C2_FFMPEG_VIDEO_DECODE_INTERFACE_H

#include <SimpleC2Interface.h>
#include "C2FFMPEGCommon.h"
#include "codec_utils.h"

#define VIDEO_DECODE_THREAD_COUNT 2

namespace android {

class C2FFMPEGVideoDecodeInterface : public SimpleInterface<void>::BaseParams {
public:
    /**
     * @brief Constructor for C2FFMPEGVideoDecodeInterface.
     *
     * @param componentInfo pointer to the C2FFMPEGComponentInfo describing the component
     * @param helper shared pointer to the C2ReflectorHelper
     */
    explicit C2FFMPEGVideoDecodeInterface(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2ReflectorHelper>& helper);

    uint32_t getWidth() const { return mSize->width; }
    uint32_t getHeight() const { return mSize->height; }
    /**
     * @brief Returns the FFmpeg video codec information.
     *
     * @return pointer to the FFMPEGVideoCodecInfo, or nullptr when no raw
     *         codec data has been set or its size does not match
     */
    const FFMPEGVideoCodecInfo* getCodecInfo() const;
    uint64_t getConsumerUsage() const { return mConsumerUsage->value; }
    const std::shared_ptr<C2StreamPixelFormatInfo::output>&
        getPixelFormatInfo() const { return mPixelFormat; }
    uint32_t getOutputDelay() const { return mActualOutputDelay->value; }

private:
    /**
     * @brief Setter for the picture size parameter.
     *
     * @param mayBlock whether the call may block
     * @param oldMe the previous picture size parameter
     * @param me the new picture size parameter to validate
     *
     * @return a C2R structure describing the result
     */
    static C2R SizeSetter(
        bool mayBlock,
        const C2P<C2StreamPictureSizeInfo::output> &oldMe,
        C2P<C2StreamPictureSizeInfo::output> &me);
    /**
     * @brief Setter for the profile and level parameter.
     *
     * @param mayBlock whether the call may block
     * @param me the new profile/level parameter to validate
     * @param size the picture size parameter
     *
     * @return a C2R structure describing the result
     */
    static C2R ProfileLevelSetter(
        bool mayBlock,
        C2P<C2StreamProfileLevelInfo::input> &me,
        const C2P<C2StreamPictureSizeInfo::output> &size);
    /**
     * @brief Setter for the raw codec data parameter.
     *
     * @param mayBlock whether the call may block
     * @param me the new raw codec data parameter to validate
     *
     * @return a C2R structure describing the result
     */
    static C2R CodecSetter(
        bool mayBlock, C2P<C2StreamRawCodecDataInfo::input>& me);

private:
    std::shared_ptr<C2StreamPictureSizeInfo::output> mSize;
    std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
    std::shared_ptr<C2StreamColorInfo::output> mColorInfo;
    std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
    std::shared_ptr<C2StreamRawCodecDataInfo::input> mRawCodecData;
    std::shared_ptr<C2StreamUsageTuning::output> mConsumerUsage;
};

} // namespace android

#endif // C2_FFMPEG_VIDEO_DECODE_INTERFACE_H

(14) C2FFMPEGVideoDecodeInterface.cpp contents

/*
 * Copyright
 */

#define LOG_NDEBUG 0
#define LOG_TAG "C2FFMPEGVideoDecodeInterface"
#include <android-base/properties.h>
#include <log/log.h>
#include <thread>

#include <media/stagefright/foundation/MediaDefs.h>
#include "C2FFMPEGVideoDecodeInterface.h"

namespace android {

constexpr size_t kMaxDimension = 4080;

C2FFMPEGVideoDecodeInterface::C2FFMPEGVideoDecodeInterface(
        const C2FFMPEGComponentInfo* componentInfo,
        const std::shared_ptr<C2ReflectorHelper>& helper)
    : SimpleInterface<void>::BaseParams(
        helper,
        componentInfo->name,
        C2Component::KIND_DECODER,
        C2Component::DOMAIN_VIDEO,
        componentInfo->mediaType) {
    noPrivateBuffers();
    noInputReferences();
    noOutputReferences();
    noInputLatency();
    noTimeStretch();
    setDerivedInstance(this);

    ALOGD("C2FFMPEGVideoDecodeInterface::C2FFMPEGVideoDecodeInterface");
    addParameter(
            DefineParam(mAttrib, C2_PARAMKEY_COMPONENT_ATTRIBUTES)
            .withConstValue(new C2ComponentAttributesSetting(C2Component::ATTRIB_IS_TEMPORAL))
            .build());

    addParameter(
            DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
            .withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
            .withFields({
                C2F(mSize, width).inRange(16, kMaxDimension, 2),
                C2F(mSize, height).inRange(16, kMaxDimension, 2),
            })
            .withSetter(SizeSetter)
            .build());

    int nthreads = base::GetIntProperty("debug.ffmpeg-codec2.threads", VIDEO_DECODE_THREAD_COUNT);

    if (nthreads <= 0) {
        nthreads = std::thread::hardware_concurrency();
    }

    addParameter(
            DefineParam(mActualOutputDelay, C2_PARAMKEY_OUTPUT_DELAY)
            .withConstValue(new C2PortActualDelayTuning::output(2 * nthreads))
            .build());

    C2ChromaOffsetStruct locations[1] = { C2ChromaOffsetStruct::ITU_YUV_420_0() };
    std::shared_ptr<C2StreamColorInfo::output> defaultColorInfo =
        C2StreamColorInfo::output::AllocShared(
                1u, 0u, 8u /* bitDepth */, C2Color::YUV_420);
    memcpy(defaultColorInfo->m.locations, locations, sizeof(locations));

    defaultColorInfo =
        C2StreamColorInfo::output::AllocShared(
                { C2ChromaOffsetStruct::ITU_YUV_420_0() },
                0u, 8u /* bitDepth */, C2Color::YUV_420);
    helper->addStructDescriptors<C2ChromaOffsetStruct>();

    addParameter(
            DefineParam(mColorInfo, C2_PARAMKEY_CODED_COLOR_INFO)
            .withConstValue(defaultColorInfo)
            .build());

    addParameter(
            DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
            .withConstValue(new C2StreamPixelFormatInfo::output(
                                 0u, HAL_PIXEL_FORMAT_YV12))
            .build());

    addParameter(
            DefineParam(mRawCodecData, C2_PARAMKEY_RAW_CODEC_DATA)
            .withDefault(C2StreamRawCodecDataInfo::input::AllocShared(0, 0u))
            .withFields({C2F(mRawCodecData, m.value)})
            .withSetter(CodecSetter)
            .build());

    addParameter(
            DefineParam(mConsumerUsage, C2_PARAMKEY_OUTPUT_STREAM_USAGE)
            .withDefault(new C2StreamUsageTuning::output(
                                0u, GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_COMPOSER))
            .withFields({C2F(mConsumerUsage, value).any()})
            .withSetter(Setter<decltype(*mConsumerUsage)>::StrictValueWithNoDeps)
            .build());
}

C2R C2FFMPEGVideoDecodeInterface::SizeSetter(
        bool /* mayBlock */,
        const C2P<C2StreamPictureSizeInfo::output> &oldMe,
        C2P<C2StreamPictureSizeInfo::output> &me) {
    C2R res = C2R::Ok();

    if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
        res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.width)));
        me.set().width = oldMe.v.width;
    }
    if (!me.F(me.v.height).supportsAtAll(me.v.height)) {
        res = res.plus(C2SettingResultBuilder::BadValue(me.F(me.v.height)));
        me.set().height = oldMe.v.height;
    }

    return res;
}

C2R C2FFMPEGVideoDecodeInterface::ProfileLevelSetter(
        bool /* mayBlock */,
        C2P<C2StreamProfileLevelInfo::input>& /* me */,
        const C2P<C2StreamPictureSizeInfo::output>& /* size */) {
    return C2R::Ok();
}

C2R C2FFMPEGVideoDecodeInterface::CodecSetter(
        bool mayBlock __unused, C2P<C2StreamRawCodecDataInfo::input>& me __unused) {
    return C2R::Ok();
}

const FFMPEGVideoCodecInfo* C2FFMPEGVideoDecodeInterface::getCodecInfo() const {
    if (mRawCodecData->flexCount() == sizeof(FFMPEGVideoCodecInfo)) {
        return (const FFMPEGVideoCodecInfo*)mRawCodecData->m.value;
    }
    return nullptr;
}

} // namespace android
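
For reference, the parameters defined above are exercised at runtime through the standard Codec2 query()/config() calls; the component itself uses exactly this pattern in initDecoder() and outputFrame(). A fragment, assuming the surrounding Codec2 headers and an intf pointer to this interface:

    // Read the current picture size (non-blocking query).
    C2StreamPictureSizeInfo::output size(0u, 0, 0);
    if (intf->query({ &size }, {}, C2_DONT_BLOCK, nullptr) == C2_OK) {
        ALOGD("picture size: %u x %u", size.width, size.height);
    }

    // Update it: SizeSetter() validates the new values against the declared
    // range (16..kMaxDimension, step 2) and reports problems through failures.
    C2StreamPictureSizeInfo::output newSize(0u, 1280, 720);
    std::vector<std::unique_ptr<C2SettingResult>> failures;
    c2_status_t err = intf->config({ &newSize }, C2_MAY_BLOCK, &failures);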

6. extractor directory

UT tests were added, so the directory also contains LDRAtests, and Android.mk was renamed to Android.mk.bak.

(1) Android.mk contents

#
# Copyright
#

LOCAL_PATH := $(call my-dir)

include $(SF_COMMON_MK)

LOCAL_SRC_FILES := \
        FFmpegExtractor.cpp

LOCAL_SHARED_LIBRARIES += \
        libavcodec        \
        libavformat       \
        libavutil         \
        libcutils         \
        libffmpeg_utils   \
        liblog            \
        libstagefright    \
        libstagefright_foundation \
        libutils libmediandk
LOCAL_CFLAGS += -Wno-unused-variable -Wno-unused-parameter


LOCAL_MODULE:= libffmpeg_extractor
LOCAL_SYSTEM_EXT_MODULE := true
LOCAL_MODULE_RELATIVE_PATH := extractors

include $(BUILD_SHARED_LIBRARY)

(2) FFmpegExtractor.h contents

/*
 * Copyright
 */

#ifndef SUPER_EXTRACTOR_H_

#define SUPER_EXTRACTOR_H_

#include <media/MediaExtractorPluginApi.h>
#include <media/MediaExtractorPluginHelper.h>
#include <media/NdkMediaFormat.h>
#include <media/stagefright/foundation/ABase.h>
#include <utils/threads.h>
#include <utils/KeyedVector.h>

#include "ffmpeg_utils.h"

namespace android {

struct ABuffer;
struct AMessage;
class String8;
struct FFmpegSource;

struct FFmpegExtractor : public MediaExtractorPluginHelper {
    /**
     * @brief Constructs the FFmpegExtractor and initializes the media extractor.
     *
     * @param source pointer to the data source
     * @param meta AMessage object carrying the sniffed metadata of the media file
     */
    FFmpegExtractor(DataSourceHelper *source, const sp<AMessage> &meta);

    /**
     * @brief Counts the tracks in the media file.
     *
     * @return the number of tracks in the media file
     */
    virtual size_t countTracks();
    /**
     * @brief Returns the track at the given index.
     *
     * @param index index of the track to fetch
     * @return pointer to the media track object, or NULL if the index is invalid
     */
    virtual MediaTrackHelper* getTrack(size_t index);
    /**
     * @brief Returns the metadata of the track at the given index.
     *
     * @param meta AMediaFormat pointer that receives the track metadata
     * @param index index of the track whose metadata is requested
     * @param flags flag bits
     * @return AMEDIA_OK on success, AMEDIA_ERROR_UNKNOWN otherwise
     */
    virtual media_status_t getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags);

    /**
     * @brief Returns the metadata of the media file.
     *
     * @param meta AMediaFormat pointer that receives the file metadata
     * @return AMEDIA_OK on success
     */
    virtual media_status_t getMetaData(AMediaFormat *meta);

    /**
     * @brief Returns the capability flags of the media file.
     *
     * @return flag bits describing which operations the file supports
     */
    virtual uint32_t flags() const;
    virtual const char* name() { return "FFmpegExtractor"; }

protected:
    /**
     * @brief Destructor for FFmpegExtractor.
     */
    virtual ~FFmpegExtractor();

private:
    friend struct FFmpegSource;

    struct TrackInfo {
        int mIndex; //stream index
        AMediaFormat *mMeta;
        AVStream *mStream;
        PacketQueue *mQueue;
        bool mSeek;
    };

    Vector<TrackInfo> mTracks;

    mutable Mutex mLock;

    DataSourceHelper *mDataSource;
    AMediaFormat *mMeta;

    char mFilename[PATH_MAX];
    int mGenPTS;
    int mVideoDisable;
    int mAudioDisable;
    int mShowStatus;
    int mSeekByBytes;
    int64_t mDuration;
    bool mEOF;
    size_t mPktCounter;
    int mAbortRequest;

    PacketQueue *mAudioQ;
    PacketQueue *mVideoQ;

    AVFormatContext *mFormatCtx;
    int mVideoStreamIdx;
    int mAudioStreamIdx;
    AVStream *mVideoStream;
    AVStream *mAudioStream;
    bool mDefersToCreateVideoTrack;
    bool mDefersToCreateAudioTrack;
    AVBSFContext *mVideoBsfc;
    AVBSFContext *mAudioBsfc;
    bool mParsedMetadata;

    static int decodeInterruptCb(void *ctx);

    /**
     * @brief Initializes the streams.
     *
     * @return 0 on success, -1 on failure
     */
    int initStreams();
    /**
     * @brief De-initializes the streams.
     *
     * Closes the audio and video streams as well as the input file.
     */
    void deInitStreams();
    /**
     * @brief Extracts information from the sniffed metadata of the media file.
     *
     * @param meta metadata sniffed from the media file
     */
    void fetchStuffsFromSniffedMeta(const sp<AMessage> &meta);
    /**
     * @brief Sets the default FFmpeg options.
     */
    void setFFmpegDefaultOpts();
    /**
     * @brief Reads the next packet from the media file.
     *
     * @return the stream index on success, or AVERROR_EOF at end of file
     */
    int feedNextPacket();
    /**
     * @brief Fetches a packet from the queue of the given track.
     *
     * @param trackIndex track index
     * @param pkt AVPacket pointer that receives the packet
     * @return 0 on success, or the error code on failure
     */
    int getPacket(int trackIndex, AVPacket *pkt);
    /**
     * @brief Checks whether the given codec ID is supported.
     *
     * @param codec_id the AVCodecID to check
     * @return true if the codec ID is supported, false otherwise
     */
    bool isCodecSupported(enum AVCodecID codec_id);
    /**
     * @brief Fills in the video format information.
     *
     * @param stream AVStream* video stream
     * @param meta AMediaFormat* media format
     * @return AMEDIA_OK on success
     */
    media_status_t setVideoFormat(AVStream *stream, AMediaFormat *meta);
    /**
     * @brief Fills in the audio format information.
     *
     * @param stream AVStream* audio stream
     * @param meta AMediaFormat* media format
     * @return AMEDIA_OK on success
     */
    media_status_t setAudioFormat(AVStream *stream, AMediaFormat *meta);
    /**
     * @brief Sets the duration information in the media metadata.
     *
     * @param stream AVStream* audio or video stream
     * @param meta AMediaFormat* media format
     */
    void setDurationMetaData(AVStream *stream, AMediaFormat *meta);
    /**
     * @brief Opens the component for the given stream.
     *
     * @param streamIndex stream index
     * @return 0 on success, -1 on failure
     */
    int streamComponentOpen(int streamIndex);
    /**
     * @brief Closes the component for the given stream.
     *
     * @param streamIndex stream index
     */
    void streamComponentClose(int streamIndex);
    /**
     * @brief Performs a seek on the stream.
     *
     * @param trackIndex track index
     * @param pos seek position
     * @param mode seek mode
     * @return SEEK on success, NO_SEEK on failure
     */
    int streamSeek(int trackIndex, int64_t pos,
                    MediaTrackHelper::ReadOptions::SeekMode mode);
    /**
     * @brief Checks whether the extradata meets the requirements and handles
     *        incompatible cases.
     *
     * @param avpar AVCodecParameters pointer
     * @return 1 if the extradata is usable as-is, 0 if track creation is
     *         deferred, -1 on error
     */
    int checkExtradata(AVCodecParameters *avpar);

    DISALLOW_EVIL_CONSTRUCTORS(FFmpegExtractor);
};

}  // namespace android

#endif  // SUPER_EXTRACTOR_H_
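
The extractor demuxes lazily: feedNextPacket() pulls one AVPacket from the AVFormatContext and routes it to the owning track's PacketQueue, from which FFmpegSource::read() later takes it through getPacket(). A minimal sketch of that routing step with plain FFmpeg calls (the queue plumbing is stubbed out):

extern "C" {
#include <libavformat/avformat.h>
}

// Returns the stream index of the packet just read, or a negative AVERROR
// (AVERROR_EOF at end of file); the same contract as feedNextPacket().
static int feedOnePacket(AVFormatContext* fmt, AVPacket* pkt) {
    int err = av_read_frame(fmt, pkt); // one packet from whichever stream is next
    if (err < 0) {
        return err;
    }
    int streamIndex = pkt->stream_index;
    /* packet_queue_put(queueFor(streamIndex), pkt) in the real code */
    av_packet_unref(pkt);
    return streamIndex;
}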

(3) FFmpegExtractor.cpp contents

/*
 * Copyright
 */

#define LOG_NDEBUG 0
#define LOG_TAG "FFmpegExtractor"
#include <utils/Log.h>

#include <stdint.h>
#include <limits.h> /* INT_MAX */
#include <inttypes.h>
#include <sys/prctl.h>

#include <utils/misc.h>
#include <utils/String8.h>
#include <cutils/properties.h>
#include <media/stagefright/DataSourceBase.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/foundation/hexdump.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>

#include "codec_utils.h"
#include "ffmpeg_cmdutils.h"

#include "FFmpegExtractor.h"

#define EXTRACTOR_MAX_PROBE_PACKETS 200
#define FF_MAX_EXTRADATA_SIZE ((1 << 28) - AV_INPUT_BUFFER_PADDING_SIZE)
#define EXTRACTOR_SNIFF_MAX_BYTES 16
#define EXTRACTOR_SNIFF_MIN_BYTES 4

#define SUPPOURT_UNKNOWN_FORMAT    1

//debug
#define DEBUG_PKT                  0
#define DEBUG_EXTRADATA            0

enum {
    NO_SEEK = 0,
    SEEK,
};

namespace android {

static const char *findMatchingContainer(const char *name);
static CMediaExtractor *CreateFFMPEGExtractor(CDataSource *source, void *meta);

struct FFmpegSource : public MediaTrackHelper {
    FFmpegSource(FFmpegExtractor *extractor, size_t index);

    virtual media_status_t start();
    virtual media_status_t stop();
    virtual media_status_t getFormat(AMediaFormat *meta);

    virtual media_status_t read(
            MediaBufferHelper **buffer, const ReadOptions *options);

protected:
    virtual ~FFmpegSource();

private:
    friend struct FFmpegExtractor;

    FFmpegExtractor *mExtractor;
    size_t mTrackIndex;

    enum AVMediaType mMediaType;

    mutable Mutex mLock;

    bool mIsAVC;
    bool mIsHEVC;
    size_t mNALLengthSize;
    bool mNal2AnnexB;

    AVStream *mStream;
    PacketQueue *mQueue;

    int64_t mFirstKeyPktTimestamp;
    int64_t mLastPTS;
    int64_t mTargetTime;

    DISALLOW_EVIL_CONSTRUCTORS(FFmpegSource);
};



FFmpegExtractor::FFmpegExtractor(DataSourceHelper *source, const sp<AMessage> &meta)
    : mDataSource(source),
      mAudioQ(NULL),
      mVideoQ(NULL),
      mFormatCtx(NULL),
      mParsedMetadata(false) {
    ALOGV("FFmpegExtractor::FFmpegExtractor");

    mMeta = AMediaFormat_new();
    fetchStuffsFromSniffedMeta(meta);

    mVideoQ = packet_queue_alloc();
    mAudioQ = packet_queue_alloc();

    int err = initStreams();
    if (err < 0) {
        ALOGE("failed to init ffmpeg");
        return;
    }

    while (mPktCounter <= EXTRACTOR_MAX_PROBE_PACKETS &&
           (mDefersToCreateVideoTrack || mDefersToCreateAudioTrack)) {
        err = feedNextPacket();
        if (err < 0 && err != AVERROR(EAGAIN)) {
            ALOGE("deferred track creation failed, %s (%08x)", av_err2str(err), err);
            return;
        }
    }

    ALOGV("mPktCounter: %zu, mEOF: %d, pb->error(if has): %d, mDefersToCreateVideoTrack: %d, mDefersToCreateAudioTrack: %d",
          mPktCounter, mEOF, mFormatCtx->pb ? mFormatCtx->pb->error : 0, mDefersToCreateVideoTrack, mDefersToCreateAudioTrack);

    if (mDefersToCreateVideoTrack) {
        ALOGW("deferred creation of video track failed, disabling stream");
        streamComponentClose(mVideoStreamIdx);
    }

    if (mDefersToCreateAudioTrack) {
        ALOGW("deferred creation of audio track failed, disabling stream");
        streamComponentClose(mAudioStreamIdx);
    }
}

FFmpegExtractor::~FFmpegExtractor() {
    ALOGV("FFmpegExtractor::~FFmpegExtractor");

    mAbortRequest = 1;
    deInitStreams();

    Mutex::Autolock autoLock(mLock);

    packet_queue_free(&mVideoQ);
    packet_queue_free(&mAudioQ);

    for (auto& trackInfo : mTracks) {
        AMediaFormat_delete(trackInfo.mMeta);
    }
    AMediaFormat_delete(mMeta);
}

size_t FFmpegExtractor::countTracks() {
    ALOGV("FFmpegExtractor::countTracks");
    return mTracks.size();
}

MediaTrackHelper* FFmpegExtractor::getTrack(size_t index) {
    ALOGV("FFmpegExtractor::getTrack[%zu]", index);

    if (index >= mTracks.size()) {
        return NULL;
    }

    return new FFmpegSource(this, index);
}

media_status_t FFmpegExtractor::getTrackMetaData(AMediaFormat *meta, size_t index, uint32_t flags __unused) {
    ALOGV("FFmpegExtractor::getTrackMetaData[%zu]", index);

    if (index >= mTracks.size()) {
        return AMEDIA_ERROR_UNKNOWN;
    }

    /* Quick and dirty, just get a frame 1/4 in */
    if (mTracks.itemAt(index).mIndex == mVideoStreamIdx &&
            mFormatCtx->duration != AV_NOPTS_VALUE) {
        AMediaFormat_setInt64(mTracks.editItemAt(index).mMeta,
                AMEDIAFORMAT_KEY_THUMBNAIL_TIME, mFormatCtx->duration / 4);
    }

    AMediaFormat_copy(meta, mTracks.itemAt(index).mMeta);
    return AMEDIA_OK;
}

media_status_t FFmpegExtractor::getMetaData(AMediaFormat *meta) {
    ALOGV("FFmpegExtractor::getMetaData");

    if (!mParsedMetadata) {
        parseMetadataTags(mFormatCtx, mMeta);
        mParsedMetadata = true;
    }

    AMediaFormat_copy(meta, mMeta);
    return AMEDIA_OK;
}

uint32_t FFmpegExtractor::flags() const {
    ALOGV("FFmpegExtractor::flags");

    uint32_t flags = CAN_PAUSE;

    if (mFormatCtx->duration != AV_NOPTS_VALUE) {
        flags |= CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD | CAN_SEEK;
    }

    return flags;
}

int FFmpegExtractor::checkExtradata(AVCodecParameters *avpar)
{
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    const char *name = NULL;
    bool *defersToCreateTrack = NULL;
    AVBSFContext **bsfc = NULL;

    // init
    if (avpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        bsfc = &mVideoBsfc;
        defersToCreateTrack = &mDefersToCreateVideoTrack;
    } else if (avpar->codec_type == AVMEDIA_TYPE_AUDIO){
        bsfc = &mAudioBsfc;
        defersToCreateTrack = &mDefersToCreateAudioTrack;
    }

    codec_id = avpar->codec_id;

    // ignore extradata
    if (codec_id != AV_CODEC_ID_H264
            && codec_id != AV_CODEC_ID_MPEG4
            && codec_id != AV_CODEC_ID_MPEG1VIDEO
            && codec_id != AV_CODEC_ID_MPEG2VIDEO
            && codec_id != AV_CODEC_ID_AAC) {
        return 1;
    }

    // is extradata compatible with android?
    if (codec_id != AV_CODEC_ID_AAC) {
        int is_compatible = is_extradata_compatible_with_android(avpar);
        if (!is_compatible) {
            ALOGI("[%s] extradata is not compatible with android, will extract it from the bitstream",
                    av_get_media_type_string(avpar->codec_type));
            *defersToCreateTrack = true;
            *bsfc = NULL; // H264 doesn't need a bsfc, only AAC?
            return 0;
        }
        return 1;
    }

    if (codec_id == AV_CODEC_ID_AAC) {
        name = "aac_adtstoasc";
    }

    if (avpar->extradata_size <= 0) {
        const char* type = av_get_media_type_string(avpar->codec_type);
        ALOGI("[%s] no extradata found, extract it from bitstream", type);
        *defersToCreateTrack = true;
         //CHECK(name != NULL);
        if (!*bsfc && name) {
            const AVBitStreamFilter* bsf = av_bsf_get_by_name(name);
            if (!bsf) {
                ALOGE("[%s] (%s) cannot find bitstream filter", type, name);
                *defersToCreateTrack = false;
                return -1;
            }
            if (av_bsf_alloc(bsf, bsfc) < 0 || !*bsfc) {
                ALOGE("[%s] (%s) cannot allocate bitstream filter", type, name);
                *defersToCreateTrack = false;
                return -1;
            }
            // (*bsfc)->time_base_in = avpar->time_base;
            if (avcodec_parameters_copy((*bsfc)->par_in, avpar)
                    || av_bsf_init(*bsfc)) {
                ALOGE("[%s] (%s) cannot initialize bitstream filter", type, name);
                *defersToCreateTrack = false;
                return -1;
            }
            ALOGV("[%s] (%s) created bitstream filter", type, name);
            return 0;
        } else {
            return 0;
        }
    }
    return 1;
}

static void printTime(int64_t time, const char* type)
{
    int hours, mins, secs, us;

    if (time == AV_NOPTS_VALUE)
        return;

    secs = time / AV_TIME_BASE;
    us = time % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    ALOGI("[%s] the time is %02d:%02d:%02d.%02d",
          type, hours, mins, secs, (100 * us) / AV_TIME_BASE);
}

bool FFmpegExtractor::isCodecSupported(enum AVCodecID codec_id)
{
    switch(codec_id) {
    case AV_CODEC_ID_H264:
    case AV_CODEC_ID_MPEG4:
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_H263I:
    case AV_CODEC_ID_AAC:
    case AV_CODEC_ID_AC3:
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
    case AV_CODEC_ID_WMV1:
    case AV_CODEC_ID_WMV2:
    case AV_CODEC_ID_WMV3:
    case AV_CODEC_ID_VC1:
    case AV_CODEC_ID_VP8:
    case AV_CODEC_ID_VP9:
    case AV_CODEC_ID_WMAV1:
    case AV_CODEC_ID_WMAV2:
    case AV_CODEC_ID_WMAPRO:
    case AV_CODEC_ID_WMALOSSLESS:
    case AV_CODEC_ID_RV20:
    case AV_CODEC_ID_RV30:
    case AV_CODEC_ID_RV40:
    case AV_CODEC_ID_COOK:
    case AV_CODEC_ID_APE:
    case AV_CODEC_ID_DTS:
    case AV_CODEC_ID_FLAC:
    case AV_CODEC_ID_FLV1:
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_HEVC:
    case AV_CODEC_ID_ALAC:
        return true;
    default:
        return false;
    }
}

media_status_t FFmpegExtractor::setVideoFormat(AVStream *stream, AMediaFormat *meta)
{
    AVCodecParameters *avpar = NULL;
    media_status_t ret = AMEDIA_ERROR_UNKNOWN;

    avpar = stream->codecpar;
    CHECK_EQ((int)avpar->codec_type, (int)AVMEDIA_TYPE_VIDEO);

    switch(avpar->codec_id) {
    case AV_CODEC_ID_H264:
        if (avpar->extradata[0] == 1) {
            ret = setAVCFormat(avpar,meta);
        } else {
            ret = setH264Format(avpar,meta );
        }
        break;
    case AV_CODEC_ID_MPEG4:
        ret = setMPEG4Format(avpar, meta);
        break;
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_H263I:
        ret = setH263Format(avpar,meta );
        break;
    case AV_CODEC_ID_MPEG2VIDEO:
        ret = setMPEG2VIDEOFormat(avpar,meta);
        break;
    case AV_CODEC_ID_VC1:
        ret = setVC1Format(avpar, meta);
        break;
    case AV_CODEC_ID_WMV1:
        ret = setWMV1Format(avpar, meta);
        break;
    case AV_CODEC_ID_WMV2:
        ret = setWMV2Format(avpar, meta);
        break;
    case AV_CODEC_ID_WMV3:
        ret = setWMV3Format(avpar, meta);
        break;
    default:
        ALOGE("[video] unsupported codec (id: %d, name: %s)",
                avpar->codec_id, avcodec_get_name(avpar->codec_id));
        ret = AMEDIA_ERROR_UNSUPPORTED;
        break;
    }

    if (ret == AMEDIA_OK) {
        // rotation
        double theta = get_rotation(stream);
        int rotationDegrees = 0;

        if (fabs(theta - 90) < 1.0) {
            rotationDegrees = 90;
        } else if (fabs(theta - 180) < 1.0) {
            rotationDegrees = 180;
        } else if (fabs(theta - 270) < 1.0) {
            rotationDegrees = 270;
        }
        if (rotationDegrees != 0) {
            AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_ROTATION, rotationDegrees);
        }
    }

    if (ret == AMEDIA_OK) {
        float aspect_ratio;
        int width, height;

        if (avpar->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(avpar->sample_aspect_ratio);

        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)avpar->width / (float)avpar->height;

        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = avpar->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;

        ALOGI("[video] width: %d, height: %d, bit_rate: %" PRId64 ", aspect ratio: %f",
                avpar->width, avpar->height, avpar->bit_rate, aspect_ratio);

        AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_WIDTH, avpar->width);
        AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_HEIGHT, avpar->height);
        if ((width > 0) && (height > 0) &&
            ((avpar->width != width || avpar->height != height))) {
            AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_SAR_WIDTH, width);
            AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_SAR_HEIGHT, height);
            ALOGI("[video] SAR width: %d, SAR height: %d", width, height);
        }
        if (avpar->bit_rate > 0) {
            AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_BIT_RATE, avpar->bit_rate);
        }
        AMediaFormat_setString(meta, "file-format", findMatchingContainer(mFormatCtx->iformat->name));
        setDurationMetaData(stream, meta);

        FFMPEGVideoCodecInfo info = {
            .codec_id = avpar->codec_id,
        };

        AMediaFormat_setBuffer(meta, "raw-codec-data", &info, sizeof(info));
    }

    return ret;
}

media_status_t FFmpegExtractor::setAudioFormat(AVStream *stream, AMediaFormat *meta)
{
    AVCodecParameters *avpar = NULL;
    media_status_t ret = AMEDIA_ERROR_UNKNOWN;

    avpar = stream->codecpar;
    CHECK_EQ((int)avpar->codec_type, (int)AVMEDIA_TYPE_AUDIO);

    switch(avpar->codec_id) {
    case AV_CODEC_ID_MP3:
        ret = setMP3Format(avpar, meta);
        break;
    case AV_CODEC_ID_AC3:
        ret = setAC3Format(avpar,meta );
        break;
    case AV_CODEC_ID_AAC:
        ret = setAACFormat(avpar, meta);
        break;
    case AV_CODEC_ID_WMAV1:
        ret = setWMAV1Format(avpar, meta);
        break;
    case AV_CODEC_ID_WMAV2:
        ret = setWMAV2Format(avpar, meta);
        break;
    case AV_CODEC_ID_WMAPRO:
        ret = setWMAProFormat(avpar, meta);
        break;
    case AV_CODEC_ID_WMALOSSLESS:
        ret = setWMALossLessFormat(avpar, meta);
        break;
    case AV_CODEC_ID_APE:
        ret = setAPEFormat(avpar, meta);
        break;
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_PCM_S32LE:
        ret = setPCMFormat(avpar, meta);
        break;
    default:
        ALOGE("[audio] unsupported codec (id: %d, name: %s)",
                avpar->codec_id, avcodec_get_name(avpar->codec_id));
        ret = AMEDIA_ERROR_UNSUPPORTED;
        break;
    }

    if (ret == AMEDIA_OK) {
        ALOGD("[audio] bit_rate: %" PRId64 ", sample_rate: %d, channels: %d, "
                "bits_per_coded_sample: %d, block_align: %d "
                "bits_per_raw_sample: %d, sample_format: %d",
                avpar->bit_rate, avpar->sample_rate, avpar->ch_layout.nb_channels,
                avpar->bits_per_coded_sample, avpar->block_align,
                avpar->bits_per_raw_sample, avpar->format);

        AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_CHANNEL_COUNT, avpar->ch_layout.nb_channels);
        AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_BIT_RATE, avpar->bit_rate);
        int32_t bits = avpar->bits_per_raw_sample > 0 ?
                avpar->bits_per_raw_sample :
                av_get_bytes_per_sample((enum AVSampleFormat)avpar->format) * 8;
        AMediaFormat_setInt32(meta, "bits-per-raw-sample", bits);
        AMediaFormat_setInt32(meta, "sample-rate", avpar->sample_rate);
        AMediaFormat_setInt32(meta, "block-align", avpar->block_align);
        AMediaFormat_setInt32(meta, "sample-format", avpar->format);
        //LCTD AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_PCM_ENCODING, sampleFormatToEncoding(avpar->sample_fmt));
        AMediaFormat_setString(meta, "file-format", findMatchingContainer(mFormatCtx->iformat->name));
        setDurationMetaData(stream, meta);

        FFMPEGAudioCodecInfo info = {
            .codec_id = avpar->codec_id,
            .bits_per_coded_sample = avpar->bits_per_coded_sample,
            .block_align = avpar->block_align,
        };

        AMediaFormat_setBuffer(meta, "raw-codec-data", &info, sizeof(info));
    }

    return ret;
}

void FFmpegExtractor::setDurationMetaData(AVStream *stream, AMediaFormat *meta)
{
    AVCodecParameters *avpar = stream->codecpar;

    if (stream->duration != AV_NOPTS_VALUE) {
        int64_t duration = av_rescale_q(stream->duration, stream->time_base, AV_TIME_BASE_Q);
        const char *s = av_get_media_type_string(avpar->codec_type);
        printTime(duration, s);
        if (stream->start_time != AV_NOPTS_VALUE) {
            ALOGV("[%s] startTime: %" PRId64, s, stream->start_time);
        } else {
            ALOGV("[%s] startTime:N/A", s);
        }
        AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_DURATION, duration);
    } else {
        // default when no stream duration
        AMediaFormat_setInt64(meta, AMEDIAFORMAT_KEY_DURATION, mFormatCtx->duration);
    }
}

int FFmpegExtractor::streamComponentOpen(int streamIndex)
{
    TrackInfo *trackInfo = NULL;
    AVCodecParameters *avpar = NULL;
    bool supported = false;
    int ret = 0;

    if (streamIndex < 0 || streamIndex >= (int)mFormatCtx->nb_streams) {
        ALOGE("opening stream with invalid stream index(%d)", streamIndex);
        return -1;
    }
    avpar = mFormatCtx->streams[streamIndex]->codecpar;

    const char* type = av_get_media_type_string(avpar->codec_type);
    ALOGI("[%s] opening stream @ index(%d)", type, streamIndex);

    supported = isCodecSupported(avpar->codec_id);
    if (! supported) {
        ALOGD("[%s] unsupported codec (%s), but give it a chance",
              type, avcodec_get_name(avpar->codec_id));
    }

    if ((mFormatCtx->streams[streamIndex]->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
        avpar->codec_tag == MKTAG('j', 'p', 'e', 'g')) {
        ALOGD("[%s] not opening attached picture(%s)", type, avcodec_get_name(avpar->codec_id));
        return -1;
    }
    ALOGI("[%s] support the codec(%s) disposition(%x)",
          type, avcodec_get_name(avpar->codec_id), mFormatCtx->streams[streamIndex]->disposition);

    for (size_t i = 0; i < mTracks.size(); ++i) {
        if (streamIndex == mTracks.editItemAt(i).mIndex) {
            ALOGE("[%s] this track already exists", type);
            return 0;
        }
    }

    mFormatCtx->streams[streamIndex]->discard = AVDISCARD_DEFAULT;

    ALOGV("[%s] tag %s/0x%08x with codec(%s)\n",
          type, av_fourcc2str(avpar->codec_tag), avpar->codec_tag, avcodec_get_name(avpar->codec_id));

    AMediaFormat *meta = AMediaFormat_new();

    switch (avpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        if (mVideoStreamIdx == -1)
            mVideoStreamIdx = streamIndex;
        if (mVideoStream == NULL)
            mVideoStream = mFormatCtx->streams[streamIndex];

        ret = checkExtradata(avpar);
        if (ret != 1) {
            if (ret == -1) {
                // disable the stream
                mVideoStreamIdx = -1;
                mVideoStream = NULL;
                packet_queue_flush(mVideoQ);
                mFormatCtx->streams[streamIndex]->discard = AVDISCARD_ALL;
            }
            return ret;
         }
#if DEBUG_EXTRADATA
        if (avpar->extradata) {
            ALOGV("[%s] stream extradata(%d):", type, avpar->extradata_size);
            hexdump(avpar->extradata, avpar->extradata_size);
        } else {
            ALOGV("[%s] stream has no extradata, but we can ignore it.", type);
        }
#endif
        if (setVideoFormat(mVideoStream, meta) != AMEDIA_OK) {
            ALOGE("[%s] setVideoFormat failed", type);
            return -1;
        }

        ALOGV("[%s] creating track", type);
        mTracks.push();
        trackInfo = &mTracks.editItemAt(mTracks.size() - 1);
        trackInfo->mIndex  = streamIndex;
        trackInfo->mMeta   = meta;
        trackInfo->mStream = mVideoStream;
        trackInfo->mQueue  = mVideoQ;
        trackInfo->mSeek   = false;

        mDefersToCreateVideoTrack = false;

        break;
    case AVMEDIA_TYPE_AUDIO:
        if (mAudioStreamIdx == -1)
            mAudioStreamIdx = streamIndex;
        if (mAudioStream == NULL)
            mAudioStream = mFormatCtx->streams[streamIndex];

        ret = checkExtradata(avpar);
        if (ret != 1) {
            if (ret == -1) {
                // disable the stream
                mAudioStreamIdx = -1;
                mAudioStream = NULL;
                packet_queue_flush(mAudioQ);
                mFormatCtx->streams[streamIndex]->discard = AVDISCARD_ALL;
            }
            return ret;
        }
#if DEBUG_EXTRADATA
        if (avpar->extradata) {
            ALOGV("[%s] stream extradata(%d):", type, avpar->extradata_size);
            hexdump(avpar->extradata, avpar->extradata_size);
        } else {
            ALOGV("[%s] stream has no extradata, but we can ignore it.", type);
        }
#endif
        if (setAudioFormat(mAudioStream, meta) != AMEDIA_OK) {
            ALOGE("[%s] setAudioFormat failed", type);
            return -1;
        }

        ALOGV("[%s] creating track", type);
        mTracks.push();
        trackInfo = &mTracks.editItemAt(mTracks.size() - 1);
        trackInfo->mIndex  = streamIndex;
        trackInfo->mMeta   = meta;
        trackInfo->mStream = mAudioStream;
        trackInfo->mQueue  = mAudioQ;
        trackInfo->mSeek   = false;

        mDefersToCreateAudioTrack = false;

        break;
    case AVMEDIA_TYPE_SUBTITLE:
        /* Unsupported for now */
        CHECK(!"Should not be here. Unsupported media type.");
        break;
    default:
        CHECK(!"Should not be here. Unsupported media type.");
        break;
    }
    return 0;
}
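
// Note (editor): from the call sites above, checkExtradata() appears to follow
// a three-way return convention: 1 = extradata is complete, create the track
// now; 0 = extradata not available yet, defer track creation (the
// mDefersToCreate*Track flag stays set and feedNextPacket() retries once
// extradata turns up in the stream); -1 = the stream is unusable and is
// discarded via AVDISCARD_ALL. This is inferred from the callers here, not
// from the function's definition, which lives elsewhere in this file.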

void FFmpegExtractor::streamComponentClose(int streamIndex)
{
    AVCodecParameters *avpar;

    if (streamIndex < 0 || streamIndex >= (int)mFormatCtx->nb_streams) {
        ALOGE("closing stream with invalid index(%d)", streamIndex);
        return;
    }
    avpar = mFormatCtx->streams[streamIndex]->codecpar;

    const char* type = av_get_media_type_string(avpar->codec_type);
    ALOGI("[%s] closing stream @ index(%d)", type, streamIndex);

    switch (avpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ALOGV("[%s] packet_queue_abort", type);
        packet_queue_abort(mVideoQ);
        ALOGV("[%s] packet_queue_end", type);
        packet_queue_flush(mVideoQ);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ALOGV("[%s] packet_queue_abort", type);
        packet_queue_abort(mAudioQ);
        ALOGV("[%s] packet_queue_end", type);
        packet_queue_flush(mAudioQ);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        break;
    default:
        break;
    }

    mFormatCtx->streams[streamIndex]->discard = AVDISCARD_ALL;
    switch (avpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        mVideoStream    = NULL;
        mVideoStreamIdx = -1;
        if (mVideoBsfc) {
            av_bsf_free(&mVideoBsfc);
        }
        mDefersToCreateVideoTrack = false;
        break;
    case AVMEDIA_TYPE_AUDIO:
        mAudioStream    = NULL;
        mAudioStreamIdx = -1;
        if (mAudioBsfc) {
            av_bsf_free(&mAudioBsfc);
        }
        mDefersToCreateAudioTrack = false;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        break;
    default:
        break;
    }
}

/* seek in the stream */
int FFmpegExtractor::streamSeek(int trackIndex, int64_t pos,
        MediaTrackHelper::ReadOptions::SeekMode mode)
{
    Mutex::Autolock _l(mLock);

    const TrackInfo& track = mTracks.itemAt(trackIndex);
    const char* type = av_get_media_type_string(track.mStream->codecpar->codec_type);

    if (track.mSeek) {
        // Don't do anything if seeking is already in progress
        ALOGV("[%s] seek already in progress", type);
        return NO_SEEK;
    }

    int64_t seekPos = pos, seekMin, seekMax;
    int err;

    switch (mode) {
        case MediaTrackHelper::ReadOptions::SEEK_PREVIOUS_SYNC:
            seekMin = 0;
            seekMax = seekPos;
            break;
        case MediaTrackHelper::ReadOptions::SEEK_NEXT_SYNC:
            seekMin = seekPos;
            seekMax = INT64_MAX;
            break;
        case MediaTrackHelper::ReadOptions::SEEK_CLOSEST_SYNC:
            seekMin = 0;
            seekMax = INT64_MAX;
            break;
        case MediaTrackHelper::ReadOptions::SEEK_CLOSEST:
            seekMin = 0;
            seekMax = seekPos;
            break;
        default:
            TRESPASS();
    }

    err = avformat_seek_file(mFormatCtx, -1, seekMin, seekPos, seekMax, 0);
    if (err < 0) {
        ALOGE("[%s] seek failed(%s (%08x)), restarting at the beginning",
              type, av_err2str(err), err);
        err = avformat_seek_file(mFormatCtx, -1, 0, 0, 0, 0);
        if (err < 0) {
            ALOGE("[%s] seek failed(%s (%08x))", type, av_err2str(err), err);
            return NO_SEEK;
        }
    }

    ALOGV("[%s] (seek) pos=%" PRId64 ", min=%" PRId64 ", max=%" PRId64,
          type, seekPos, seekMin, seekMax);

    mEOF = false;
    for (size_t i = 0; i < mTracks.size(); i++) {
        TrackInfo& ti = mTracks.editItemAt(i);
        packet_queue_flush(ti.mQueue);
        ti.mSeek = true;
    }

    return SEEK;
}
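
// Note (editor): avformat_seek_file(ctx, stream_index, min_ts, ts, max_ts, 0)
// positions the demuxer at a timestamp within [min_ts, max_ts], as close to
// ts as possible; with stream_index == -1 all three values are in AV_TIME_BASE
// (microsecond) units. The switch above merely maps Stagefright's seek modes
// onto that window. For example, SEEK_PREVIOUS_SYNC at pos = 30 s becomes:
//
//     avformat_seek_file(mFormatCtx, -1, /*min*/ 0, /*ts*/ 30000000,
//                        /*max*/ 30000000, 0);
//
// i.e. "land at or before 30 s"; getPacket() then refines the position by
// dropping non-key packets until the next keyframe.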

int FFmpegExtractor::decodeInterruptCb(void *ctx)
{
    FFmpegExtractor *extractor = static_cast<FFmpegExtractor *>(ctx);
    return extractor->mAbortRequest;
}

void FFmpegExtractor::fetchStuffsFromSniffedMeta(const sp<AMessage> &meta)
{
    AString url;
    AString mime;

    //url
    CHECK(meta->findString("extended-extractor-url", &url));
    CHECK(url.c_str() != NULL);
    CHECK(url.size() < PATH_MAX);

    memcpy(mFilename, url.c_str(), url.size());
    mFilename[url.size()] = '\0';

    //mime
    CHECK(meta->findString("extended-extractor-mime", &mime));
    CHECK(mime.c_str() != NULL);
    AMediaFormat_setString(mMeta, AMEDIAFORMAT_KEY_MIME, mime.c_str());
}

void FFmpegExtractor::setFFmpegDefaultOpts()
{
    mGenPTS       = 0;
#if DEBUG_DISABLE_VIDEO
    mVideoDisable = 1;
#else
    mVideoDisable = 0;
#endif
#if DEBUG_DISABLE_AUDIO
    mAudioDisable = 1;
#else
    mAudioDisable = 0;
#endif
    mShowStatus   = 0;
    mSeekByBytes  = 0; /* seek by bytes 0=off 1=on -1=auto */
    mDuration     = AV_NOPTS_VALUE;

    mVideoStreamIdx = -1;
    mAudioStreamIdx = -1;
    mVideoStream  = NULL;
    mAudioStream  = NULL;
    mDefersToCreateVideoTrack = false;
    mDefersToCreateAudioTrack = false;
    mVideoBsfc = NULL;
    mAudioBsfc = NULL;

    mAbortRequest = 0;
    mPktCounter   = 0;
    mEOF          = false;
}

int FFmpegExtractor::initStreams()
{
    int err = 0;
    int i = 0;
    int ret = 0, audio_ret = -1, video_ret = -1;
    AVDictionaryEntry *t = NULL;
    AVDictionary **opts = NULL;
    int orig_nb_streams = 0;
    int st_index[AVMEDIA_TYPE_NB] = {0};
    int wanted_stream[AVMEDIA_TYPE_NB] = {0};
    st_index[AVMEDIA_TYPE_AUDIO]  = -1;
    st_index[AVMEDIA_TYPE_VIDEO]  = -1;
    wanted_stream[AVMEDIA_TYPE_AUDIO]  = -1;
    wanted_stream[AVMEDIA_TYPE_VIDEO]  = -1;
    AVDictionary *format_opts = NULL, *codec_opts = NULL;

    setFFmpegDefaultOpts();

    mFormatCtx = avformat_alloc_context();
    if (!mFormatCtx)
    {
        ALOGE("oom for alloc avformat context");
        ret = -1;
        //goto fail;
        return ret;
    }
    mFormatCtx->interrupt_callback.callback = decodeInterruptCb;
    mFormatCtx->interrupt_callback.opaque = this;
    ALOGV("mFilename: %s", mFilename);
    err = avformat_open_input(&mFormatCtx, mFilename, NULL, &format_opts);
    if (err < 0) {
        ALOGE("avformat_open_input(%s) failed: %s (%08x)", mFilename, av_err2str(err), err);
        ret = -1;
        //goto fail;
        return ret;
    }

    if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        ALOGE("Option %s not found.\n", t->key);
        //ret = AVERROR_OPTION_NOT_FOUND;
        ret = -1;
        av_dict_free(&format_opts);
        //goto fail;
        return ret;
    }

    av_dict_free(&format_opts);

    if (mGenPTS)
        mFormatCtx->flags |= AVFMT_FLAG_GENPTS;

    opts = setup_find_stream_info_opts(mFormatCtx, codec_opts);
    orig_nb_streams = mFormatCtx->nb_streams;

    err = avformat_find_stream_info(mFormatCtx, opts);
    if (err < 0) {
        ALOGE("avformat_find_stream_info(%s) failed: %s (%08x)", mFilename, av_err2str(err), err);
        ret = -1;
        //goto fail;
        return ret;
    }
    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    if (mFormatCtx->pb)
        mFormatCtx->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if (mSeekByBytes < 0)
        mSeekByBytes = !!(mFormatCtx->iformat->flags & AVFMT_TS_DISCONT)
            && strcmp("ogg", mFormatCtx->iformat->name);

    for (i = 0; i < (int)mFormatCtx->nb_streams; i++)
        mFormatCtx->streams[i]->discard = AVDISCARD_ALL;
    if (!mVideoDisable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(mFormatCtx, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!mAudioDisable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(mFormatCtx, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (mShowStatus) {
        av_dump_format(mFormatCtx, 0, mFilename, 0);
    }

    if (mFormatCtx->duration != AV_NOPTS_VALUE &&
            mFormatCtx->start_time != AV_NOPTS_VALUE) {
        int hours, mins, secs, us;

        ALOGV("file startTime: %" PRId64, mFormatCtx->start_time);

        mDuration = mFormatCtx->duration;

        secs = mDuration / AV_TIME_BASE;
        us = mDuration % AV_TIME_BASE;
        mins = secs / 60;
        secs %= 60;
        hours = mins / 60;
        mins %= 60;
        ALOGI("the duration is %02d:%02d:%02d.%02d",
            hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        audio_ret = streamComponentOpen(st_index[AVMEDIA_TYPE_AUDIO]);
        if (audio_ret >= 0)
            packet_queue_start(mAudioQ);
    }

    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        video_ret = streamComponentOpen(st_index[AVMEDIA_TYPE_VIDEO]);
        if (video_ret >= 0)
            packet_queue_start(mVideoQ);
    }

    if (audio_ret < 0 && video_ret < 0) {
        ALOGE("initStreams(%s) could not find any audio/video", mFilename);
        ret = -1;
        //goto fail;
        return ret;
    }

    ret = 0;

//fail:
    return ret;
}
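
// Note (editor): initStreams() mirrors the ffplay open path: allocate the
// context, avformat_open_input(), avformat_find_stream_info() with per-stream
// option dictionaries from setup_find_stream_info_opts(), then pick the best
// audio/video streams. The fourth argument passed to av_find_best_stream()
// for audio (st_index[AVMEDIA_TYPE_VIDEO]) asks FFmpeg to prefer an audio
// stream related to the already-chosen video stream, e.g. one from the same
// program in a transport stream.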

void FFmpegExtractor::deInitStreams()
{
    if (mAudioStreamIdx >= 0)
        streamComponentClose(mAudioStreamIdx);
    if (mVideoStreamIdx >= 0)
        streamComponentClose(mVideoStreamIdx);

    if (mFormatCtx) {
        avformat_close_input(&mFormatCtx);
    }
}

int FFmpegExtractor::feedNextPacket() {
    AVPacket pkt1, *pkt = &pkt1;
    int ret;

    // Shortcut if EOF already reached

    if (mEOF) {
        return AVERROR_EOF;
    }

    // Read next frame

    ret = av_read_frame(mFormatCtx, pkt);
    if (ret < 0) {
        if (ret == AVERROR_EOF) {
            ALOGV("file reached EOF");
        } else {
            ALOGE("failed to read next frame: %s (%08x)", av_err2str(ret), ret);
        }
        mEOF = true;
        return AVERROR_EOF;
    }
    mPktCounter++;

#if DEBUG_PKT
    ALOGV("next packet [%d] pts=%" PRId64 ", dts=%" PRId64 ", size=%d",
          pkt->stream_index, pkt->pts, pkt->dts, pkt->size);
#endif

    // Handle bitstream filter and deferred track creation

    if (pkt->stream_index == mVideoStreamIdx) {
        if (mDefersToCreateVideoTrack) {
            AVCodecParameters *avpar = mFormatCtx->streams[mVideoStreamIdx]->codecpar;
            int i = parser_split(avpar, pkt->data, pkt->size);

            if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
                if (avpar->extradata) {
                    av_freep(&avpar->extradata);
                }
                avpar->extradata_size = i;
                avpar->extradata = (uint8_t *)av_malloc(avpar->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
                if (avpar->extradata) {
                    // sps + pps(there may be sei in it)
                    memcpy(avpar->extradata, pkt->data, avpar->extradata_size);
                    memset(avpar->extradata + i, 0, AV_INPUT_BUFFER_PADDING_SIZE);
                } else {
                    ALOGE("[video] failed to allocate new extradata");
                    return AVERROR(ENOMEM);
                }
            } else {
                av_packet_unref(pkt);
                return AVERROR(EAGAIN);
            }

            streamComponentOpen(mVideoStreamIdx);
            if (!mDefersToCreateVideoTrack) {
                ALOGI("[video] probe packet counter: %zu when track created", mPktCounter);
            }
        }
    } else if (pkt->stream_index == mAudioStreamIdx) {
        AVCodecParameters *avpar = mFormatCtx->streams[mAudioStreamIdx]->codecpar;

        if (mAudioBsfc && pkt->data) {
            ret = av_bsf_send_packet(mAudioBsfc, pkt);
            if (ret < 0) {
                ALOGE("[audio::%s] failed to send packet to filter, err = %d", mAudioBsfc->filter->name, ret);
                av_packet_unref(pkt);
                return ret;
            }
            ret = av_bsf_receive_packet(mAudioBsfc, pkt);
            if (ret < 0) {
                ALOGE_IF(ret != AVERROR(EAGAIN), "[audio::%s] failed to receive packet from filter, err=%d",
                         mAudioBsfc->filter->name, ret);
                av_packet_unref(pkt);
                return ret;
            }
            if (mDefersToCreateAudioTrack && avpar->extradata_size <= 0) {
                size_t new_extradata_size = 0;
                uint8_t* new_extradata = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &new_extradata_size);

                if (new_extradata_size > 0) {
                    ALOGV("[audio::%s] extradata found, len=%zd", mAudioBsfc->filter->name, new_extradata_size);
                    avpar->extradata = (uint8_t*)av_mallocz(new_extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
                    if (avpar->extradata) {
                        memcpy(avpar->extradata, new_extradata, new_extradata_size);
                        avpar->extradata_size = new_extradata_size;
                    } else {
                        ALOGE("[audio::%s] failed to allocate new extradata", mAudioBsfc->filter->name);
                        return AVERROR(ENOMEM);
                    }
                }
            }
        }

        if (mDefersToCreateAudioTrack) {
            if (avpar->extradata_size <= 0) {
                av_packet_unref(pkt);
                return AVERROR(EAGAIN);
            }
            streamComponentOpen(mAudioStreamIdx);
            if (!mDefersToCreateAudioTrack) {
                ALOGI("[audio] probe packet counter: %zu when track created", mPktCounter);
            }
        }
    }

    // Queue frame

    if (pkt->stream_index == mVideoStreamIdx) {
        packet_queue_put(mVideoQ, pkt);
        return mVideoStreamIdx;
    } else if (pkt->stream_index == mAudioStreamIdx) {
        packet_queue_put(mAudioQ, pkt);
        return mAudioStreamIdx;
    } else {
        av_packet_unref(pkt);
        return AVERROR(EAGAIN);
    }
}

int FFmpegExtractor::getPacket(int trackIndex, AVPacket *pkt) {
    TrackInfo& track = mTracks.editItemAt(trackIndex);
    const char* type = av_get_media_type_string(track.mStream->codecpar->codec_type);
    int err;

    while (true) {
        Mutex::Autolock _l(mLock);

        err = packet_queue_get(track.mQueue, pkt, 0);
        if (err > 0) {
            if (track.mSeek && (pkt->flags & AV_PKT_FLAG_KEY) != 0) {
                ALOGV("[%s] (seek) key frame found @ ts=%" PRId64,
                      type, pkt->pts != AV_NOPTS_VALUE ? av_rescale_q(pkt->pts, track.mStream->time_base, AV_TIME_BASE_Q) : -1);
                track.mSeek = false;
            }
            if (!track.mSeek) {
                return 0;
            } else {
                ALOGV("[%s] (seek) drop non key frame", type);
            }
        } else if (err < 0) {
            return AVERROR_UNKNOWN;
        } else if (err == 0) {
            err = feedNextPacket();
            if (err < 0 && err != AVERROR(EAGAIN)) {
                return err;
            }
        }
    }
}
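
// Note (editor): getPacket() is the heart of the pull model used here. Each
// track reads from its own packet queue; when its queue is empty (err == 0),
// feedNextPacket() pulls one more packet from the demuxer and routes it to
// whichever track's queue it belongs to. A video read can therefore buffer
// audio packets as a side effect (and vice versa) without a dedicated demux
// thread. The loop only exits with a dequeued packet, a queue error, or a
// fatal feed error other than EAGAIN.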



FFmpegSource::FFmpegSource(
        FFmpegExtractor *extractor, size_t index)
    : mExtractor(extractor),
      mTrackIndex(index),
      mIsAVC(false),
      mIsHEVC(false),
      mNal2AnnexB(false),
      mStream(mExtractor->mTracks.itemAt(index).mStream),
      mQueue(mExtractor->mTracks.itemAt(index).mQueue), 
      mLastPTS(AV_NOPTS_VALUE),
      mTargetTime(AV_NOPTS_VALUE) {
        AMediaFormat *meta = mExtractor->mTracks.itemAt(index).mMeta;
        AVCodecParameters *avpar = mStream->codecpar;

        mMediaType = mStream->codecpar->codec_type;
        mFirstKeyPktTimestamp = AV_NOPTS_VALUE;

        ALOGV("[%s] FFmpegSource::FFmpegSource", av_get_media_type_string(mMediaType));

        /* Parse codec specific data */
        if (avpar->codec_id == AV_CODEC_ID_H264 && avpar->extradata_size > 0 && avpar->extradata[0] == 1) {
            mIsAVC = true;

            // uint32_t type;
            void *data;
            size_t size;
            CHECK(AMediaFormat_getBuffer(meta, AMEDIAFORMAT_KEY_CSD_AVC, &data, &size));

            const uint8_t *ptr = (const uint8_t *) data;

            CHECK(size >= 7);
            CHECK_EQ((unsigned) ptr[0], 1u); // configurationVersion == 1

            // The number of bytes used to encode the length of a NAL unit.
            mNALLengthSize = 1 + (ptr[4] & 3);

            ALOGV("[video] the stream is AVC, the length of a NAL unit: %zu", mNALLengthSize);

            mNal2AnnexB = true;
        } else if (avpar->codec_id == AV_CODEC_ID_HEVC && avpar->extradata_size > 3 &&
                   (avpar->extradata[0] || avpar->extradata[1] || avpar->extradata[2] > 1)) {
            /* It seems the extradata is encoded as hvcC format.
             * Temporarily, we support configurationVersion==0 until 14496-15 3rd
             * is finalized. When finalized, configurationVersion will be 1 and we
             * can recognize hvcC by checking if avpar->extradata[0]==1 or not. */
            mIsHEVC = true;

            void *data;
            size_t size;
            CHECK(AMediaFormat_getBuffer(meta, AMEDIAFORMAT_KEY_CSD_HEVC, &data, &size));

            const uint8_t *ptr = (const uint8_t *) data;

            CHECK(size >= 7);
            // CHECK_EQ((unsigned)ptr[0], 1u);  // configurationVersion == 1

            // The number of bytes used to encode the length of a NAL unit.
            mNALLengthSize = 1 + (ptr[21] & 3);

            ALOGD("[video] the stream is HEVC, the length of a NAL unit: %zu", mNALLengthSize);

            mNal2AnnexB = true;
        }
}
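
// Note (editor): both branches above read the "lengthSizeMinusOne" field of
// the codec configuration record (ISO/IEC 14496-15). For avcC the first bytes
// are laid out as:
//
//     ptr[0]  configurationVersion (1)
//     ptr[1]  AVCProfileIndication
//     ptr[2]  profile_compatibility
//     ptr[3]  AVCLevelIndication
//     ptr[4]  0b111111xx, xx = lengthSizeMinusOne
//
// hence mNALLengthSize = 1 + (ptr[4] & 3). hvcC keeps the equivalent two bits
// at the bottom of byte 21, hence 1 + (ptr[21] & 3). Only values of 3 or 4 are
// usable later, since convertNal2AnnexB() rejects anything else.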

FFmpegSource::~FFmpegSource() {
    ALOGV("[%s] FFmpegSource::~FFmpegSource",
            av_get_media_type_string(mMediaType));
    mExtractor = NULL;
}

media_status_t FFmpegSource::start() {
    ALOGV("[%s] FFmpegSource::start",
          av_get_media_type_string(mMediaType));
    mBufferGroup->init(1, 1024, 64);
    return AMEDIA_OK;
}

media_status_t FFmpegSource::stop() {
    ALOGV("[%s] FFmpegSource::stop",
          av_get_media_type_string(mMediaType));
    return AMEDIA_OK;
}

media_status_t FFmpegSource::getFormat(AMediaFormat *meta) {
    AMediaFormat_copy(meta, mExtractor->mTracks.itemAt(mTrackIndex).mMeta);
    return AMEDIA_OK;
}

media_status_t FFmpegSource::read(
        MediaBufferHelper **buffer, const ReadOptions *options) {
    *buffer = NULL;

    AVPacket pkt;
    ReadOptions::SeekMode mode;
    int64_t pktTS = AV_NOPTS_VALUE;
    int64_t seekTimeUs = AV_NOPTS_VALUE;
    int64_t timeUs = AV_NOPTS_VALUE;
    int key = 0;
    media_status_t status = AMEDIA_OK;
    int max_negative_time_frame = 100;
    int err;

    // FIXME: should we really use mStream->start_time?
    // int64_t startTimeUs = mStream->start_time == AV_NOPTS_VALUE ? 0 :
    //     av_rescale_q(mStream->start_time, mStream->time_base, AV_TIME_BASE_Q);
    int64_t startTimeUs = 0;

    if (options && options->getSeekTo(&seekTimeUs, &mode)) {
        int64_t seekPTS = seekTimeUs;
        ALOGV("[%s] (seek) seekTimeUs: %" PRId64 ", seekPTS: %" PRId64 ", mode: %d",
              av_get_media_type_string(mMediaType), seekTimeUs, seekPTS, mode);
        /* add the stream start time */
        if (mStream->start_time != AV_NOPTS_VALUE) {
            seekPTS += startTimeUs;
        }
        ALOGV("[%s] (seek) seekTimeUs[+startTime]: %" PRId64 ", mode: %d start_time=%" PRId64,
              av_get_media_type_string(mMediaType), seekPTS, mode, startTimeUs);
        mExtractor->streamSeek(mTrackIndex, seekPTS, mode);
    }

retry:
    err = mExtractor->getPacket(mTrackIndex, &pkt);
    if (err < 0) {
        if (err == AVERROR_EOF) {
            ALOGV("[%s] read EOS", av_get_media_type_string(mMediaType));
        } else {
            ALOGE("[%s] read error: %s (%08x)", av_get_media_type_string(mMediaType), av_err2str(err), err);
        }
        return AMEDIA_ERROR_END_OF_STREAM;
    }

    key = pkt.flags & AV_PKT_FLAG_KEY ? 1 : 0;
    pktTS = pkt.pts == AV_NOPTS_VALUE ? pkt.dts : pkt.pts;

    if (pktTS != AV_NOPTS_VALUE && mFirstKeyPktTimestamp == AV_NOPTS_VALUE) {
        // update the first key timestamp
        mFirstKeyPktTimestamp = pktTS;
    }

    MediaBufferHelper *mediaBuffer;
    mBufferGroup->acquire_buffer(&mediaBuffer, false, pkt.size + AV_INPUT_BUFFER_PADDING_SIZE);
    AMediaFormat_clear(mediaBuffer->meta_data());
    mediaBuffer->set_range(0, pkt.size);

    // copy data
    if ((mIsAVC || mIsHEVC) && mNal2AnnexB) {
        /* This only works for NAL sizes 3-4 */
        if ((mNALLengthSize != 3) && (mNALLengthSize != 4)) {
            ALOGE("[%s] cannot use convertNal2AnnexB, nal size: %zu",
                  av_get_media_type_string(mMediaType), mNALLengthSize);
            mediaBuffer->release();
            mediaBuffer = NULL;
            av_packet_unref(&pkt);
            return AMEDIA_ERROR_MALFORMED;
        }

        uint8_t *dst = (uint8_t *) mediaBuffer->data();
        /* Convert H.264 NAL format to annex b */
        status = convertNal2AnnexB(dst, pkt.size, pkt.data, pkt.size, mNALLengthSize);
        if (status != AMEDIA_OK) {
            ALOGE("[%s] convertNal2AnnexB failed",
                  av_get_media_type_string(mMediaType));
            mediaBuffer->release();
            mediaBuffer = NULL;
            av_packet_unref(&pkt);
            return AMEDIA_ERROR_MALFORMED;
        }
    } else {
        memcpy(mediaBuffer->data(), pkt.data, pkt.size);
    }

    if (pktTS != AV_NOPTS_VALUE)
        timeUs = av_rescale_q(pktTS, mStream->time_base, AV_TIME_BASE_Q) - startTimeUs;
    else
        timeUs = SF_NOPTS_VALUE; //FIXME AV_NOPTS_VALUE is negative, but Stagefright needs a positive value

    if (timeUs < 0) {
        ALOGE("[%s] negative timestamp encounter: time: %" PRId64
               " startTimeUs: %" PRId64
               " packet dts: %" PRId64
               " packet pts: %" PRId64
               , av_get_media_type_string(mMediaType), timeUs, startTimeUs, pkt.dts, pkt.pts);
        mediaBuffer->release();
        mediaBuffer = NULL;
        av_packet_unref(&pkt);
        if (max_negative_time_frame-- > 0) {
            goto retry;
        } else {
            ALOGE("[%s] too many negative timestamp packets, abort decoding",
                  av_get_media_type_string(mMediaType));
            return AMEDIA_ERROR_MALFORMED;
        }
    }

    // FIXME: figure out what this is supposed to do...
    // // predict the next PTS to use for exact-frame seek below
    // int64_t nextPTS = AV_NOPTS_VALUE;
    // if (mLastPTS != AV_NOPTS_VALUE && timeUs > mLastPTS) {
    //     nextPTS = timeUs + (timeUs - mLastPTS);
    //     mLastPTS = timeUs;
    // } else if (mLastPTS == AV_NOPTS_VALUE) {
    //     mLastPTS = timeUs;
    // }

#if DEBUG_PKT
    if (pktTS != AV_NOPTS_VALUE)
        ALOGV("[%s] read pkt, size:%d, key:%d, pktPTS: %lld, pts:%lld, dts:%lld, timeUs[-startTime]:%lld us (%.2f secs) start_time=%lld",
            av_get_media_type_string(mMediaType), pkt.size, key, pktTS, pkt.pts, pkt.dts, timeUs, timeUs/1E6, startTimeUs);
    else
        ALOGV("[%s] read pkt, size:%d, key:%d, pts:N/A, dts:N/A, timeUs[-startTime]:N/A",
            av_get_media_type_string(mMediaType), pkt.size, key);
#endif

    AMediaFormat_setInt64(mediaBuffer->meta_data(), AMEDIAFORMAT_KEY_TIME_US, timeUs);
    AMediaFormat_setInt32(mediaBuffer->meta_data(), AMEDIAFORMAT_KEY_IS_SYNC_FRAME, key);

    // FIXME: also figure out what this is supposed to do...
    // // deal with seek-to-exact-frame, we might be off a bit and Stagefright will assert on us
    // if (seekTimeUs != AV_NOPTS_VALUE && timeUs < seekTimeUs &&
    //         mode == MediaSource::ReadOptions::SEEK_CLOSEST) {
    //     mTargetTime = seekTimeUs;
    //     AMediaFormat_setInt64(mediaBuffer->meta_data(), AMEDIAFORMAT_KEY_TARGET_TIME, seekTimeUs);
    // }

    // if (mTargetTime != AV_NOPTS_VALUE) {
    //     if (timeUs == mTargetTime) {
    //         mTargetTime = AV_NOPTS_VALUE;
    //     } else if (nextPTS != AV_NOPTS_VALUE && nextPTS > mTargetTime) {
    //         ALOGV("[%s] adjust target frame time to %" PRId64,
    //               av_get_media_type_string(mMediaType), timeUs);
    //         AMediaFormat_setInt64(mediaBuffer->meta_data(), AMEDIAFORMAT_KEY_TIME_US, mTargetTime);
    //         mTargetTime = AV_NOPTS_VALUE;
    //     }
    // }

    *buffer = mediaBuffer;

    av_packet_unref(&pkt);

    return AMEDIA_OK;
}



typedef struct {
    const char *format;
    const char *container;
} formatmap;

static formatmap FILE_FORMATS[] = {
        {"mpeg",                    MEDIA_MIMETYPE_CONTAINER_MPEG2PS  },
        {"mov,mp4,m4a,3gp,3g2,mj2", MEDIA_MIMETYPE_CONTAINER_MPEG4    },
        {"matroska,webm",           MEDIA_MIMETYPE_CONTAINER_MATROSKA },
        {"asf",                     MEDIA_MIMETYPE_CONTAINER_ASF      },
        {"avi",                     MEDIA_MIMETYPE_CONTAINER_AVI      },
        {"ape",                     MEDIA_MIMETYPE_CONTAINER_APE      },
        {"mp3",                     MEDIA_MIMETYPE_AUDIO_MPEG         },
        {"vc1",                     MEDIA_MIMETYPE_CONTAINER_VC1      },
};
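
// Note (editor): the "format" keys above are FFmpeg demuxer names as reported
// in ic->iformat->name, not file extensions. findMatchingContainer() further
// down compares them with strncasecmp() over the full table-entry length, so
// the demuxer name must start with the entry verbatim; e.g. the mov demuxer
// reports "mov,mp4,m4a,3gp,3g2,mj2", which matches the MPEG4 row exactly.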

static AVCodecParameters* getCodecParameters(AVFormatContext *ic, AVMediaType codec_type)
{
    unsigned int idx = 0;
    AVCodecParameters *avpar = NULL;

    for (idx = 0; idx < ic->nb_streams; idx++) {
        if (ic->streams[idx]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
            // FFMPEG exposes album art as an attached MJPEG picture stream.
            // Skip it here: MJPEG is not supported by Android, so counting it
            // as a real video stream would force media that Android could
            // otherwise handle to be extracted by FFMPEG.
            continue;
        }

        avpar = ic->streams[idx]->codecpar;
        if (avpar->codec_tag == MKTAG('j', 'p', 'e', 'g')) {
            // Sometimes the disposition isn't set
            continue;
        }
        if (avpar->codec_type == codec_type) {
            return avpar;
        }
    }

    return NULL;
}

static enum AVCodecID getCodecId(AVFormatContext *ic, AVMediaType codec_type)
{
    AVCodecParameters *avpar = getCodecParameters(ic, codec_type);
    return avpar == NULL ? AV_CODEC_ID_NONE : avpar->codec_id;
}

static bool hasAudioCodecOnly(AVFormatContext *ic)
{
    bool haveVideo = false;
    bool haveAudio = false;

    if (getCodecId(ic, AVMEDIA_TYPE_VIDEO) != AV_CODEC_ID_NONE) {
        haveVideo = true;
    }
    if (getCodecId(ic, AVMEDIA_TYPE_AUDIO) != AV_CODEC_ID_NONE) {
        haveAudio = true;
    }

    if (!haveVideo && haveAudio) {
        return true;
    }

    return false;
}

//FIXME all codecs: frameworks/av/media/libstagefright/codecs/*
static bool isCodecSupportedByStagefright(enum AVCodecID codec_id)
{
    bool supported = false;

    switch(codec_id) {
    //video
    case AV_CODEC_ID_HEVC:
    case AV_CODEC_ID_H264:
    case AV_CODEC_ID_MPEG4:
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_H263I:
    case AV_CODEC_ID_VP6:
    case AV_CODEC_ID_VP8:
    case AV_CODEC_ID_VP9:
    //audio
    case AV_CODEC_ID_AAC:
    case AV_CODEC_ID_FLAC:
    case AV_CODEC_ID_MP3:
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_PCM_MULAW: //g711
    case AV_CODEC_ID_PCM_ALAW:  //g711
    case AV_CODEC_ID_GSM_MS:
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S24LE:
    case AV_CODEC_ID_OPUS:
        supported = true;
        break;

    default:
        break;
    }

    ALOGD("%ssupported codec(%s) by official Stagefright",
            (supported ? "" : "un"),
            avcodec_get_name(codec_id));

    return supported;
}

static bool isCodecSupportedByFFMPEG(enum AVCodecID codec_id)
{
    bool supported = false;
    switch(codec_id) {
    case AV_CODEC_ID_WMV1:
    case AV_CODEC_ID_WMV2:
    case AV_CODEC_ID_WMV3:
    case AV_CODEC_ID_VC1:
    case AV_CODEC_ID_APE:
    case AV_CODEC_ID_WMAV1:
    case AV_CODEC_ID_WMAV2:
    case AV_CODEC_ID_WMAPRO:
    case AV_CODEC_ID_WMALOSSLESS:
    case AV_CODEC_ID_MP3:
    case AV_CODEC_ID_H264:
    case AV_CODEC_ID_MPEG4:
    case AV_CODEC_ID_H263:
    case AV_CODEC_ID_H263I:
    case AV_CODEC_ID_H263P:
    case AV_CODEC_ID_MPEG2VIDEO:
        supported = true;
        break;
    default:
        break;
    }
    ALOGD("%ssupported codec(%s) by this NBCodec",
            (supported ? "" : "un"),
            avcodec_get_name(codec_id));
    return supported;
}

static void adjustMPEG2PSConfidence(AVFormatContext *ic, float *confidence) {
    //enum AVCodecID codec_id = AV_CODEC_ID_NONE;

    // codec_id = getCodecId(ic, AVMEDIA_TYPE_VIDEO);
    // if (codec_id != AV_CODEC_ID_NONE && codec_id != AV_CODEC_ID_H264 && codec_id != AV_CODEC_ID_MPEG4 &&
    //     codec_id != AV_CODEC_ID_MPEG1VIDEO && codec_id != AV_CODEC_ID_MPEG2VIDEO) {
    // the confidence of MEDIA_MIMETYPE_CONTAINER_MPEG2TS is 0.25f
    //ALOGI("[mpeg2ps] video codec, confidence should be larger than MPEG2PSExtractor");
    *confidence = 0.88f;
    // }

    // codec_id = getCodecId(ic, AVMEDIA_TYPE_AUDIO);
    // if (codec_id != AV_CODEC_ID_NONE && codec_id != AV_CODEC_ID_AAC && codec_id != AV_CODEC_ID_PCM_S16LE &&
    //     codec_id != AV_CODEC_ID_PCM_S24LE && codec_id != AV_CODEC_ID_MP1 && codec_id != AV_CODEC_ID_MP2 &&
    //     codec_id != AV_CODEC_ID_MP3) {
    //     ALOGI("[mpeg2ps] audio codec(%s), confidence should be larger than MPEG2PSExtractor",
    //           avcodec_get_name(codec_id));
    //     *confidence = 0.26f;
    // }

}
static void adjustAVIConfidence(AVFormatContext *ic, float *confidence)
{
    AVCodecParameters *avpar = NULL;

//    codec_id = getCodecId(ic, AVMEDIA_TYPE_VIDEO);
    avpar = getCodecParameters(ic, AVMEDIA_TYPE_VIDEO);
    if ((avpar != NULL) && isXviDVideo(avpar)) {
        ALOGI("[avi] video codec tag(xvid), confidence should be larger than other AVIExtractor");
        *confidence = 0.9f; // higher than QCOM MM Parser
    }

    avpar = getCodecParameters(ic, AVMEDIA_TYPE_AUDIO);
    if ((avpar != NULL) && (avpar->codec_id == AV_CODEC_ID_MP3)) {
        ALOGI("[avi] audio codec tag(mp3), confidence should be larger than other AVIExtractor");
        *confidence = 0.9f;
    }

}

static void adjustMKVConfidence(AVFormatContext *ic, float *confidence)
{
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;
    AVCodecParameters *avpar = NULL;

    codec_id = getCodecId(ic, AVMEDIA_TYPE_VIDEO);
    avpar = getCodecParameters(ic, AVMEDIA_TYPE_VIDEO);
    if ((avpar != NULL) && isXviDVideo(avpar)) {
        ALOGI("[mkv] video codec(%s) tag(xvid), confidence should be larger than MatroskaExtractor",
                avcodec_get_name(codec_id));
        *confidence = 0.9f; // higher than QCOM MM Parser
    }
}

static void adjustCodecConfidence(AVFormatContext *ic, float *confidence)
{
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;

    codec_id = getCodecId(ic, AVMEDIA_TYPE_VIDEO);
    if (codec_id != AV_CODEC_ID_NONE) {
        if (isCodecSupportedByFFMPEG(codec_id)) {
            *confidence = 0.88f;
        }
        return; // decide on the video codec alone; don't fall through to the audio check
    }

    codec_id = getCodecId(ic, AVMEDIA_TYPE_AUDIO);
    if (codec_id != AV_CODEC_ID_NONE) {
        if (isCodecSupportedByFFMPEG(codec_id)) {
            *confidence = 0.88f;
        }
    }
}

//TODO need more checks
static void adjustConfidenceIfNeeded(const char *mime,
        AVFormatContext *ic, float *confidence)
{
    if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_AVI)) {
        adjustAVIConfidence(ic, confidence);
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MPEG2PS)) {
        adjustMPEG2PSConfidence(ic, confidence);
    } else if (!strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_MATROSKA)) {
        adjustMKVConfidence(ic, confidence);
    } else {
        // TODO: adjust other containers here if needed
    }

    if (*confidence < 0.88f) {
        adjustCodecConfidence(ic, confidence);
    }
}

static void adjustContainerIfNeeded(const char **mime, AVFormatContext *ic)
{
    const char *newMime = *mime;
    enum AVCodecID codec_id = AV_CODEC_ID_NONE;

    if (hasAudioCodecOnly(ic)) {
        codec_id = getCodecId(ic, AVMEDIA_TYPE_AUDIO);
        CHECK(codec_id != AV_CODEC_ID_NONE);
        switch (codec_id) {
        case AV_CODEC_ID_MP3:
            newMime = MEDIA_MIMETYPE_AUDIO_MPEG;
            break;
        case AV_CODEC_ID_AAC:
            newMime = MEDIA_MIMETYPE_AUDIO_AAC;
            break;
        case AV_CODEC_ID_APE:
            newMime = MEDIA_MIMETYPE_AUDIO_APE;
            break;
        case AV_CODEC_ID_WMAV1:
        case AV_CODEC_ID_WMAV2:
        case AV_CODEC_ID_WMAPRO:
        case AV_CODEC_ID_WMALOSSLESS:
            newMime = MEDIA_MIMETYPE_AUDIO_WMA;
            break;
        default:
            break;
        }

        if (!strcmp(*mime, MEDIA_MIMETYPE_CONTAINER_FFMPEG)) {
            newMime = MEDIA_MIMETYPE_AUDIO_FFMPEG;
        }
    }

    if (strcmp(*mime, newMime)) {
        ALOGI("adjust mime(%s -> %s)", *mime, newMime);
        *mime = newMime;
    }
}

static const char *findMatchingContainer(const char *name)
{
    size_t i = 0;
#if SUPPOURT_UNKNOWN_FORMAT
    // The FFmpegExtractor supports all FFmpeg formats.
    // An unknown format is reported as MEDIA_MIMETYPE_CONTAINER_FFMPEG.
    const char *container = MEDIA_MIMETYPE_CONTAINER_FFMPEG;
#else
    const char *container = NULL;
#endif

    for (i = 0; i < NELEM(FILE_FORMATS); ++i) {
        int len = strlen(FILE_FORMATS[i].format);
        if (!strncasecmp(name, FILE_FORMATS[i].format, len)) {
            container = FILE_FORMATS[i].container;
            break;
        }
    }

    return container;
}

static const char *SniffFFMPEGCommon(const char *url, float *confidence, bool isStreaming)
{
    int err = 0;
    size_t i = 0;
    size_t nb_streams = 0;
    int64_t timeNow = 0;
    const char *container = NULL;
    AVFormatContext *ic = NULL;
    AVDictionary *codec_opts = NULL;
    AVDictionary **opts = NULL;
    bool needProbe = false;

    ALOGV("initFFmpeg url(%s)", url);
    static status_t status = initFFmpeg();
    if (status != OK) {
        ALOGE("could not init ffmpeg");
        return NULL;
    }

    ic = avformat_alloc_context();
    if (!ic)
    {
        ALOGE("oom for alloc avformat context");
        //goto fail;
        return NULL;
    }

    // Don't download more than a meg
    ic->probesize = 1024 * 1024;

    timeNow = ALooper::GetNowUs();

    err = avformat_open_input(&ic, url, NULL, NULL);

    if (err < 0) {
        ALOGE("avformat_open_input(%s) failed: %s (%08x)", url, av_err2str(err), err);
        //goto fail;
        if (ic) {
            avformat_close_input(&ic);
        }
        return container;
    }

    if (ic->iformat != NULL && ic->iformat->name != NULL) {
        container = findMatchingContainer(ic->iformat->name);
    }

    ALOGV("opened, nb_streams: %d container: %s delay: %.2f ms", ic->nb_streams, container,
            ((float)ALooper::GetNowUs() - timeNow) / 1000);

    // Only probe if absolutely necessary. For formats with headers, avformat_open_input will
    // figure out the components.
    for (unsigned int i = 0; i < ic->nb_streams; i++) {
        AVStream* stream = ic->streams[i];
        if (!stream->codecpar || !stream->codecpar->codec_id) {
            needProbe = true;
            break;
        }
        ALOGV("found stream %d id %d codec %s", i, stream->codecpar->codec_id, avcodec_get_name(stream->codecpar->codec_id));
    }

    // We must go deeper.
    if (!isStreaming && (!ic->nb_streams || needProbe)) {
        timeNow = ALooper::GetNowUs();

        opts = setup_find_stream_info_opts(ic, codec_opts);
        nb_streams = ic->nb_streams;
        err = avformat_find_stream_info(ic, opts);
        if (err < 0) {
            ALOGE("avformat_find_stream_info(%s) failed: %s (%08x)", url, av_err2str(err), err);
            //goto fail;
            if (ic) {
                avformat_close_input(&ic);
            }
            return container;
        }

        ALOGV("probed stream info after %.2f ms", ((float)ALooper::GetNowUs() - timeNow) / 1000);

        for (i = 0; i < nb_streams; i++) {
            av_dict_free(&opts[i]);
        }
        av_freep(&opts);

        av_dump_format(ic, 0, url, 0);
    }

    ALOGV("sniff(%s): format_name: %s, format_long_name: %s",
            url, ic->iformat->name, ic->iformat->long_name);

    container = findMatchingContainer(ic->iformat->name);
    if (container) {
        adjustContainerIfNeeded(&container, ic);
        adjustConfidenceIfNeeded(container, ic, confidence);
        if (*confidence == 0)
            container = NULL;
    }

//fail:
    if (ic) {
        avformat_close_input(&ic);
    }

    return container;
}
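
// Note (editor): ic->probesize caps how much data FFmpeg may consume while
// probing the format, matching the "Don't download more than a meg" comment
// (1 MiB). The avformat_find_stream_info() pass below only runs when some
// stream still has no codec_id after opening, because it is by far the most
// expensive step of the sniff for streams without self-describing headers.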

static const char *BetterSniffFFMPEG(CDataSource *source,
        float *confidence, AMessage *meta)
{
    const char *ret = NULL;
    char url[PATH_MAX] = {0};

    ALOGI("android-source:%p", source);

    // pass the addr of smart pointer("source")
    snprintf(url, sizeof(url), "android-source:%p", source);

    ret = SniffFFMPEGCommon(url, confidence,
            (source->flags(source->handle) & DataSourceBase::kIsCachingDataSource));
    if (ret) {
        meta->setString("extended-extractor-url", url);
    }

    return ret;
}

static const char *LegacySniffFFMPEG(CDataSource *source,
         float *confidence, AMessage *meta)
{
    const char *ret = NULL;
    char uri[PATH_MAX] = {0};
    char url[PATH_MAX] = {0};

    if (!source->getUri(source->handle, uri, sizeof(uri))) {
        return NULL;
    }

    if (source->flags(source->handle) & DataSourceBase::kIsCachingDataSource)
       return NULL;

    ALOGV("source url: %s", uri);

    // pass the addr of smart pointer("source") + file name
    snprintf(url, sizeof(url), "android-source:%p|file:%s", source, uri);

    ret = SniffFFMPEGCommon(url, confidence, false);
    if (ret) {
        meta->setString("extended-extractor-url", url);
    }

    return ret;
}

static void FreeMeta(void *meta) {
    if (meta != nullptr) {
        static_cast<AMessage *>(meta)->decStrong(nullptr);
    }
}

static bool isSupportedFormat(CDataSource *source) {
    const char mpeg_ps_signature[EXTRACTOR_SNIFF_MIN_BYTES] = {'\x00', '\x00', '\x01', '\xBA'};
    const char ape_signature[EXTRACTOR_SNIFF_MIN_BYTES] = {'M', 'A', 'C', '\x20'};
    const char asf_signature[EXTRACTOR_SNIFF_MAX_BYTES] = {'\x30', '\x26', '\xB2', '\x75',
                                                           '\x8E', '\x66', '\xCF', '\x11',
                                                           '\xA6', '\xD9', '\x00', '\xAA',
                                                           '\x00', '\x62', '\xCE', '\x6C'};
    const char avi_signature[EXTRACTOR_SNIFF_MIN_BYTES] = {'R', 'I', 'F', 'F'};
    const char mkv_signature[EXTRACTOR_SNIFF_MIN_BYTES] = {'\x1A', '\x45', '\xDF', '\xA3'};
    bool ret = false;

    DataSourceHelper helper(source);
    uint8_t header[EXTRACTOR_SNIFF_MAX_BYTES];
    if (helper.readAt(0, header, sizeof(header)) != sizeof(header)) {
        return false; // no more file to read.
    }

    if (memcmp(ape_signature, header, EXTRACTOR_SNIFF_MIN_BYTES) == 0) {
        ret = true;
    } else if (memcmp(asf_signature, header, EXTRACTOR_SNIFF_MAX_BYTES) == 0) {
        ret = true;
    } else if (memcmp(avi_signature, header, EXTRACTOR_SNIFF_MIN_BYTES) == 0) {
        ret = true;
    } else if (memcmp(mkv_signature, header, EXTRACTOR_SNIFF_MIN_BYTES) == 0) {
        ret = true;
    } else if (memcmp(mpeg_ps_signature, header, EXTRACTOR_SNIFF_MIN_BYTES) == 0) {
        ALOGI("file is mpeg");
        // MPG or MPEG contains MPEG PS, MPEG TS, and RAW MPEG Video. MPEG TS ended with TS suffix. RAW MPEG Video does not support it for the time being. Here only support MPEG PS here.
        ret = true;
    }

    return ret;
}
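
// Note (editor): the magic numbers above are the usual container signatures:
// "MAC " for Monkey's Audio (APE), the 16-byte ASF header GUID
// (30 26 B2 75 8E 66 CF 11 A6 D9 00 AA 00 62 CE 6C) for ASF/WMV/WMA, "RIFF"
// for AVI, 1A 45 DF A3 (the EBML magic) for Matroska/WebM, and 00 00 01 BA
// (the pack start code) for MPEG program streams. Anything else is rejected
// here before the much more expensive FFmpeg-based probe runs.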

static CreatorFunc
SniffFFMPEG(
        CDataSource *source, float *confidence, void **meta,
        FreeMetaFunc *freeMeta) {

    float newConfidence = 0.0f;

    if (confidence == NULL) {
        return NULL;
    }

    ALOGV("SniffFFMPEG (initial confidence: %f)", *confidence);

    // This is a heavyweight sniffer, don't invoke it if Stagefright knows
    // what it is doing already.
    if (*confidence > 0.8f) {
        return NULL;
    }

    if (!isSupportedFormat(source)) {
        ALOGI("SniffFFMPEG unsupported file format, skip it");
        return NULL;
    }
    AMessage *msg = new AMessage;

    *meta = msg;
    *freeMeta = FreeMeta;
    msg->incStrong(nullptr);

    const char *container = BetterSniffFFMPEG(source, &newConfidence, msg);
    if (!container) {
        ALOGW("sniff through BetterSniffFFMPEG failed, try LegacySniffFFMPEG");
        container = LegacySniffFFMPEG(source, &newConfidence, msg);
        if (container) {
            ALOGV("sniff through LegacySniffFFMPEG success");
        }
    } else {
        ALOGV("sniff through BetterSniffFFMPEG success");
    }

    if (container == NULL) {
        ALOGD("SniffFFMPEG failed to sniff this source");
        msg->decStrong(nullptr);
        *meta = NULL;
        *freeMeta = NULL;
        return NULL;
    }

    ALOGD("ffmpeg detected media content as '%s' with confidence %.2f",
            container, newConfidence);

    msg->setString("extended-extractor", "extended-extractor");
    msg->setString("extended-extractor-subtype", "ffmpegextractor");
    msg->setString("extended-extractor-mime", container);

    //debug only
    char value[PROPERTY_VALUE_MAX];
    property_get("sys.media.parser.ffmpeg", value, "0");
    if (atoi(value)) {
        ALOGD("[debug] use ffmpeg parser");
        newConfidence = 0.88f;
    }

    if (newConfidence > *confidence) {
        msg->setString("extended-extractor-use", "ffmpegextractor");
        *confidence = newConfidence;
    }

    return CreateFFMPEGExtractor;
}

static CMediaExtractor *CreateFFMPEGExtractor(CDataSource *source, void *meta) {
    CMediaExtractor *ret = NULL;
    sp<AMessage> msg = static_cast<AMessage *>(meta);
    AString mime;
    ALOGD("CreateFFMPEGExtractor");
    if (msg.get() && msg->findString("extended-extractor-mime", &mime) && (
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_APE)           ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_WMA)           ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_AUDIO_FFMPEG)        ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_MATROSKA)  ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_MPEG2PS)   ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_AVI)       ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_ASF)       ||
            !strcasecmp(mime.c_str(), "video/mpeg") ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_WMV)       ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_APE)       ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_VC1)       ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_WMA)       ||
            !strcasecmp(mime.c_str(), MEDIA_MIMETYPE_CONTAINER_FFMPEG))) {
        ret = wrap(new FFmpegExtractor(new DataSourceHelper(source), msg));
    }

    ALOGD("%ssupported mime: %s", (ret ? "" : "un"), mime.c_str());
    return ret;
}

static const char* extensions[] = {
    "avi", "wmv", "asf", "ape", "mkv", "wma","mpg","mpeg",NULL
};

extern "C" {

__attribute__ ((visibility ("default")))
ExtractorDef GETEXTRACTORDEF() {
    ALOGD("GETEXTRACTORDEF");
    return {
        EXTRACTORDEF_VERSION,
        UUID("90fcc6d9-748a-4367-a328-497a86dec7e3"),
        1, // version
        "FFMPEG Extractor",
        { .v3 = { SniffFFMPEG, extensions } }
    };
}

}

}  // namespace android
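
GETEXTRACTORDEF() above is the entry point the media.extractor service resolves when it loads extractor plugin libraries, and the UUID/version pair is how the framework arbitrates between competing extractors for the same content. After pushing the library to the device, one way to confirm the plugin was picked up is to dump the extractor service (exact output varies by release):

adb shell dumpsys media.extractor

which should list "FFMPEG Extractor" among the available extractors.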

7. utils directory

(1) Android.mk

#
# Copyright
#

LOCAL_PATH := $(call my-dir)

include $(SF_COMMON_MK)

LOCAL_SRC_FILES := \
	ffmpeg_source.cpp \
	ffmpeg_utils.cpp \
	ffmpeg_cmdutils.c \
	codec_utils.cpp

LOCAL_SHARED_LIBRARIES += \
	libavcodec        \
	libavformat       \
	libavutil         \
	libcutils         \
	liblog            \
	libstagefright    \
	libstagefright_foundation \
	libswresample     \
	libswscale        \
	libutils \
	libmediandk

LOCAL_STATIC_LIBRARIES += libstagefright_metadatautils

LOCAL_HEADER_LIBRARIES += libaudio_system_headers media_ndk_headers

LOCAL_MODULE := libffmpeg_utils
LOCAL_SYSTEM_EXT_MODULE := true

LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)
LOCAL_EXPORT_SHARED_LIBRARY_HEADERS += \
	libavcodec        \
	libavformat       \
	libswresample     \
	libswscale
LOCAL_EXPORT_HEADER_LIBRARY_HEADERS += libaudio_system_headers media_ndk_headers

# Workaround for inline assembly tricks in FFMPEG which don't play nice with
# Clang when included from C++
LOCAL_CLANG_CFLAGS += -DAVUTIL_ARM_INTREADWRITE_H

include $(BUILD_SHARED_LIBRARY)

(2) codec_utils.h contents

/*
 * Copyright
 */

#ifndef CODEC_UTILS_H_

#define CODEC_UTILS_H_

#include <unistd.h>
#include <stdlib.h>

#include <utils/Errors.h>
#include <media/NdkMediaError.h>
#include <media/stagefright/foundation/ABuffer.h>

#include "ffmpeg_utils.h"

struct AMediaFormat;

namespace android {

// Helper data structures to pass extra information from the extractor to the codecs
typedef struct {
    int32_t codec_id;
    int32_t bits_per_coded_sample;
    int32_t block_align;
} FFMPEGAudioCodecInfo;

typedef struct {
    int32_t codec_id;
} FFMPEGVideoCodecInfo;

//video

media_status_t setAVCFormat(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setH264Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setMPEG4Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setH263Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setMPEG2VIDEOFormat(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setVC1Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMV1Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMV2Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMV3Format(AVCodecParameters *avpar, AMediaFormat *meta);
//audio
media_status_t setMP3Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setAC3Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setAACFormat(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMAV1Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMAV2Format(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMAProFormat(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setWMALossLessFormat(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setAPEFormat(AVCodecParameters *avpar, AMediaFormat *meta);
media_status_t setPCMFormat(AVCodecParameters *avpar, AMediaFormat *meta);

//Convert H.264 NAL format to annex b
media_status_t convertNal2AnnexB(uint8_t *dst, size_t dst_size,
        uint8_t *src, size_t src_size, size_t nal_len_size);

/**
 * @brief Check whether the video is encoded as XviD
 *
 * @param avpar pointer to the AVCodecParameters holding the codec parameters
 * @return true if the codec tag identifies XviD, false otherwise
 */
bool isXviDVideo(AVCodecParameters *avpar);
/**
 * @brief Parse a media file's metadata tags and map them onto Android's AMediaFormat
 *
 * @param ctx pointer to the AVFormatContext describing the media file
 * @param meta pointer to the AMediaFormat that receives the parsed metadata
 * @return AMEDIA_OK on success, AMEDIA_ERROR_INVALID_OPERATION otherwise
 */
media_status_t parseMetadataTags(AVFormatContext *ctx, AMediaFormat *meta);
/**
 * @brief Convert an AVSampleFormat to the corresponding AudioEncoding
 *
 * @param fmt the AVSampleFormat of the audio samples
 * @return the corresponding AudioEncoding value
 */
AudioEncoding sampleFormatToEncoding(AVSampleFormat fmt);

}  // namespace android

#endif  // CODEC_UTILS_H_

(3) codec_utils.cpp contents

/*
 * Copyright
 */

#define LOG_NDEBUG 0
#define LOG_TAG "ffmpeg_codec_utils"
#include <utils/Log.h>

extern "C" {

#include "config.h"
#include "libavcodec/xiph.h"
#include "libavutil/intreadwrite.h"

}

#include <utils/Errors.h>
#include <media/NdkMediaFormat.h>
#include <media/stagefright/foundation/ABitReader.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/avc_utils.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaDataUtils.h>

#include "codec_utils.h"

namespace android {

static void EncodeSize14(uint8_t **_ptr, size_t size) {
    CHECK_LE(size, 0x3fffu);

    uint8_t *ptr = *_ptr;

    *ptr++ = 0x80 | (size >> 7);
    *ptr++ = size & 0x7f;

    *_ptr = ptr;
}

static sp<ABuffer> MakeMPEGVideoESDS(const sp<ABuffer> &csd) {
    sp<ABuffer> esds = new ABuffer(csd->size() + 25);

    uint8_t *ptr = esds->data();
    *ptr++ = 0x03;
    EncodeSize14(&ptr, 22 + csd->size());

    *ptr++ = 0x00;  // ES_ID
    *ptr++ = 0x00;

    *ptr++ = 0x00;  // streamDependenceFlag, URL_Flag, OCRstreamFlag

    *ptr++ = 0x04;
    EncodeSize14(&ptr, 16 + csd->size());

    *ptr++ = 0x40;  // Audio ISO/IEC 14496-3

    for (size_t i = 0; i < 12; ++i) {
        *ptr++ = 0x00;
    }

    *ptr++ = 0x05;
    EncodeSize14(&ptr, csd->size());

    memcpy(ptr, csd->data(), csd->size());

    return esds;
}
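
// Note (editor): the buffer built above is a minimal MPEG-4 ES_Descriptor
// (ISO/IEC 14496-1) wrapping the codec-specific data:
//
//     0x03 ES_DescrTag           (payload = 22 + csd bytes)
//          ES_ID = 0x0000, flags = 0x00
//     0x04 DecoderConfigDescrTag (payload = 16 + csd bytes)
//          objectTypeIndication = 0x40, then 12 zero bytes
//          (streamType, bufferSizeDB, maxBitrate, avgBitrate all 0)
//     0x05 DecSpecificInfoTag    (payload = csd bytes)
//          <csd>
//
// Lengths are written by EncodeSize14() in the 14-bit "0x80 continuation"
// form. Strictly speaking, 0x40 is the objectTypeIndication for ISO/IEC
// 14496-3 audio; the upstream code reuses this builder for MPEG video CSD
// as-is, so that quirk is preserved here.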

//video
media_status_t setAVCFormat(AVCodecParameters *avpar, AMediaFormat *meta) {
    ALOGV("AVC");

    CHECK_EQ(avpar->codec_id, AV_CODEC_ID_H264);
    CHECK_GT(avpar->extradata_size, 0);
    CHECK_EQ(avpar->extradata[0], 1); // configurationVersion

    if (avpar->width == 0 || avpar->height == 0) {
        int32_t width, height;
        sp<ABuffer> seqParamSet = new ABuffer(avpar->extradata_size - 8);
        memcpy(seqParamSet->data(), avpar->extradata + 8, avpar->extradata_size - 8);
        FindAVCDimensions(seqParamSet, &width, &height);
        avpar->width = width;
        avpar->height = height;
    }

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_AVC);
    AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_CSD_AVC, avpar->extradata, avpar->extradata_size);

    return AMEDIA_OK;
}

// H.264 bitstream with start codes.
media_status_t setH264Format(AVCodecParameters *avpar, AMediaFormat *meta) {
    ALOGV("H264");

    CHECK_EQ(avpar->codec_id, AV_CODEC_ID_H264);
    CHECK_NE(avpar->extradata[0], 1); // configurationVersion

    if (!MakeAVCCodecSpecificData(meta, avpar->extradata, avpar->extradata_size)) {
        return AMEDIA_ERROR_UNKNOWN;
    }

    return AMEDIA_OK;
}
media_status_t setMPEG4Format(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("MPEG4");

    sp<ABuffer> csd = new ABuffer(avpar->extradata_size);
    memcpy(csd->data(), avpar->extradata, avpar->extradata_size);
    sp<ABuffer> esds = MakeMPEGVideoESDS(csd);

    AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_ESDS, esds->data(), esds->size());

    if (isXviDVideo(avpar)) {
        AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_XVID);
    } else {
        AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_MPEG4);
    }
    return AMEDIA_OK;
}

media_status_t setH263Format(AVCodecParameters *avpar __unused, AMediaFormat *meta) {
    ALOGV("H263");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_H263);

    return AMEDIA_OK;
}
media_status_t setMPEG2VIDEOFormat(AVCodecParameters *avpar, AMediaFormat *meta) {
    ALOGV("MPEG%uVIDEO", avpar->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 2 : 1);

    sp<ABuffer> csd = new ABuffer(avpar->extradata_size);
    memcpy(csd->data(), avpar->extradata, avpar->extradata_size);
    sp<ABuffer> esds = MakeMPEGVideoESDS(csd);

    AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_ESDS, esds->data(), esds->size());
    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_MPEG2);

    return AMEDIA_OK;
}

media_status_t setVC1Format(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("VC1");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_VC1);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);

    return AMEDIA_OK;
}

media_status_t setWMV1Format(AVCodecParameters *avpar __unused, AMediaFormat *meta)
{
    ALOGV("WMV1");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_WMV);
    AMediaFormat_setInt32(meta, "wmv-version", kTypeWMVVer_7);

    return AMEDIA_OK;
}

media_status_t setWMV2Format(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("WMV2");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_WMV);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, "wmv-version", kTypeWMVVer_8);

    return AMEDIA_OK;
}

media_status_t setWMV3Format(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("WMV3");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_WMV);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, "wmv-version", kTypeWMVVer_9);

    return AMEDIA_OK;
}

//audio
media_status_t setMP3Format(AVCodecParameters *avpar __unused, AMediaFormat *meta)
{
    ALOGV("MP3");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);

    return AMEDIA_OK;
}

media_status_t setAC3Format(AVCodecParameters *avpar __unused, AMediaFormat *meta) {
    ALOGV("AC3");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_AC3);

    return AMEDIA_OK;
}

media_status_t setAACFormat(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("AAC");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_AAC);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_AAC_PROFILE, avpar->profile + 1);

    return AMEDIA_OK;
}

media_status_t setWMAV1Format(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("WMAV1");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_WMA);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, "wma-version", kTypeWMA); //FIXME version?

    return AMEDIA_OK;
}

media_status_t setWMAV2Format(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("WMAV2");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_WMA);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, "wma-version", kTypeWMA); //FIXME version?

    return AMEDIA_OK;
}

media_status_t setWMAProFormat(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("WMAPro");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_WMA);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, "wma-version", kTypeWMAPro);

    return AMEDIA_OK;
}

media_status_t setWMALossLessFormat(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("WMALOSSLESS");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_WMA);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);
    AMediaFormat_setInt32(meta, "wma-version", kTypeWMALossLess);

    return AMEDIA_OK;
}

media_status_t setAPEFormat(AVCodecParameters *avpar, AMediaFormat *meta)
{
    ALOGV("APE");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_APE);
    AMediaFormat_setBuffer(meta, "raw-codec-specific-data", avpar->extradata, avpar->extradata_size);

    return AMEDIA_OK;
}

media_status_t setPCMFormat(AVCodecParameters *avpar __unused, AMediaFormat *meta)
{
    ALOGV("PCM");

    AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_RAW);

    return AMEDIA_OK;
}
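
// The setters above follow one pattern: pick a setter from the FFmpeg codec id
// when building the track format. A minimal hypothetical dispatcher (sketch
// only, not part of the original file) could look like:
//
//     media_status_t setCodecFormat(AVCodecParameters *avpar, AMediaFormat *meta) {
//         switch (avpar->codec_id) {
//         case AV_CODEC_ID_MPEG2VIDEO: return setMPEG2VIDEOFormat(avpar, meta);
//         case AV_CODEC_ID_VC1:        return setVC1Format(avpar, meta);
//         case AV_CODEC_ID_WMV3:       return setWMV3Format(avpar, meta);
//         case AV_CODEC_ID_MP3:        return setMP3Format(avpar, meta);
//         case AV_CODEC_ID_AC3:        return setAC3Format(avpar, meta);
//         case AV_CODEC_ID_AAC:        return setAACFormat(avpar, meta);
//         default:                     return AMEDIA_ERROR_UNSUPPORTED;
//         }
//     }
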
// Convert H.264 NAL units from length-prefixed (MP4) format to Annex B
media_status_t convertNal2AnnexB(uint8_t *dst, size_t dst_size,
                                 uint8_t *src, size_t src_size, size_t nal_len_size) {
    size_t i = 0;
    size_t nal_len = 0;
    media_status_t status = AMEDIA_OK;

    CHECK_EQ(dst_size, src_size);
    CHECK(nal_len_size == 3 || nal_len_size == 4);

    while (src_size >= nal_len_size) {
        // Replace the big-endian length prefix with an Annex B start code.
        nal_len = 0;
        for (i = 0; i < nal_len_size; i++) {
            nal_len = (nal_len << 8) | src[i];
            dst[i] = 0;
        }
        dst[nal_len_size - 1] = 1;
        // The NAL payload must fit in what remains after the length prefix.
        if (nal_len > INT_MAX || nal_len > src_size - nal_len_size) {
            status = AMEDIA_ERROR_MALFORMED;
            break;
        }
        dst += nal_len_size;
        src += nal_len_size;
        src_size -= nal_len_size;

        memcpy(dst, src, nal_len);

        dst += nal_len;
        src += nal_len;
        src_size -= nal_len;
    }

    return status;
}
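
// Example (hypothetical buffer, not from the original file): one NAL unit with
// a 4-byte big-endian length prefix is rewritten into an equally sized output
// buffer as an Annex B unit, i.e. { 00 00 00 02, 09 10 } -> { 00 00 00 01, 09 10 }:
//
//     uint8_t src[] = { 0x00, 0x00, 0x00, 0x02, 0x09, 0x10 };
//     uint8_t dst[sizeof(src)];
//     media_status_t err = convertNal2AnnexB(dst, sizeof(dst), src, sizeof(src), 4);
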
bool isXviDVideo(AVCodecParameters *avpar)
{
    if (avpar->codec_tag == AV_RL32("XVID")
            || avpar->codec_tag == AV_RL32("xvid")
            || avpar->codec_tag == AV_RL32("XviD")) {
        return true;
    }
    return false;
}

media_status_t parseMetadataTags(AVFormatContext *ctx, AMediaFormat *meta) {
    if (ctx == NULL) {
        return AMEDIA_ERROR_INVALID_OPERATION;
    }

    AVDictionary *dict = ctx->metadata;
    if (dict == NULL) {
        return AMEDIA_ERROR_INVALID_OPERATION;
    }

    struct MetadataMapping {
        const char *from;
        const char *to;
    };

    // avformat -> android mapping
    static const MetadataMapping kMap[] = {
        { "track", AMEDIAFORMAT_KEY_CDTRACKNUMBER },
        { "disc", AMEDIAFORMAT_KEY_DISCNUMBER },
        { "album", AMEDIAFORMAT_KEY_ALBUM },
        { "artist", AMEDIAFORMAT_KEY_ARTIST },
        { "album_artist", AMEDIAFORMAT_KEY_ALBUMARTIST },
        { "composer", AMEDIAFORMAT_KEY_COMPOSER },
        { "date", AMEDIAFORMAT_KEY_DATE },
        { "genre", AMEDIAFORMAT_KEY_GENRE },
        { "title", AMEDIAFORMAT_KEY_TITLE },
        { "year", AMEDIAFORMAT_KEY_YEAR },
        { "compilation", AMEDIAFORMAT_KEY_COMPILATION },
        { "location", AMEDIAFORMAT_KEY_LOCATION },
    };

    static const size_t kNumEntries = sizeof(kMap) / sizeof(kMap[0]);

    for (size_t i = 0; i < kNumEntries; ++i) {
        AVDictionaryEntry *entry = av_dict_get(dict, kMap[i].from, NULL, 0);
        if (entry != NULL) {
            ALOGV("found key %s with value %s", entry->key, entry->value);
            AMediaFormat_setString(meta, kMap[i].to, entry->value);
        }
    }

    // now look for album art - this will be in a separate stream
    for (size_t i = 0; i < ctx->nb_streams; i++) {
        if (ctx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC) {
            AVPacket& pkt = ctx->streams[i]->attached_pic;
            if (pkt.size > 0 && ctx->streams[i]->codecpar != NULL) {
                const char *mime = NULL;
                if (ctx->streams[i]->codecpar->codec_id == AV_CODEC_ID_MJPEG) {
                    mime = MEDIA_MIMETYPE_IMAGE_JPEG;
                } else if (ctx->streams[i]->codecpar->codec_id == AV_CODEC_ID_PNG) {
                    mime = "image/png";
                }
                if (mime != NULL) {
                    ALOGV("found albumart in stream %zu with type %s len %d", i, mime, pkt.size);
                    AMediaFormat_setBuffer(meta, AMEDIAFORMAT_KEY_ALBUMART, pkt.data, pkt.size);
                }
            }
        }
    }

    return AMEDIA_OK;
}

AudioEncoding sampleFormatToEncoding(AVSampleFormat fmt) {
    // we resample planar formats to interleaved
    switch (fmt) {
        case AV_SAMPLE_FMT_U8:
        case AV_SAMPLE_FMT_U8P:
            return kAudioEncodingPcm8bit;
        case AV_SAMPLE_FMT_S16:
        case AV_SAMPLE_FMT_S16P:
            return kAudioEncodingPcm16bit;
        case AV_SAMPLE_FMT_FLT:
        case AV_SAMPLE_FMT_FLTP:
            return kAudioEncodingPcmFloat;
        case AV_SAMPLE_FMT_DBL:
        case AV_SAMPLE_FMT_DBLP:
            return kAudioEncodingPcmFloat;
        default:
            return kAudioEncodingPcm8bit; //FIXME
    }
}

}  // namespace android
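
The helpers above are consumed by the FFMPEG extractor when it publishes track and file metadata. A minimal usage sketch (hedged: `ctx` is an already-opened AVFormatContext and `idx` a hypothetical audio stream index):

AMediaFormat *meta = AMediaFormat_new();
parseMetadataTags(ctx, meta);   // map avformat tags and album art into AMediaFormat

AVCodecParameters *avpar = ctx->streams[idx]->codecpar;
AudioEncoding enc = sampleFormatToEncoding((AVSampleFormat)avpar->format);
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_PCM_ENCODING, enc);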

(4)、Contents of ffmpeg_cmdutils.h

/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FFMPEG_CMDUTILS_H_
#define FFMPEG_CMDUTILS_H_

#ifdef __cplusplus
extern "C" {
#endif

struct AVDictionary;
struct AVFormatContext;
struct AVStream;

double get_rotation(AVStream *st);
/**
 * @brief Build per-stream option dictionaries for avformat_find_stream_info().
 *
 * @return opts, an array with one AVDictionary* per stream
 */
AVDictionary **setup_find_stream_info_opts(AVFormatContext *, AVDictionary *);

#ifdef __cplusplus
}
#endif
#endif  // FFMPEG_CMDUTILS_H_

(5)、Contents of ffmpeg_cmdutils.c

#if 1
#include "config.h"
#include "libavutil/display.h"
#include "libavutil/eval.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"

/*
void print_error(const char *filename, int err)
{
    char errbuf[128];
    const char *errbuf_ptr = errbuf;

    if (av_strerror(err, errbuf, sizeof(errbuf)) < 0)
        errbuf_ptr = strerror(AVUNERROR(err));
    av_log(NULL, AV_LOG_ERROR, "%s: %s\n", filename, errbuf_ptr);
}
*/

double get_rotation(AVStream *st)
{
    AVDictionaryEntry *rotate_tag = av_dict_get(st->metadata, "rotate", NULL, 0);
    uint8_t* displaymatrix = av_stream_get_side_data(st,
                                                     AV_PKT_DATA_DISPLAYMATRIX, NULL);
    double theta = 0;

    if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
        char *tail;
        theta = av_strtod(rotate_tag->value, &tail);
        if (*tail)
            theta = 0;
    }
    if (displaymatrix && !theta)
        theta = -av_display_rotation_get((int32_t*) displaymatrix);

    theta -= 360*floor(theta/360 + 0.9/360);

    if (fabs(theta - 90*round(theta/90)) > 2)
        av_log(NULL, AV_LOG_WARNING, "Odd rotation angle.\n");

    return theta;
}

int check_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
{
    int ret = avformat_match_stream_specifier(s, st, spec);
    if (ret < 0)
        av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
    return ret;
}

AVDictionary *filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id,
                                AVFormatContext *s, AVStream *st, AVCodec *codec)
{
    AVDictionary    *ret = NULL;
    AVDictionaryEntry *t = NULL;
    int            flags = s->oformat ? AV_OPT_FLAG_ENCODING_PARAM
                                      : AV_OPT_FLAG_DECODING_PARAM;
    char          prefix = 0;
    const AVClass    *cc = avcodec_get_class();

    if (!codec)
        codec = (AVCodec*)(s->oformat ? avcodec_find_encoder(codec_id)
                                      : avcodec_find_decoder(codec_id));

    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        prefix = 'v';
        flags |= AV_OPT_FLAG_VIDEO_PARAM;
        break;
    case AVMEDIA_TYPE_AUDIO:
        prefix = 'a';
        flags |= AV_OPT_FLAG_AUDIO_PARAM;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        prefix = 's';
        flags |= AV_OPT_FLAG_SUBTITLE_PARAM;
        break;
    default:
        break;
    }

    while ((t = av_dict_get(opts, "", t, AV_DICT_IGNORE_SUFFIX))) {
        char *p = strchr(t->key, ':');

        /* check stream specification in opt name */
        if (p)
            switch (check_stream_specifier(s, st, p + 1)) {
            case  1: *p = 0; break;
            case  0:         continue;
            default:         return NULL;
            }

        if (av_opt_find(&cc, t->key, NULL, flags, AV_OPT_SEARCH_FAKE_OBJ) ||
            (codec && codec->priv_class &&
             av_opt_find(&codec->priv_class, t->key, NULL, flags,
                         AV_OPT_SEARCH_FAKE_OBJ)))
            av_dict_set(&ret, t->key, t->value, 0);
        else if (t->key[0] == prefix &&
                 av_opt_find(&cc, t->key + 1, NULL, flags,
                             AV_OPT_SEARCH_FAKE_OBJ))
            av_dict_set(&ret, t->key + 1, t->value, 0);

        if (p)
            *p = ':';
    }
    return ret;
}

AVDictionary **setup_find_stream_info_opts(AVFormatContext *s,
                                           AVDictionary *codec_opts)
{
    unsigned int i;
    AVDictionary **opts;

    if (!s->nb_streams)
        return NULL;
    opts = av_mallocz(s->nb_streams * sizeof(*opts));
    if (!opts) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not alloc memory for stream options.\n");
        return NULL;
    }
    for (i = 0; i < s->nb_streams; i++)
        opts[i] = filter_codec_opts(codec_opts, s->streams[i]->codecpar->codec_id,
                                    s, s->streams[i], NULL);
    return opts;
}

#else
#define FFMPEG_DATADIR "/system/usr/ffmpeg"
#include "cmdutils.c" /* external/ffmpeg/cmdutils.c */
#endif
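
A brief usage sketch (assuming an already-opened AVFormatContext `ic`): setup_find_stream_info_opts() allocates one options dictionary per stream, avformat_find_stream_info() consumes them, and the caller must free both the dictionaries and the array:

AVDictionary *codec_opts = NULL;   // global codec options (may stay empty)
AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);

int err = avformat_find_stream_info(ic, opts);

if (opts) {
    for (unsigned int i = 0; i < ic->nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);
}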

(6)、Contents of ffmpeg_source.h

#ifndef FFMPEG_SOURCE_H_

#define FFMPEG_SOURCE_H_

namespace android {
/**
 * @brief Register the Android source protocol with FFMPEG:
 * fill in the protocol name and the I/O callback pointers, then register
 * the protocol with FFMPEG.
 */
void ffmpeg_register_android_source(void);

}  // namespace android

#endif  // FFMPEG_SOURCE_H_

(7)、Contents of ffmpeg_source.cpp

#define LOG_NDEBUG 0
#define LOG_TAG "NBFFMPEG"
#include <utils/Log.h>

#include <inttypes.h>
#include <stdlib.h>
#include "ffmpeg_source.h"

#include <media/MediaExtractorPluginApi.h>
#include <media/stagefright/DataSourceBase.h>
#include <media/stagefright/MediaErrors.h>

extern "C" {

#include "config.h"
#include "libavformat/url.h"
#include "libavutil/error.h"

}

namespace android {

class FFSource
{
public:
    void set(CDataSource *s);
    void reset();
    int init_check();
    int read(unsigned char *buf, size_t size);
    int64_t seek(int64_t pos);
    off64_t getSize();

protected:
    CDataSource *mSource;
    int64_t mOffset;
    uint32_t mFlags;
};

void FFSource::set(CDataSource *s)
{
    mSource = s;
    mOffset = 0;
    mFlags = s->flags(s->handle);

    ALOGV("FFSource[%p]: flags=%08x", mSource, mFlags);
}

void FFSource::reset()
{
    ALOGV("FFSource[%p]: reset", mSource);
    mSource = NULL;
}

int FFSource::init_check()
{
    ALOGV("FFSource[%p]: init_check", mSource);
    return 0;
}

int FFSource::read(unsigned char *buf, size_t size)
{
    ssize_t n = 0;

    n = mSource->readAt(mSource->handle, mOffset, buf, size);
    if (n == ERROR_END_OF_STREAM ||
            // For local file source, 0 bytes read means EOS.
            (n == 0 && (mFlags & DataSourceBase::kIsLocalFileSource) != 0)) {
        ALOGV("FFSource[%p]: end-of-stream", mSource);
        return AVERROR_EOF;
    } else if (n < 0) {
        ALOGE("FFSource[%p]: readAt failed (%zd)", mSource, n);
        return n == UNKNOWN_ERROR ? AVERROR(errno) : n;
    }
    if (n > 0) {
        ALOGV("FFSource[%p]: read = %zd", mSource, n);
        mOffset += n;
    }

    return n;
}

int64_t FFSource::seek(int64_t pos)
{
    ALOGV("FFSource[%p]: seek = %" PRId64, mSource, pos);
    mOffset = pos;
    return 0;
}

off64_t FFSource::getSize()
{
    off64_t sz = -1;

    if (mSource->getSize(mSource->handle, &sz) != OK) {
         ALOGE("FFSource[%p] getSize failed", mSource);
         return AVERROR(errno);
    }
    ALOGV("FFsource[%p] size = %" PRId64, mSource, sz);

    return sz;
}

/////////////////////////////////////////////////////////////////////////////////

static int android_open(URLContext *h, const char *url, int flags __unused)
{
    // the url in form of "android-source:<CDataSource Ptr>",
    // the DataSourceBase Pointer passed by the ffmpeg extractor
    CDataSource *source = NULL;
    char url_check[PATH_MAX] = {0};

    ALOGV("android source begin open");

    if (!url) {
        ALOGE("android url is null!");
        return -1;
    }

    ALOGV("android open, url: %s", url);
    sscanf(url + strlen("android-source:"), "%p", &source);
    if(source == NULL){
        ALOGE("ffmpeg open data source error! (invalid source)");
        return -1;
    }

    snprintf(url_check, sizeof(url_check), "android-source:%p",
                source);

    if (strcmp(url_check, url) != 0) {

        char uri[PATH_MAX] = {0};
        if (!source->getUri(source->handle, uri, sizeof(uri))) {
            ALOGE("ffmpeg open data source error! (source uri)");
            return -1;
        }

        snprintf(url_check, sizeof(url_check), "android-source:%p|file:%s",
                    source, uri);

        if (strcmp(url_check, url) != 0) {
            ALOGE("ffmpeg open data source error! (url check)");
            return -1;
        }
    }

    ALOGV("ffmpeg open android data source success, source ptr: %p", source);

    reinterpret_cast<FFSource *>(h->priv_data)->set(source);

    ALOGV("android source open success");

    return 0;
}

static int android_read(URLContext *h, unsigned char *buf, int size)
{
    FFSource* ffs = (FFSource *)h->priv_data;
    return ffs->read(buf, size);
}

static int android_write(URLContext *h __unused, const unsigned char *buf __unused, int size __unused)
{
    return -1;
}

static int64_t android_seek(URLContext *h, int64_t pos, int whence)
{
    FFSource* ffs = (FFSource*)h->priv_data;

    if (whence == AVSEEK_SIZE) {
        return ffs->getSize();
    }

    ffs->seek(pos);
    return 0;
}

static int android_close(URLContext *h)
{
    ALOGV("android source close");
    reinterpret_cast<FFSource *>(h->priv_data)->reset();
    return 0;
}

static int android_get_handle(URLContext *h)
{
    return (intptr_t)h->priv_data;
}

static int android_check(URLContext *h, int mask)
{
    FFSource* ffs = (FFSource*)h->priv_data;

    /* url_check does not guarantee url_open will be called
     * (and actually it is not designed to do so)
     * If url_open is not called before url_check called, ffs
     * will be null, and we will assume everything is ok.
     */
    if (ffs && (ffs->init_check() < 0))
        return AVERROR(EACCES); // FIXME

    return (mask & AVIO_FLAG_READ);
}

URLProtocol ff_android_protocol;

void ffmpeg_register_android_source()
{
    if (ff_android_protocol.name) return;

    ALOGI("FFMPEG ffmpeg_register_android_source...");
    ff_android_protocol.name                = "android-source";
    ff_android_protocol.url_open            = android_open;
    ff_android_protocol.url_read            = android_read;
    ff_android_protocol.url_write           = android_write;
    ff_android_protocol.url_seek            = android_seek;
    ff_android_protocol.url_close           = android_close;
    ff_android_protocol.url_get_file_handle = android_get_handle;
    ff_android_protocol.url_check           = android_check;
    ff_android_protocol.priv_data_size      = sizeof(FFSource);

    av_register_android_protocol(&ff_android_protocol, sizeof(ff_android_protocol));
}

}  // namespace android
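
For context, here is a hedged sketch of the extractor side (variable names hypothetical): the CDataSource pointer is printed into an "android-source:%p" URL, which android_open() above parses back out with sscanf():

// `source` is the CDataSource* handed to the extractor plugin.
char url[64];
snprintf(url, sizeof(url), "android-source:%p", source);

AVFormatContext *ic = avformat_alloc_context();
int err = avformat_open_input(&ic, url, NULL, NULL);   // routed to android_open()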

(8)、Contents of ffmpeg_utils.h

#ifndef FFMPEG_UTILS_H_

#define FFMPEG_UTILS_H_

#include <unistd.h>
#include <stdlib.h>

#include <utils/Condition.h>
#include <utils/Errors.h>
#include <utils/Mutex.h>

extern "C" {

#include "config.h"
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/bsf.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"

#include <system/audio.h>

}

#define SF_NOPTS_VALUE ((uint64_t)AV_NOPTS_VALUE-1)

namespace android {

//
// log
//
void nbc_av_log_callback(void* ptr, int level, const char* fmt, va_list vl);
void nbc_av_log_set_flags(int arg);

//
// constructor and destructor
//
status_t initFFmpeg();
void deInitFFmpeg();

//
// parser
//
int is_extradata_compatible_with_android(AVCodecParameters *avpar);
int parser_split(AVCodecParameters *avpar, const uint8_t *buf, int buf_size);

//
// packet queue
//

typedef struct PacketQueue PacketQueue;

PacketQueue* packet_queue_alloc();
void packet_queue_free(PacketQueue **q);
void packet_queue_flush(PacketQueue *q);
void packet_queue_start(PacketQueue *q);
void packet_queue_abort(PacketQueue *q);
int packet_queue_is_wait_for_data(PacketQueue *q);
int packet_queue_put(PacketQueue *q, AVPacket *pkt);
int packet_queue_put_nullpacket(PacketQueue *q, int stream_index);
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block);

}  // namespace android

#endif  // FFMPEG_UTILS_H_
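
initFFmpeg()/deInitFFmpeg() are reference counted, so each component simply brackets its FFMPEG usage with the pair; a minimal sketch:

if (initFFmpeg() == OK) {   // first caller installs the log hook, network init, protocol
    // ... demux/decode with libavformat/libavcodec ...
    deInitFFmpeg();         // last caller tears the global state down
}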

(9)、Contents of ffmpeg_utils.cpp

#define LOG_NDEBUG 0
#define LOG_TAG "NBFFMPEG"
#include <utils/Log.h>

#include <utils/Errors.h>

extern "C" {

#include "config.h"

#include <unistd.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <limits.h> /* INT_MAX */
#include <time.h>

#undef strncpy
#include <string.h>

}

#include <cutils/properties.h>

#include "ffmpeg_utils.h"
#include "ffmpeg_source.h"

// log
static int flags;

// init ffmpeg
static pthread_mutex_t s_init_mutex = PTHREAD_MUTEX_INITIALIZER;
static int s_ref_count = 0;

namespace android {

//
// log
//
static void sanitize(uint8_t *line){
    while(*line){
        if(*line < 0x08 || (*line > 0x0D && *line < 0x20))
            *line='?';
        line++;
    }
}

// TODO: remove static variables to support multiple instances
void nbc_av_log_callback(void* ptr, int level, const char* fmt, va_list vl)
{
    static int print_prefix = 1;
    static int count;
    static char prev[1024];
    char line[1024];

    if (level > av_log_get_level())
        return;
    av_log_format_line(ptr, level, fmt, vl, line, sizeof(line), &print_prefix);

    if (print_prefix && (flags & AV_LOG_SKIP_REPEATED) && !strcmp(line, prev)){
        count++;
        return;
    }
    if (count > 0) {
        ALOGI("Last message repeated %d times\n", count);
        count = 0;
    }
    strcpy(prev, line);
    sanitize((uint8_t *)line);

#if 0
    ALOGI("%s", line);
#else
#define LOG_BUF_SIZE 1024
    static char g_msg[LOG_BUF_SIZE];
    static int g_msg_len = 0;

    int saw_lf, check_len;

    do {
        check_len = g_msg_len + strlen(line) + 1;
        if (check_len <= LOG_BUF_SIZE) {
            /* lf: Line feed ('\n') */
            saw_lf = (strchr(line, '\n') != NULL) ? 1 : 0;
            strncpy(g_msg + g_msg_len, line, strlen(line));
            g_msg_len += strlen(line);
            if (!saw_lf) {
               /* partial line, keep accumulating */
               return;
            }
            /* `line` already carries its trailing '\n', nothing to attach */
        } else {
            /* trace is fragmented: flush what we have, then retry */
            if (g_msg_len < LOG_BUF_SIZE - 1) {
                g_msg[g_msg_len] = '\n';
                g_msg_len += 1;
            }
        }
        ALOGI("%s", g_msg);
        /* reset g_msg and g_msg_len */
        memset(g_msg, 0, LOG_BUF_SIZE);
        g_msg_len = 0;
    } while (check_len > LOG_BUF_SIZE);
#endif
}

void nbc_av_log_set_flags(int arg)
{
    flags = arg;
}

#if 0
const struct { const char *name; int level; } log_levels[] = {
    { "quiet"  , AV_LOG_QUIET   },
    { "panic"  , AV_LOG_PANIC   },
    { "fatal"  , AV_LOG_FATAL   },
    { "error"  , AV_LOG_ERROR   },
    { "warning", AV_LOG_WARNING },
    { "info"   , AV_LOG_INFO    },
    { "verbose", AV_LOG_VERBOSE },
    { "debug"  , AV_LOG_DEBUG   },
};

#define AV_LOG_QUIET    -8
#define AV_LOG_PANIC     0
#define AV_LOG_FATAL     8
#define AV_LOG_ERROR    16
#define AV_LOG_WARNING  24
#define AV_LOG_INFO     32
#define AV_LOG_VERBOSE  40
#define AV_LOG_DEBUG    48
#endif

//
// constructor and destructor
//

static int parseLogLevel(const char* s) {
    if (strcmp(s, "quiet") == 0)
        return AV_LOG_QUIET;
    else if (strcmp(s, "panic") == 0)
        return AV_LOG_PANIC;
    else if (strcmp(s, "fatal") == 0)
        return AV_LOG_FATAL;
    else if (strcmp(s, "error") == 0)
        return AV_LOG_ERROR;
    else if (strcmp(s, "warning") == 0)
        return AV_LOG_WARNING;
    else if (strcmp(s, "info") == 0)
        return AV_LOG_INFO;
    else if (strcmp(s, "verbose") == 0)
        return AV_LOG_VERBOSE;
    else if (strcmp(s, "debug") == 0)
        return AV_LOG_DEBUG;
    else if (strcmp(s, "trace") == 0)
        return AV_LOG_TRACE;
    else {
        ALOGE("unsupported loglevel: %s", s);
        return AV_LOG_INFO;
    }
}

/**
 * To set the ffmpeg log level, run this command before starting playback:
 *     setprop debug.ffmpeg.loglevel [quiet|panic|fatal|error|warning|info|verbose|debug|trace]
 */
status_t initFFmpeg()
{
    status_t ret = OK;
    char pval[PROPERTY_VALUE_MAX];

    pthread_mutex_lock(&s_init_mutex);

    if (property_get("debug.ffmpeg.loglevel", pval, "info")) {
        av_log_set_level(parseLogLevel(pval));
    } else {
        av_log_set_level(AV_LOG_INFO);
    }

    if(s_ref_count == 0) {
        nbc_av_log_set_flags(AV_LOG_SKIP_REPEATED);
        av_log_set_callback(nbc_av_log_callback);

        /* global ffmpeg initialization */
        avformat_network_init();

        /* register android source */
        ffmpeg_register_android_source();

        ALOGI("FFMPEG initialized: %s", av_version_info());
    }

    // update counter
    s_ref_count++;

    pthread_mutex_unlock(&s_init_mutex);

    return ret;
}

void deInitFFmpeg()
{
    pthread_mutex_lock(&s_init_mutex);

    // update counter
    s_ref_count--;

    if(s_ref_count == 0) {
        avformat_network_deinit();
        ALOGD("FFMPEG deinitialized");
    }

    pthread_mutex_unlock(&s_init_mutex);
}

//
// parser
//
int parser_split(AVCodecParameters *avpar __unused,
        const uint8_t *buf __unused, int buf_size __unused)
{
    return 0;
}

int is_extradata_compatible_with_android(AVCodecParameters *avpar)
{
    if (avpar->extradata_size <= 0) {
        ALOGI("extradata_size <= 0, extradata is not compatible with "
                "android decoder, the codec id: 0x%x", avpar->codec_id);
        return 0;
    }

    return 1;
}

//
// packet queue
//

typedef struct PacketList {
    AVPacket *pkt;
    struct PacketList *next;
} PacketList;

typedef struct PacketQueue {
    PacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int wait_for_data;
    int abort_request;
    Mutex lock;
    Condition cond;
} PacketQueue;

PacketQueue* packet_queue_alloc()
{
    void *mem = av_mallocz(sizeof(PacketQueue));
    if (!mem)
        return NULL;
    // Placement-new so the C++ Mutex/Condition members are actually constructed.
    PacketQueue *queue = new (mem) PacketQueue();
    queue->abort_request = 1;
    return queue;
}

void packet_queue_free(PacketQueue **q)
{
    packet_queue_abort(*q);
    packet_queue_flush(*q);
    (*q)->~PacketQueue();
    av_freep(q);
}

void packet_queue_abort(PacketQueue *q)
{
    q->abort_request = 1;
    Mutex::Autolock autoLock(q->lock);
    q->cond.signal();
}

static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
{
    PacketList *pkt1;

    if (q->abort_request)
        return -1;

    pkt1 = (PacketList *)av_malloc(sizeof(PacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = av_packet_alloc();
    if (!pkt1->pkt) {
        av_free(pkt1);
        return -1;
    }
    av_packet_move_ref(pkt1->pkt, pkt);
    pkt1->next = NULL;

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    //q->size += pkt1->pkt.size + sizeof(*pkt1);
    q->size += pkt1->pkt->size;
    q->cond.signal();
    return 0;
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    int ret;

    q->lock.lock();
    ret = packet_queue_put_private(q, pkt);
    q->lock.unlock();

    return ret;
}

int packet_queue_is_wait_for_data(PacketQueue *q)
{
    Mutex::Autolock autoLock(q->lock);
    return q->wait_for_data;
}

void packet_queue_flush(PacketQueue *q)
{
    PacketList *pkt, *pkt1;

    Mutex::Autolock autoLock(q->lock);
    for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_packet_free(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
}

int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
{
    AVPacket *pkt;
    int err;

    pkt = av_packet_alloc();
    if (!pkt)
        return -1;
    pkt->data = NULL;
    pkt->size = 0;
    pkt->stream_index = stream_index;
    err = packet_queue_put(q, pkt);
    av_packet_free(&pkt);

    return err;
}

/* packet queue handling */
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    PacketList *pkt1;
    int ret = -1;

    Mutex::Autolock autoLock(q->lock);

    while (!q->abort_request) {
        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            //q->size -= pkt1->pkt.size + sizeof(*pkt1);
            q->size -= pkt1->pkt->size;
            av_packet_move_ref(pkt, pkt1->pkt);
            av_packet_free(&pkt1->pkt);
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            q->wait_for_data = 1;
            q->cond.waitRelative(q->lock, 10000000LL);
        }
    }
    q->wait_for_data = 0;
    return ret;
}

void packet_queue_start(PacketQueue *q)
{
    Mutex::Autolock autoLock(q->lock);
    q->abort_request = 0;
}

}  // namespace android
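
To make the intended threading model of the packet queue explicit, here is a hedged producer/consumer sketch (`ic` is an assumed, already-opened AVFormatContext; a real demuxer/decoder pair would run the two halves on separate threads):

PacketQueue *q = packet_queue_alloc();
packet_queue_start(q);

// Demuxer side: ownership moves into the queue via av_packet_move_ref().
AVPacket *in = av_packet_alloc();
while (av_read_frame(ic, in) >= 0)
    packet_queue_put(q, in);            // `in` is left blank and can be reused
packet_queue_put_nullpacket(q, 0);      // empty packet marks EOS on stream 0

// Decoder side: each blocking wait lasts up to 10 ms until a packet arrives.
AVPacket out = {};
while (packet_queue_get(q, &out, 1 /* block */) > 0) {
    bool eos = (out.data == NULL && out.size == 0);  // the null packet from above
    // ... feed `out` to the decoder ...
    av_packet_unref(&out);
    if (eos)
        break;
}

av_packet_free(&in);
packet_queue_free(&q);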
