四、FFMPEG播放音頻

本文基於以下文章進行的開發:

一、FFMPEG源碼編譯

二、AndroidStudio集成FFMPEG

三、FFMPEG視頻解碼及播放



1.新建AudioPlayer類

package com.test.ffmpeg;

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.util.Log;

/**
 * Created by ygdx_lk on 17/11/2.
 */

public class AudioPlayer {
    private static final String TAG = "AudioPlayer";

    private AudioTrack audioTrack;

    /**
     * Native entry point: decodes {@code input} with FFmpeg and streams the
     * resulting PCM back through {@link #createAudio(int, int)} and
     * {@link #playTrack(byte[], int)}, which native code invokes by reflection.
     *
     * @param input  path of the media file to decode (e.g. an mp3)
     * @param output path of an optional PCM dump file
     */
    public native void sound(String input, String output);

    /**
     * Creates (or re-creates) the AudioTrack used for playback and starts it.
     * Invoked from native code by reflection — the signature "(II)V" must not change.
     *
     * @param sampleRateInHz PCM sample rate in Hz
     * @param nb_channals    channel count reported by the decoder (1 = mono, 2 = stereo)
     */
    public void createAudio(int sampleRateInHz, int nb_channals){
        int channaleConfig;
        switch (nb_channals) {
            case 2:
                channaleConfig = AudioFormat.CHANNEL_OUT_STEREO; // stereo
                break;
            case 1:
            default:
                channaleConfig = AudioFormat.CHANNEL_OUT_MONO;   // mono fallback for anything else
                break;
        }

        // ENCODING_PCM_16BIT is the only PCM encoding guaranteed on all devices.
        int buffersize = AudioTrack.getMinBufferSize(sampleRateInHz, channaleConfig, AudioFormat.ENCODING_PCM_16BIT);

        // Release any previous track first, otherwise calling createAudio twice
        // leaks a native AudioTrack instance.
        releaseAudio();

        //public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,int bufferSizeInBytes, int mode)
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateInHz, channaleConfig,
                AudioFormat.ENCODING_PCM_16BIT, buffersize, AudioTrack.MODE_STREAM);
        audioTrack.play();
    }

    /**
     * Writes one chunk of decoded PCM data to the playing track.
     * Invoked from native code by reflection — the signature "([BI)V" must not change.
     *
     * @param buffer PCM bytes (16-bit samples)
     * @param length number of valid bytes in {@code buffer}
     */
    public synchronized void playTrack(byte[] buffer, int length){
        if(audioTrack != null && audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING){
            Log.i(TAG, "playTrack: " + length);
            audioTrack.write(buffer, 0, length);
        }
    }

    /** Stops and releases the underlying AudioTrack. Safe to call repeatedly. */
    public synchronized void releaseAudio(){
        if(audioTrack != null){
            try {
                audioTrack.stop();
            } catch (IllegalStateException ignored) {
                // track was never successfully initialized — nothing to stop
            }
            audioTrack.release();
            audioTrack = null;
        }
    }
}

2.native-lib.h中添加如下方法

/* JNI binding for AudioPlayer.sound(String, String): decodes input_ with FFmpeg and plays it. */
JNIEXPORT void JNICALL Java_com_test_ffmpeg_AudioPlayer_sound(JNIEnv *env, jobject instance, jstring input_, jstring output_);

3.native-lib.cpp中實現

/**
 * Decodes the audio stream of `input` with FFmpeg, resamples every decoded
 * frame to 16-bit stereo PCM, and hands the PCM chunks back to the Java
 * AudioPlayer (createAudio / playTrack, looked up by reflection) for playback.
 *
 * input_  : path of the media file to decode (e.g. an mp3)
 * output_ : path of an optional PCM dump file (only used by the commented-out
 *           fwrite debug code below)
 */
void JNICALL Java_com_test_ffmpeg_AudioPlayer_sound(JNIEnv *env, jobject instance, jstring input_, jstring output_) {
    const char *input = env->GetStringUTFChars(input_, 0);
    const char *output = env->GetStringUTFChars(output_, 0);

    // Register all muxers/demuxers/codecs — required before any other FFmpeg
    // call on this API level.
    av_register_all();

    // AVFormatContext describes the container and the streams it holds.
    AVFormatContext *avFormatContext = avformat_alloc_context();

    if(avformat_open_input(&avFormatContext, input, NULL, NULL) < 0){
        LOGE("%s", "打開文件失敗");
        // avformat_open_input frees the context itself on failure; only the
        // JNI strings still need releasing (the original leaked them here).
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    if(avformat_find_stream_info(avFormatContext, NULL) < 0){
        LOGE("%s", "獲取音頻信息失敗");
        avformat_close_input(&avFormatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // Find the first audio stream in the container.
    int idx_audio_stream = -1;
    for (int i = 0; i < avFormatContext->nb_streams; ++i) {
        int type = avFormatContext->streams[i]->codec->codec_type;
        if(type == AVMEDIA_TYPE_AUDIO){
            LOGE("找到音頻 %d", type);
            idx_audio_stream = i;
            break;
        }
    }

    // BUGFIX: the original fell through and indexed streams[-1] when the file
    // contained no audio stream.
    if(idx_audio_stream == -1){
        LOGE("%s", "未找到音頻流");
        avformat_close_input(&avFormatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // Decoder context for the audio stream (owned by the stream: it is closed,
    // not freed, during cleanup).
    AVCodecContext *avCodecContext = avFormatContext->streams[idx_audio_stream]->codec;

    // AVCodec holds the decoder implementation matching this codec id.
    AVCodec *avCodec = avcodec_find_decoder(avCodecContext->codec_id);

    // BUGFIX: the original statement was missing its semicolon (compile error)
    // and execution continued after a failed open, so every decode call below
    // would have failed anyway. Bail out instead.
    if(avCodec == NULL || avcodec_open2(avCodecContext, avCodec, NULL) < 0){
        LOGE("%s", "打開解碼器失敗");
        avformat_close_input(&avFormatContext);
        env->ReleaseStringUTFChars(input_, input);
        env->ReleaseStringUTFChars(output_, output);
        return;
    }

    // SwrContext resamples the decoded frames into a fixed 16-bit PCM layout
    // that AudioTrack can consume.
    SwrContext *swrContext = swr_alloc();

    uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
    // Signed 16-bit PCM: the only encoding AudioTrack guarantees on all devices.
    enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
    // Keep the source sample rate so no rate conversion is performed.
    int out_sample_rate = avCodecContext->sample_rate;

    // swr_alloc_set_opts(s, out_ch_layout, out_sample_fmt, out_sample_rate,
    //                    in_ch_layout, in_sample_fmt, in_sample_rate,
    //                    log_offset, log_ctx)
    swr_alloc_set_opts(swrContext, out_ch_layout, out_sample_fmt, out_sample_rate,
                       avCodecContext->channel_layout, avCodecContext->sample_fmt,
                       avCodecContext->sample_rate, 0, NULL);

    // swr_alloc_set_opts only stores the options; swr_init applies them.
    swr_init(swrContext);

    // Reflect AudioPlayer.createAudio(int, int) and playTrack(byte[], int).
    jclass audio_player = env->GetObjectClass(instance);
    jmethodID createAudio = env->GetMethodID(audio_player, "createAudio", "(II)V");

    int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
    // BUGFIX: pass the real output rate instead of a hard-coded 44100 — with
    // the old code any input not sampled at 44.1 kHz played at the wrong speed.
    env->CallVoidMethod(instance, createAudio, out_sample_rate, out_channel_nb);
    jmethodID playTrack = env->GetMethodID(audio_player, "playTrack", "([BI)V");

    //輸出文件
//    FILE *pcm_file = fopen(output, "wb");

    // AVPacket carries one chunk of compressed data read from the container.
    AVPacket *avPacket = (AVPacket *)av_malloc(sizeof(AVPacket));
    // AVFrame receives the decoded (uncompressed) samples.
    AVFrame *avFrame = av_frame_alloc();
    int got_frame;

    // BUGFIX: swr_convert's out_count is in SAMPLES per channel, not bytes.
    // The original passed 44100*2 as the sample count against a 44100*2-BYTE
    // buffer, which stereo S16 output (4 bytes/sample) could overflow. Size
    // the buffer from the sample budget instead.
    const int max_out_samples = 44100 * 2;
    uint8_t *out_buffer = (uint8_t *) av_malloc(
            (size_t) max_out_samples * out_channel_nb * av_get_bytes_per_sample(out_sample_fmt));

    // Decode loop: read packet -> decode frame -> resample -> push to Java.
    while (av_read_frame(avFormatContext, avPacket) >= 0){
        if(avPacket->stream_index == idx_audio_stream){
            // Decode the compressed packet into avFrame.
            avcodec_decode_audio4(avCodecContext, avFrame, &got_frame, avPacket);
            if(got_frame){
                LOGI("%s", "解碼");
                // Resample into out_buffer; returns the samples actually produced.
                int samples = swr_convert(swrContext, &out_buffer, max_out_samples,
                                          (const uint8_t **) avFrame->data, avFrame->nb_samples);
                if(samples > 0){
                    // Byte size of the converted data (align=1: no padding).
                    int size = av_samples_get_buffer_size(NULL, out_channel_nb, samples,
                                                          out_sample_fmt, 1);
                    jbyteArray audio_sample_array = env->NewByteArray(size);
                    env->SetByteArrayRegion(audio_sample_array, 0, size, (const jbyte *) out_buffer);
                    // Call back into Java: AudioPlayer.playTrack(byte[], int).
                    env->CallVoidMethod(instance, playTrack, audio_sample_array, size);
                    // Drop the local ref so the loop does not exhaust the JNI
                    // local reference table on long files.
                    env->DeleteLocalRef(audio_sample_array);
//                    fwrite(out_buffer, 1, size, pcm_file);
                }
            }
        }
        // BUGFIX: unref every packet — the original only released packets that
        // belonged to the audio stream and leaked the rest.
        av_free_packet(avPacket);
    }

    //釋放
//    fclose(pcm_file);

    av_free(out_buffer);   // BUGFIX: the original never freed the PCM buffer
    av_free(avPacket);     // BUGFIX: ...nor the packet struct itself
    av_frame_free(&avFrame);
    swr_free(&swrContext);
    avcodec_close(avCodecContext);
    avformat_close_input(&avFormatContext);

    env->ReleaseStringUTFChars(input_, input);
    env->ReleaseStringUTFChars(output_, output);
}


4.MainActivity中添加一個按鈕,點擊後調用下面代碼

                // Source media file and (optional) PCM dump target on external storage.
                // NOTE(review): requires READ/WRITE_EXTERNAL_STORAGE permission — confirm it is requested.
                final String input = new File(Environment.getExternalStorageDirectory(),"input.mp3").getAbsolutePath();
                final String output = new File(Environment.getExternalStorageDirectory(),"output.pcm").getAbsolutePath();

                // sound() decodes and plays synchronously, so it must run off the UI thread.
                new Thread(new Runnable() {
                    @Override
                    public void run() {
                        new AudioPlayer().sound(input, output);
                    }
                }).start();


發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章