AudioQueue使用

AudioQueue

 (2011-07-07 11:36:20)
  轉自:http://blog.sina.com.cn/s/blog_6568e7880100uf71.html
http://my.chinaunix.net/space.php?uid=25788167&do=blog&id=366155

AudioQueue
AudioQueue是Mac OS X和iPhone中提供錄音、播放功能的高級框架,比AudioUnit等框架更方便,而且不要求掌握更多專門的知識。

從AudioQueue的名稱就可以看出,AudioQueue框架以隊列的形式處理音頻數據。因此使用時需要給隊列分配緩存空間,由回調(Callback)函數完成向隊列緩存讀寫音頻數據的功能。另外,AudioQueue是AudioToolbox框架的一部分,使用前需要將AudioToolbox框架導入進來。

使用AudioQueue來實現音頻播放功能時最主要的步驟,可以更簡練的歸納如下。

1. 打開播放音頻文件

2. 取得播放音頻文件的數據格式

3. 準備播放用的隊列

4. 將緩衝中的數據移動到隊列中

5. 開始播放

6. 在回調函數中進行隊列處理

以下是貫徹上述六個主要步驟的代碼實例,只需要向[play:]中傳入音頻文件的路徑就可以開始音頻播放。稍加修改可以直接應用到自己的程序中。

Source Audioplay.h

#import <Foundation/Foundation.h>

#import <AudioToolbox/AudioToolbox.h>

#import <AudioToolbox/AudioFile.h>

 

#define NUM_BUFFERS 3

 

// Simple AudioQueue-based audio file player (MRC-era sample code).
// Usage: create an instance and send -play: with a CFURLRef to an audio file.
@interface AudioPlayer : NSObject {

        // ID of the audio file being played

    AudioFileID audioFile;

        // Format description of the audio data stream

    AudioStreamBasicDescription dataFormat;

        // The playback audio queue

    AudioQueueRef queue;

    // Index of the next packet to read from the file
    SInt64 packetIndex;

    // Number of packets to read per buffer refill (computed in -play:)
    UInt32 numPacketsToRead;

    // NOTE(review): declared but never used in the visible code
    UInt32 bufferByteSize;

    // Packet descriptions for VBR data; NULL for constant-bit-rate formats
    AudioStreamPacketDescription *packetDescs;

    // The queue's buffers, cycled by the playback callback
    AudioQueueBufferRef buffers[NUM_BUFFERS];

}

// The audio queue, exposed as a property

@property AudioQueueRef queue;

// Starts playback of the audio file at the given URL

- (void) play:(CFURLRef) path;

// Refills one queue buffer with file data and re-enqueues it

- (void) audioQueueOutputWithQueue:(AudioQueueRef)audioQueue

                       queueBuffer:(AudioQueueBufferRef)audioQueueBuffer;

// C callback invoked by the audio queue when a buffer finishes playing

static void BufferCallback(void *inUserData, AudioQueueRef inAQ,

                                            AudioQueueBufferRef buffer);

// Reads packets from the file into a buffer; returns the packet count read

- (UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer;

 

@end

 

Source Audioplay.m

// Size in bytes of each audio queue buffer (0x10000 = 64 KB).
static UInt32 gBufferSizeBytes = 0x10000;

 

@implementation AudioPlayer

@synthesize queue;

// C trampoline invoked by the audio queue each time it finishes with a
// buffer; forwards to the Objective-C refill method.
static void BufferCallback(void *inUserData, AudioQueueRef inAQ,
                           AudioQueueBufferRef buffer) {
    AudioPlayer *player = (AudioPlayer *)inUserData;
    [player audioQueueOutputWithQueue:inAQ queueBuffer:buffer];
}

// Designated initializer.
// FIX: the original enqueued the (still NULL) buffers into the (still NULL)
// queue here and never called [super init]. The queue and its buffers do not
// exist until -play: creates them, so there is nothing to do at init time.
- (id)init {
    self = [super init];
    return self;
}

// Refills one queue buffer with the next run of packets from the file and
// re-enqueues it. Called from BufferCallback on the queue's internal thread.
- (void)audioQueueOutputWithQueue:(AudioQueueRef)audioQueue
                      queueBuffer:(AudioQueueBufferRef)audioQueueBuffer {
    OSStatus status;

    // Read the next batch of packets from the file into the buffer.
    UInt32 numBytes;
    UInt32 numPackets = numPacketsToRead;
    status = AudioFileReadPackets(
                audioFile, false, &numBytes, packetDescs,
                packetIndex, &numPackets, audioQueueBuffer->mAudioData);

    if (status == noErr && numPackets > 0) {
        // Tell the queue how much of the buffer actually holds audio data.
        audioQueueBuffer->mAudioDataByteSize = numBytes;

        // FIX: for CBR data packetDescs is NULL and the description count must
        // be 0 (the original always passed numPackets, inconsistent with
        // -readPacketsIntoBuffer:).
        status = AudioQueueEnqueueBuffer(
                audioQueue, audioQueueBuffer,
                (packetDescs ? numPackets : 0), packetDescs);

        // Advance the read position.
        packetIndex += numPackets;
    } else {
        // FIX: end of file — let already-enqueued audio drain, then stop the
        // queue instead of spinning the callback forever with empty reads.
        AudioQueueStop(audioQueue, false);
    }
}

// Opens the file at `path`, configures an output queue for its format,
// primes NUM_BUFFERS buffers, and starts playback.
- (void)play:(CFURLRef)path {
    UInt32   size, maxPacketSize;
    char     *cookie;
    int      i;
    OSStatus status;

    // 1. Open the audio file.
    status = AudioFileOpenURL(path, kAudioFileReadPermission, 0, &audioFile);
    if (status != noErr) {
        // Could not open the file; nothing to play.
        return;
    }

    // 2. Fetch the data format of the file.
    size = sizeof(dataFormat);
    status = AudioFileGetProperty(audioFile, kAudioFilePropertyDataFormat,
                                  &size, &dataFormat);
    if (status != noErr) {
        AudioFileClose(audioFile);
        audioFile = NULL;
        return;
    }

    // 3. Create the playback queue; the callback gets `self` as user data.
    status = AudioQueueNewOutput(&dataFormat, BufferCallback,
                                 self, NULL, NULL, 0, &queue);
    if (status != noErr) {
        // FIX: original ignored the result and went on to use a NULL queue.
        AudioFileClose(audioFile);
        audioFile = NULL;
        return;
    }

    // 4. Work out how many packets fit into one buffer.
    if (dataFormat.mBytesPerPacket == 0 || dataFormat.mFramesPerPacket == 0) {
        // VBR (or unknown packet size): bound by the largest possible packet
        // and allocate packet descriptions for the enqueue calls.
        size = sizeof(maxPacketSize);
        AudioFileGetProperty(audioFile,
          kAudioFilePropertyPacketSizeUpperBound, &size, &maxPacketSize);
        if (maxPacketSize > gBufferSizeBytes) {
            maxPacketSize = gBufferSizeBytes;
        }
        numPacketsToRead = gBufferSizeBytes / maxPacketSize;
        packetDescs = malloc(
          sizeof(AudioStreamPacketDescription) * numPacketsToRead);
    } else {
        // CBR: packet size is fixed; no packet descriptions needed.
        numPacketsToRead = gBufferSizeBytes / dataFormat.mBytesPerPacket;
        packetDescs = NULL;
    }

    // 5. Hand the file's magic cookie (codec setup data) to the queue.
    // FIX: original ignored the status of AudioFileGetPropertyInfo — on
    // failure `size` kept its previous nonzero value and garbage was read.
    status = AudioFileGetPropertyInfo(audioFile,
           kAudioFilePropertyMagicCookieData, &size, NULL);
    if (status == noErr && size > 0) {
        cookie = malloc(sizeof(char) * size);
        AudioFileGetProperty(audioFile,
                  kAudioFilePropertyMagicCookieData, &size, cookie);
        AudioQueueSetProperty(queue,
                  kAudioQueueProperty_MagicCookie, cookie, size);
        free(cookie);
    }

    // 6. Allocate the buffers and prime them with the first packets.
    packetIndex = 0;
    for (i = 0; i < NUM_BUFFERS; i++) {
        AudioQueueAllocateBuffer(queue, gBufferSizeBytes, &buffers[i]);
        // Stop priming early if the file is already exhausted.
        if ([self readPacketsIntoBuffer:buffers[i]] == 0) {
            break;
        }
    }

    // 7. Full volume.
    Float32 gain = 1.0;
    AudioQueueSetParameter(queue, kAudioQueueParam_Volume, gain);

    // 8. Start the queue; the system now drives BufferCallback.
    AudioQueueStart(queue, NULL);
}

// Reads up to numPacketsToRead packets from the file into `buffer` and
// enqueues it. Returns the number of packets read (0 at end of file).
- (UInt32)readPacketsIntoBuffer:(AudioQueueBufferRef)buffer {
    UInt32 numBytes, numPackets;

    // Pull packet data from the file into the buffer.
    numPackets = numPacketsToRead;
    AudioFileReadPackets(audioFile, false, &numBytes, packetDescs,
                         packetIndex, &numPackets, buffer->mAudioData);
    if (numPackets > 0) {
        buffer->mAudioDataByteSize = numBytes;
        // CBR data has no packet descriptions, so the count must be 0 then.
        AudioQueueEnqueueBuffer(queue, buffer,
              (packetDescs ? numPackets : 0), packetDescs);
        packetIndex += numPackets;
    }
    return numPackets;
}

// FIX: added — the original leaked the queue, the open file, and the
// packet-description array. MRC-era code, so [super dealloc] is required.
- (void)dealloc {
    if (queue) {
        AudioQueueDispose(queue, true);
        queue = NULL;
    }
    if (audioFile) {
        AudioFileClose(audioFile);
        audioFile = NULL;
    }
    free(packetDescs);
    packetDescs = NULL;
    [super dealloc];
}

@end

發表評論
所有評論
還沒有人評論,想成為第一個評論的人麼? 請在上方評論欄輸入並且點擊發布.
相關文章