轉載地址:http://blog.csdn.net/shenyi0106/article/details/47004039
在iOS中,實現打開和捕獲麥克風大多是用AVCaptureSession這個組件來實現的,它不僅可以實現音頻捕獲,還可以實現視頻的捕獲。本文將主要實現麥克風音頻的捕獲和編碼。
針對打開麥克風和捕獲音頻的代碼,網上也有一些,我就簡單的整理了一下:
首先,我們需要定義一個AVCaptureSession類型的變量,它是架起在麥克風設備和數據輸出上的一座橋,通過它可以方便的得到麥克風的實時原始數據。
- AVCaptureSession *m_capture
同時,定義一組函數,用來打開和關閉麥克風;爲了能使數據順利的導出,你還需要實現AVCaptureAudioDataOutputSampleBufferDelegate這個協議
- -(void)open;
- -(void)close;
- -(BOOL)isOpen;
// Opens the microphone: builds an AVCaptureSession with the default audio
// device as input and an AVCaptureAudioDataOutput whose sample-buffer
// delegate is self, then starts the session running.
- (void)open {
    NSError *error = nil;
    m_capture = [[AVCaptureSession alloc] init];

    AVCaptureDevice *audioDev = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
    if (audioDev == nil)
    {
        CKPrint(@"Couldn't create audio capture device");
        return;
    }
    // create mic device
    // Check the returned object rather than the NSError out-parameter:
    // Cocoa only guarantees *error on failure, so error may be non-nil
    // (stale) even when the call succeeded.
    AVCaptureDeviceInput *audioIn = [AVCaptureDeviceInput deviceInputWithDevice:audioDev error:&error];
    if (audioIn == nil)
    {
        CKPrint(@"Couldn't create audio input");
        return;
    }
    // add mic device in capture object
    if ([m_capture canAddInput:audioIn] == NO)
    {
        CKPrint(@"Couldn't add audio input"); // original line was missing its semicolon
        return;
    }
    [m_capture addInput:audioIn];
    // export audio data — each captured CMSampleBuffer is delivered to the
    // delegate callback. NOTE(review): a dedicated serial queue would be
    // preferable to the main queue for heavy processing; main queue kept to
    // preserve the original behavior.
    AVCaptureAudioDataOutput *audioOutput = [[AVCaptureAudioDataOutput alloc] init];
    [audioOutput setSampleBufferDelegate:self queue:dispatch_get_main_queue()];
    if ([m_capture canAddOutput:audioOutput] == NO)
    {
        CKPrint(@"Couldn't add audio output");
        return;
    }
    [m_capture addOutput:audioOutput];
    [audioOutput connectionWithMediaType:AVMediaTypeAudio];
    [m_capture startRunning];
}
// Stops the capture session if it is currently running.
// Messaging nil returns NO, so a separate nil check on m_capture is
// unnecessary — [nil isRunning] is already NO.
- (void)close {
    if ([m_capture isRunning])
    {
        [m_capture stopRunning];
    }
}
// Returns YES when the capture session exists and is running.
// (When m_capture is nil, messaging it yields NO, matching the original's
// explicit early return.)
- (BOOL)isOpen {
    return (m_capture != nil) && [m_capture isRunning];
}
// AVCaptureAudioDataOutputSampleBufferDelegate callback: invoked for every
// captured audio sample buffer. Depending on SUPPORT_AAC_ENCODER it either
// AAC-encodes the PCM and forwards the packet, or forwards the raw PCM bytes.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    char szBuf[4096];
    int nSize = sizeof(szBuf);
#if SUPPORT_AAC_ENCODER
    if ([self encoderAAC:sampleBuffer aacData:szBuf aacLen:&nSize] == YES)
    {
        [g_pViewController sendAudioData:szBuf len:nSize channel:0];
    }
#else //#if SUPPORT_AAC_ENCODER
    AudioStreamBasicDescription outputFormat = *(CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer)));
    nSize = (int)CMSampleBufferGetTotalSampleSize(sampleBuffer);
    // Bounds check: the sample buffer can legally exceed 4096 bytes; the
    // original copied it into the fixed stack buffer unchecked, which is a
    // stack buffer overflow.
    if (nSize > (int)sizeof(szBuf))
    {
        CKPrint(@"audio sample too large, dropped");
        return;
    }
    CMBlockBufferRef databuf = CMSampleBufferGetDataBuffer(sampleBuffer);
    if (CMBlockBufferCopyDataBytes(databuf, 0, nSize, szBuf) == kCMBlockBufferNoErr)
    {
        [g_pViewController sendAudioData:szBuf len:nSize channel:outputFormat.mChannelsPerFrame];
    }
#endif
}
當然,由於PCM數據本身比較大,不利於網絡傳輸,所以如果需要進行網絡傳輸時,就需要對數據進行編碼;iOS系統本身支持多種音頻編碼格式,這裏我們就以AAC爲例來實現一個PCM編碼AAC的函數。
在iOS系統中,PCM編碼AAC的例子,在網上也是一找一大片,但是大多都是不太完整的,而且相當一部分都是英文的,對於某些童鞋而言,閱讀起來比較吃力。我這裏就做做好人,把它們整理了一下,寫成了一個函數,方便使用。
在編碼前,需要先創建一個編碼轉換對象
AudioConverterRef m_converter; // AudioToolbox PCM→AAC converter; the original "AVAudioConverterRef" type does not exist
- #if SUPPORT_AAC_ENCODER
// Lazily creates the AudioToolbox converter used to encode captured PCM to
// AAC, deriving the input format from the given sample buffer.
// Returns YES when a converter is (already) available.
-(BOOL)createAudioConvert:(CMSampleBufferRef)sampleBuffer {
    if (m_converter != nil)
    {
        return YES; // already initialized
    }
    // Input format comes straight from the capture sample buffer.
    AudioStreamBasicDescription inputFormat = *(CMAudioFormatDescriptionGetStreamBasicDescription(CMSampleBufferGetFormatDescription(sampleBuffer)));
    AudioStreamBasicDescription outputFormat; // desired AAC output format
    memset(&outputFormat, 0, sizeof(outputFormat));
    outputFormat.mSampleRate = inputFormat.mSampleRate; // keep the capture sample rate
    outputFormat.mFormatID = kAudioFormatMPEG4AAC;      // AAC encoding
    outputFormat.mChannelsPerFrame = 2;
    outputFormat.mFramesPerPacket = 1024; // AAC packs 1024 frames (samples) per packet — not 1024 bytes, as the original comment claimed
    AudioClassDescription *desc = [self getAudioClassDescriptionWithType:kAudioFormatMPEG4AAC fromManufacturer:kAppleSoftwareAudioCodecManufacturer];
    if (desc == nil) // encoder lookup can fail; don't pass NULL to AudioConverterNewSpecific
    {
        CKPrint(@"no matching AAC encoder available");
        return NO;
    }
    if (AudioConverterNewSpecific(&inputFormat, &outputFormat, 1, desc, &m_converter) != noErr)
    {
        CKPrint(@"AudioConverterNewSpecific failed");
        return NO;
    }
    return YES;
}
// Encodes one PCM sample buffer into AAC.
// aacData: caller-supplied output buffer.
// aacLen:  in = buffer capacity in bytes, out = encoded AAC size in bytes.
// Returns YES on success.
-(BOOL)encoderAAC:(CMSampleBufferRef)sampleBuffer aacData:(char*)aacData aacLen:(int*)aacLen {
    if ([self createAudioConvert:sampleBuffer] != YES)
    {
        return NO;
    }
    CMBlockBufferRef blockBuffer = nil;
    AudioBufferList inBufferList;
    // Retains the block buffer backing the PCM bytes; we own it and must CFRelease it.
    if (CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &inBufferList, sizeof(inBufferList), NULL, NULL, 0, &blockBuffer) != noErr)
    {
        CKPrint(@"CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer failed");
        return NO;
    }
    // Single-buffer output list pointing at the caller's AAC buffer.
    AudioBufferList outBufferList;
    outBufferList.mNumberBuffers = 1;
    outBufferList.mBuffers[0].mNumberChannels = 2;
    outBufferList.mBuffers[0].mDataByteSize = *aacLen; // capacity of aacData
    outBufferList.mBuffers[0].mData = aacData;
    UInt32 outputDataPacketSize = 1; // request one AAC packet
    OSStatus status = AudioConverterFillComplexBuffer(m_converter, inputDataProc, &inBufferList, &outputDataPacketSize, &outBufferList, NULL);
    // Release the retained block buffer on BOTH paths — the original leaked
    // it when AudioConverterFillComplexBuffer failed.
    CFRelease(blockBuffer);
    if (status != noErr)
    {
        CKPrint(@"AudioConverterFillComplexBuffer failed");
        return NO;
    }
    *aacLen = outBufferList.mBuffers[0].mDataByteSize; // actual encoded size
    return YES;
}
// Looks up the AudioClassDescription for the requested codec type and
// manufacturer among the installed encoders.
// Returns a pointer to static storage (valid until the next call), or nil
// when the query fails or no matching encoder exists.
-(AudioClassDescription*)getAudioClassDescriptionWithType:(UInt32)type fromManufacturer:(UInt32)manufacturer {
    static AudioClassDescription audioDesc;
    UInt32 encoderSpecifier = type, size = 0;
    OSStatus status;
    memset(&audioDesc, 0, sizeof(audioDesc));
    status = AudioFormatGetPropertyInfo(kAudioFormatProperty_Encoders, sizeof(encoderSpecifier), &encoderSpecifier, &size);
    if (status)
    {
        return nil;
    }
    uint32_t count = size / sizeof(AudioClassDescription);
    AudioClassDescription descs[count];
    status = AudioFormatGetProperty(kAudioFormatProperty_Encoders, sizeof(encoderSpecifier), &encoderSpecifier, &size, descs);
    if (status) // original ignored this failure and would scan uninitialized stack memory
    {
        return nil;
    }
    for (uint32_t i = 0; i < count; i++)
    {
        if ((type == descs[i].mSubType) && (manufacturer == descs[i].mManufacturer))
        {
            memcpy(&audioDesc, &descs[i], sizeof(audioDesc));
            return &audioDesc;
        }
    }
    // No match: return nil so callers can detect it — the original returned
    // a zeroed descriptor, which made AudioConverterNewSpecific fail opaquely.
    return nil;
}
// AudioConverterComplexInputDataProc: during AudioConverterFillComplexBuffer
// the converter calls back here to be fed input PCM; inUserData carries the
// AudioBufferList built from the capture sample buffer.
// (The original declared the last parameter as "voidvoid *" — a paste typo
// that does not compile — and hard-coded mNumberChannels = 1.)
OSStatus inputDataProc(AudioConverterRef inConverter, UInt32 *ioNumberDataPackets, AudioBufferList *ioData, AudioStreamPacketDescription **outDataPacketDescription, void *inUserData) {
    AudioBufferList *bufferList = (AudioBufferList *)inUserData;
    // Mirror the source buffer's channel count instead of hard-coding 1.
    ioData->mBuffers[0].mNumberChannels = bufferList->mBuffers[0].mNumberChannels;
    ioData->mBuffers[0].mData = bufferList->mBuffers[0].mData;
    ioData->mBuffers[0].mDataByteSize = bufferList->mBuffers[0].mDataByteSize;
    // NOTE(review): per the AudioConverter docs this callback should set
    // *ioNumberDataPackets to the number of PCM frames actually supplied
    // (mDataByteSize / input mBytesPerFrame); the input ASBD is not reachable
    // from here without extending inUserData — confirm against the converter's
    // observed behavior before changing.
    return noErr;
}
- #endif
- // Usage example: encode one captured sample buffer into AAC.
- // NOTE(review): this fragment assumes a CMSampleBufferRef named
- // `sampleBuffer` is in scope (e.g. inside the capture delegate callback).
- char szBuf[4096];
- // nSize is in/out: capacity of szBuf going in, encoded AAC size coming out.
- int nSize = sizeof(szBuf);
- if ([self encoderAAC:sampleBuffer aacData:szBuf aacLen:&nSize] == YES)
- {
- // do something
- }