Android Camera系統在啓動預覽或者拍照時,所有預覽幀數據或者拍照數據都是通過graphics buffer來完成傳遞的。本文主要針對graphics buffer在CameraService、CameraProvider、CameraHAL中的申請、傳遞、歸還流程進行簡要的分析。
首先分析下graphics buffer的申請流程
1. graphics buffer申請流程
在Camera 啓動預覽或者拍照時,Camera 應用層會向CameraService發送processCaptureRequest申請,CameraService收到後會申請graphics buffer來承載幀數據,其流程圖如下:
從流程圖上可以清楚的看到,graphics buffer申請是在CameraService::prepareHalRequests開始的,代碼如下:
//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// Prepares the HAL capture requests: for each queued request, allocates one
// graphics buffer per output stream and records them in outputBuffers.
status_t Camera3Device::RequestThread::prepareHalRequests() {
for (size_t i = 0; i < mNextRequests.size(); i++) {
auto& nextRequest = mNextRequests.editItemAt(i);
sp<CaptureRequest> captureRequest = nextRequest.captureRequest;
camera3_capture_request_t* halRequest = &nextRequest.halRequest;
Vector<camera3_stream_buffer_t>* outputBuffers = &nextRequest.outputBuffers;
....
// Insert one empty camera3_stream_buffer_t into outputBuffers per output stream
outputBuffers->insertAt(camera3_stream_buffer_t(), 0,
captureRequest->mOutputStreams.size());
// Point the HAL request at the (still empty) outputBuffers array
halRequest->output_buffers = outputBuffers->array();
// Acquire one graphics buffer per stream and fill outputBuffers one by one
for (size_t j = 0; j < captureRequest->mOutputStreams.size(); j++) {
sp<Camera3OutputStreamInterface> outputStream = captureRequest->mOutputStreams.editItemAt(j);
....
// Acquire a graphics buffer and store it into the j-th element of outputBuffers
res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
captureRequest->mOutputSurfaces[j]);
....
halRequest->num_output_buffers++;
}
}
return OK;
}
Camera3Stream::getBuffer又會調用Camera3OutputStream::getBufferLocked來繼續申請graphics buffer
繼續分析下 Camera3OutputStream::getBufferLocked
//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
// Acquires one graphics buffer (plus its acquire fence) for this output
// stream and hands it out wrapped into the given camera3_stream_buffer.
status_t Camera3OutputStream::getBufferLocked(camera3_stream_buffer *buffer,
const std::vector<size_t>&) {
// The graphics buffer is a GraphicBuffer;
// ANativeWindowBuffer is the parent class of GraphicBuffer
ANativeWindowBuffer* anb;
int fenceFd = -1;
status_t res;
// Acquire the graphics buffer and its fence, stored into anb and fenceFd
res = getBufferLockedCommon(&anb, &fenceFd);
....
// Store the address of the graphics buffer's handle member into the buffer
// member of the camera3_stream_buffer.
// The graphics buffer handle type is native_handle_t*;
// the camera3_stream_buffer's buffer member type is native_handle_t**
handoutBufferLocked(*buffer, &(anb->handle), /*acquireFence*/fenceFd,
/*releaseFence*/-1, CAMERA3_BUFFER_STATUS_OK, /*output*/true);
return OK;
}
繼續分析下Camera3OutputStream::getBufferLockedCommon是如何申請ANativeWindowBuffer型graphics buffer
//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
// Dequeues an ANativeWindowBuffer (and acquire fence) from the consumer
// Surface, then collects any buffers the consumer has freed so they can be
// reported to the HAL later.
status_t Camera3OutputStream::getBufferLockedCommon(ANativeWindowBuffer** anb, int* fenceFd) {
status_t res;
....
bool gotBufferFromManager = false;
// mUseBufferManager is false in this scenario
if (mUseBufferManager) {
...
}
// gotBufferFromManager is false here
if (!gotBufferFromManager) {
....
// mConsumer is a strong pointer to the Surface the app passed into CameraService;
// ANativeWindow is the parent class of Surface
sp<ANativeWindow> currentConsumer = mConsumer;
mLock.unlock();
nsecs_t dequeueStart = systemTime(SYSTEM_TIME_MONOTONIC);
// Acquire the graphics buffer and fenceFd via Surface::dequeueBuffer
res = currentConsumer->dequeueBuffer(currentConsumer.get(), anb, fenceFd);
nsecs_t dequeueEnd = systemTime(SYSTEM_TIME_MONOTONIC);
mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
mLock.lock();
...
}
if (res == OK) {
std::vector<sp<GraphicBuffer>> removedBuffers;
// Fetch the queue of GraphicBuffers that need to be released (removedBuffers);
// removedBuffers must also be forwarded to the HAL
res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
if (res == OK) {
// Stash the removedBuffers obtained above into mFreedBuffers
onBuffersRemovedLocked(removedBuffers);
....
}
}
return res;
}
CameraService通過Surface::dequeueBuffer來獲取graphics buffer後,
又通過Camera3IOStreamBase::handoutBufferLocked將申請到graphics buffer的buffer_handle_t*型 handle封裝到camera3_stream_buffer型buffer。
Surface::dequeueBuffer的流程請參考
Android GraphicBuffer是系統什麼buffer及其分配過程,本文就不詳細介紹了。
下邊介紹下Camera3IOStreamBase::handoutBufferLocked如何將graphics buffer的handle封裝到camera3_stream_buffer中
//frameworks\av\services\camera\libcameraservice\device3\Camera3IOStreamBase.cpp
// Wraps an acquired graphics buffer handle (plus fences and status) into the
// given camera3_stream_buffer, pinning this stream with an extra strong ref.
void Camera3IOStreamBase::handoutBufferLocked(camera3_stream_buffer &buffer,
buffer_handle_t *handle,
int acquireFence,
int releaseFence,
camera3_buffer_status_t status,
bool output) {
incStrong(this);
buffer.stream = this;
// Store the address of the ANativeWindowBuffer's native_handle_t* handle
// into the buffer member of the camera3_stream_buffer;
// that member's type is buffer_handle_t* (i.e. native_handle_t**)
buffer.buffer = handle;
buffer.acquire_fence = acquireFence;
buffer.release_fence = releaseFence;
buffer.status = status;
...
}
至此CameraService完成了graphics buffer的申請並將申請到的 graphics buffer封裝到camera3_stream_buffer 型的buffer中。
因爲在CameraService在向CameraProvider傳遞graphics buffer時,還需要傳遞該removedBuffers,下邊額外分析下onBuffersRemovedLocked函數。
//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
// Notifies the registered listener about every buffer the consumer removed,
// so HAL-side bookkeeping (the bufferId maps) can be updated.
void Camera3OutputStream::onBuffersRemovedLocked(
const std::vector<sp<GraphicBuffer>>& removedBuffers) {
sp<Camera3StreamBufferFreedListener> callback = mBufferFreedListener.promote();
if (callback != nullptr) {
for (auto gb : removedBuffers) {
callback->onBufferFreed(mId, gb->handle);
}
}
}
接着分析下Camera3Device::HalInterface::onBufferFreed
//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// Removes the freed buffer's entry from the per-stream bufferId map and
// queues (streamId, bufferId) so the HAL-side cache entry can be purged later.
void Camera3Device::HalInterface::onBufferFreed(
int streamId, const native_handle_t* handle) {
std::lock_guard<std::mutex> lock(mBufferIdMapLock);
uint64_t bufferId = BUFFER_ID_NO_BUFFER;
auto mapIt = mBufferIdMaps.find(streamId);
if (mapIt == mBufferIdMaps.end()) {
// Stream is gone already; nothing to clean up
return;
}
BufferIdMap& bIdMap = mapIt->second;
auto it = bIdMap.find(handle);
if (it == bIdMap.end()) {
return;
} else {
bufferId = it->second;
bIdMap.erase(it);
}
// Save the (streamId, bufferId) pair into mFreedBuffers
mFreedBuffers.push_back(std::make_pair(streamId, bufferId));
}
經過上述過程完成了將removedBuffers保存到mFreedBuffers。
至此分析完了graphics buffer的申請流程,CameraService首先通過Surface::dequeueBuffer申請獲取ANativeWindowBuffer型graphics buffer,然後將該graphics buffer中類型爲buffer_handle_t*的成員handle地址保存到camera3_stream_buffer中,之後會將該camera3_stream_buffer傳遞給HAL供其使用。
下邊分析下graphics buffer的傳遞流程
2. graphics buffer傳遞流程
在第一小節中已經介紹了graphics buffer的申請過程,接下來介紹下graphics buffer的傳遞過程
2.1 graphics buffer從CameraService傳遞給CameraProvider流程
CameraService通過Surface::dequeueBuffer申請獲取ANativeWindowBuffer型graphics buffer後會將該graphics buffer封裝到camera3_stream_buffer中。然後將該camera3_stream_buffer型buffer封裝到camera3_capture_request_t中,然後通過Camera3Device::HalInterface::processBatchCaptureRequests將該requests傳遞到CameraProvider,代碼如下:
//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// Converts a batch of camera3_capture_request_t into HIDL CaptureRequests and
// sends them, together with the stale BufferCache entries to drop, to the
// CameraProvider over the ICameraDeviceSession HIDL interface.
// NOTE(review): the quoted excerpt had a stray extra '}' after the
// cachesToRemove scope which broke brace balance; it is removed here.
status_t Camera3Device::HalInterface::processBatchCaptureRequests(
std::vector<camera3_capture_request_t*>& requests,/*out*/uint32_t* numRequestProcessed) {
ATRACE_NAME("CameraHal::processBatchCaptureRequests");
if (!valid()) return INVALID_OPERATION;
hardware::hidl_vec<device::V3_2::CaptureRequest> captureRequests;
size_t batchSize = requests.size();
captureRequests.resize(batchSize);
std::vector<native_handle_t*> handlesCreated;
// Convert camera3_capture_request_t requests into device::V3_2::CaptureRequest;
// this also converts the camera3_stream_buffer_t output_buffers into StreamBuffer
for (size_t i = 0; i < batchSize; i++) {
wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
}
// Convert the entries of mFreedBuffers into BufferCache records
std::vector<device::V3_2::BufferCache> cachesToRemove;
{
std::lock_guard<std::mutex> lock(mBufferIdMapLock);
// mFreedBuffers holds the buffers found removable while acquiring graphics buffers
for (auto& pair : mFreedBuffers) {
// The stream might have been removed since onBufferFreed
if (mBufferIdMaps.find(pair.first) != mBufferIdMaps.end()) {
cachesToRemove.push_back({pair.first, pair.second});
}
}
mFreedBuffers.clear();
}
.....
// CameraService talks to CameraProvider here, passing captureRequests and
// cachesToRemove across the process boundary.
// mHidlSession is a BpHwCameraDeviceSession, the proxy of the CameraProvider service
auto err = mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
[&status, &numRequestProcessed] (auto s, uint32_t n) {
status = s;
*numRequestProcessed = n;
});
....
return CameraProviderManager::mapToStatusT(status);
}
CameraService在和CameraProvider通信時,首先將camera3_capture_request_t數據轉換爲CameraProvider識別的數據類型device::V3_2::CaptureRequest,然後通過BpHwCameraDeviceSession型CameraProvider服務的代理對象mHidlSession,跨進程將captureRequests及cachesToRemove傳遞給CameraProvider。
其中會將camera3_stream_buffer_t型的output_buffers轉換爲StreamBuffer類型,轉換函數爲wrapAsHidlRequest
//frameworks\av\services\camera\libcameraservice\device3\Camera3Device.cpp
// Converts one camera3_capture_request_t into a HIDL V3_2::CaptureRequest.
// Output buffers become StreamBuffers; fence fds are wrapped into freshly
// created native handles which are reported back via handlesCreated.
void Camera3Device::HalInterface::wrapAsHidlRequest(camera3_capture_request_t* request,
/*out*/device::V3_2::CaptureRequest* captureRequest,
/*out*/std::vector<native_handle_t*>* handlesCreated) {
....
// frameNumber is copied from request->frame_number
captureRequest->frameNumber = request->frame_number;
// fmqSettingsSize is set to 0
captureRequest->fmqSettingsSize = 0;
{
.....
// Convert each camera3_stream_buffer_t into a StreamBuffer
captureRequest->outputBuffers.resize(request->num_output_buffers);
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t *src = request->output_buffers + i;
StreamBuffer &dst = captureRequest->outputBuffers[i];
int32_t streamId = Camera3Stream::cast(src->stream)->getId();
// Equivalent to: const native_handle_t* buf = *(src->buffer);
// (buffer_handle_t is a single pointer, not native_handle_t**)
buffer_handle_t buf = *(src->buffer);
// Check whether this buffer has already been sent to the HAL before
auto pair = getBufferId(buf, streamId);
bool isNewBuffer = pair.first;
ALOGE("%s: isNewBuffer %d", __FUNCTION__, isNewBuffer);//
dst.streamId = streamId;
dst.bufferId = pair.second;
// See hardware\interfaces\camera\device\3.2\types.hal for details.
// Per the source comments, dst.buffer may be null; when null the HAL must
// look up the actual buffer through dst.bufferId
dst.buffer = isNewBuffer ? buf : nullptr;
dst.status = BufferStatus::OK;
// Fence handling: wrap the acquire fence fd into a native handle
native_handle_t *acquireFence = nullptr;
if (src->acquire_fence != -1) {
acquireFence = native_handle_create(1,0);
acquireFence->data[0] = src->acquire_fence;
handlesCreated->push_back(acquireFence);
}
dst.acquireFence = acquireFence;
dst.releaseFence = nullptr;
// Records the buffer as in-flight for this frame (exact purpose noted as
// "to be investigated" by the original author)
pushInflightBufferLocked(captureRequest->frameNumber, streamId,
src->buffer, src->acquire_fence);
}
}
}
上述過程,將camera3_stream_buffer_t類型的buffer轉換爲了StreamBuffer類型
接着介紹下StreamBuffer的定義
//hardware\interfaces\camera\device\3.2\types.hal
// HIDL representation of a single graphics buffer belonging to a stream,
// identified by the pair (streamId, bufferId).
struct StreamBuffer {
// Id of the stream this buffer belongs to
int32_t streamId;
// Id of the buffer within its stream; lets the HAL cache buffer handles
uint64_t bufferId;
/**
 * The graphics buffer handle to the buffer.
 *
 * For StreamBuffers sent to the HAL in a CaptureRequest, if the bufferId
 * is not seen by the HAL before, this buffer handle is guaranteed to be a
 * valid handle to a graphics buffer, with dimensions and format matching
 * that of the stream. If the bufferId has been sent to the HAL before, this
 * buffer handle must be null and HAL must look up the actual buffer handle
 * to use from its own bufferId to buffer handle map.
 *
 * For StreamBuffers returned in a CaptureResult, this must be null, since
 * the handle to the buffer is already known to the client (since the client
 * sent it in the matching CaptureRequest), and the handle can be identified
 * by the combination of frame number and stream ID.
 */
handle buffer;
// Current status of the buffer
Status status;
// Acquire fence handle (may be null)
handle acquireFence;
// Release fence handle (may be null)
handle releaseFence;
};
從備註上看,只有graphics buffer是第一次申請的時候,纔會傳遞graphics buffer handle,其他時候只需要傳遞bufferId即可。
CameraProvider服務端響應CameraProvider代理對象發來的processCaptureRequest申請的接口是CameraDeviceSession::processCaptureRequest。
CameraProvider響應processCaptureRequest的流程圖如下:
下邊分析下CameraDeviceSession::processCaptureRequest
//hardware\interfaces\camera\device\3.2\default\CameraDeviceSession.cpp
// CameraProvider-side entry for processCaptureRequest: drops stale cached
// buffers, forwards each request to the HAL one by one, and reports how many
// requests were processed through the HIDL callback.
Return<void> CameraDeviceSession::processCaptureRequest(
const hidl_vec<CaptureRequest>& requests,
const hidl_vec<BufferCache>& cachesToRemove,
ICameraDeviceSession::processCaptureRequest_cb _hidl_cb) {
updateBufferCaches(cachesToRemove);
uint32_t numRequestProcessed = 0;
Status s = Status::OK;
for (size_t i = 0; i < requests.size(); i++, numRequestProcessed++) {
// Keep forwarding the request downstream.
// NOTE(review): as excerpted, the loop does not stop on failure; only the
// last status is reported back
s = processOneCaptureRequest(requests[i]);
}
// Stash this batch of requests in mResultBatcher
if (s == Status::OK && requests.size() > 1) {
mResultBatcher.registerBatch(requests);
}
_hidl_cb(s, numRequestProcessed);
return Void();
}
接着分析下processOneCaptureRequest
// Converts one HIDL CaptureRequest back into a camera3_capture_request_t and
// hands it to the legacy HAL through process_capture_request().
Status CameraDeviceSession::processOneCaptureRequest(const CaptureRequest& request) {
Status status = initStatus();
// Create an empty camera3_capture_request_t object halRequest
camera3_capture_request_t halRequest;
halRequest.frame_number = request.frameNumber;
// Convert the HIDL CameraMetadata into camera_metadata_t
converted = convertFromHidl(request.settings, &halRequest.settings);
....
hidl_vec<buffer_handle_t*> allBufPtrs;
hidl_vec<int> allFences;
bool hasInputBuf = (request.inputBuffer.streamId != -1 &&
request.inputBuffer.bufferId != 0);
size_t numOutputBufs = request.outputBuffers.size();
size_t numBufs = numOutputBufs + (hasInputBuf ? 1 : 0);
// Convert the StreamBuffers in the V3_2::CaptureRequest into
// buffer_handle_t* entries in allBufPtrs
status = importRequest(request, allBufPtrs, allFences);
hidl_vec<camera3_stream_buffer_t> outHalBufs;
outHalBufs.resize(numOutputBufs);
bool aeCancelTriggerNeeded = false;
::android::hardware::camera::common::V1_0::helper::CameraMetadata settingsOverride;
{
Mutex::Autolock _l(mInflightLock);
// Wrap allBufPtrs/allFences into camera3_stream_buffer_t entries (outHalBufs)
halRequest.num_output_buffers = numOutputBufs;
for (size_t i = 0; i < numOutputBufs; i++) {
auto key = std::make_pair(request.outputBuffers[i].streamId, request.frameNumber);
auto& bufCache = mInflightBuffers[key] = camera3_stream_buffer_t{};
// Pack allBufPtrs[i], the buffer status, stream and fence into one
// camera3_stream_buffer_t
convertFromHidl(
allBufPtrs[i], request.outputBuffers[i].status,
&mStreamMap[request.outputBuffers[i].streamId], allFences[i],
&bufCache);
outHalBufs[i] = bufCache;
}
// Point halRequest.output_buffers at the camera3_stream_buffer_t array built above
halRequest.output_buffers = outHalBufs.data();
.....
}
....
// Enter HAL3 here;
// halRequest's type is camera3_capture_request_t
status_t ret = mDevice->ops->process_capture_request(mDevice, &halRequest);
}
從上述代碼可以看出,CameraProvider又會將device::V3_2::CaptureRequest類的request轉換爲camera3_capture_request_t類型,然後傳遞給hal層。
其中importRequest函數會將device::V3_2::CaptureRequest中的StreamBuffer型buffer轉換爲buffer_handle_t類型的buffer
//hardware\interfaces\camera\device\3.2\default\CameraDeviceSession.cpp
// Imports the buffers and fences referenced by one CaptureRequest: resolves
// each (streamId, bufferId) either to the handle carried in the request
// (first time seen) or to the cached handle in mCirculatingBuffers.
Status CameraDeviceSession::importRequest(
const CaptureRequest& request,
hidl_vec<buffer_handle_t*>& allBufPtrs,
hidl_vec<int>& allFences) {
// hasInputBuf is false in this scenario
bool hasInputBuf = (request.inputBuffer.streamId != -1 &&
request.inputBuffer.bufferId != 0);
size_t numOutputBufs = request.outputBuffers.size();
size_t numBufs = numOutputBufs + (hasInputBuf ? 1 : 0);
// Validate all I/O buffers
hidl_vec<buffer_handle_t> allBufs;
hidl_vec<uint64_t> allBufIds;
allBufs.resize(numBufs);
allBufIds.resize(numBufs);
allBufPtrs.resize(numBufs);
allFences.resize(numBufs);
std::vector<int32_t> streamIds(numBufs);
for (size_t i = 0; i < numOutputBufs; i++) {
// As analyzed earlier, allBufs[i] may be null (bufferId already cached)
allBufs[i] = request.outputBuffers[i].buffer.getNativeHandle();
// NOTE(review): this debug log reads allBufIds[i] before it is assigned on
// the next line, so it prints a stale/uninitialized value
ALOGE("%s: bufferId %" PRIu64 " allBufs[%zu] =%p", __FUNCTION__, allBufIds[i],i,allBufs[i] );
allBufIds[i] = request.outputBuffers[i].bufferId;
allBufPtrs[i] = &allBufs[i];
streamIds[i] = request.outputBuffers[i].streamId;
}
.....
for (size_t i = 0; i < numBufs; i++) {
buffer_handle_t buf = allBufs[i];
uint64_t bufId = allBufIds[i];
CirculatingBuffers& cbs = mCirculatingBuffers[streamIds[i]];
// If mCirculatingBuffers has no entry for bufId, this buffer was never sent
// before and buf must not be null.
// If an entry exists, just take the cached buffer_handle_t from mCirculatingBuffers.
if (cbs.count(bufId) == 0) {
if (buf == nullptr) {
ALOGE("%s: bufferId %" PRIu64 " has null buffer handle!", __FUNCTION__, bufId);
return Status::ILLEGAL_ARGUMENT;
}
// Register the new buffer into mCirculatingBuffers
// Register a newly seen buffer
buffer_handle_t importedBuf = buf;
// The handle must be imported (cloned) once for this process
sHandleImporter.importBuffer(importedBuf);
if (importedBuf == nullptr) {
ALOGE("%s: output buffer %zu is invalid!", __FUNCTION__, i);
return Status::INTERNAL_ERROR;
} else {
// Cache the imported handle in mCirculatingBuffers
cbs[bufId] = importedBuf;
}
}
allBufPtrs[i] = &cbs[bufId];
}
// At this point no element of allBufPtrs is null; validate the acquire fences
// All buffers are imported. Now validate output buffer acquire fences
for (size_t i = 0; i < numOutputBufs; i++) {
if (!sHandleImporter.importFence(
request.outputBuffers[i].acquireFence, allFences[i])) {
ALOGE("%s: output buffer %zu acquire fence is invalid", __FUNCTION__, i);
cleanupInflightFences(allFences, i);
return Status::INTERNAL_ERROR;
}
}
....
return Status::OK;
}
從上述流程看,在將camera3_stream_buffer_t從CameraService傳入CameraProvider時,CameraService會先將camera3_stream_buffer_t轉換爲StreamBuffer。在CameraProvider傳遞給hal時,CameraProvider又將StreamBuffer轉換回camera3_stream_buffer_t。
至此分析完成了graphics buffer從CameraService傳遞給CameraProvider流程,及CameraProvider傳遞給Hal的分析
接着繼續分析下CameraProvider向HAL層傳遞graphics buffer的流程。
2.2 graphics buffer從CameraProvider傳遞給Camera HAL層流程
由於CameraProvider和HAL層屬於一個進程,CameraProvider只需要調用下HAL層的process_capture_request接口即可完成graphics buffer的傳遞。
HAL層接口定義如下:
camer_module定義如下:
//vendor\qcom\proprietary\camx\src\core\hal\camxhal3entry.cpp
// camera_module_t exported by the QTI CamX HAL. HAL_MODULE_INFO_SYM is the
// well-known symbol the loader looks up when opening the HAL module.
CAMX_VISIBILITY_PUBLIC camera_module_t HAL_MODULE_INFO_SYM =
{
{
....
CAMERA_MODULE_API_VERSION_2_4,
....
"QTI Camera HAL",
"Qualcomm Technologies, Inc.",
&CamX::g_hwModuleMethods,
...
},
CamX::get_number_of_cameras,
...
};
Camera3Device定義如下:
//vendor\qcom\proprietary\camx\src\core\hal\camxcommontypes.h
// CamX-side camera3_device equivalent: pairs the hw device header with the
// camera3 device ops table and a private pointer back to the owning HALDevice.
struct Camera3Device
{
HwDevice hwDevice;
Camera3DeviceOps* pDeviceOps;
VOID* pPrivateData;
};
實現代碼如下:
//vendor\qcom\proprietary\camx\src\core\hal\camxhaldevice.cpp
// Fills in the camera3 device structure: API version, the device ops table,
// and a back pointer to this HALDevice instance.
CamxResult HALDevice::Initialize(
const HwModule* pHwModule,
UINT32 cameraId)
{
m_camera3Device.hwDevice.version = CAMERA_DEVICE_API_VERSION_3_3;
m_camera3Device.pDeviceOps = g_camera3DeviceOps;
m_camera3Device.pPrivateData = this;
}
g_camera3DeviceOps實現代碼如下:
//vendor\qcom\proprietary\camx\src\core\ha\camxhal3entry.cpp
// camera3 device ops table; process_capture_request is the entry the
// CameraProvider calls to hand a frame request to the CamX HAL.
static camera3_device_ops_t g_camera3DeviceOps =
{
....
CamX::construct_default_request_settings,
CamX::process_capture_request,
...
};
下邊介紹下graphics buffer在HAL層的傳遞流程
2.3 graphics buffer在HAL層的傳遞流程
graphics buffer在HAL層的傳遞流程涉及代碼太多了,其大致流程圖如下:
中間詳細流程不再詳細介紹了,我們從CameraUsecaseBase開始分析。
//vendor\qcom\proprietary\chi-cdk\vendor\chioverride\default\chxadvancedcamerausecase.cpp
// CHI usecase entry: copies the framework's output buffers, wraps them into a
// CHICAPTUREREQUEST / CHIPIPELINEREQUEST, and submits the request downstream.
CDKResult CameraUsecaseBase::ExecuteCaptureRequest(
camera3_capture_request_t* pRequest)
{
....
camera3_stream_buffer_t outputBuffers[NumOutputBuffers] = { { 0 } };
{
// Copy pRequest's camera3_stream_buffer_t output_buffers into outputBuffers
ChxUtils::Memcpy(&outputBuffers[outputCount],
&pRequest->output_buffers[buffer],
sizeof(camera3_stream_buffer_t));
....
}
....
if (0 < outputCount)
{
// Wrap the graphics buffer handles into a CHICAPTUREREQUEST
CHICAPTUREREQUEST request = { 0 };
UINT32 sensorModeIndex;
.....
request.frameNumber = pRequest->frame_number;
....
request.numOutputs = outputCount;
// Reinterpret outputBuffers as CHISTREAMBUFFER and attach to request.pOutputBuffers
request.pOutputBuffers = reinterpret_cast<CHISTREAMBUFFER*>(outputBuffers);
request.pMetadata = pRequest->settings;
// Wrap the capture request into a CHIPIPELINEREQUEST (submitRequest)
CHIPIPELINEREQUEST submitRequest = { 0 };
submitRequest.pSessionHandle = reinterpret_cast<CHIHANDLE>(
m_sessions[session].pSession->GetSessionHandle());
submitRequest.numRequests = 1;
// Attach the CHICAPTUREREQUEST to the CHIPIPELINEREQUEST
submitRequest.pCaptureRequests = &request;
submitRequest.m_cameraId = GetCameraId();
.....
// Keep passing the request (including graphics buffer handles) downstream
result = ExtensionModule::GetInstance()->SubmitRequest(&submitRequest);
}
}
}
return result;
}
從上邊的分析可以看到,AdvancedCameraUsecase::ExecuteCaptureRequest會將CameraService傳遞下來的包含graphics buffer handle的camera3_stream_buffer_t型buffer封裝到CHISTREAMBUFFER型buffer中,然後通過SubmitRequest繼續下傳Camera Request
//vendor\qcom\proprietary\camx\src\core\chi\camxchi.cpp
// Thin CHI entry point: forwards the pipeline request to the ChiContext.
static CDKResult ChiSubmitPipelineRequest(
CHIHANDLE hChiContext,
CHIPIPELINEREQUEST* pRequest)
{
...
// Keep passing the camera request downstream
result = pChiContext->SubmitRequest(pCHISession, pRequest);
return result;
}
//vendor\qcom\proprietary\camx\src\core\chi\camxchicontext.cpp
// Forwards the pipeline request to the CHI session.
CamxResult ChiContext::SubmitRequest(
CHISession* pSession,
ChiPipelineRequest* pRequest)
{
// Keep passing the camera request downstream
result = pSession->ProcessCaptureRequest(pRequest);
}
接着進入Session ProcessCaptureRequest
//vendor\qcom\proprietary\camx\src\core\camxsession.cpp
// Session entry (excerpt): throttles on live pending requests, copies each
// incoming ChiCaptureRequest (including its output buffers) into the
// session's m_captureRequest bookkeeping, queues it, then kicks the worker
// thread so the binder transaction can return early.
CamxResult Session::ProcessCaptureRequest(
const ChiPipelineRequest* pPipelineRequests)
{
CamxResult result = CamxResultEFailed;
......
// If m_livePendingRequests reaches m_maxLivePendingRequests, a frame must be
// returned before another request can be accepted
while (m_livePendingRequests >= m_maxLivePendingRequests)
{
...
resultWait = m_pWaitLivePendingRequests->TimedWait(m_pLivePendingRequestsLock->GetNativeHandle(), waitTime);
....
}
.....
// Create an empty ChiCaptureRequest array
ChiCaptureRequest requests[MaxPipelinesPerSession];
for (UINT requestIndex = 0; requestIndex < numRequests; requestIndex++)
{
const ChiCaptureRequest* pCaptureRequest = &(pPipelineRequests->pCaptureRequests[requestIndex]);
....
// Copy the ChiCaptureRequest out of the ChiPipelineRequest
CamX::Utils::Memcpy(&requests[requestIndex], pCaptureRequest, sizeof(ChiCaptureRequest));
// Important: wait on the acquire fence so the graphics buffer is really
// available before it is used
result = WaitOnAcquireFence(&requests[requestIndex]);
// Save the request parameters into the SessionCaptureRequest member
// m_captureRequest. SessionCaptureRequest is defined as:
// struct SessionCaptureRequest
// {
//     CaptureRequest requests[MaxPipelinesPerSession];
//     UINT32 numRequests;
// };
CaptureRequest* pRequest = &(m_captureRequest.requests[requestIndex]);
pRequest->streamBuffers[m_batchedFrameIndex[pipelinIndex]].numOutputBuffers =
requests[requestIndex].numOutputs;
for (UINT i = 0; i < requests[requestIndex].numOutputs; i++)
{
// Copy the graphics buffer handles into m_captureRequest.requests.
// requests[requestIndex].pOutputBuffers is of type ChiStreamBuffer;
// pRequest->streamBuffers[...].outputBuffers is of type Camera3StreamBuffer;
// the two layouts are identical (the duplication presumably decouples
// CHI types from HAL3 types)
Utils::Memcpy(&pRequest->streamBuffers[m_batchedFrameIndex[pipelinIndex]].outputBuffers[i],
&requests[requestIndex].pOutputBuffers[i],
sizeof(ChiStreamBuffer));
}
.....
// Enqueue m_captureRequest into m_pRequestQueue for later processing
result = m_pRequestQueue->EnqueueWait(&m_captureRequest);
.....
if (CamxResultSuccess == result)
{
// Trigger asynchronous processing on m_pThreadManager.
// This ends the binder transaction early, avoiding binder timeouts
VOID* pData[] = {this, NULL};
result = m_pThreadManager->PostJob(m_hJobFamilyHandle,
NULL,
&pData[0],
FALSE,
FALSE);
}
m_pThreadManager中的線程被觸發,會執行ThreadJobCallback,接着會執行ThreadJobExecute,
//vendor\qcom\proprietary\camx\src\core\chi\camxchisession.cpp
// Worker-thread job body: drains pending results first (if flagged), then
// processes the next queued request; on failure flushes requests instead.
CamxResult CHISession::ThreadJobExecute()
{
CamxResult result = CamxResultSuccess;
if (TRUE == static_cast<BOOL>(CamxAtomicLoad32(&m_aCheckResults)))
{
result = ProcessResults();
}
if (CamxResultSuccess == result)
{
result = ProcessRequest();
}
else
{
FlushRequests(FALSE);
}
return result;
}
從上述代碼看,線程先執行ProcessResults,若成功則接着執行ProcessRequest,否則執行FlushRequests
我們重點關注graphics buffer下傳hal的相關流程ProcessRequest
//vendor\qcom\proprietary\camx\src\core\camxsession.cpp
// Dequeues one SessionCaptureRequest from m_pRequestQueue and dispatches each
// contained CaptureRequest to its pipeline.
CamxResult Session::ProcessRequest()
{
CamxResult result = CamxResultSuccess;
SessionCaptureRequest* pSessionRequest = NULL;
// Dequeue one frame request (pSessionRequest) from m_pRequestQueue
pSessionRequest = static_cast<SessionCaptureRequest*>(m_pRequestQueue->Dequeue());
......
if (NULL != pSessionRequest)
{
.....
for (UINT requestIndex = 0; requestIndex < pSessionRequest->numRequests; requestIndex++)
{
CaptureRequest* pRequest = &(pSessionRequest->requests[requestIndex]);
// Pipeline to process this Request
PipelineProcessRequestData pipelineProcessRequestData = { 0 };
// Attach the CaptureRequest to pipelineProcessRequestData.pCaptureRequest
pipelineProcessRequestData.pCaptureRequest = pRequest;
// Fill in pipelineProcessRequestData.perBatchedFrameInfo
DetermineActiveStreams(&pipelineProcessRequestData);
......
// Forward the frame request (with its graphics buffer handles) to the pipeline
result = m_pipelineData[pRequest->pipelineIndex].pPipeline->ProcessRequest(&pipelineProcessRequestData);
.....
}
......
}
return result;
}
在繼續分析Pipeline::ProcessRequest,先分析下DetermineActiveStreams(&pipelineProcessRequestData);
//vendor\qcom\proprietary\camx\src\core\camxsession.cpp
// For every batched frame, records which streams are active and stores each
// output buffer's graphics buffer handle into perBatchedFrameInfo.phBuffers.
VOID Session::DetermineActiveStreams(
PipelineProcessRequestData* pPipelineProcessRequestData
) const
{
// The SessionCaptureRequest.requests[] entry dequeued from m_pRequestQueue
const CaptureRequest* pCaptureRequest = pPipelineProcessRequestData->pCaptureRequest;
for (UINT frameIndex = 0; frameIndex < pCaptureRequest->numBatchedFrames; frameIndex++)
{
PerBatchedFrameInfo* pTopologyPerFrameInfo = &pPipelineProcessRequestData->perBatchedFrameInfo[frameIndex];
.....
for (UINT i = 0; i < pCaptureRequest->streamBuffers[frameIndex].numOutputBuffers; i++)
{
// Read the ChiStreamBuffer outputBuffers from pCaptureRequest;
// ChiStreamBuffer has the same layout as camera3_stream_buffer_t
const ChiStreamBuffer* pOutputBuffer =
reinterpret_cast<const ChiStreamBuffer*>(&pCaptureRequest->streamBuffers[frameIndex].outputBuffers[i]);
....
// Store the BufferHandle from the ChiStreamBuffer into phBuffers.
// BufferHandle has the same layout as native_handle; this is the very
// graphics buffer handle CameraService passed down
pTopologyPerFrameInfo->phBuffers[streamId] = reinterpret_cast<BufferHandle*>(pOutputBuffer->phBuffer);
}
}
}
從上邊的分析看,pipelineProcessRequestData.perBatchedFrameInfo.phBuffers就是CameraService 調用Surface::dequeueBuffer()獲取的ANativeWindowBuffer類型graphics buffer handle
接着分析下Pipeline::ProcessRequest
//vendor\qcom\proprietary\camx\src\core\camxpipeline.cpp
// Hands the per-batched-frame info (which carries the graphics buffer
// handles) to every node of the pipeline via Node::SetupRequest.
CamxResult Pipeline::ProcessRequest(
PipelineProcessRequestData* pPipelineRequestData)
{
.....
PerBatchedFrameInfo* pPerBatchedFrameInfo = &pPipelineRequestData->perBatchedFrameInfo[0];
.....
if (CamxResultSuccess == result)
{
UINT32 nodesEnabled = 0;
for (UINT i = 0; i < m_nodeCount ; i++)
{
BOOL isNodeEnabled = FALSE;
// Walk all m_ppNodes and call SetupRequest on each,
// passing pPerBatchedFrameInfo (which contains the graphics buffers)
m_ppNodes[i]->SetupRequest(pPerBatchedFrameInfo,
pDifferentActiveStreams,
requestId,
pCaptureRequest->CSLSyncID,
&isNodeEnabled);
......
}
....
}
return result;
}
我們重點關注下Node::SetupRequest
//vendor\qcom\proprietary\camx\src\core\camxnode.cpp
// Per-node request setup: wires up the node's output and input ports for this
// request when the node is enabled.
CamxResult Node::SetupRequest(
PerBatchedFrameInfo* pPerBatchedFrameInfo,
UINT* pDifferentActiveStreams,
UINT64 requestId,
UINT64 syncId,
BOOL* pIsEnabled)
{
......
if (TRUE == IsNodeEnabled())
{
// Initialize the node's output and input ports.
// NOTE(review): as excerpted, the result of SetupRequestOutputPorts is
// overwritten by SetupRequestInputPorts
result = SetupRequestOutputPorts(pPerBatchedFrameInfo);
result = SetupRequestInputPorts(pPerBatchedFrameInfo);
*pIsEnabled = TRUE;
}
......
return result;
}
下邊接着分析下Node::SetupRequestOutputPorts
//vendor\qcom\proprietary\camx\src\core\camxnode.cpp
// Sets up this node's output ports for the request: creates a private fence,
// registers the CSL fence callback, and imports the framework graphics buffer
// handle into an ImageBuffer for every enabled sink port.
// Fix vs. the quoted original: the batched-frame loop condition was missing
// its loop variable ("for (UINT i = 0; < numBatchedFrames ; i++)").
CamxResult Node::SetupRequestOutputPorts(
PerBatchedFrameInfo* pPerBatchedFrameInfo)
{
CamxResult result = CamxResultSuccess;
UINT requestIdIndex = m_tRequestId % MaxRequestQueueDepth;
PerRequestActivePorts* pRequestPorts = &m_perRequestInfo[requestIdIndex].activePorts;
pRequestPorts->numOutputPorts = 0;
for (UINT portIndex = 0; portIndex < m_outputPortsData.numPorts; portIndex++)
{
if (TRUE == IsOutputPortEnabled(portIndex))
{
OutputPort* pOutputPort = &m_outputPortsData.pOutputPorts[portIndex];
if (pOutputPort->bufferProperties.maxImageBuffers > 0)
{
.....
NodeFenceHandlerData* pFenceHandlerData =
&pOutputPort->pFenceHandlerData[(m_tRequestId % maxImageBuffers)];
.....
if (TRUE == IsSinkPortWithBuffer(portIndex))
{
....
// Create the fence used for synchronization
result = CSLCreatePrivateFence("NodeOutputPortFence", &hNewFence);
......
// Register the new hNewFence with the kernel.
// The kernel triggers this fence when it finishes its work (e.g. when
// IPE finishes processing a frame), and the registered CSLFenceCallback
// calls back into the HAL.
// pFenceHandlerData->outputBufferInfo[] is not yet filled with real buffers here
result = CSLFenceAsyncWait(hNewFence,
Node::CSLFenceCallback,
&pFenceHandlerData->nodeCSLFenceCallbackData);
......
m_pPipeline->RegisterRequestFence(&pFenceHandlerData->hFence, m_tRequestId);
......
for (UINT i = 0; i < numBatchedFrames ; i++)
{
// Is the output port enabled for the frame in the batch
if (TRUE == Utils::IsBitSet(pPerBatchedFrameInfo[i].activeStreamIdMask, outputPortStreamId))
{
FenceHandlerBufferInfo* pFenceHandlerBufferInfo =
&pFenceHandlerData->outputBufferInfo[pFenceHandlerData->numOutputBuffers];
// Create an ImageBuffer object around the graphics buffer handle
ImageBuffer* pImageBuffer = NULL;
pImageBuffer = pOutputPort->ppImageBuffers[batchedFrameIndex];
// Expect that no ImageBuffer is attached yet
CAMX_ASSERT(NULL == pImageBuffer);
// Fetch an ImageBuffer from pImageBufferManager
if (NULL == pImageBuffer)
{
m_pBufferRequestLock->Lock();
pImageBuffer = pOutputPort->pImageBufferManager->GetImageBuffer();
m_pBufferRequestLock->Unlock();
}
if (NULL != pImageBuffer)
{
......
// phNativeHandle is the ANativeWindowBuffer graphics buffer handle
// (type BufferHandle) that CameraService acquired via
// Surface::dequeueBuffer and passed down through processCaptureRequest
BufferHandle* phNativeHandle = pPerBatchedFrameInfo[i].phBuffers[outputPortStreamId];
if (NULL != phNativeHandle)
{
const ImageFormat* pImageFormat = &pOutputPort->bufferProperties.imageFormat;
UINT32 flags = CSLMemFlagHw;
......
// Map the graphics buffer handle (phNativeHandle) into the kernel
result = pImageBuffer->Import(pImageFormat,
*phNativeHandle,
0, // Offset
ImageFormatUtils::GetTotalSize(pImageFormat),
flags,
&m_deviceIndices[0],
m_deviceIndexCount);
.......
// Attach pImageBuffer to pOutputPort->ppImageBuffers
pOutputPort->ppImageBuffers[batchedFrameIndex] = pImageBuffer;
// Fill pFenceHandlerData->outputBufferInfo[] with the real buffer:
// once the kernel fills the frame data, pFenceHandlerBufferInfo->pImageBuffer
// holds the filled frame as well
pFenceHandlerBufferInfo->pImageBuffer = pImageBuffer;
pFenceHandlerBufferInfo->phNativeHandle = phNativeHandle;
.....
}
}
}
}
}
.....
}
.....
}
}
.....
return result;
}
現在重點分析下phNativeHandle的映射函數ImageBuffer::Import
//vendor\qcom\proprietary\camx\src\core\camximagebuffer.cpp
// Imports a native (graphics) buffer handle into the CSL/KMD layer and, on
// success, records the resulting CSL buffer info in this ImageBuffer.
CamxResult ImageBuffer::Import(
const ImageFormat* pFormat,
// Graphics buffer handle passed down from CameraService;
// NativeHandle and BufferHandle are the same type
const NativeHandle* phNativeBuffer,
SIZE_T offset,
SIZE_T size,
UINT32 flags,
const INT32* pDeviceIndices,
UINT deviceCount)
{
CamxResult result = CamxResultSuccess;
if (CamxResultSuccess == result)
{
CSLBufferInfo bufferInfo;
....
// Map the buffer's memory through the CSL layer
result = CSLMapNativeBuffer(&bufferInfo/*output*/,
reinterpret_cast<const CSLNativeHandle*>(phNativeBuffer)/*input*/,
offset,
size,
flags,
pDeviceIndices,
deviceCount);
// Only update internal state if success
// On success ImportCSLBuffer updates this ImageBuffer's members
// m_hBuffer, m_pVirtualAddr and m_fileDescriptor
if (CamxResultSuccess == result)
{
result = ImportCSLBuffer(pFormat, &bufferInfo);
if (CamxResultSuccess == result)
{
m_format = *pFormat;
m_phNativeBuffer = phNativeBuffer;
}
....
}
}
return result;
}
重點看一下內存映射函數
//CSLMapNativeBuffer對應的CSL方法
//vendor\qcom\proprietary\camx\src\csl\hw\camxcslhw.cpp
// CSL hardware implementation behind CSLMapNativeBuffer: maps a graphics
// buffer fd into the camera KMD via the CAM_REQ_MGR_MAP_BUF ioctl and records
// the resulting buffer handle in CSL's internal tables.
CamxResult CSLMapBufferHW(
CSLBufferInfo* pBufferInfo/*output*/,
INT bufferFD/*input*/,
SIZE_T offset,
SIZE_T bufferLength,
UINT32 CSLFlags,
const INT32* pDeviceIndices,
UINT deviceCount)
{
CamxResult result = CamxResultSuccess;
CSLHwDeviceOps* pDeviceOp = &g_CSLHwInstance.requestManager.deviceOp;
if ((0 != CSLFlags) && (NULL != pBufferInfo) && (0 != bufferLength) && (bufferFD > 0))
{
if (TRUE == CSLHwInstanceGetRefCount())
{
/* Definition of cam_mem_mgr_map_cmd:
//kernel\msm-4.9\include\uapi\media\cam_req_mgr.h
struct cam_mem_mgr_map_cmd {
int32_t mmu_hdls[CAM_MEM_MMU_MAX_HANDLE];
uint32_t num_hdl;
uint32_t flags;
int32_t fd;
uint32_t reserved;
struct cam_mem_map_out_params out;
};
Definition of cam_mem_map_out_params:
struct cam_mem_map_out_params {
uint32_t buf_handle;
uint32_t reserved;
uint64_t vaddr;
};
*/
struct cam_mem_mgr_map_cmd mapCmd = {};
if ((CSLFlags & CSLMemFlagHw) || (CSLFlags & CSLMemFlagProtected))
{
if ((NULL != pDeviceIndices) && (0 != deviceCount))
{
result = CSLHwPopulateMMUHandles(mapCmd.mmu_hdls,
&mapCmd.num_hdl,
pDeviceIndices,
deviceCount,
CSLFlags);
}
}
....
if (CamxResultSuccess == result)
{ // Convert CSL flags into the corresponding KMD flags
result = CSLHwMapCSLAllocFlagsToKMD(CSLFlags, &mapCmd.flags);
}
if (CamxResultSuccess == result)
{
g_CSLHwInstance.allocLock->Lock();
// Call the actual IOCTL here
// Pass the graphics buffer's fd down to the kernel through the ioctl
mapCmd.fd = bufferFD;
result = pDeviceOp->Ioctl2(&g_CSLHwInstance.requestManager, CAM_REQ_MGR_MAP_BUF,
&mapCmd, 0, sizeof(mapCmd));
CSLBufferInfo* pLocalBuffInfo = NULL;
if (CamxResultSuccess == result)
{
// This API adds an entry into CSL's internal data structures
// Fill pLocalBuffInfo with the mapped mapCmd.out.buf_handle and the
// graphics buffer handle fd, etc.
result = CSLHwAddBuffer(mapCmd.out.buf_handle,
bufferFD,/*graphics buffer handle fd*/
bufferLength,
&pLocalBuffInfo/*output*/,
CSLFlags,
TRUE);
// Now we do a deep copy
// Copy pLocalBuffInfo into the function output pBufferInfo
if (CamxResultSuccess == result)
{
CamX::Utils::Memcpy(pBufferInfo, pLocalBuffInfo, sizeof(*pLocalBuffInfo));
UINT idx = CAM_MEM_MGR_GET_HDL_IDX(mapCmd.out.buf_handle);
g_CSLHwInstance.memManager.bufferInfo[idx].refcount++;
}
}
g_CSLHwInstance.allocLock->Unlock();
}
CSLHwInstancePutRefCount();
}
}
...
return result;
}
總結下上述分析的流程
1、CameraService 通過Surface::dequeueBuffer獲取了ANativeWindowBuffer型graphics buffer
2、graphics buffer 在CameraService、CameraProvider、HAL中是以graphics buffer handle形式傳遞的
3、graphics buffer在HAL層通過ioctl傳遞給Camera Kernel供kernel使用
3. graphics buffer歸還流程
camera kernel在收到幀數據後(kernel具體流程以後再研究),會觸發fence,通過CSLFenceCallback回調幀數據給camera HAL。
CSLFenceCallback的註冊之前也提到過,是在Node::SetupRequestOutputPorts中註冊給kernel的
//vendor\qcom\proprietary\camx\src\core\camxnode.cpp
// Sets up the output ports of a request (excerpt): creates a private CSL
// fence per output port and registers an async-wait callback so the HAL is
// notified when the kernel/HW signals the fence.
CamxResult Node::SetupRequestOutputPorts(
PerBatchedFrameInfo* pPerBatchedFrameInfo)
{
....
// Create the fence synchronization object for this output port.
result = CSLCreatePrivateFence("NodeOutputPortFence", &hNewFence);
......
// Register the newly created hNewFence with the kernel. When the kernel
// finishes its work (e.g. IPE completes processing a frame) it triggers
// this fence, and the registered CSLFenceCallback is invoked back in the HAL.
result = CSLFenceAsyncWait(hNewFence,
Node::CSLFenceCallback,
&pFenceHandlerData->nodeCSLFenceCallbackData);
}
CSLFenceCallback代碼如下:
//vendor\qcom\proprietary\camx\src\core\camxnode.cpp
// Fence-signal callback invoked when the kernel triggers a fence registered
// via CSLFenceAsyncWait. Recovers the per-fence handler data and posts a job
// so the result is processed asynchronously (in Node::ProcessFenceCallback).
VOID Node::CSLFenceCallback(
VOID* pNodePrivateFenceData,
CSLFence hSyncFence,
CSLFenceResult fenceResult)
{
CamxResult result = CamxResultSuccess;
FenceCallbackData* pFenceCallbackData = static_cast<FenceCallbackData*>(pNodePrivateFenceData);
Node* pNode = pFenceCallbackData->pNode;
NodeFenceHandlerData* pNodeFenceHandlerData = NULL;
pNodeFenceHandlerData = static_cast<NodeFenceHandlerData*>(pFenceCallbackData->pNodePrivateData);
.....
// NOTE(review): the "Fence %d" field below is filled with fenceResult, not
// hSyncFence — which is why the sample log reads "Fence 0 signaled ...".
CAMX_LOG_INFO(CamxLogGroupCore,
"Node:%d [%s] InstanceID:%d Fence %d signaled with success in node fence handler FOR %llu",
pNode->Type(),
pNode->m_pNodeName,
pNode->InstanceID(),
fenceResult,
pNodeFenceHandlerData->requestId);
....
VOID* pData[] = { pFenceCallbackData, NULL };
// Post a job toward the HAL session layer (camxsession.cpp) for async handling.
result = pNode->GetThreadManager()->PostJob(pNode->GetJobFamilyHandle(), NULL, &pData[0], FALSE, FALSE);
}
CSLFenceCallback觸發後打印的IPE Node打印的log如下:
"Node:65538 [IPE] InstanceID:0 Fence 0 signaled with success in node fence handler FOR 2"
對應的處理函數爲Node::ProcessFenceCallback(註冊在Session::FinalizePipeline函數中RegisterJobFamily)
// Asynchronous handler for a signaled output-port fence (registered as a job
// family in Session::FinalizePipeline via RegisterJobFamily). For sink-buffer
// ports it releases the ImageBuffer (the HAL buffer can be unmapped) and
// reports the filled graphics buffer handle up to the pipeline. (Excerpt —
// the quoted function is truncated here.)
VOID Node::ProcessFenceCallback(
NodeFenceHandlerData* pFenceHandlerData)
{
...
OutputPort* pOutputPort = pFenceHandlerData->pOutputPort; // Output port to which the fence belongs to
UINT64 requestId = pFenceHandlerData->requestId;
...
// Only do processing if we havent already signalled the fence (for failure cases)
if (TRUE == CamxAtomicCompareExchangeU(&pFenceHandlerData->isFenceSignaled, 0, 1))
{
...
// Optional second-stage processing / dumping of the frame data happens here.
WatermarkImage(pFenceHandlerData);
DumpData(pFenceHandlerData);
...
if (TRUE == pOutputPort->flags.isSinkBuffer)
{
for (UINT i = 0; i < numBatchedFrames; i++)
{
CAMX_LOG_DRQ("Reporting sink fence callback for Fence (%d) node: %s:%d, pipeline: %d, seqId: %d, request: %llu",
static_cast<INT32>(pFenceHandlerData->hFence),
m_pNodeName,
InstanceID(),
GetPipelineId(),
pFenceHandlerData->outputBufferInfo[i].sequenceId,
pFenceHandlerData->requestId);
// The frame has been fully generated, so the graphics buffer is unmapped now.
// HAL buffer can now be unmapped since the HW is done generating the output
pFenceHandlerData->outputBufferInfo[i].pImageBuffer->Release(FALSE);
// Notify m_pPipeline that this sink port's fence was signaled.
m_pPipeline->SinkPortFenceSignaled(pOutputPort->sinkTargetStreamId,
pFenceHandlerData->outputBufferInfo[i].sequenceId,
pFenceHandlerData->requestId,
// the graphics buffer's native handle
pFenceHandlerData->outputBufferInfo[i].phNativeHandle,
pFenceHandlerData->fenceResult);
}
以IPE爲例,其在ProcessFenceCallback中打印的Log爲
Reporting sink fence callback for Fence (16) node: IPE:65538 , pipeline: 0, seqId: 0, request: 1
//vendor\qcom\proprietary\camx\src\core\camxpipeline.cpp
// Called by a node when a sink port's fence is signaled: wraps the filled
// graphics buffer handle into a ResultsData (CbType::Buffer payload) and
// forwards it to the owning session.
VOID Pipeline::SinkPortFenceSignaled(
UINT sinkPortStreamId,
UINT32 sequenceId,
UINT64 requestId,
BufferHandle* phHALBuffer,
CSLFenceResult fenceResult)
{
ResultsData resultsData = {};
...
resultsData.type = CbType::Buffer;
// Wrap the graphics buffer handle (BufferHandle) into the ResultsData
// buffer payload together with its sequence and stream ids.
resultsData.cbPayload.buffer.sequenceId = sequenceId;
resultsData.cbPayload.buffer.streamId = sinkPortStreamId;
resultsData.cbPayload.buffer.phBuffer = phHALBuffer;
...
m_pSession->NotifyResult(&resultsData);
}
// Dispatches a pipeline result by payload type (excerpt: only the Buffer
// case of the switch is shown here).
VOID Session::NotifyResult(
ResultsData* pResultsData)
{
case CbType::Buffer:
// Forward the buffer payload (type CbPayloadBuffer) down the chain.
HandleBufferCb(&pResultsData->cbPayload.buffer, pResultsData->pipelineIndex,
pResultsData->pPrivData);
break;
}
// Converts a CbPayloadBuffer into a ChiStreamBuffer and injects it into the
// session's pending-result list (m_resultHolderList).
VOID Session::HandleBufferCb(
CbPayloadBuffer* pPayload,
UINT pipelineIndex,
VOID* pPrivData)
{
ChiStreamBuffer outBuffer = { 0 };
....
// Hand the graphics buffer handle over to outBuffer:
// this is where CbPayloadBuffer is converted into a ChiStreamBuffer.
outBuffer.phBuffer = pPayload->phBuffer;
outBuffer.bufferStatus = BufferStatusOK;
outBuffer.releaseFence = -1; // For the moment
....
// Insert the frame result into m_resultHolderList.
InjectResult(ResultType::BufferOK, &outBuffer, pPayload->sequenceId, pPrivData);
}
幀數據插入m_resultHolderList具體實現
// Stores a completed result (here: an output ChiStreamBuffer) into the
// ResultHolder matching its sequence id, then posts a job so results are
// processed asynchronously (Session::ProcessResults).
CamxResult Session::InjectResult(
ResultType resultType,
// pPayload is the outBuffer (a ChiStreamBuffer) for ResultType::BufferOK.
VOID* pPayload,
UINT32 sequenceId,
VOID* pPrivData)
{
// Look up the ResultHolder for this frame in m_resultHolderList.
ResultHolder* pHolder = GetResultHolderBySequenceId(sequenceId);
.....
else if (ResultType::BufferOK == resultType)
{
ChiStreamBuffer* pBuffer = static_cast<ChiStreamBuffer*>(pPayload);
ChiStream* pStream = pBuffer->pStream;
....
if (MaxNumOutputBuffers != streamIndex)
{
....
// Accept the buffer only if it matches the stream and buffer handle
// recorded for this slot when the request was submitted.
if (pHolder->bufferHolder[streamIndex].pBuffer->pStream == pStream &&
pHolder->bufferHolder[streamIndex].pBuffer->phBuffer == pBuffer->phBuffer)
{
// Copy outBuffer into pHolder->bufferHolder,
// i.e. into m_resultHolderList.
Utils::Memcpy(pHolder->bufferHolder[streamIndex].pBuffer,
pBuffer,
sizeof(ChiStreamBuffer));
pHolder->bufferHolder[streamIndex].valid = TRUE;
}
}
}
...
// Trigger asynchronous result processing on the session job family.
VOID* pData[] = { this, NULL };
result = m_pThreadManager->PostJob(m_hJobFamilyHandle, NULL, &pData[0], FALSE, FALSE);
}
幀數據插入m_resultHolderList後觸發異步處理PostJob,異步處理函數爲Session::ProcessResults
//vendor\qcom\proprietary\camx\src\core\camxsession.cpp
// Asynchronous job posted from InjectResult: walks m_resultHolderList,
// converts ready ResultHolders into ChiCaptureResult entries in
// m_pCaptureResult, and dispatches them toward the framework.
CamxResult Session::ProcessResults()
{
CamxResult result = CamxResultSuccess;
UINT32 i = 0;
UINT32 numResults = 0;
ResultHolder* pResultHolder = NULL;
SessionResultHolder* pSessionResultHolder = NULL;
...
// Iterate over the ResultHolders queued in m_resultHolderList.
LightweightDoublyLinkedListNode* pNode = m_resultHolderList.Head();
while (NULL != pNode)
{
if (NULL != pNode->pData)
{
pSessionResultHolder = reinterpret_cast<SessionResultHolder*>(pNode->pData);
for (i = 0; i < pSessionResultHolder->numResults; i++)
{
pResultHolder = &(pSessionResultHolder->resultHolders[i]);
if (NULL != pResultHolder)
{
metadataReady = ProcessResultMetadata(pResultHolder, &numResults);
// Copy the holder's values into the ChiCaptureResult array
// m_pCaptureResult; the graphics buffer handle ends up inside
// ChiCaptureResult::pOutputBuffers.
bufferReady = ProcessResultBuffers(pResultHolder, metadataReady, &numResults);
}
}
}
}
if (numResults > 0)
{
// Finally dispatch all the results to the Framework:
// forward the assembled m_pCaptureResult entries up the callback chain.
DispatchResults(&m_pCaptureResult[0], numResults);
}
....
return result;
}
異步處理函數Session::ProcessResults從m_resultHolderList中獲取幀數據賦值給m_pCaptureResult,
然後繼續回調幀數據。
先看一下Session::ProcessResults如何將m_resultHolderList中的信息賦值給m_pCaptureResult
// Copies the buffers of a ready ResultHolder into the next free
// pOutputBuffers slot of the current ChiCaptureResult (m_pCaptureResult).
// Returns whether a dispatchable result was produced. (Excerpt.)
BOOL Session::ProcessResultBuffers(
ResultHolder* pResultHolder,
BOOL metadataAvailable,
UINT* pNumResults)
{
ChiCaptureResult* pResult = &m_pCaptureResult[currentResult];
ChiStreamBuffer* pStreamBuffer =const_cast<ChiStreamBuffer*>(&pResult->pOutputBuffers[pResult->numOutputBuffers]);
....
Utils::Memcpy(pStreamBuffer, pResultHolder->bufferHolder[bufIndex].pBuffer, sizeof(ChiStreamBuffer));
.....
return gotResult;
}
流程如下:
CHISession::DispatchResults
AdvancedCameraUsecase::ProcessResultCb
AdvancedCameraUsecase::ProcessResult
CameraUsecaseBase::SessionCbCaptureResult
CameraUsecaseBase::SessionProcessResult
下邊介紹下SessionProcessResult
SessionProcessResult
//vendor\qcom\proprietary\chi-cdk\vendor\chioverride\default\chxadvancedcamerausecase.cpp
// CHI override layer: receives a ChiCaptureResult from the session, copies
// its output buffers into the per-frame camera3_capture_result_t slot
// (m_captureResult), and — once app-visible results are available — forwards
// them toward the framework via ProcessAndReturnFinishedResults.
VOID CameraUsecaseBase::SessionProcessResult(
ChiCaptureResult* pResult,
const SessionPrivateData* pSessionPrivateData)
{
UINT32 resultFrameNum = pResult->frameworkFrameNum;
UINT32 resultFrameIndex = resultFrameNum % MaxOutstandingRequests;
BOOL isAppResultsAvailable = FALSE;
// Reinterpret the ChiCaptureResult as a camera3_capture_result_t
// (the two layouts are treated as compatible at this layer).
camera3_capture_result_t* pInternalResult = reinterpret_cast<camera3_capture_result_t*>(pResult);
// Fetch element resultFrameIndex of the member array m_captureResult
// (type camera3_capture_result_t); it is then updated from pResult below.
camera3_capture_result_t* pUsecaseResult = this->GetCaptureResult(resultFrameIndex);
...
// Fill all the info in m_captureResult and call ProcessAndReturnFinishedResults to send the meta
// callback in sequence
// Copy the output_buffers info from the ChiCaptureResult into m_captureResult.
m_pAppResultMutex->Lock();
for (UINT i = 0; i < pResult->numOutputBuffers; i++)
{
camera3_stream_buffer_t* pResultBuffer =
const_cast<camera3_stream_buffer_t*>(&pUsecaseResult->output_buffers[i + pUsecaseResult->num_output_buffers]);
ChxUtils::Memcpy(pResultBuffer, &pResult->pOutputBuffers[i], sizeof(camera3_stream_buffer_t));
isAppResultsAvailable = TRUE;
}
pUsecaseResult->num_output_buffers += pResult->numOutputBuffers;
m_pAppResultMutex->Unlock();
.....
if (TRUE == isAppResultsAvailable)
{ // Continue forwarding m_captureResult via ProcessAndReturnFinishedResults.
ProcessAndReturnFinishedResults();
}
}
通過ProcessAndReturnFinishedResults繼續回調m_captureResult
//vendor\qcom\proprietary\chi-cdk\vendor\chioverride\default\chxadvancedcamerausecase.cpp
// Builds a framework-facing camera3_capture_result_t from the accumulated
// m_captureResult slot for this frame and returns it to the framework via
// ReturnFrameworkResult. (Excerpt.)
VOID CameraUsecaseBase::ProcessAndReturnFinishedResults()
{
.....
camera3_capture_result_t result = { 0 };
result.frame_number = m_captureResult[frameIndex].frame_number;
result.num_output_buffers = m_captureResult[frameIndex].num_output_buffers;
result.output_buffers = m_captureResult[frameIndex].output_buffers;
....
ReturnFrameworkResult(&result, m_cameraId);
....
}
通過ReturnFrameworkResult繼續回調幀數據
// Forwards a finished capture result to the ExtensionModule singleton,
// which returns it to the camera framework (CameraProvider).
VOID Usecase::ReturnFrameworkResult(
const camera3_capture_result_t* pResult, UINT32 cameraID)
{
camera3_capture_result_t* pOverrideResult = const_cast<camera3_capture_result_t*>(pResult);
....
ExtensionModule::GetInstance()->ReturnFrameworkResult(reinterpret_cast<const camera3_capture_result_t*>(pOverrideResult),
cameraID);
}
通過 ExtensionModule::GetInstance()->ReturnFrameworkResult繼續回調幀數據
// Final HAL-side hop: invokes the process_capture_result callback registered
// in m_pHALOps, returning the capture result (and with it the graphics
// buffer handle) to CameraProvider.
VOID ExtensionModule::ReturnFrameworkResult(
const camera3_capture_result_t* pResult,
UINT32 cameraID)
{
// Call back up to CameraProvider through m_pHALOps for this camera id.
m_pHALOps[cameraID]->process_capture_result(m_logicalCameraInfo[cameraID].m_pCamera3Device, pResult);
}
總體流程大約如下:
hal回調會provider的大體流程:
//camxsession.cpp
:Session::ProcessResults()
//camxchisession.cpp
CHISession::DispatchResults(ChiCaptureResult*, unsigned int)
//chxadvancedcamerausecase.h
AdvancedCameraUsecase::ProcessResultCb(ChiCaptureResult*, void*)
//chxadvancedcamerausecase.cpp
AdvancedCameraUsecase::ProcessResult(ChiCaptureResult*, void*)
//chxadvancedcamerausecase.cpp
CameraUsecaseBase::SessionCbCaptureResult(ChiCaptureResult*, void*)
//chxadvancedcamerausecase.cpp
CameraUsecaseBase::SessionProcessResult(ChiCaptureResult*, SessionPrivateData const*)
//chxadvancedcamerausecase.cpp
//這裏會調用兩次Usecase::ReturnFrameworkResult,一次傳metadata,一次buffer
CameraUsecaseBase::ProcessAndReturnFinishedResults()
//chxusecase.cpp
Usecase::ReturnFrameworkResult(camera3_capture_result const*, unsigned int)
//chxextensionmodule.cpp
ExtensionModule::ReturnFrameworkResult(camera3_capture_result const*, unsigned int)
//camxhaldevice.cpp
CamX::HALDevice::ProcessCaptureResult(CamX::Camera3Device const*, CamX::Camera3CaptureResult const*)
//camxhal3.cpp
CamX::process_capture_result(camera3_callback_ops const*, camera3_capture_result const*)
///CameraDeviceSession.cpp
CameraDeviceSession::sProcessCaptureResult(camera3_callback_ops const*, camera3_capture_result const*)
流程圖如下:
CameraProvider到CameraService的歸還流程不再繼續研究了,只是一些指針傳遞流程,
該graphics buffer最終在CameraService中通過Surface::queueBuffer歸還到SurfaceFlinger進程
CameraService歸還代碼如下:
//frameworks\av\services\camera\libcameraservice\device3\Camera3OutputStream.cpp
// Returns a filled output buffer to its consumer: duplicates the HAL's
// release fence, recovers the ANativeWindowBuffer from the buffer handle,
// and queues it back to the consumer Surface (toward SurfaceFlinger).
status_t Camera3OutputStream::returnBufferCheckedLocked(
const camera3_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
/*out*/
sp<Fence> *releaseFenceOut) {
status_t res;
// Fence management - always honor release fence from HAL
sp<Fence> releaseFence = new Fence(buffer.release_fence);
int anwReleaseFence = releaseFence->dup();
// Recover the ANativeWindowBuffer (anwBuffer) from the camera3_stream_buffer
// via container_of on its handle field.
ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
/**
* Return buffer back to ANativeWindow
*/
// Queue anwBuffer back through currentConsumer
// (currentConsumer is in fact the Surface object).
res = queueBufferToConsumer(currentConsumer, anwBuffer, anwReleaseFence);
*releaseFenceOut = releaseFence;
return res;
}
囉裏囉唆一大堆,現在總結一下
在HAL層中,在Fence觸發後,kernel通過回調CSLFenceCallback將含有幀數據的graphics buffer的handle回調給HAL層 ,
代碼如下:
m_pPipeline->SinkPortFenceSignaled(pOutputPort->sinkTargetStreamId,
pFenceHandlerData->outputBufferInfo[i].sequenceId,
pFenceHandlerData->requestId,
//graphics buffer的handle,類型爲BufferHandle
pFenceHandlerData->outputBufferInfo[i].phNativeHandle,
pFenceHandlerData->fenceResult);
pFenceHandlerData->outputBufferInfo[i].phNativeHandle類型爲
//vendor\qcom\proprietary\camx\src\core\hal\camxcommontypes.h
// CamX's buffer handle type; layout-identical to Android's native_handle
// (see system/core/include/cutils/native_handle.h).
struct NativeHandle
{
INT version; // sizeof(NativeHandle)
INT numFDs; // number of file descriptors at &data[0]
INT numInts; // number of ints at &data[numFDs]
INT data[0]; // numFDs FDs followed by numInts ints
};
和Android 的native_handle是完全一樣的
//system/core/include/cutils/native_handle.h
/* Android's generic buffer handle: a version/count header followed by
numFds file descriptors and numInts opaque integers in data[]. */
typedef struct native_handle
{ int version; /* sizeof(native_handle_t) */
int numFds; /* number of file-descriptors at &data[0] */
int numInts; /* number of ints at &data[numFds] */
int data[0]; /* numFds + numInts ints */
} native_handle_t;
該graphics buffer經過HAL層、CameraProvider、CameraService,最終在CameraService中通過Surface::queueBuffer歸還到SurfaceFlinger
//從camera3_stream_buffer中獲取ANativeWindowBuffer型anwBuffer
ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
/**
* Return buffer back to ANativeWindow
*/
//通過currentConsumer歸還anwBuffer
//其實currentConsumer就是Surface類對象
currentConsumer->queueBuffer(consumer.get(), anwBuffer, anwReleaseFence);
至此完成了graphics buffer在Android Camera子系統中的申請、傳遞、歸還流程的分析