diff options
Diffstat (limited to 'camera/QCamera2/HAL3')
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3Channel.cpp | 4774 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3Channel.h | 628 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp | 272 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3CropRegionMapper.h | 65 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3HALHeader.h | 96 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3HWI.cpp | 10672 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3HWI.h | 536 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3Mem.cpp | 1199 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3Mem.h | 158 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3PostProc.cpp | 3142 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3PostProc.h | 192 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3Stream.cpp | 1527 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3Stream.h | 170 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3StreamMem.cpp | 477 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3StreamMem.h | 97 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3VendorTags.cpp | 429 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/QCamera3VendorTags.h | 223 | ||||
-rw-r--r-- | camera/QCamera2/HAL3/android/QCamera3External.h | 47 |
18 files changed, 24704 insertions, 0 deletions
diff --git a/camera/QCamera2/HAL3/QCamera3Channel.cpp b/camera/QCamera2/HAL3/QCamera3Channel.cpp new file mode 100644 index 0000000..ff4ceca --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3Channel.cpp @@ -0,0 +1,4774 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+* +*/ + +#define LOG_TAG "QCamera3Channel" + +// To remove +#include <cutils/properties.h> + +// System dependencies +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include "gralloc.h" + +// Camera dependencies +#include "QCamera3Channel.h" +#include "QCamera3HWI.h" +#include "QCameraTrace.h" + +extern "C" { +#include "mm_camera_dbg.h" +} + +using namespace android; + +namespace qcamera { +#define IS_BUFFER_ERROR(x) (((x) & V4L2_BUF_FLAG_ERROR) == V4L2_BUF_FLAG_ERROR) + +/*=========================================================================== + * FUNCTION : QCamera3Channel + * + * DESCRIPTION: constrcutor of QCamera3Channel + * + * PARAMETERS : + * @cam_handle : camera handle + * @cam_ops : ptr to camera ops table + * + * RETURN : none + *==========================================================================*/ +QCamera3Channel::QCamera3Channel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + cam_feature_mask_t postprocess_mask, + void *userData, uint32_t numBuffers) +{ + m_camHandle = cam_handle; + m_handle = channel_handle; + m_camOps = cam_ops; + m_bIsActive = false; + + m_numStreams = 0; + memset(mStreams, 0, sizeof(mStreams)); + mUserData = userData; + + mStreamInfoBuf = NULL; + mChannelCB = cb_routine; + mPaddingInfo = *paddingInfo; + mPaddingInfo.offset_info.offset_x = 0; + mPaddingInfo.offset_info.offset_y = 0; + + mPostProcMask = postprocess_mask; + + mIsType = IS_TYPE_NONE; + mNumBuffers = numBuffers; + mPerFrameMapUnmapEnable = true; + dumpFrmCnt = 0; +} + +/*=========================================================================== + * FUNCTION : ~QCamera3Channel + * + * DESCRIPTION: destructor of QCamera3Channel + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +QCamera3Channel::~QCamera3Channel() +{ +} + 
+/*=========================================================================== + * FUNCTION : destroy + * + * DESCRIPTION: internal destructor of QCamera3Channel called by the subclasses + * this destructor will call pure virtual functions. stop will eventuall call + * QCamera3Stream::putBufs. The putBufs function will + * call QCamera3Channel::putStreamBufs which is pure virtual + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +void QCamera3Channel::destroy() +{ + if (m_bIsActive) + stop(); + + for (uint32_t i = 0; i < m_numStreams; i++) { + if (mStreams[i] != NULL) { + delete mStreams[i]; + mStreams[i] = 0; + } + } + m_numStreams = 0; +} + +/*=========================================================================== + * FUNCTION : addStream + * + * DESCRIPTION: add a stream into channel + * + * PARAMETERS : + * @streamType : stream type + * @streamFormat : stream format + * @streamDim : stream dimension + * @streamRotation : rotation of the stream + * @minStreamBufNum : minimal buffer count for particular stream type + * @postprocessMask : post-proccess feature mask + * @isType : type of image stabilization required on the stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::addStream(cam_stream_type_t streamType, + cam_format_t streamFormat, + cam_dimension_t streamDim, + cam_rotation_t streamRotation, + uint8_t minStreamBufNum, + cam_feature_mask_t postprocessMask, + cam_is_type_t isType, + uint32_t batchSize) +{ + int32_t rc = NO_ERROR; + + if (m_numStreams >= 1) { + LOGE("Only one stream per channel supported in v3 Hal"); + return BAD_VALUE; + } + + if (m_numStreams >= MAX_STREAM_NUM_IN_BUNDLE) { + LOGE("stream number (%d) exceeds max limit (%d)", + m_numStreams, MAX_STREAM_NUM_IN_BUNDLE); + return BAD_VALUE; + } + QCamera3Stream 
*pStream = new QCamera3Stream(m_camHandle, + m_handle, + m_camOps, + &mPaddingInfo, + this); + if (pStream == NULL) { + LOGE("No mem for Stream"); + return NO_MEMORY; + } + LOGD("batch size is %d", batchSize); + + rc = pStream->init(streamType, streamFormat, streamDim, streamRotation, + NULL, minStreamBufNum, postprocessMask, isType, batchSize, + streamCbRoutine, this); + if (rc == 0) { + mStreams[m_numStreams] = pStream; + m_numStreams++; + } else { + delete pStream; + } + return rc; +} + +/*=========================================================================== + * FUNCTION : start + * + * DESCRIPTION: start channel, which will start all streams belong to this channel + * + * PARAMETERS : + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::start() +{ + ATRACE_CALL(); + int32_t rc = NO_ERROR; + + if (m_numStreams > 1) { + LOGW("bundle not supported"); + } else if (m_numStreams == 0) { + return NO_INIT; + } + + if(m_bIsActive) { + LOGW("Attempt to start active channel"); + return rc; + } + + for (uint32_t i = 0; i < m_numStreams; i++) { + if (mStreams[i] != NULL) { + mStreams[i]->start(); + } + } + + m_bIsActive = true; + + return rc; +} + +/*=========================================================================== + * FUNCTION : stop + * + * DESCRIPTION: stop a channel, which will stop all streams belong to this channel + * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::stop() +{ + ATRACE_CALL(); + int32_t rc = NO_ERROR; + if(!m_bIsActive) { + LOGE("Attempt to stop inactive channel"); + return rc; + } + + for (uint32_t i = 0; i < m_numStreams; i++) { + if (mStreams[i] != NULL) { + mStreams[i]->stop(); + } + } + + m_bIsActive = false; + return rc; 
+} + +/*=========================================================================== + * FUNCTION : setBatchSize + * + * DESCRIPTION: Set batch size for the channel. This is a dummy implementation + * for the base class + * + * PARAMETERS : + * @batchSize : Number of image buffers in a batch + * + * RETURN : int32_t type of status + * NO_ERROR -- success always + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::setBatchSize(uint32_t batchSize) +{ + LOGD("Dummy method. batchSize: %d unused ", batchSize); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : queueBatchBuf + * + * DESCRIPTION: This is a dummy implementation for the base class + * + * PARAMETERS : + * + * RETURN : int32_t type of status + * NO_ERROR -- success always + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::queueBatchBuf() +{ + LOGD("Dummy method. 
Unused "); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : setPerFrameMapUnmap + * + * DESCRIPTION: Sets internal enable flag + * + * PARAMETERS : + * @enable : Bool value for the enable flag + * + * RETURN : int32_t type of status + * NO_ERROR -- success always + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::setPerFrameMapUnmap(bool enable) +{ + mPerFrameMapUnmapEnable = enable; + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : flush + * + * DESCRIPTION: flush a channel + * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::flush() +{ + ATRACE_CALL(); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : bufDone + * + * DESCRIPTION: return a stream buf back to kernel + * + * PARAMETERS : + * @recvd_frame : stream buf frame to be returned + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Channel::bufDone(mm_camera_super_buf_t *recvd_frame) +{ + int32_t rc = NO_ERROR; + for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) { + if (recvd_frame->bufs[i] != NULL) { + for (uint32_t j = 0; j < m_numStreams; j++) { + if (mStreams[j] != NULL && + mStreams[j]->getMyHandle() == recvd_frame->bufs[i]->stream_id) { + rc = mStreams[j]->bufDone(recvd_frame->bufs[i]->buf_idx); + break; // break loop j + } + } + } + } + + return rc; +} + +int32_t QCamera3Channel::setBundleInfo(const cam_bundle_config_t &bundleInfo) +{ + int32_t rc = NO_ERROR; + cam_stream_parm_buffer_t param; + memset(¶m, 0, 
sizeof(cam_stream_parm_buffer_t)); + param.type = CAM_STREAM_PARAM_TYPE_SET_BUNDLE_INFO; + param.bundleInfo = bundleInfo; + rc = mStreams[0]->setParameter(param); + if (rc != NO_ERROR) { + LOGE("stream setParameter for set bundle failed"); + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : getStreamTypeMask + * + * DESCRIPTION: Get bit mask of all stream types in this channel + * + * PARAMETERS : None + * + * RETURN : Bit mask of all stream types in this channel + *==========================================================================*/ +uint32_t QCamera3Channel::getStreamTypeMask() +{ + uint32_t mask = 0; + for (uint32_t i = 0; i < m_numStreams; i++) { + mask |= (1U << mStreams[i]->getMyType()); + } + return mask; +} + +/*=========================================================================== + * FUNCTION : getStreamID + * + * DESCRIPTION: Get StreamID of requested stream type + * + * PARAMETERS : streamMask + * + * RETURN : Stream ID + *==========================================================================*/ +uint32_t QCamera3Channel::getStreamID(uint32_t streamMask) +{ + uint32_t streamID = 0; + for (uint32_t i = 0; i < m_numStreams; i++) { + if (streamMask == (uint32_t )(0x1 << mStreams[i]->getMyType())) { + streamID = mStreams[i]->getMyServerID(); + break; + } + } + return streamID; +} + +/*=========================================================================== + * FUNCTION : getStreamByHandle + * + * DESCRIPTION: return stream object by stream handle + * + * PARAMETERS : + * @streamHandle : stream handle + * + * RETURN : stream object. 
NULL if not found + *==========================================================================*/ +QCamera3Stream *QCamera3Channel::getStreamByHandle(uint32_t streamHandle) +{ + for (uint32_t i = 0; i < m_numStreams; i++) { + if (mStreams[i] != NULL && mStreams[i]->getMyHandle() == streamHandle) { + return mStreams[i]; + } + } + return NULL; +} + +/*=========================================================================== + * FUNCTION : getStreamByIndex + * + * DESCRIPTION: return stream object by index + * + * PARAMETERS : + * @streamHandle : stream handle + * + * RETURN : stream object. NULL if not found + *==========================================================================*/ +QCamera3Stream *QCamera3Channel::getStreamByIndex(uint32_t index) +{ + if (index < m_numStreams) { + return mStreams[index]; + } + return NULL; +} + +/*=========================================================================== + * FUNCTION : streamCbRoutine + * + * DESCRIPTION: callback routine for stream + * + * PARAMETERS : + * @streamHandle : stream handle + * + * RETURN : stream object. 
NULL if not found + *==========================================================================*/ +void QCamera3Channel::streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream, void *userdata) +{ + QCamera3Channel *channel = (QCamera3Channel *)userdata; + if (channel == NULL) { + LOGE("invalid channel pointer"); + return; + } + channel->streamCbRoutine(super_frame, stream); +} + +/*=========================================================================== + * FUNCTION : dumpYUV + * + * DESCRIPTION: function to dump the YUV data from ISP/pproc + * + * PARAMETERS : + * @frame : frame to be dumped + * @dim : dimension of the stream + * @offset : offset of the data + * @name : 1 if it is ISP output/pproc input, 2 if it is pproc output + * + * RETURN : + *==========================================================================*/ +void QCamera3Channel::dumpYUV(mm_camera_buf_def_t *frame, cam_dimension_t dim, + cam_frame_len_offset_t offset, uint8_t dump_type) +{ + char buf[FILENAME_MAX]; + memset(buf, 0, sizeof(buf)); + static int counter = 0; + char prop[PROPERTY_VALUE_MAX]; + property_get("persist.camera.dumpimg", prop, "0"); + mYUVDump = (uint8_t) atoi(prop); + if (mYUVDump & dump_type) { + frm_num = ((mYUVDump & 0xffff0000) >> 16); + if (frm_num == 0) { + frm_num = 10; + } + if (frm_num > 256) { + frm_num = 256; + } + skip_mode = ((mYUVDump & 0x0000ff00) >> 8); + if (skip_mode == 0) { + skip_mode = 1; + } + if (mDumpSkipCnt == 0) { + mDumpSkipCnt = 1; + } + if (mDumpSkipCnt % skip_mode == 0) { + if (dumpFrmCnt <= frm_num) { + /* Note that the image dimension will be the unrotated stream dimension. 
+ * If you feel that the image would have been rotated during reprocess + * then swap the dimensions while opening the file + * */ + switch (dump_type) { + case QCAMERA_DUMP_FRM_PREVIEW: + snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"p_%d_%d_%dx%d.yuv", + counter, frame->frame_idx, dim.width, dim.height); + break; + case QCAMERA_DUMP_FRM_VIDEO: + snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"v_%d_%d_%dx%d.yuv", + counter, frame->frame_idx, dim.width, dim.height); + break; + case QCAMERA_DUMP_FRM_SNAPSHOT: + snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"s_%d_%d_%dx%d.yuv", + counter, frame->frame_idx, dim.width, dim.height); + break; + case QCAMERA_DUMP_FRM_INPUT_REPROCESS: + snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"ir_%d_%d_%dx%d.yuv", + counter, frame->frame_idx, dim.width, dim.height); + break; + case QCAMERA_DUMP_FRM_CALLBACK: + snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"c_%d_%d_%dx%d.yuv", + counter, frame->frame_idx, dim.width, dim.height); + break; + default : + LOGE("dumping not enabled for stream type %d",dump_type); + break; + } + counter++; + int file_fd = open(buf, O_RDWR | O_CREAT, 0777); + ssize_t written_len = 0; + if (file_fd >= 0) { + void *data = NULL; + fchmod(file_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); + for (uint32_t i = 0; i < offset.num_planes; i++) { + uint32_t index = offset.mp[i].offset; + if (i > 0) { + index += offset.mp[i-1].len; + } + for (int j = 0; j < offset.mp[i].height; j++) { + data = (void *)((uint8_t *)frame->buffer + index); + written_len += write(file_fd, data, + (size_t)offset.mp[i].width); + index += (uint32_t)offset.mp[i].stride; + } + } + LOGH("written number of bytes %ld\n", written_len); + dumpFrmCnt++; + close(file_fd); + } else { + LOGE("failed to open file to dump image"); + } + } + } else { + mDumpSkipCnt++; + } + } +} + +/*=========================================================================== + * FUNCTION : isUBWCEnabled + * + * DESCRIPTION: Function to get UBWC 
hardware support. + * + * PARAMETERS : None + * + * RETURN : TRUE -- UBWC format supported + * FALSE -- UBWC is not supported. + *==========================================================================*/ +bool QCamera3Channel::isUBWCEnabled() +{ +#ifdef UBWC_PRESENT + char value[PROPERTY_VALUE_MAX]; + int prop_value = 0; + memset(value, 0, sizeof(value)); + property_get("debug.gralloc.gfx_ubwc_disable", value, "0"); + prop_value = atoi(value); + if (prop_value) { + return FALSE; + } + + //Disable UBWC if Eztune is enabled + //EzTune process CPP output frame and cannot understand UBWC. + memset(value, 0, sizeof(value)); + property_get("persist.camera.eztune.enable", value, "0"); + prop_value = atoi(value); + if (prop_value) { + return FALSE; + } + return TRUE; +#else + return FALSE; +#endif +} + +/*=========================================================================== + * FUNCTION : getStreamDefaultFormat + * + * DESCRIPTION: return default buffer format for the stream + * + * PARAMETERS : type : Stream type + * + ** RETURN : format for stream type + * + *==========================================================================*/ +cam_format_t QCamera3Channel::getStreamDefaultFormat(cam_stream_type_t type) +{ + cam_format_t streamFormat; + + switch (type) { + case CAM_STREAM_TYPE_PREVIEW: + if (isUBWCEnabled()) { + char prop[PROPERTY_VALUE_MAX]; + int pFormat; + memset(prop, 0, sizeof(prop)); + property_get("persist.camera.preview.ubwc", prop, "1"); + pFormat = atoi(prop); + if (pFormat == 1) { + streamFormat = CAM_FORMAT_YUV_420_NV12_UBWC; + } else { + streamFormat = CAM_FORMAT_YUV_420_NV12_VENUS; + } + } else { + streamFormat = CAM_FORMAT_YUV_420_NV12_VENUS; + } + break; + case CAM_STREAM_TYPE_VIDEO: + if (isUBWCEnabled()) { + char prop[PROPERTY_VALUE_MAX]; + int pFormat; + memset(prop, 0, sizeof(prop)); + property_get("persist.camera.video.ubwc", prop, "1"); + pFormat = atoi(prop); + if (pFormat == 1) { + streamFormat = CAM_FORMAT_YUV_420_NV12_UBWC; + } 
else { + streamFormat = CAM_FORMAT_YUV_420_NV12_VENUS; + } + } else { +#if VENUS_PRESENT + streamFormat = CAM_FORMAT_YUV_420_NV12_VENUS; +#else + streamFormat = CAM_FORMAT_YUV_420_NV12; +#endif + } + break; + case CAM_STREAM_TYPE_SNAPSHOT: + streamFormat = CAM_FORMAT_YUV_420_NV21; + break; + case CAM_STREAM_TYPE_CALLBACK: + streamFormat = CAM_FORMAT_YUV_420_NV21; + break; + case CAM_STREAM_TYPE_RAW: + streamFormat = CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG; + break; + default: + streamFormat = CAM_FORMAT_YUV_420_NV21; + break; + } + return streamFormat; +} + + +/* QCamera3ProcessingChannel methods */ + +/*=========================================================================== + * FUNCTION : QCamera3ProcessingChannel + * + * DESCRIPTION: constructor of QCamera3ProcessingChannel + * + * PARAMETERS : + * @cam_handle : camera handle + * @cam_ops : ptr to camera ops table + * @cb_routine : callback routine to frame aggregator + * @paddingInfo: stream padding info + * @userData : HWI handle + * @stream : camera3_stream_t structure + * @stream_type: Channel stream type + * @postprocess_mask: the postprocess mask for streams of this channel + * @metadataChannel: handle to the metadataChannel + * @numBuffers : number of max dequeued buffers + * RETURN : none + *==========================================================================*/ +QCamera3ProcessingChannel::QCamera3ProcessingChannel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + void *userData, + camera3_stream_t *stream, + cam_stream_type_t stream_type, + cam_feature_mask_t postprocess_mask, + QCamera3Channel *metadataChannel, + uint32_t numBuffers) : + QCamera3Channel(cam_handle, channel_handle, cam_ops, cb_routine, + paddingInfo, postprocess_mask, userData, numBuffers), + m_postprocessor(this), + mFrameCount(0), + mLastFrameCount(0), + mLastFpsTime(0), + mMemory(numBuffers), + mCamera3Stream(stream), + 
mNumBufs(CAM_MAX_NUM_BUFS_PER_STREAM), + mStreamType(stream_type), + mPostProcStarted(false), + mInputBufferConfig(false), + m_pMetaChannel(metadataChannel), + mMetaFrame(NULL), + mOfflineMemory(0), + mOfflineMetaMemory(numBuffers + (MAX_REPROCESS_PIPELINE_STAGES - 1), + false) +{ + char prop[PROPERTY_VALUE_MAX]; + property_get("persist.debug.sf.showfps", prop, "0"); + mDebugFPS = (uint8_t) atoi(prop); + + int32_t rc = m_postprocessor.init(&mMemory); + if (rc != 0) { + LOGE("Init Postprocessor failed"); + } +} + +/*=========================================================================== + * FUNCTION : ~QCamera3ProcessingChannel + * + * DESCRIPTION: destructor of QCamera3ProcessingChannel + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +QCamera3ProcessingChannel::~QCamera3ProcessingChannel() +{ + destroy(); + + int32_t rc = m_postprocessor.deinit(); + if (rc != 0) { + LOGE("De-init Postprocessor failed"); + } + + if (0 < mOfflineMetaMemory.getCnt()) { + mOfflineMetaMemory.deallocate(); + } + if (0 < mOfflineMemory.getCnt()) { + mOfflineMemory.unregisterBuffers(); + } + +} + +/*=========================================================================== + * FUNCTION : streamCbRoutine + * + * DESCRIPTION: + * + * PARAMETERS : + * @super_frame : the super frame with filled buffer + * @stream : stream on which the buffer was requested and filled + * + * RETURN : none + *==========================================================================*/ +void QCamera3ProcessingChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream) +{ + ATRACE_CALL(); + //FIXME Q Buf back in case of error? 
+ uint8_t frameIndex; + buffer_handle_t *resultBuffer; + int32_t resultFrameNumber; + camera3_stream_buffer_t result; + cam_dimension_t dim; + cam_frame_len_offset_t offset; + + memset(&dim, 0, sizeof(dim)); + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + if (checkStreamCbErrors(super_frame, stream) != NO_ERROR) { + LOGE("Error with the stream callback"); + return; + } + + frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx; + if(frameIndex >= mNumBufs) { + LOGE("Error, Invalid index for buffer"); + stream->bufDone(frameIndex); + return; + } + + if (mDebugFPS) { + showDebugFPS(stream->getMyType()); + } + stream->getFrameDimension(dim); + stream->getFrameOffset(offset); + if (stream->getMyType() == CAM_STREAM_TYPE_PREVIEW) { + dumpYUV(super_frame->bufs[0], dim, offset, QCAMERA_DUMP_FRM_PREVIEW); + } else if (stream->getMyType() == CAM_STREAM_TYPE_VIDEO) { + dumpYUV(super_frame->bufs[0], dim, offset, QCAMERA_DUMP_FRM_VIDEO); + } else if (stream->getMyType() == CAM_STREAM_TYPE_CALLBACK) { + dumpYUV(super_frame->bufs[0], dim, offset, QCAMERA_DUMP_FRM_CALLBACK); + } + ////Use below data to issue framework callback + resultBuffer = (buffer_handle_t *)mMemory.getBufferHandle(frameIndex); + resultFrameNumber = mMemory.getFrameNumber(frameIndex); + + result.stream = mCamera3Stream; + result.buffer = resultBuffer; + if (IS_BUFFER_ERROR(super_frame->bufs[0]->flags)) { + result.status = CAMERA3_BUFFER_STATUS_ERROR; + LOGW("CAMERA3_BUFFER_STATUS_ERROR for stream_type: %d", + mStreams[0]->getMyType()); + } else { + result.status = CAMERA3_BUFFER_STATUS_OK; + } + result.acquire_fence = -1; + result.release_fence = -1; + if(mPerFrameMapUnmapEnable) { + int32_t rc = stream->bufRelease(frameIndex); + if (NO_ERROR != rc) { + LOGE("Error %d releasing stream buffer %d", + rc, frameIndex); + } + + rc = mMemory.unregisterBuffer(frameIndex); + if (NO_ERROR != rc) { + LOGE("Error %d unregistering stream buffer %d", + rc, frameIndex); + } + } + + if (0 <= resultFrameNumber) { + if 
(mChannelCB) { + mChannelCB(NULL, &result, (uint32_t)resultFrameNumber, false, mUserData); + } + } else { + LOGE("Bad frame number"); + } + free(super_frame); + return; +} + +/*=========================================================================== + * FUNCTION : putStreamBufs + * + * DESCRIPTION: release the buffers allocated to the stream + * + * PARAMETERS : NONE + * + * RETURN : NONE + *==========================================================================*/ +void QCamera3YUVChannel::putStreamBufs() +{ + QCamera3ProcessingChannel::putStreamBufs(); + + // Free allocated heap buffer. + mMemory.deallocate(); + // Clear free heap buffer list. + mFreeHeapBufferList.clear(); + // Clear offlinePpInfoList + mOfflinePpInfoList.clear(); +} + +/*=========================================================================== + * FUNCTION : request + * + * DESCRIPTION: handle the request - either with an input buffer or a direct + * output request + * + * PARAMETERS : + * @buffer : pointer to the output buffer + * @frameNumber : frame number of the request + * @pInputBuffer : pointer to input buffer if an input request + * @metadata : parameters associated with the request + * + * RETURN : 0 on a success start of capture + * -EINVAL on invalid input + * -ENODEV on serious error + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::request(buffer_handle_t *buffer, + uint32_t frameNumber, + camera3_stream_buffer_t* pInputBuffer, + metadata_buffer_t* metadata) +{ + int32_t rc = NO_ERROR; + int index; + + if (NULL == buffer || NULL == metadata) { + LOGE("Invalid buffer/metadata in channel request"); + return BAD_VALUE; + } + + if (pInputBuffer) { + //need to send to reprocessing + LOGD("Got a request with input buffer, output streamType = %d", mStreamType); + reprocess_config_t reproc_cfg; + cam_dimension_t dim; + memset(&reproc_cfg, 0, sizeof(reprocess_config_t)); + memset(&dim, 0, sizeof(dim)); + 
setReprocConfig(reproc_cfg, pInputBuffer, metadata, mStreamFormat, dim); + startPostProc(reproc_cfg); + + qcamera_fwk_input_pp_data_t *src_frame = NULL; + src_frame = (qcamera_fwk_input_pp_data_t *)calloc(1, + sizeof(qcamera_fwk_input_pp_data_t)); + if (src_frame == NULL) { + LOGE("No memory for src frame"); + return NO_MEMORY; + } + rc = setFwkInputPPData(src_frame, pInputBuffer, &reproc_cfg, metadata, buffer, frameNumber); + if (NO_ERROR != rc) { + LOGE("Error %d while setting framework input PP data", rc); + free(src_frame); + return rc; + } + LOGH("Post-process started"); + m_postprocessor.processData(src_frame); + } else { + //need to fill output buffer with new data and return + if(!m_bIsActive) { + rc = registerBuffer(buffer, mIsType); + if (NO_ERROR != rc) { + LOGE("On-the-fly buffer registration failed %d", + rc); + return rc; + } + + rc = start(); + if (NO_ERROR != rc) + return rc; + } else { + LOGD("Request on an existing stream"); + } + + index = mMemory.getMatchBufIndex((void*)buffer); + if(index < 0) { + rc = registerBuffer(buffer, mIsType); + if (NO_ERROR != rc) { + LOGE("On-the-fly buffer registration failed %d", + rc); + return rc; + } + + index = mMemory.getMatchBufIndex((void*)buffer); + if (index < 0) { + LOGE("Could not find object among registered buffers"); + return DEAD_OBJECT; + } + } + rc = mStreams[0]->bufDone(index); + if(rc != NO_ERROR) { + LOGE("Failed to Q new buffer to stream"); + return rc; + } + rc = mMemory.markFrameNumber(index, frameNumber); + } + return rc; +} + +/*=========================================================================== + * FUNCTION : initialize + * + * DESCRIPTION: + * + * PARAMETERS : isType : type of image stabilization on the buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::initialize(__unused cam_is_type_t isType) +{ + int32_t rc = NO_ERROR; 
    // Allocate one metadata_buffer_t sized heap buffer per offline meta slot.
    rc = mOfflineMetaMemory.allocateAll(sizeof(metadata_buffer_t));
    if (rc == NO_ERROR) {
        // Seed the free-list with every allocated index. Extra slots beyond
        // mNumBuffers cover metadata for frames still in flight inside the
        // reprocess pipeline stages.
        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
        mFreeOfflineMetaBuffersList.clear();
        for (uint32_t i = 0; i < mNumBuffers + (MAX_REPROCESS_PIPELINE_STAGES - 1);
                i++) {
            mFreeOfflineMetaBuffersList.push_back(i);
        }
    } else {
        LOGE("Could not allocate offline meta buffers for input reprocess");
    }
    return rc;
}

/*===========================================================================
 * FUNCTION   : registerBuffer
 *
 * DESCRIPTION: register streaming buffer to the channel object
 *
 * PARAMETERS :
 *   @buffer     : buffer to be registered
 *   @isType     : image stabilization type on the stream
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ProcessingChannel::registerBuffer(buffer_handle_t *buffer,
        cam_is_type_t isType)
{
    ATRACE_CALL();
    int rc = 0;
    mIsType = isType;
    cam_stream_type_t streamType;

    // Reject registrations beyond the count the channel was created with.
    if ((uint32_t)mMemory.getCnt() > (mNumBufs - 1)) {
        LOGE("Trying to register more buffers than initially requested");
        return BAD_VALUE;
    }

    // Lazily initialize the channel/stream on the first buffer registration.
    if (0 == m_numStreams) {
        rc = initialize(mIsType);
        if (rc != NO_ERROR) {
            LOGE("Couldn't initialize camera stream %d", rc);
            return rc;
        }
    }

    streamType = mStreams[0]->getMyType();
    rc = mMemory.registerBuffer(buffer, streamType);
    if (ALREADY_EXISTS == rc) {
        // Duplicate registration of the same gralloc handle is benign.
        return NO_ERROR;
    } else if (NO_ERROR != rc) {
        LOGE("Buffer %p couldn't be registered %d", buffer, rc);
        return rc;
    }

    return rc;
}

/*===========================================================================
 * FUNCTION   : setFwkInputPPData
 *
 * DESCRIPTION: fill out the framework src frame information for reprocessing
 *
 * PARAMETERS :
 *   @src_frame     : input pp data to be filled out
 *   @pInputBuffer  : input buffer for reprocessing
 *   @reproc_cfg    : pointer to the reprocess config
 *   @metadata      : pointer to the metadata buffer
 *   @output_buffer : output buffer for reprocessing; could be NULL if not
 *                    framework allocated
 *   @frameNumber   : frame number of the request
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ProcessingChannel::setFwkInputPPData(qcamera_fwk_input_pp_data_t *src_frame,
        camera3_stream_buffer_t *pInputBuffer, reprocess_config_t *reproc_cfg,
        metadata_buffer_t *metadata, buffer_handle_t *output_buffer,
        uint32_t frameNumber)
{
    int32_t rc = NO_ERROR;
    // Register the framework input buffer on the fly if it has not been seen
    // before; released later in releaseOfflineMemory() by frame number.
    int input_index = mOfflineMemory.getMatchBufIndex((void*)pInputBuffer->buffer);
    if(input_index < 0) {
        rc = mOfflineMemory.registerBuffer(pInputBuffer->buffer, mStreamType);
        if (NO_ERROR != rc) {
            LOGE("On-the-fly input buffer registration failed %d",
                     rc);
            return rc;
        }
        input_index = mOfflineMemory.getMatchBufIndex((void*)pInputBuffer->buffer);
        if (input_index < 0) {
            LOGE("Could not find object among registered buffers");
            return DEAD_OBJECT;
        }
    }
    mOfflineMemory.markFrameNumber(input_index, frameNumber);

    src_frame->src_frame = *pInputBuffer;
    rc = mOfflineMemory.getBufDef(reproc_cfg->input_stream_plane_info.plane_info,
            src_frame->input_buffer, input_index);
    if (rc != 0) {
        return rc;
    }
    dumpYUV(&src_frame->input_buffer, reproc_cfg->input_stream_dim,
            reproc_cfg->input_stream_plane_info.plane_info, QCAMERA_DUMP_FRM_INPUT_REPROCESS);
    // Metadata travels as a 1-"pixel"-high stream whose width is the full
    // metadata_buffer_t size; compute its plane layout accordingly.
    cam_dimension_t dim = {sizeof(metadata_buffer_t), 1};
    cam_stream_buf_plane_info_t meta_planes;
    rc = mm_stream_calc_offset_metadata(&dim, &mPaddingInfo, &meta_planes);
    if (rc != 0) {
        LOGE("Metadata stream plane info calculation failed!");
        return rc;
    }
    // Pop a free offline meta buffer index; it is returned to the list in
    // releaseOfflineMemory() once reprocessing completes.
    uint32_t metaBufIdx;
    {
        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
        if (mFreeOfflineMetaBuffersList.empty()) {
            LOGE("mFreeOfflineMetaBuffersList is null. Fatal");
            return BAD_VALUE;
        }

        metaBufIdx = *(mFreeOfflineMetaBuffersList.begin());
        mFreeOfflineMetaBuffersList.erase(mFreeOfflineMetaBuffersList.begin());
        LOGD("erasing %d, mFreeOfflineMetaBuffersList.size %d", metaBufIdx,
                mFreeOfflineMetaBuffersList.size());
    }

    mOfflineMetaMemory.markFrameNumber(metaBufIdx, frameNumber);

    mm_camera_buf_def_t meta_buf;
    cam_frame_len_offset_t offset = meta_planes.plane_info;
    rc = mOfflineMetaMemory.getBufDef(offset, meta_buf, metaBufIdx);
    if (NO_ERROR != rc) {
        return rc;
    }
    // Snapshot the caller's metadata so the reprocess pipeline owns a copy.
    memcpy(meta_buf.buffer, metadata, sizeof(metadata_buffer_t));
    src_frame->metadata_buffer = meta_buf;
    src_frame->reproc_config = *reproc_cfg;
    src_frame->output_buffer = output_buffer;
    src_frame->frameNumber = frameNumber;
    return rc;
}

/*===========================================================================
 * FUNCTION   : checkStreamCbErrors
 *
 * DESCRIPTION: check the stream callback for errors
 *
 * PARAMETERS :
 *   @super_frame : the super frame with filled buffer
 *   @stream      : stream on which the buffer was requested and filled
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ProcessingChannel::checkStreamCbErrors(mm_camera_super_buf_t *super_frame,
        QCamera3Stream *stream)
{
    if (NULL == stream) {
        LOGE("Invalid stream");
        return BAD_VALUE;
    }

    if(NULL == super_frame) {
        LOGE("Invalid Super buffer");
        return BAD_VALUE;
    }

    // Processing channels carry exactly one stream per super buffer.
    if(super_frame->num_bufs != 1) {
        LOGE("Multiple streams are not supported");
        return BAD_VALUE;
    }
    if(NULL == super_frame->bufs[0]) {
        LOGE("Error, Super buffer frame does not contain valid buffer");
        return BAD_VALUE;
    }
    return NO_ERROR;
}

/*===========================================================================
 * FUNCTION   : getStreamSize
 *
 * DESCRIPTION: get the size from the camera3_stream_t for the channel
+ * + * PARAMETERS : + * @dim : Return the size of the stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::getStreamSize(cam_dimension_t &dim) +{ + if (mCamera3Stream) { + dim.width = mCamera3Stream->width; + dim.height = mCamera3Stream->height; + return NO_ERROR; + } else { + return BAD_VALUE; + } +} + +/*=========================================================================== + * FUNCTION : getStreamBufs + * + * DESCRIPTION: get the buffers allocated to the stream + * + * PARAMETERS : + * @len : buffer length + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +QCamera3StreamMem* QCamera3ProcessingChannel::getStreamBufs(uint32_t /*len*/) +{ + KPI_ATRACE_CALL(); + return &mMemory; +} + +/*=========================================================================== + * FUNCTION : putStreamBufs + * + * DESCRIPTION: release the buffers allocated to the stream + * + * PARAMETERS : NONE + * + * RETURN : NONE + *==========================================================================*/ +void QCamera3ProcessingChannel::putStreamBufs() +{ + mMemory.unregisterBuffers(); + + /* Reclaim all the offline metabuffers and push them to free list */ + { + Mutex::Autolock lock(mFreeOfflineMetaBuffersLock); + mFreeOfflineMetaBuffersList.clear(); + for (uint32_t i = 0; i < mOfflineMetaMemory.getCnt(); i++) { + mFreeOfflineMetaBuffersList.push_back(i); + } + } +} + + +/*=========================================================================== + * FUNCTION : stop + * + * DESCRIPTION: stop processing channel, which will stop all streams within, + * including the reprocessing channel in postprocessor. 
+ * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::stop() +{ + int32_t rc = NO_ERROR; + if(!m_bIsActive) { + LOGE("Attempt to stop inactive channel"); + return rc; + } + + m_postprocessor.stop(); + mPostProcStarted = false; + rc |= QCamera3Channel::stop(); + return rc; +} + +/*=========================================================================== + * FUNCTION : startPostProc + * + * DESCRIPTION: figure out if the postprocessor needs to be restarted and if yes + * start it + * + * PARAMETERS : + * @inputBufExists : whether there is an input buffer for post processing + * @config : reprocessing configuration + * @metadata : metadata associated with the reprocessing request + * + * RETURN : NONE + *==========================================================================*/ +void QCamera3ProcessingChannel::startPostProc(const reprocess_config_t &config) +{ + if(!mPostProcStarted) { + m_postprocessor.start(config); + mPostProcStarted = true; + } +} + +/*=========================================================================== + * FUNCTION : queueReprocMetadata + * + * DESCRIPTION: queue the reprocess metadata to the postprocessor + * + * PARAMETERS : metadata : the metadata corresponding to the pp frame + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::queueReprocMetadata(mm_camera_super_buf_t *metadata) +{ + return m_postprocessor.processPPMetadata(metadata); +} + +/*=========================================================================== + * FUNCTION : metadataBufDone + * + * DESCRIPTION: Buffer done method for a metadata buffer + * + * PARAMETERS : + * @recvd_frame : received metadata frame + * + * RETURN : int32_t type of 
status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::metadataBufDone(mm_camera_super_buf_t *recvd_frame) +{ + int32_t rc = NO_ERROR;; + if ((NULL == m_pMetaChannel) || (NULL == recvd_frame)) { + LOGE("Metadata channel or metadata buffer invalid"); + return BAD_VALUE; + } + + rc = ((QCamera3MetadataChannel*)m_pMetaChannel)->bufDone(recvd_frame); + + return rc; +} + +/*=========================================================================== + * FUNCTION : translateStreamTypeAndFormat + * + * DESCRIPTION: translates the framework stream format into HAL stream type + * and format + * + * PARAMETERS : + * @streamType : translated stream type + * @streamFormat : translated stream format + * @stream : fwk stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::translateStreamTypeAndFormat(camera3_stream_t *stream, + cam_stream_type_t &streamType, cam_format_t &streamFormat) +{ + switch (stream->format) { + case HAL_PIXEL_FORMAT_YCbCr_420_888: + if(stream->stream_type == CAMERA3_STREAM_INPUT){ + streamType = CAM_STREAM_TYPE_SNAPSHOT; + streamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_SNAPSHOT); + } else { + streamType = CAM_STREAM_TYPE_CALLBACK; + streamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_CALLBACK); + } + break; + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + if (stream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) { + streamType = CAM_STREAM_TYPE_VIDEO; + streamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_VIDEO); + } else if(stream->stream_type == CAMERA3_STREAM_INPUT || + stream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL || + IS_USAGE_ZSL(stream->usage)){ + streamType = CAM_STREAM_TYPE_SNAPSHOT; + streamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_SNAPSHOT); + } else { + 
streamType = CAM_STREAM_TYPE_PREVIEW; + streamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_PREVIEW); + } + break; + case HAL_PIXEL_FORMAT_RAW_OPAQUE: + case HAL_PIXEL_FORMAT_RAW16: + case HAL_PIXEL_FORMAT_RAW10: + streamType = CAM_STREAM_TYPE_RAW; + streamFormat = CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG; + break; + default: + return -EINVAL; + } + LOGD("fwk_format = %d, streamType = %d, streamFormat = %d", + stream->format, streamType, streamFormat); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : setReprocConfig + * + * DESCRIPTION: sets the reprocessing parameters for the input buffer + * + * PARAMETERS : + * @reproc_cfg : the configuration to be set + * @pInputBuffer : pointer to the input buffer + * @metadata : pointer to the reprocessing metadata buffer + * @streamFormat : format of the input stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3ProcessingChannel::setReprocConfig(reprocess_config_t &reproc_cfg, + camera3_stream_buffer_t *pInputBuffer, + __unused metadata_buffer_t *metadata, + cam_format_t streamFormat, cam_dimension_t dim) +{ + int32_t rc = 0; + reproc_cfg.padding = &mPaddingInfo; + //to ensure a big enough buffer size set the height and width + //padding to max(height padding, width padding) + if (reproc_cfg.padding->height_padding > reproc_cfg.padding->width_padding) { + reproc_cfg.padding->width_padding = reproc_cfg.padding->height_padding; + } else { + reproc_cfg.padding->height_padding = reproc_cfg.padding->width_padding; + } + if (NULL != pInputBuffer) { + reproc_cfg.input_stream_dim.width = (int32_t)pInputBuffer->stream->width; + reproc_cfg.input_stream_dim.height = (int32_t)pInputBuffer->stream->height; + } else { + reproc_cfg.input_stream_dim.width = (int32_t)dim.width; + reproc_cfg.input_stream_dim.height = 
(int32_t)dim.height; + } + reproc_cfg.src_channel = this; + reproc_cfg.output_stream_dim.width = mCamera3Stream->width; + reproc_cfg.output_stream_dim.height = mCamera3Stream->height; + reproc_cfg.reprocess_type = getReprocessType(); + + //offset calculation + if (NULL != pInputBuffer) { + rc = translateStreamTypeAndFormat(pInputBuffer->stream, + reproc_cfg.stream_type, reproc_cfg.stream_format); + if (rc != NO_ERROR) { + LOGE("Stream format %d is not supported", + pInputBuffer->stream->format); + return rc; + } + } else { + reproc_cfg.stream_type = mStreamType; + reproc_cfg.stream_format = streamFormat; + } + + switch (reproc_cfg.stream_type) { + case CAM_STREAM_TYPE_PREVIEW: + if (getStreamByIndex(0) == NULL) { + LOGE("Could not find stream"); + rc = -1; + break; + } + rc = mm_stream_calc_offset_preview( + getStreamByIndex(0)->getStreamInfo(), + &reproc_cfg.input_stream_dim, + reproc_cfg.padding, + &reproc_cfg.input_stream_plane_info); + break; + case CAM_STREAM_TYPE_VIDEO: + rc = mm_stream_calc_offset_video(reproc_cfg.stream_format, + &reproc_cfg.input_stream_dim, + &reproc_cfg.input_stream_plane_info); + break; + case CAM_STREAM_TYPE_RAW: + rc = mm_stream_calc_offset_raw(reproc_cfg.stream_format, + &reproc_cfg.input_stream_dim, + reproc_cfg.padding, &reproc_cfg.input_stream_plane_info); + break; + case CAM_STREAM_TYPE_SNAPSHOT: + case CAM_STREAM_TYPE_CALLBACK: + default: + rc = mm_stream_calc_offset_snapshot(streamFormat, &reproc_cfg.input_stream_dim, + reproc_cfg.padding, &reproc_cfg.input_stream_plane_info); + break; + } + if (rc != 0) { + LOGE("Stream %d plane info calculation failed!", mStreamType); + return rc; + } + return rc; +} + +/*=========================================================================== + * FUNCTION : reprocessCbRoutine + * + * DESCRIPTION: callback function for the reprocessed frame. 
This frame now
 *              should be returned to the framework
 *
 * PARAMETERS :
 *   @resultBuffer      : buffer containing the reprocessed data
 *   @resultFrameNumber : frame number on which the buffer was requested
 *
 * RETURN     : NONE
 *
 *==========================================================================*/
void QCamera3ProcessingChannel::reprocessCbRoutine(buffer_handle_t *resultBuffer,
        uint32_t resultFrameNumber)
{
    ATRACE_CALL();
    int rc = NO_ERROR;

    // Free the offline input/meta buffers tied to this frame number first.
    rc = releaseOfflineMemory(resultFrameNumber);
    if (NO_ERROR != rc) {
        LOGE("Error releasing offline memory %d", rc);
    }
    /* Since reprocessing is done, send the callback to release the input buffer */
    if (mChannelCB) {
        mChannelCB(NULL, NULL, resultFrameNumber, true, mUserData);
    }
    issueChannelCb(resultBuffer, resultFrameNumber);

    return;
}

/*===========================================================================
 * FUNCTION   : issueChannelCb
 *
 * DESCRIPTION: function to set the result and issue channel callback
 *
 * PARAMETERS :
 *   @resultBuffer      : buffer containing the data
 *   @resultFrameNumber : frame number on which the buffer was requested
 *
 * RETURN     : NONE
 *
 *
 *==========================================================================*/
void QCamera3ProcessingChannel::issueChannelCb(buffer_handle_t *resultBuffer,
        uint32_t resultFrameNumber)
{
    camera3_stream_buffer_t result;
    //Use below data to issue framework callback
    result.stream = mCamera3Stream;
    result.buffer = resultBuffer;
    result.status = CAMERA3_BUFFER_STATUS_OK;
    // Fences already consumed/signaled at this point.
    result.acquire_fence = -1;
    result.release_fence = -1;

    if (mChannelCB) {
        mChannelCB(NULL, &result, resultFrameNumber, false, mUserData);
    }
}

/*===========================================================================
 * FUNCTION   : showDebugFPS
 *
 * DESCRIPTION: Function to log the fps for preview, video, callback and raw
 *              streams
 *
 * PARAMETERS : Stream type
 *
 * RETURN     : None
 *==========================================================================*/
void QCamera3ProcessingChannel::showDebugFPS(int32_t streamType)
{
    double fps = 0;
    mFrameCount++;
    nsecs_t now = systemTime();
    nsecs_t diff = now - mLastFpsTime;
    // Log at most once every 250ms, averaging frames over the elapsed window.
    if (diff > ms2ns(250)) {
        fps = (((double)(mFrameCount - mLastFrameCount)) *
                (double)(s2ns(1))) / (double)diff;
        switch(streamType) {
            case CAM_STREAM_TYPE_PREVIEW:
                LOGH("PROFILE_PREVIEW_FRAMES_PER_SECOND : %.4f",
                         fps);
                break;
            case CAM_STREAM_TYPE_VIDEO:
                LOGH("PROFILE_VIDEO_FRAMES_PER_SECOND : %.4f",
                         fps);
                break;
            case CAM_STREAM_TYPE_CALLBACK:
                LOGH("PROFILE_CALLBACK_FRAMES_PER_SECOND : %.4f",
                         fps);
                break;
            case CAM_STREAM_TYPE_RAW:
                LOGH("PROFILE_RAW_FRAMES_PER_SECOND : %.4f",
                         fps);
                break;
            default:
                LOGH("logging not supported for the stream");
                break;
        }
        mLastFpsTime = now;
        mLastFrameCount = mFrameCount;
    }
}

/*===========================================================================
 * FUNCTION   : releaseOfflineMemory
 *
 * DESCRIPTION: function to clean up the offline memory used for input reprocess
 *
 * PARAMETERS :
 *   @resultFrameNumber : frame number on which the buffer was requested
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              non-zero failure code
 *
 *
 *==========================================================================*/
int32_t QCamera3ProcessingChannel::releaseOfflineMemory(uint32_t resultFrameNumber)
{
    int32_t rc = NO_ERROR;
    // Unregister the gralloc input buffer that was registered on the fly in
    // setFwkInputPPData(). Absence is only a warning (e.g. no-input requests).
    int32_t inputBufIndex =
            mOfflineMemory.getGrallocBufferIndex(resultFrameNumber);
    if (0 <= inputBufIndex) {
        rc = mOfflineMemory.unregisterBuffer(inputBufIndex);
    } else {
        LOGW("Could not find offline input buffer, resultFrameNumber %d",
                resultFrameNumber);
    }
    if (rc != NO_ERROR) {
        LOGE("Failed to unregister offline input buffer");
    }

    // Return the offline meta heap buffer index to the free list.
    int32_t metaBufIndex =
            mOfflineMetaMemory.getHeapBufferIndex(resultFrameNumber);
    if (0 <= metaBufIndex) {
        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
        mFreeOfflineMetaBuffersList.push_back((uint32_t)metaBufIndex);
    } else {
        LOGW("Could not find offline meta buffer, resultFrameNumber %d",
                resultFrameNumber);
    }

    return rc;
}

/* Regular Channel methods */
/*===========================================================================
 * FUNCTION   : QCamera3RegularChannel
 *
 * DESCRIPTION: constructor of QCamera3RegularChannel
 *
 * PARAMETERS :
 *   @cam_handle : camera handle
 *   @cam_ops    : ptr to camera ops table
 *   @cb_routine : callback routine to frame aggregator
 *   @stream     : camera3_stream_t structure
 *   @stream_type: Channel stream type
 *   @postprocess_mask: feature mask for postprocessing
 *   @metadataChannel : metadata channel for the session
 *   @numBuffers : number of max dequeued buffers
 *
 * RETURN     : none
 *==========================================================================*/
QCamera3RegularChannel::QCamera3RegularChannel(uint32_t cam_handle,
        uint32_t channel_handle,
        mm_camera_ops_t *cam_ops,
        channel_cb_routine cb_routine,
        cam_padding_info_t *paddingInfo,
        void *userData,
        camera3_stream_t *stream,
        cam_stream_type_t stream_type,
        cam_feature_mask_t postprocess_mask,
        QCamera3Channel *metadataChannel,
        uint32_t numBuffers) :
            QCamera3ProcessingChannel(cam_handle, channel_handle, cam_ops,
                    cb_routine, paddingInfo, userData, stream, stream_type,
                    postprocess_mask, metadataChannel, numBuffers),
            mBatchSize(0),
            mRotation(ROTATE_0)
{
}

/*===========================================================================
 * FUNCTION   : ~QCamera3RegularChannel
 *
 * DESCRIPTION: destructor of QCamera3RegularChannel
 *
 * PARAMETERS : none
 *
 * RETURN     : none
 *==========================================================================*/
QCamera3RegularChannel::~QCamera3RegularChannel()
{
    destroy();
}

/*===========================================================================
 * FUNCTION   : initialize
 *
 *
DESCRIPTION: Initialize and add camera channel & stream
 *
 * PARAMETERS :
 *   @isType : type of image stabilization required on this stream
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/

int32_t QCamera3RegularChannel::initialize(cam_is_type_t isType)
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;

    cam_dimension_t streamDim;

    if (NULL == mCamera3Stream) {
        LOGE("Camera stream uninitialized");
        return NO_INIT;
    }

    if (1 <= m_numStreams) {
        // Only one stream per channel supported in v3 Hal
        return NO_ERROR;
    }

    mIsType  = isType;

    rc = translateStreamTypeAndFormat(mCamera3Stream, mStreamType,
            mStreamFormat);
    if (rc != NO_ERROR) {
        return -EINVAL;
    }


    // Rotation is only honored on video/preview streams, and only when the
    // channel was created with the rotation postprocess feature enabled.
    if ((mStreamType == CAM_STREAM_TYPE_VIDEO) ||
            (mStreamType == CAM_STREAM_TYPE_PREVIEW)) {
        if ((mCamera3Stream->rotation != CAMERA3_STREAM_ROTATION_0) &&
                ((mPostProcMask & CAM_QCOM_FEATURE_ROTATION) == 0)) {
            LOGE("attempting rotation %d when rotation is disabled",
                    mCamera3Stream->rotation);
            return -EINVAL;
        }

        // Map the fwk rotation enum onto the HAL rotation enum.
        switch (mCamera3Stream->rotation) {
            case CAMERA3_STREAM_ROTATION_0:
                mRotation = ROTATE_0;
                break;
            case CAMERA3_STREAM_ROTATION_90: {
                mRotation = ROTATE_90;
                break;
            }
            case CAMERA3_STREAM_ROTATION_180:
                mRotation = ROTATE_180;
                break;
            case CAMERA3_STREAM_ROTATION_270: {
                mRotation = ROTATE_270;
                break;
            }
            default:
                LOGE("Unknown rotation: %d",
                         mCamera3Stream->rotation);
                return -EINVAL;
        }
    } else if (mCamera3Stream->rotation != CAMERA3_STREAM_ROTATION_0) {
        LOGE("Rotation %d is not supported by stream type %d",
                mCamera3Stream->rotation,
                mStreamType);
        return -EINVAL;
    }

    streamDim.width = mCamera3Stream->width;
    streamDim.height = mCamera3Stream->height;

    LOGD("batch size is %d", mBatchSize);
    rc = QCamera3Channel::addStream(mStreamType,
            mStreamFormat,
            streamDim,
            mRotation,
            mNumBufs,
            mPostProcMask,
            mIsType,
            mBatchSize);

    return rc;
}

/*===========================================================================
 * FUNCTION   : setBatchSize
 *
 * DESCRIPTION: Set batch size for the channel.
 *
 * PARAMETERS :
 *   @batchSize  : Number of image buffers in a batch
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success always
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3RegularChannel::setBatchSize(uint32_t batchSize)
{
    int32_t rc = NO_ERROR;

    // Takes effect on the next initialize()/addStream(); not applied live.
    mBatchSize = batchSize;
    LOGD("Batch size set: %d", mBatchSize);
    return rc;
}

/*===========================================================================
 * FUNCTION   : getStreamTypeMask
 *
 * DESCRIPTION: Get bit mask of all stream types in this channel.
 *              If stream is not initialized, then generate mask based on
 *              local streamType
 *
 * PARAMETERS : None
 *
 * RETURN     : Bit mask of all stream types in this channel
 *==========================================================================*/
uint32_t QCamera3RegularChannel::getStreamTypeMask()
{
    if (mStreams[0]) {
        return QCamera3Channel::getStreamTypeMask();
    } else {
        return (1U << mStreamType);
    }
}

/*===========================================================================
 * FUNCTION   : queueBatchBuf
 *
 * DESCRIPTION: queue batch container to downstream
 *
 * PARAMETERS :
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success always
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3RegularChannel::queueBatchBuf()
{
    int32_t rc = NO_ERROR;

    if (mStreams[0]) {
        rc = mStreams[0]->queueBatchBuf();
    }
    if (rc != NO_ERROR) {
        LOGE("stream->queueBatchContainer failed");
    }
    return rc;
}

/*===========================================================================
 * FUNCTION   : request
 *
 * DESCRIPTION: process a request from camera service. Stream on if necessary.
 *
 * PARAMETERS :
 *   @buffer  : buffer to be filled for this request
 *
 * RETURN     : 0 on a success start of capture
 *              -EINVAL on invalid input
 *              -ENODEV on serious error
 *==========================================================================*/
int32_t QCamera3RegularChannel::request(buffer_handle_t *buffer, uint32_t frameNumber)
{
    ATRACE_CALL();
    //FIX ME: Return buffer back in case of failures below.

    int32_t rc = NO_ERROR;
    int index;

    if (NULL == buffer) {
        LOGE("Invalid buffer in channel request");
        return BAD_VALUE;
    }

    // First request on an inactive channel registers the buffer and streams on.
    if(!m_bIsActive) {
        rc = registerBuffer(buffer, mIsType);
        if (NO_ERROR != rc) {
            LOGE("On-the-fly buffer registration failed %d",
                     rc);
            return rc;
        }

        rc = start();
        if (NO_ERROR != rc) {
            return rc;
        }
    } else {
        LOGD("Request on an existing stream");
    }

    // Unseen buffers get registered on the fly, then looked up again.
    index = mMemory.getMatchBufIndex((void*)buffer);
    if(index < 0) {
        rc = registerBuffer(buffer, mIsType);
        if (NO_ERROR != rc) {
            LOGE("On-the-fly buffer registration failed %d",
                     rc);
            return rc;
        }

        index = mMemory.getMatchBufIndex((void*)buffer);
        if (index < 0) {
            LOGE("Could not find object among registered buffers");
            return DEAD_OBJECT;
        }
    }

    rc = mStreams[0]->bufDone((uint32_t)index);
    if(rc != NO_ERROR) {
        LOGE("Failed to Q new buffer to stream");
        return rc;
    }

    // Remember which frame number this buffer slot serves.
    rc = mMemory.markFrameNumber((uint32_t)index, frameNumber);
    return rc;
}

/*===========================================================================
 * FUNCTION   : getReprocessType
 *
 * DESCRIPTION: get the type of reprocess output supported by this channel
 *
 * PARAMETERS : NONE
 *
 * RETURN     : reprocess_type_t : type of reprocess
 *==========================================================================*/
reprocess_type_t QCamera3RegularChannel::getReprocessType()
{
    return REPROCESS_TYPE_PRIVATE;
}


QCamera3MetadataChannel::QCamera3MetadataChannel(uint32_t cam_handle,
+ uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + cam_feature_mask_t postprocess_mask, + void *userData, uint32_t numBuffers) : + QCamera3Channel(cam_handle, channel_handle, cam_ops, + cb_routine, paddingInfo, postprocess_mask, + userData, numBuffers), + mMemory(NULL) +{ +} + +QCamera3MetadataChannel::~QCamera3MetadataChannel() +{ + destroy(); + + if (mMemory) { + mMemory->deallocate(); + delete mMemory; + mMemory = NULL; + } +} + +int32_t QCamera3MetadataChannel::initialize(cam_is_type_t isType) +{ + ATRACE_CALL(); + int32_t rc; + cam_dimension_t streamDim; + + if (mMemory || m_numStreams > 0) { + LOGE("metadata channel already initialized"); + return -EINVAL; + } + + streamDim.width = (int32_t)sizeof(metadata_buffer_t), + streamDim.height = 1; + + mIsType = isType; + rc = QCamera3Channel::addStream(CAM_STREAM_TYPE_METADATA, CAM_FORMAT_MAX, + streamDim, ROTATE_0, (uint8_t)mNumBuffers, mPostProcMask, mIsType); + if (rc < 0) { + LOGE("addStream failed"); + } + return rc; +} + +int32_t QCamera3MetadataChannel::request(buffer_handle_t * /*buffer*/, + uint32_t /*frameNumber*/) +{ + if (!m_bIsActive) { + return start(); + } + else + return 0; +} + +void QCamera3MetadataChannel::streamCbRoutine( + mm_camera_super_buf_t *super_frame, + QCamera3Stream * /*stream*/) +{ + ATRACE_NAME("metadata_stream_cb_routine"); + uint32_t requestNumber = 0; + if (super_frame == NULL || super_frame->num_bufs != 1) { + LOGE("super_frame is not valid"); + return; + } + if (mChannelCB) { + mChannelCB(super_frame, NULL, requestNumber, false, mUserData); + } +} + +QCamera3StreamMem* QCamera3MetadataChannel::getStreamBufs(uint32_t len) +{ + int rc; + if (len < sizeof(metadata_buffer_t)) { + LOGE("Metadata buffer size less than structure %d vs %d", + len, + sizeof(metadata_buffer_t)); + return NULL; + } + mMemory = new QCamera3StreamMem(MIN_STREAMING_BUFFER_NUM); + if (!mMemory) { + LOGE("unable to create metadata 
memory"); + return NULL; + } + rc = mMemory->allocateAll(len); + if (rc < 0) { + LOGE("unable to allocate metadata memory"); + delete mMemory; + mMemory = NULL; + return NULL; + } + clear_metadata_buffer((metadata_buffer_t*)mMemory->getPtr(0)); + return mMemory; +} + +void QCamera3MetadataChannel::putStreamBufs() +{ + mMemory->deallocate(); + delete mMemory; + mMemory = NULL; +} +/*************************************************************************************/ +// RAW Channel related functions +QCamera3RawChannel::QCamera3RawChannel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + void *userData, + camera3_stream_t *stream, + cam_feature_mask_t postprocess_mask, + QCamera3Channel *metadataChannel, + bool raw_16, uint32_t numBuffers) : + QCamera3RegularChannel(cam_handle, channel_handle, cam_ops, + cb_routine, paddingInfo, userData, stream, + CAM_STREAM_TYPE_RAW, postprocess_mask, + metadataChannel, numBuffers), + mIsRaw16(raw_16) +{ + char prop[PROPERTY_VALUE_MAX]; + property_get("persist.camera.raw.debug.dump", prop, "0"); + mRawDump = atoi(prop); +} + +QCamera3RawChannel::~QCamera3RawChannel() +{ +} + +/*=========================================================================== + * FUNCTION : initialize + * + * DESCRIPTION: Initialize and add camera channel & stream + * + * PARAMETERS : + * @isType : image stabilization type on the stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ + +int32_t QCamera3RawChannel::initialize(cam_is_type_t isType) +{ + return QCamera3RegularChannel::initialize(isType); +} + +void QCamera3RawChannel::streamCbRoutine( + mm_camera_super_buf_t *super_frame, + QCamera3Stream * stream) +{ + ATRACE_CALL(); + /* Move this back down once verified */ + if (mRawDump) + dumpRawSnapshot(super_frame->bufs[0]); + + if 
(mIsRaw16) { + if (getStreamDefaultFormat(CAM_STREAM_TYPE_RAW) == + CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG) + convertMipiToRaw16(super_frame->bufs[0]); + else + convertLegacyToRaw16(super_frame->bufs[0]); + } + + //Make sure cache coherence because extra processing is done + mMemory.cleanInvalidateCache(super_frame->bufs[0]->buf_idx); + + QCamera3RegularChannel::streamCbRoutine(super_frame, stream); + return; +} + +void QCamera3RawChannel::dumpRawSnapshot(mm_camera_buf_def_t *frame) +{ + QCamera3Stream *stream = getStreamByIndex(0); + if (stream != NULL) { + char buf[FILENAME_MAX]; + memset(buf, 0, sizeof(buf)); + cam_dimension_t dim; + memset(&dim, 0, sizeof(dim)); + stream->getFrameDimension(dim); + + cam_frame_len_offset_t offset; + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + stream->getFrameOffset(offset); + snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"r_%d_%dx%d.raw", + frame->frame_idx, offset.mp[0].stride, offset.mp[0].scanline); + + int file_fd = open(buf, O_RDWR| O_CREAT, 0644); + if (file_fd >= 0) { + ssize_t written_len = write(file_fd, frame->buffer, frame->frame_len); + LOGD("written number of bytes %zd", written_len); + close(file_fd); + } else { + LOGE("failed to open file to dump image"); + } + } else { + LOGE("Could not find stream"); + } + +} + +void QCamera3RawChannel::convertLegacyToRaw16(mm_camera_buf_def_t *frame) +{ + // Convert image buffer from Opaque raw format to RAW16 format + // 10bit Opaque raw is stored in the format of: + // 0000 - p5 - p4 - p3 - p2 - p1 - p0 + // where p0 to p5 are 6 pixels (each is 10bit)_and most significant + // 4 bits are 0s. Each 64bit word contains 6 pixels. 
+ + QCamera3Stream *stream = getStreamByIndex(0); + if (stream != NULL) { + cam_dimension_t dim; + memset(&dim, 0, sizeof(dim)); + stream->getFrameDimension(dim); + + cam_frame_len_offset_t offset; + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + stream->getFrameOffset(offset); + + uint32_t raw16_stride = ((uint32_t)dim.width + 15U) & ~15U; + uint16_t* raw16_buffer = (uint16_t *)frame->buffer; + + // In-place format conversion. + // Raw16 format always occupy more memory than opaque raw10. + // Convert to Raw16 by iterating through all pixels from bottom-right + // to top-left of the image. + // One special notes: + // 1. Cross-platform raw16's stride is 16 pixels. + // 2. Opaque raw10's stride is 6 pixels, and aligned to 16 bytes. + for (int32_t ys = dim.height - 1; ys >= 0; ys--) { + uint32_t y = (uint32_t)ys; + uint64_t* row_start = (uint64_t *)frame->buffer + + y * (uint32_t)offset.mp[0].stride_in_bytes / 8; + for (int32_t xs = dim.width - 1; xs >= 0; xs--) { + uint32_t x = (uint32_t)xs; + uint16_t raw16_pixel = 0x3FF & (row_start[x/6] >> (10*(x%6))); + raw16_buffer[y*raw16_stride+x] = raw16_pixel; + } + } + } else { + LOGE("Could not find stream"); + } + +} + +void QCamera3RawChannel::convertMipiToRaw16(mm_camera_buf_def_t *frame) +{ + // Convert image buffer from mipi10 raw format to RAW16 format + // mipi10 opaque raw is stored in the format of: + // P3(1:0) P2(1:0) P1(1:0) P0(1:0) P3(9:2) P2(9:2) P1(9:2) P0(9:2) + // 4 pixels occupy 5 bytes, no padding needed + + QCamera3Stream *stream = getStreamByIndex(0); + if (stream != NULL) { + cam_dimension_t dim; + memset(&dim, 0, sizeof(dim)); + stream->getFrameDimension(dim); + + cam_frame_len_offset_t offset; + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + stream->getFrameOffset(offset); + + uint32_t raw16_stride = ((uint32_t)dim.width + 15U) & ~15U; + uint16_t* raw16_buffer = (uint16_t *)frame->buffer; + + // In-place format conversion. 
+ // Raw16 format always occupy more memory than opaque raw10. + // Convert to Raw16 by iterating through all pixels from bottom-right + // to top-left of the image. + // One special notes: + // 1. Cross-platform raw16's stride is 16 pixels. + // 2. mipi raw10's stride is 4 pixels, and aligned to 16 bytes. + for (int32_t ys = dim.height - 1; ys >= 0; ys--) { + uint32_t y = (uint32_t)ys; + uint8_t* row_start = (uint8_t *)frame->buffer + + y * (uint32_t)offset.mp[0].stride_in_bytes; + for (int32_t xs = dim.width - 1; xs >= 0; xs--) { + uint32_t x = (uint32_t)xs; + uint8_t upper_8bit = row_start[5*(x/4)+x%4]; + uint8_t lower_2bit = ((row_start[5*(x/4)+4] >> (x%4)) & 0x3); + uint16_t raw16_pixel = + (uint16_t)(((uint16_t)upper_8bit)<<2 | + (uint16_t)lower_2bit); + raw16_buffer[y*raw16_stride+x] = raw16_pixel; + } + } + } else { + LOGE("Could not find stream"); + } + +} + +/*=========================================================================== + * FUNCTION : getReprocessType + * + * DESCRIPTION: get the type of reprocess output supported by this channel + * + * PARAMETERS : NONE + * + * RETURN : reprocess_type_t : type of reprocess + *==========================================================================*/ +reprocess_type_t QCamera3RawChannel::getReprocessType() +{ + return REPROCESS_TYPE_RAW; +} + + +/*************************************************************************************/ +// RAW Dump Channel related functions + +/*=========================================================================== + * FUNCTION : QCamera3RawDumpChannel + * + * DESCRIPTION: Constructor for RawDumpChannel + * + * PARAMETERS : + * @cam_handle : Handle for Camera + * @cam_ops : Function pointer table + * @rawDumpSize : Dimensions for the Raw stream + * @paddinginfo : Padding information for stream + * @userData : Cookie for parent + * @pp mask : PP feature mask for this stream + * @numBuffers : number of max dequeued buffers + * + * RETURN : NA + 
*==========================================================================*/ +QCamera3RawDumpChannel::QCamera3RawDumpChannel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + cam_dimension_t rawDumpSize, + cam_padding_info_t *paddingInfo, + void *userData, + cam_feature_mask_t postprocess_mask, uint32_t numBuffers) : + QCamera3Channel(cam_handle, channel_handle, cam_ops, NULL, + paddingInfo, postprocess_mask, + userData, numBuffers), + mDim(rawDumpSize), + mMemory(NULL) +{ + char prop[PROPERTY_VALUE_MAX]; + property_get("persist.camera.raw.dump", prop, "0"); + mRawDump = atoi(prop); +} + +/*=========================================================================== + * FUNCTION : QCamera3RawDumpChannel + * + * DESCRIPTION: Destructor for RawDumpChannel + * + * PARAMETERS : + * + * RETURN : NA + *==========================================================================*/ + +QCamera3RawDumpChannel::~QCamera3RawDumpChannel() +{ + destroy(); +} + +/*=========================================================================== + * FUNCTION : dumpRawSnapshot + * + * DESCRIPTION: Helper function to dump Raw frames + * + * PARAMETERS : + * @frame : stream buf frame to be dumped + * + * RETURN : NA + *==========================================================================*/ +void QCamera3RawDumpChannel::dumpRawSnapshot(mm_camera_buf_def_t *frame) +{ + QCamera3Stream *stream = getStreamByIndex(0); + if (stream != NULL) { + char buf[FILENAME_MAX]; + struct timeval tv; + struct tm timeinfo_data; + struct tm *timeinfo; + + cam_dimension_t dim; + memset(&dim, 0, sizeof(dim)); + stream->getFrameDimension(dim); + + cam_frame_len_offset_t offset; + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + stream->getFrameOffset(offset); + + gettimeofday(&tv, NULL); + timeinfo = localtime_r(&tv.tv_sec, &timeinfo_data); + + if (NULL != timeinfo) { + memset(buf, 0, sizeof(buf)); + snprintf(buf, sizeof(buf), + QCAMERA_DUMP_FRM_LOCATION + 
"%04d-%02d-%02d-%02d-%02d-%02d-%06ld_%d_%dx%d.raw", + timeinfo->tm_year + 1900, timeinfo->tm_mon + 1, + timeinfo->tm_mday, timeinfo->tm_hour, + timeinfo->tm_min, timeinfo->tm_sec,tv.tv_usec, + frame->frame_idx, dim.width, dim.height); + + int file_fd = open(buf, O_RDWR| O_CREAT, 0777); + if (file_fd >= 0) { + ssize_t written_len = + write(file_fd, frame->buffer, offset.frame_len); + LOGD("written number of bytes %zd", written_len); + close(file_fd); + } else { + LOGE("failed to open file to dump image"); + } + } else { + LOGE("localtime_r() error"); + } + } else { + LOGE("Could not find stream"); + } + +} + +/*=========================================================================== + * FUNCTION : streamCbRoutine + * + * DESCRIPTION: Callback routine invoked for each frame generated for + * Rawdump channel + * + * PARAMETERS : + * @super_frame : stream buf frame generated + * @stream : Underlying Stream object cookie + * + * RETURN : NA + *==========================================================================*/ +void QCamera3RawDumpChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame, + __unused QCamera3Stream *stream) +{ + LOGD("E"); + if (super_frame == NULL || super_frame->num_bufs != 1) { + LOGE("super_frame is not valid"); + return; + } + + if (mRawDump) + dumpRawSnapshot(super_frame->bufs[0]); + + bufDone(super_frame); + free(super_frame); +} + +/*=========================================================================== + * FUNCTION : getStreamBufs + * + * DESCRIPTION: Callback function provided to interface to get buffers. 
+ * + * PARAMETERS : + * @len : Length of each buffer to be allocated + * + * RETURN : NULL on buffer allocation failure + * QCamera3StreamMem object on sucess + *==========================================================================*/ +QCamera3StreamMem* QCamera3RawDumpChannel::getStreamBufs(uint32_t len) +{ + int rc; + mMemory = new QCamera3StreamMem(mNumBuffers); + + if (!mMemory) { + LOGE("unable to create heap memory"); + return NULL; + } + rc = mMemory->allocateAll((size_t)len); + if (rc < 0) { + LOGE("unable to allocate heap memory"); + delete mMemory; + mMemory = NULL; + return NULL; + } + return mMemory; +} + +/*=========================================================================== + * FUNCTION : putStreamBufs + * + * DESCRIPTION: Callback function provided to interface to return buffers. + * Although no handles are actually returned, implicitl assumption + * that interface will no longer use buffers and channel can + * deallocated if necessary. + * + * PARAMETERS : NA + * + * RETURN : NA + *==========================================================================*/ +void QCamera3RawDumpChannel::putStreamBufs() +{ + mMemory->deallocate(); + delete mMemory; + mMemory = NULL; +} + +/*=========================================================================== + * FUNCTION : request + * + * DESCRIPTION: Request function used as trigger + * + * PARAMETERS : + * @recvd_frame : buffer- this will be NULL since this is internal channel + * @frameNumber : Undefined again since this is internal stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3RawDumpChannel::request(buffer_handle_t * /*buffer*/, + uint32_t /*frameNumber*/) +{ + if (!m_bIsActive) { + return QCamera3Channel::start(); + } + else + return 0; +} + +/*=========================================================================== + * FUNCTION : intialize + 
* + * DESCRIPTION: Initializes channel params and creates underlying stream + * + * PARAMETERS : + * @isType : type of image stabilization required on this stream + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3RawDumpChannel::initialize(cam_is_type_t isType) +{ + int32_t rc; + + mIsType = isType; + rc = QCamera3Channel::addStream(CAM_STREAM_TYPE_RAW, + CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG, mDim, ROTATE_0, (uint8_t)mNumBuffers, + mPostProcMask, mIsType); + if (rc < 0) { + LOGE("addStream failed"); + } + return rc; +} +/*************************************************************************************/ + +/* QCamera3YUVChannel methods */ + +/*=========================================================================== + * FUNCTION : QCamera3YUVChannel + * + * DESCRIPTION: constructor of QCamera3YUVChannel + * + * PARAMETERS : + * @cam_handle : camera handle + * @cam_ops : ptr to camera ops table + * @cb_routine : callback routine to frame aggregator + * @paddingInfo : padding information for the stream + * @stream : camera3_stream_t structure + * @stream_type: Channel stream type + * @postprocess_mask: the postprocess mask for streams of this channel + * @metadataChannel: handle to the metadataChannel + * RETURN : none + *==========================================================================*/ +QCamera3YUVChannel::QCamera3YUVChannel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + void *userData, + camera3_stream_t *stream, + cam_stream_type_t stream_type, + cam_feature_mask_t postprocess_mask, + QCamera3Channel *metadataChannel) : + QCamera3ProcessingChannel(cam_handle, channel_handle, cam_ops, + cb_routine, paddingInfo, userData, stream, stream_type, + postprocess_mask, metadataChannel) +{ + + mBypass = (postprocess_mask == 
CAM_QCOM_FEATURE_NONE); + mFrameLen = 0; + mEdgeMode.edge_mode = CAM_EDGE_MODE_OFF; + mEdgeMode.sharpness = 0; + mNoiseRedMode = CAM_NOISE_REDUCTION_MODE_OFF; + memset(&mCropRegion, 0, sizeof(mCropRegion)); +} + +/*=========================================================================== + * FUNCTION : ~QCamera3YUVChannel + * + * DESCRIPTION: destructor of QCamera3YUVChannel + * + * PARAMETERS : none + * + * + * RETURN : none + *==========================================================================*/ +QCamera3YUVChannel::~QCamera3YUVChannel() +{ + // Deallocation of heap buffers allocated in mMemory is freed + // automatically by its destructor +} + +/*=========================================================================== + * FUNCTION : initialize + * + * DESCRIPTION: Initialize and add camera channel & stream + * + * PARAMETERS : + * @isType : the image stabilization type + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3YUVChannel::initialize(cam_is_type_t isType) +{ + ATRACE_CALL(); + int32_t rc = NO_ERROR; + cam_dimension_t streamDim; + + if (NULL == mCamera3Stream) { + LOGE("Camera stream uninitialized"); + return NO_INIT; + } + + if (1 <= m_numStreams) { + // Only one stream per channel supported in v3 Hal + return NO_ERROR; + } + + mIsType = isType; + mStreamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_CALLBACK); + streamDim.width = mCamera3Stream->width; + streamDim.height = mCamera3Stream->height; + + rc = QCamera3Channel::addStream(mStreamType, + mStreamFormat, + streamDim, + ROTATE_0, + mNumBufs, + mPostProcMask, + mIsType); + if (rc < 0) { + LOGE("addStream failed"); + return rc; + } + + cam_stream_buf_plane_info_t buf_planes; + cam_padding_info_t paddingInfo = mPaddingInfo; + + memset(&buf_planes, 0, sizeof(buf_planes)); + //to ensure a big enough buffer size set the height and width + //padding to 
max(height padding, width padding) + paddingInfo.width_padding = MAX(paddingInfo.width_padding, paddingInfo.height_padding); + paddingInfo.height_padding = paddingInfo.width_padding; + + rc = mm_stream_calc_offset_snapshot(mStreamFormat, &streamDim, &paddingInfo, + &buf_planes); + if (rc < 0) { + LOGE("mm_stream_calc_offset_preview failed"); + return rc; + } + + mFrameLen = buf_planes.plane_info.frame_len; + + if (NO_ERROR != rc) { + LOGE("Initialize failed, rc = %d", rc); + return rc; + } + + /* initialize offline meta memory for input reprocess */ + rc = QCamera3ProcessingChannel::initialize(isType); + if (NO_ERROR != rc) { + LOGE("Processing Channel initialize failed, rc = %d", + rc); + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : request + * + * DESCRIPTION: entry function for a request on a YUV stream. This function + * has the logic to service a request based on its type + * + * PARAMETERS : + * @buffer : pointer to the output buffer + * @frameNumber : frame number of the request + * @pInputBuffer : pointer to input buffer if an input request + * @metadata : parameters associated with the request + * + * RETURN : 0 on a success start of capture + * -EINVAL on invalid input + * -ENODEV on serious error + *==========================================================================*/ +int32_t QCamera3YUVChannel::request(buffer_handle_t *buffer, + uint32_t frameNumber, + camera3_stream_buffer_t* pInputBuffer, + metadata_buffer_t* metadata, bool &needMetadata) +{ + int32_t rc = NO_ERROR; + Mutex::Autolock lock(mOfflinePpLock); + + LOGD("pInputBuffer is %p frame number %d", pInputBuffer, frameNumber); + if (NULL == buffer || NULL == metadata) { + LOGE("Invalid buffer/metadata in channel request"); + return BAD_VALUE; + } + + PpInfo ppInfo; + memset(&ppInfo, 0, sizeof(ppInfo)); + ppInfo.frameNumber = frameNumber; + ppInfo.offlinePpFlag = false; + if (mBypass && !pInputBuffer ) { + 
ppInfo.offlinePpFlag = needsFramePostprocessing(metadata); + ppInfo.output = buffer; + mOfflinePpInfoList.push_back(ppInfo); + } + + LOGD("offlinePpFlag is %d", ppInfo.offlinePpFlag); + needMetadata = ppInfo.offlinePpFlag; + if (!ppInfo.offlinePpFlag) { + // regular request + return QCamera3ProcessingChannel::request(buffer, frameNumber, + pInputBuffer, metadata); + } else { + if(!m_bIsActive) { + rc = start(); + if (NO_ERROR != rc) + return rc; + } else { + LOGD("Request on an existing stream"); + } + + //we need to send this frame through the CPP + //Allocate heap memory, then buf done on the buffer + uint32_t bufIdx; + if (mFreeHeapBufferList.empty()) { + rc = mMemory.allocateOne(mFrameLen); + if (rc < 0) { + LOGE("Failed allocating heap buffer. Fatal"); + return BAD_VALUE; + } else { + bufIdx = (uint32_t)rc; + } + } else { + bufIdx = *(mFreeHeapBufferList.begin()); + mFreeHeapBufferList.erase(mFreeHeapBufferList.begin()); + } + + /* Configure and start postproc if necessary */ + reprocess_config_t reproc_cfg; + cam_dimension_t dim; + memset(&reproc_cfg, 0, sizeof(reprocess_config_t)); + memset(&dim, 0, sizeof(dim)); + mStreams[0]->getFrameDimension(dim); + setReprocConfig(reproc_cfg, NULL, metadata, mStreamFormat, dim); + + // Start postprocessor without input buffer + startPostProc(reproc_cfg); + + LOGD("erasing %d", bufIdx); + + mMemory.markFrameNumber(bufIdx, frameNumber); + mStreams[0]->bufDone(bufIdx); + + } + return rc; +} + +/*=========================================================================== + * FUNCTION : streamCbRoutine + * + * DESCRIPTION: + * + * PARAMETERS : + * @super_frame : the super frame with filled buffer + * @stream : stream on which the buffer was requested and filled + * + * RETURN : none + *==========================================================================*/ +void QCamera3YUVChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream) +{ + ATRACE_CALL(); + uint8_t frameIndex; + int32_t 
resultFrameNumber; + + if (checkStreamCbErrors(super_frame, stream) != NO_ERROR) { + LOGE("Error with the stream callback"); + return; + } + + frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx; + if(frameIndex >= mNumBufs) { + LOGE("Error, Invalid index for buffer"); + stream->bufDone(frameIndex); + return; + } + + if (mBypass) { + List<PpInfo>::iterator ppInfo; + + Mutex::Autolock lock(mOfflinePpLock); + resultFrameNumber = mMemory.getFrameNumber(frameIndex); + for (ppInfo = mOfflinePpInfoList.begin(); + ppInfo != mOfflinePpInfoList.end(); ppInfo++) { + if (ppInfo->frameNumber == (uint32_t)resultFrameNumber) { + break; + } + } + LOGD("frame index %d, frame number %d", frameIndex, resultFrameNumber); + //check the reprocessing required flag against the frame number + if (ppInfo == mOfflinePpInfoList.end()) { + LOGE("Error, request for frame number is a reprocess."); + stream->bufDone(frameIndex); + return; + } + + if (ppInfo->offlinePpFlag) { + mm_camera_super_buf_t *frame = + (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t)); + if (frame == NULL) { + LOGE("Error allocating memory to save received_frame structure."); + if(stream) { + stream->bufDone(frameIndex); + } + return; + } + + *frame = *super_frame; + m_postprocessor.processData(frame, ppInfo->output, resultFrameNumber); + free(super_frame); + return; + } else { + if (ppInfo != mOfflinePpInfoList.begin()) { + // There is pending reprocess buffer, cache current buffer + if (ppInfo->callback_buffer != NULL) { + LOGE("Fatal: cached callback_buffer is already present"); + } + ppInfo->callback_buffer = super_frame; + return; + } else { + mOfflinePpInfoList.erase(ppInfo); + } + } + } + + QCamera3ProcessingChannel::streamCbRoutine(super_frame, stream); + return; +} + +/*=========================================================================== + * FUNCTION : reprocessCbRoutine + * + * DESCRIPTION: callback function for the reprocessed frame. This frame now + * should be returned to the framework. 
This same callback is + * used during input reprocessing or offline postprocessing + * + * PARAMETERS : + * @resultBuffer : buffer containing the reprocessed data + * @resultFrameNumber : frame number on which the buffer was requested + * + * RETURN : NONE + * + *==========================================================================*/ +void QCamera3YUVChannel::reprocessCbRoutine(buffer_handle_t *resultBuffer, + uint32_t resultFrameNumber) +{ + LOGD("E: frame number %d", resultFrameNumber); + Vector<mm_camera_super_buf_t *> pendingCbs; + + /* release the input buffer and input metadata buffer if used */ + if (0 > mMemory.getHeapBufferIndex(resultFrameNumber)) { + /* mOfflineMemory and mOfflineMetaMemory used only for input reprocessing */ + int32_t rc = releaseOfflineMemory(resultFrameNumber); + if (NO_ERROR != rc) { + LOGE("Error releasing offline memory rc = %d", rc); + } + /* Since reprocessing is done, send the callback to release the input buffer */ + if (mChannelCB) { + mChannelCB(NULL, NULL, resultFrameNumber, true, mUserData); + } + } + + if (mBypass) { + int32_t rc = handleOfflinePpCallback(resultFrameNumber, pendingCbs); + if (rc != NO_ERROR) { + return; + } + } + + issueChannelCb(resultBuffer, resultFrameNumber); + + // Call all pending callbacks to return buffers + for (size_t i = 0; i < pendingCbs.size(); i++) { + QCamera3ProcessingChannel::streamCbRoutine( + pendingCbs[i], mStreams[0]); + } + +} + +/*=========================================================================== + * FUNCTION : needsFramePostprocessing + * + * DESCRIPTION: + * + * PARAMETERS : + * + * RETURN : + * TRUE if frame needs to be postprocessed + * FALSE is frame does not need to be postprocessed + * + *==========================================================================*/ +bool QCamera3YUVChannel::needsFramePostprocessing(metadata_buffer_t *meta) +{ + bool ppNeeded = false; + + //sharpness + IF_META_AVAILABLE(cam_edge_application_t, edgeMode, + CAM_INTF_META_EDGE_MODE, 
meta) { + mEdgeMode = *edgeMode; + } + + //wnr + IF_META_AVAILABLE(uint32_t, noiseRedMode, + CAM_INTF_META_NOISE_REDUCTION_MODE, meta) { + mNoiseRedMode = *noiseRedMode; + } + + //crop region + IF_META_AVAILABLE(cam_crop_region_t, scalerCropRegion, + CAM_INTF_META_SCALER_CROP_REGION, meta) { + mCropRegion = *scalerCropRegion; + } + + if ((CAM_EDGE_MODE_OFF != mEdgeMode.edge_mode) && + (CAM_EDGE_MODE_ZERO_SHUTTER_LAG != mEdgeMode.edge_mode)) { + ppNeeded = true; + } + if ((CAM_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG != mNoiseRedMode) && + (CAM_NOISE_REDUCTION_MODE_OFF != mNoiseRedMode) && + (CAM_NOISE_REDUCTION_MODE_MINIMAL != mNoiseRedMode)) { + ppNeeded = true; + } + if ((mCropRegion.width < (int32_t)mCamera3Stream->width) || + (mCropRegion.height < (int32_t)mCamera3Stream->height)) { + ppNeeded = true; + } + + return ppNeeded; +} + +/*=========================================================================== + * FUNCTION : handleOfflinePpCallback + * + * DESCRIPTION: callback function for the reprocessed frame from offline + * postprocessing. 
+ * + * PARAMETERS : + * @resultFrameNumber : frame number on which the buffer was requested + * @pendingCbs : pending buffers to be returned first + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3YUVChannel::handleOfflinePpCallback(uint32_t resultFrameNumber, + Vector<mm_camera_super_buf_t *>& pendingCbs) +{ + Mutex::Autolock lock(mOfflinePpLock); + List<PpInfo>::iterator ppInfo; + + for (ppInfo = mOfflinePpInfoList.begin(); + ppInfo != mOfflinePpInfoList.end(); ppInfo++) { + if (ppInfo->frameNumber == resultFrameNumber) { + break; + } + } + + if (ppInfo == mOfflinePpInfoList.end()) { + LOGI("Request of frame number %d is reprocessing", + resultFrameNumber); + return NO_ERROR; + } else if (ppInfo != mOfflinePpInfoList.begin()) { + LOGE("callback for frame number %d should be head of list", + resultFrameNumber); + return BAD_VALUE; + } + + if (ppInfo->offlinePpFlag) { + // Need to get the input buffer frame index from the + // mMemory object and add that to the free heap buffers list. + int32_t bufferIndex = + mMemory.getHeapBufferIndex(resultFrameNumber); + if (bufferIndex < 0) { + LOGE("Fatal %d: no buffer index for frame number %d", + bufferIndex, resultFrameNumber); + return BAD_VALUE; + } + mFreeHeapBufferList.push_back(bufferIndex); + ppInfo = mOfflinePpInfoList.erase(ppInfo); + + // Return pending buffer callbacks + while (ppInfo != mOfflinePpInfoList.end() && + !ppInfo->offlinePpFlag && ppInfo->callback_buffer) { + + // Call stream callbacks for cached buffers + pendingCbs.push_back(ppInfo->callback_buffer); + + ppInfo = mOfflinePpInfoList.erase(ppInfo); + } + + } else { + LOGE("Fatal: request of frame number %d doesn't need" + " offline postprocessing. 
However there is" + " reprocessing callback.", + resultFrameNumber); + return BAD_VALUE; + } + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : getReprocessType + * + * DESCRIPTION: get the type of reprocess output supported by this channel + * + * PARAMETERS : NONE + * + * RETURN : reprocess_type_t : type of reprocess + *==========================================================================*/ +reprocess_type_t QCamera3YUVChannel::getReprocessType() +{ + return REPROCESS_TYPE_YUV; +} + +/* QCamera3PicChannel methods */ + +/*=========================================================================== + * FUNCTION : jpegEvtHandle + * + * DESCRIPTION: Function registerd to mm-jpeg-interface to handle jpeg events. + Construct result payload and call mChannelCb to deliver buffer + to framework. + * + * PARAMETERS : + * @status : status of jpeg job + * @client_hdl: jpeg client handle + * @jobId : jpeg job Id + * @p_ouput : ptr to jpeg output result struct + * @userdata : user data ptr + * + * RETURN : none + *==========================================================================*/ +void QCamera3PicChannel::jpegEvtHandle(jpeg_job_status_t status, + uint32_t /*client_hdl*/, + uint32_t jobId, + mm_jpeg_output_t *p_output, + void *userdata) +{ + ATRACE_CALL(); + buffer_handle_t *resultBuffer = NULL; + buffer_handle_t *jpegBufferHandle = NULL; + int resultStatus = CAMERA3_BUFFER_STATUS_OK; + camera3_stream_buffer_t result; + camera3_jpeg_blob_t jpegHeader; + + KPI_ATRACE_INT("SNAPSHOT", 0); + QCamera3PicChannel *obj = (QCamera3PicChannel *)userdata; + if (obj) { + //Construct payload for process_capture_result. 
Call mChannelCb + + qcamera_hal3_jpeg_data_t *job = obj->m_postprocessor.findJpegJobByJobId(jobId); + + if ((job == NULL) || (status == JPEG_JOB_STATUS_ERROR)) { + LOGE("Error in jobId: (%d) with status: %d", jobId, status); + resultStatus = CAMERA3_BUFFER_STATUS_ERROR; + } + + if (NULL != job) { + uint32_t bufIdx = (uint32_t)job->jpeg_settings->out_buf_index; + LOGD("jpeg out_buf_index: %d", bufIdx); + + //Construct jpeg transient header of type camera3_jpeg_blob_t + //Append at the end of jpeg image of buf_filled_len size + + jpegHeader.jpeg_blob_id = CAMERA3_JPEG_BLOB_ID; + if (JPEG_JOB_STATUS_DONE == status) { + jpegHeader.jpeg_size = (uint32_t)p_output->buf_filled_len; + char* jpeg_buf = (char *)p_output->buf_vaddr; + + ssize_t maxJpegSize = -1; + + // Gralloc buffer may have additional padding for 4K page size + // Follow size guidelines based on spec since framework relies + // on that to reach end of buffer and with it the header + + //Handle same as resultBuffer, but for readablity + jpegBufferHandle = + (buffer_handle_t *)obj->mMemory.getBufferHandle(bufIdx); + + if (NULL != jpegBufferHandle) { + maxJpegSize = ((private_handle_t*)(*jpegBufferHandle))->width; + if (maxJpegSize > obj->mMemory.getSize(bufIdx)) { + maxJpegSize = obj->mMemory.getSize(bufIdx); + } + + size_t jpeg_eof_offset = + (size_t)(maxJpegSize - (ssize_t)sizeof(jpegHeader)); + char *jpeg_eof = &jpeg_buf[jpeg_eof_offset]; + memcpy(jpeg_eof, &jpegHeader, sizeof(jpegHeader)); + obj->mMemory.cleanInvalidateCache(bufIdx); + } else { + LOGE("JPEG buffer not found and index: %d", + bufIdx); + resultStatus = CAMERA3_BUFFER_STATUS_ERROR; + } + } + + ////Use below data to issue framework callback + resultBuffer = + (buffer_handle_t *)obj->mMemory.getBufferHandle(bufIdx); + int32_t resultFrameNumber = obj->mMemory.getFrameNumber(bufIdx); + int32_t rc = obj->mMemory.unregisterBuffer(bufIdx); + if (NO_ERROR != rc) { + LOGE("Error %d unregistering stream buffer %d", + rc, bufIdx); + } + + result.stream 
= obj->mCamera3Stream; + result.buffer = resultBuffer; + result.status = resultStatus; + result.acquire_fence = -1; + result.release_fence = -1; + + // Release any snapshot buffers before calling + // the user callback. The callback can potentially + // unblock pending requests to snapshot stream. + int32_t snapshotIdx = -1; + mm_camera_super_buf_t* src_frame = NULL; + + if (job->src_reproc_frame) + src_frame = job->src_reproc_frame; + else + src_frame = job->src_frame; + + if (src_frame) { + if (obj->mStreams[0]->getMyHandle() == + src_frame->bufs[0]->stream_id) { + snapshotIdx = (int32_t)src_frame->bufs[0]->buf_idx; + } else { + LOGE("Snapshot stream id %d and source frame %d don't match!", + obj->mStreams[0]->getMyHandle(), + src_frame->bufs[0]->stream_id); + } + } + if (0 <= snapshotIdx) { + Mutex::Autolock lock(obj->mFreeBuffersLock); + obj->mFreeBufferList.push_back((uint32_t)snapshotIdx); + } else { + LOGE("Snapshot buffer not found!"); + } + + LOGI("Issue Jpeg Callback frameNumber = %d status = %d", + resultFrameNumber, resultStatus); + if (obj->mChannelCB) { + obj->mChannelCB(NULL, + &result, + (uint32_t)resultFrameNumber, + false, + obj->mUserData); + } + + // release internal data for jpeg job + if ((NULL != job->fwk_frame) || (NULL != job->fwk_src_buffer)) { + /* unregister offline input buffer */ + int32_t inputBufIndex = + obj->mOfflineMemory.getGrallocBufferIndex((uint32_t)resultFrameNumber); + if (0 <= inputBufIndex) { + rc = obj->mOfflineMemory.unregisterBuffer(inputBufIndex); + } else { + LOGE("could not find the input buf index, frame number %d", + resultFrameNumber); + } + if (NO_ERROR != rc) { + LOGE("Error %d unregistering input buffer %d", + rc, bufIdx); + } + + /* unregister offline meta buffer */ + int32_t metaBufIndex = + obj->mOfflineMetaMemory.getHeapBufferIndex((uint32_t)resultFrameNumber); + if (0 <= metaBufIndex) { + Mutex::Autolock lock(obj->mFreeOfflineMetaBuffersLock); + 
obj->mFreeOfflineMetaBuffersList.push_back((uint32_t)metaBufIndex); + } else { + LOGE("could not find the input meta buf index, frame number %d", + resultFrameNumber); + } + } + obj->m_postprocessor.releaseOfflineBuffers(false); + obj->m_postprocessor.releaseJpegJobData(job); + free(job); + } + + return; + // } + } else { + LOGE("Null userdata in jpeg callback"); + } +} + +QCamera3PicChannel::QCamera3PicChannel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + void *userData, + camera3_stream_t *stream, + cam_feature_mask_t postprocess_mask, + bool is4KVideo, + bool isInputStreamConfigured, + QCamera3Channel *metadataChannel, + uint32_t numBuffers) : + QCamera3ProcessingChannel(cam_handle, channel_handle, + cam_ops, cb_routine, paddingInfo, userData, + stream, CAM_STREAM_TYPE_SNAPSHOT, + postprocess_mask, metadataChannel, numBuffers), + mNumSnapshotBufs(0), + mInputBufferHint(isInputStreamConfigured), + mYuvMemory(NULL), + mFrameLen(0) +{ + QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)mUserData; + m_max_pic_dim = hal_obj->calcMaxJpegDim(); + mYuvWidth = stream->width; + mYuvHeight = stream->height; + mStreamType = CAM_STREAM_TYPE_SNAPSHOT; + // Use same pixelformat for 4K video case + mStreamFormat = is4KVideo ? + getStreamDefaultFormat(CAM_STREAM_TYPE_VIDEO) + :getStreamDefaultFormat(CAM_STREAM_TYPE_SNAPSHOT); + int32_t rc = m_postprocessor.initJpeg(jpegEvtHandle, &m_max_pic_dim, this); + if (rc != 0) { + LOGE("Init Postprocessor failed"); + } +} + +/*=========================================================================== + * FUNCTION : flush + * + * DESCRIPTION: flush pic channel, which will stop all processing within, including + * the reprocessing channel in postprocessor and YUV stream. 
+ * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3PicChannel::flush() +{ + int32_t rc = NO_ERROR; + if(!m_bIsActive) { + LOGE("Attempt to flush inactive channel"); + return NO_INIT; + } + + rc = m_postprocessor.flush(); + if (rc == 0) { + LOGE("Postprocessor flush failed, rc = %d", rc); + return rc; + } + + if (0 < mOfflineMetaMemory.getCnt()) { + mOfflineMetaMemory.deallocate(); + } + if (0 < mOfflineMemory.getCnt()) { + mOfflineMemory.unregisterBuffers(); + } + Mutex::Autolock lock(mFreeBuffersLock); + mFreeBufferList.clear(); + for (uint32_t i = 0; i < mCamera3Stream->max_buffers; i++) { + mFreeBufferList.push_back(i); + } + return rc; +} + + +QCamera3PicChannel::~QCamera3PicChannel() +{ +} + +int32_t QCamera3PicChannel::initialize(cam_is_type_t isType) +{ + int32_t rc = NO_ERROR; + cam_dimension_t streamDim; + cam_stream_type_t streamType; + cam_format_t streamFormat; + + if (NULL == mCamera3Stream) { + LOGE("Camera stream uninitialized"); + return NO_INIT; + } + + if (1 <= m_numStreams) { + // Only one stream per channel supported in v3 Hal + return NO_ERROR; + } + + mIsType = isType; + streamType = mStreamType; + streamFormat = mStreamFormat; + streamDim.width = (int32_t)mYuvWidth; + streamDim.height = (int32_t)mYuvHeight; + + mNumSnapshotBufs = mCamera3Stream->max_buffers; + rc = QCamera3Channel::addStream(streamType, streamFormat, streamDim, + ROTATE_0, (uint8_t)mCamera3Stream->max_buffers, mPostProcMask, + mIsType); + + if (NO_ERROR != rc) { + LOGE("Initialize failed, rc = %d", rc); + return rc; + } + + /* initialize offline meta memory for input reprocess */ + rc = QCamera3ProcessingChannel::initialize(isType); + if (NO_ERROR != rc) { + LOGE("Processing Channel initialize failed, rc = %d", + rc); + } + + return rc; +} + 
/*===========================================================================
 * FUNCTION   : request
 *
 * DESCRIPTION: handle the request - either with an input buffer or a direct
 *              output request
 *
 * PARAMETERS :
 * @buffer       : pointer to the output buffer
 * @frameNumber  : frame number of the request
 * @pInputBuffer : pointer to input buffer if an input request
 * @metadata     : parameters associated with the request
 *
 * RETURN     : 0 on a success start of capture
 *              -EINVAL on invalid input
 *              -ENODEV on serious error
 *==========================================================================*/
int32_t QCamera3PicChannel::request(buffer_handle_t *buffer,
        uint32_t frameNumber,
        camera3_stream_buffer_t *pInputBuffer,
        metadata_buffer_t *metadata)
{
    ATRACE_CALL();
    //FIX ME: Return buffer back in case of failures below.

    int32_t rc = NO_ERROR;

    reprocess_config_t reproc_cfg;
    cam_dimension_t dim;
    memset(&reproc_cfg, 0, sizeof(reprocess_config_t));
    //make sure to set the correct input stream dim in case of YUV size override
    //and recalculate the plane info
    dim.width = (int32_t)mYuvWidth;
    dim.height = (int32_t)mYuvHeight;
    setReprocConfig(reproc_cfg, pInputBuffer, metadata, mStreamFormat, dim);

    // Picture stream has already been started before any request comes in
    if (!m_bIsActive) {
        LOGE("Channel not started!!");
        return NO_INIT;
    }

    int index = mMemory.getMatchBufIndex((void*)buffer);

    if(index < 0) {
        // First time this gralloc buffer is seen: register it on the fly,
        // then look it up again.
        rc = registerBuffer(buffer, mIsType);
        if (NO_ERROR != rc) {
            LOGE("On-the-fly buffer registration failed %d",
                     rc);
            return rc;
        }

        index = mMemory.getMatchBufIndex((void*)buffer);
        if (index < 0) {
            LOGE("Could not find object among registered buffers");
            return DEAD_OBJECT;
        }
    }
    LOGD("buffer index %d, frameNumber: %u", index, frameNumber);

    rc = mMemory.markFrameNumber((uint32_t)index, frameNumber);

    // Start postprocessor
    startPostProc(reproc_cfg);

    // Queue jpeg settings
    rc = queueJpegSetting((uint32_t)index, metadata);

    if (pInputBuffer == NULL) {
        // Direct output request: queue a heap YUV buffer to the stream to
        // receive the capture (allocated lazily if the free list is empty).
        Mutex::Autolock lock(mFreeBuffersLock);
        uint32_t bufIdx;
        if (mFreeBufferList.empty()) {
            rc = mYuvMemory->allocateOne(mFrameLen);
            if (rc < 0) {
                LOGE("Failed to allocate heap buffer. Fatal");
                return rc;
            } else {
                // allocateOne returns the new buffer's index on success
                bufIdx = (uint32_t)rc;
            }
        } else {
            List<uint32_t>::iterator it = mFreeBufferList.begin();
            bufIdx = *it;
            mFreeBufferList.erase(it);
        }
        mYuvMemory->markFrameNumber(bufIdx, frameNumber);
        mStreams[0]->bufDone(bufIdx);
    } else {
        // Input (reprocess) request: wrap the framework input buffer and
        // metadata and hand them straight to the postprocessor.
        qcamera_fwk_input_pp_data_t *src_frame = NULL;
        src_frame = (qcamera_fwk_input_pp_data_t *)calloc(1,
                sizeof(qcamera_fwk_input_pp_data_t));
        if (src_frame == NULL) {
            LOGE("No memory for src frame");
            return NO_MEMORY;
        }
        rc = setFwkInputPPData(src_frame, pInputBuffer, &reproc_cfg, metadata,
                NULL /*fwk output buffer*/, frameNumber);
        if (NO_ERROR != rc) {
            LOGE("Error %d while setting framework input PP data", rc);
            free(src_frame);
            return rc;
        }
        LOGH("Post-process started");
        m_postprocessor.processData(src_frame);
    }
    return rc;
}



/*===========================================================================
 * FUNCTION   : dataNotifyCB
 *
 * DESCRIPTION: Channel Level callback used for super buffer data notify.
+ * This function is registered with mm-camera-interface to handle + * data notify + * + * PARAMETERS : + * @recvd_frame : stream frame received + * userdata : user data ptr + * + * RETURN : none + *==========================================================================*/ +void QCamera3PicChannel::dataNotifyCB(mm_camera_super_buf_t *recvd_frame, + void *userdata) +{ + ATRACE_CALL(); + LOGD("E\n"); + QCamera3PicChannel *channel = (QCamera3PicChannel *)userdata; + + if (channel == NULL) { + LOGE("invalid channel pointer"); + return; + } + + if(channel->m_numStreams != 1) { + LOGE("Error: Bug: This callback assumes one stream per channel"); + return; + } + + + if(channel->mStreams[0] == NULL) { + LOGE("Error: Invalid Stream object"); + return; + } + + channel->QCamera3PicChannel::streamCbRoutine(recvd_frame, channel->mStreams[0]); + + LOGD("X\n"); + return; +} + +/*=========================================================================== + * FUNCTION : streamCbRoutine + * + * DESCRIPTION: + * + * PARAMETERS : + * @super_frame : the super frame with filled buffer + * @stream : stream on which the buffer was requested and filled + * + * RETURN : none + *==========================================================================*/ +void QCamera3PicChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream) +{ + ATRACE_CALL(); + //TODO + //Used only for getting YUV. Jpeg callback will be sent back from channel + //directly to HWI. Refer to func jpegEvtHandle + + //Got the yuv callback. 
Calling yuv callback handler in PostProc + uint8_t frameIndex; + mm_camera_super_buf_t* frame = NULL; + cam_dimension_t dim; + cam_frame_len_offset_t offset; + + memset(&dim, 0, sizeof(dim)); + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + + if (checkStreamCbErrors(super_frame, stream) != NO_ERROR) { + LOGE("Error with the stream callback"); + return; + } + + frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx; + LOGD("recvd buf_idx: %u for further processing", + (uint32_t)frameIndex); + if(frameIndex >= mNumSnapshotBufs) { + LOGE("Error, Invalid index for buffer"); + if(stream) { + Mutex::Autolock lock(mFreeBuffersLock); + mFreeBufferList.push_back(frameIndex); + stream->bufDone(frameIndex); + } + return; + } + + frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t)); + if (frame == NULL) { + LOGE("Error allocating memory to save received_frame structure."); + if(stream) { + Mutex::Autolock lock(mFreeBuffersLock); + mFreeBufferList.push_back(frameIndex); + stream->bufDone(frameIndex); + } + return; + } + *frame = *super_frame; + stream->getFrameDimension(dim); + stream->getFrameOffset(offset); + dumpYUV(frame->bufs[0], dim, offset, QCAMERA_DUMP_FRM_INPUT_REPROCESS); + + m_postprocessor.processData(frame); + free(super_frame); + return; +} + +QCamera3StreamMem* QCamera3PicChannel::getStreamBufs(uint32_t len) +{ + mYuvMemory = new QCamera3StreamMem(mCamera3Stream->max_buffers, false); + if (!mYuvMemory) { + LOGE("unable to create metadata memory"); + return NULL; + } + mFrameLen = len; + + return mYuvMemory; +} + +void QCamera3PicChannel::putStreamBufs() +{ + QCamera3ProcessingChannel::putStreamBufs(); + + mYuvMemory->deallocate(); + delete mYuvMemory; + mYuvMemory = NULL; + mFreeBufferList.clear(); +} + +int32_t QCamera3PicChannel::queueJpegSetting(uint32_t index, metadata_buffer_t *metadata) +{ + QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)mUserData; + jpeg_settings_t *settings = + (jpeg_settings_t 
*)malloc(sizeof(jpeg_settings_t)); + + if (!settings) { + LOGE("out of memory allocating jpeg_settings"); + return -ENOMEM; + } + + memset(settings, 0, sizeof(jpeg_settings_t)); + + settings->out_buf_index = index; + + settings->jpeg_orientation = 0; + IF_META_AVAILABLE(int32_t, orientation, CAM_INTF_META_JPEG_ORIENTATION, metadata) { + settings->jpeg_orientation = *orientation; + } + + settings->jpeg_quality = 85; + IF_META_AVAILABLE(uint32_t, quality1, CAM_INTF_META_JPEG_QUALITY, metadata) { + settings->jpeg_quality = (uint8_t) *quality1; + } + + IF_META_AVAILABLE(uint32_t, quality2, CAM_INTF_META_JPEG_THUMB_QUALITY, metadata) { + settings->jpeg_thumb_quality = (uint8_t) *quality2; + } + + IF_META_AVAILABLE(cam_dimension_t, dimension, CAM_INTF_META_JPEG_THUMB_SIZE, metadata) { + settings->thumbnail_size = *dimension; + } + + settings->gps_timestamp_valid = 0; + IF_META_AVAILABLE(int64_t, timestamp, CAM_INTF_META_JPEG_GPS_TIMESTAMP, metadata) { + settings->gps_timestamp = *timestamp; + settings->gps_timestamp_valid = 1; + } + + settings->gps_coordinates_valid = 0; + IF_META_AVAILABLE(double, coordinates, CAM_INTF_META_JPEG_GPS_COORDINATES, metadata) { + memcpy(settings->gps_coordinates, coordinates, 3*sizeof(double)); + settings->gps_coordinates_valid = 1; + } + + IF_META_AVAILABLE(uint8_t, proc_methods, CAM_INTF_META_JPEG_GPS_PROC_METHODS, metadata) { + memset(settings->gps_processing_method, 0, + sizeof(settings->gps_processing_method)); + strlcpy(settings->gps_processing_method, (const char *)proc_methods, + sizeof(settings->gps_processing_method)); + } + + // Image description + const char *eepromVersion = hal_obj->getEepromVersionInfo(); + const uint32_t *ldafCalib = hal_obj->getLdafCalib(); + if ((eepromVersion && strlen(eepromVersion)) || + ldafCalib) { + int len = 0; + settings->image_desc_valid = true; + if (eepromVersion && strlen(eepromVersion)) { + len = snprintf(settings->image_desc, sizeof(settings->image_desc), + "M:%s ", eepromVersion); + } + if 
(ldafCalib) { + snprintf(settings->image_desc + len, + sizeof(settings->image_desc) - len, "L:%u-%u", + ldafCalib[0], ldafCalib[1]); + } + } + + return m_postprocessor.processJpegSettingData(settings); +} + + +void QCamera3PicChannel::overrideYuvSize(uint32_t width, uint32_t height) +{ + mYuvWidth = width; + mYuvHeight = height; +} + +/*=========================================================================== + * FUNCTION : getReprocessType + * + * DESCRIPTION: get the type of reprocess output supported by this channel + * + * PARAMETERS : NONE + * + * RETURN : reprocess_type_t : type of reprocess + *==========================================================================*/ +reprocess_type_t QCamera3PicChannel::getReprocessType() +{ + /* a picture channel could either use the postprocessor for reprocess+jpeg + or only for reprocess */ + reprocess_type_t expectedReprocess; + if (mPostProcMask == CAM_QCOM_FEATURE_NONE || mInputBufferHint) { + expectedReprocess = REPROCESS_TYPE_JPEG; + } else { + expectedReprocess = REPROCESS_TYPE_NONE; + } + LOGH("expectedReprocess from Pic Channel is %d", expectedReprocess); + return expectedReprocess; +} + + +/*=========================================================================== + * FUNCTION : QCamera3ReprocessChannel + * + * DESCRIPTION: constructor of QCamera3ReprocessChannel + * + * PARAMETERS : + * @cam_handle : camera handle + * @cam_ops : ptr to camera ops table + * @pp_mask : post-proccess feature mask + * + * RETURN : none + *==========================================================================*/ +QCamera3ReprocessChannel::QCamera3ReprocessChannel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t *paddingInfo, + cam_feature_mask_t postprocess_mask, + void *userData, void *ch_hdl) : + /* In case of framework reprocessing, pproc and jpeg operations could be + * parallelized by allowing 1 extra buffer for reprocessing output: + * 
ch_hdl->getNumBuffers() + 1 */ + QCamera3Channel(cam_handle, channel_handle, cam_ops, cb_routine, paddingInfo, + postprocess_mask, userData, + ((QCamera3ProcessingChannel *)ch_hdl)->getNumBuffers() + + (MAX_REPROCESS_PIPELINE_STAGES - 1)), + inputChHandle(ch_hdl), + mOfflineBuffersIndex(-1), + mFrameLen(0), + mReprocessType(REPROCESS_TYPE_NONE), + m_pSrcChannel(NULL), + m_pMetaChannel(NULL), + mMemory(NULL), + mGrallocMemory(0), + mReprocessPerfMode(false) +{ + memset(mSrcStreamHandles, 0, sizeof(mSrcStreamHandles)); + mOfflineBuffersIndex = mNumBuffers -1; + mOfflineMetaIndex = (int32_t) (2*mNumBuffers -1); +} + + +/*=========================================================================== + * FUNCTION : QCamera3ReprocessChannel + * + * DESCRIPTION: constructor of QCamera3ReprocessChannel + * + * PARAMETERS : + * @cam_handle : camera handle + * @cam_ops : ptr to camera ops table + * @pp_mask : post-proccess feature mask + * + * RETURN : none + *==========================================================================*/ +int32_t QCamera3ReprocessChannel::initialize(cam_is_type_t isType) +{ + int32_t rc = NO_ERROR; + mm_camera_channel_attr_t attr; + + memset(&attr, 0, sizeof(mm_camera_channel_attr_t)); + attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS; + attr.max_unmatched_frames = 1; + + m_handle = m_camOps->add_channel(m_camHandle, + &attr, + NULL, + this); + if (m_handle == 0) { + LOGE("Add channel failed"); + return UNKNOWN_ERROR; + } + + mIsType = isType; + return rc; +} + +/*=========================================================================== + * FUNCTION : registerBuffer + * + * DESCRIPTION: register streaming buffer to the channel object + * + * PARAMETERS : + * @buffer : buffer to be registered + * @isType : the image stabilization type for the buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t 
QCamera3ReprocessChannel::registerBuffer(buffer_handle_t *buffer, + cam_is_type_t isType) +{ + ATRACE_CALL(); + int rc = 0; + mIsType = isType; + cam_stream_type_t streamType; + + if (buffer == NULL) { + LOGE("Error: Cannot register a NULL buffer"); + return BAD_VALUE; + } + + if ((uint32_t)mGrallocMemory.getCnt() > (mNumBuffers - 1)) { + LOGE("Trying to register more buffers than initially requested"); + return BAD_VALUE; + } + + if (0 == m_numStreams) { + rc = initialize(mIsType); + if (rc != NO_ERROR) { + LOGE("Couldn't initialize camera stream %d", + rc); + return rc; + } + } + + streamType = mStreams[0]->getMyType(); + rc = mGrallocMemory.registerBuffer(buffer, streamType); + if (ALREADY_EXISTS == rc) { + return NO_ERROR; + } else if (NO_ERROR != rc) { + LOGE("Buffer %p couldn't be registered %d", buffer, rc); + return rc; + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : QCamera3ReprocessChannel + * + * DESCRIPTION: constructor of QCamera3ReprocessChannel + * + * PARAMETERS : + * @cam_handle : camera handle + * @cam_ops : ptr to camera ops table + * @pp_mask : post-proccess feature mask + * + * RETURN : none + *==========================================================================*/ +void QCamera3ReprocessChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream) +{ + //Got the pproc data callback. 
Now send to jpeg encoding + uint8_t frameIndex; + uint32_t resultFrameNumber; + mm_camera_super_buf_t* frame = NULL; + QCamera3ProcessingChannel *obj = (QCamera3ProcessingChannel *)inputChHandle; + cam_dimension_t dim; + cam_frame_len_offset_t offset; + + memset(&dim, 0, sizeof(dim)); + memset(&offset, 0, sizeof(cam_frame_len_offset_t)); + if(!super_frame) { + LOGE("Invalid Super buffer"); + return; + } + + if(super_frame->num_bufs != 1) { + LOGE("Multiple streams are not supported"); + return; + } + if(super_frame->bufs[0] == NULL ) { + LOGE("Error, Super buffer frame does not contain valid buffer"); + return; + } + frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx; + + + if (mReprocessType == REPROCESS_TYPE_JPEG) { + resultFrameNumber = mMemory->getFrameNumber(frameIndex); + frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t)); + if (frame == NULL) { + LOGE("Error allocating memory to save received_frame structure."); + if(stream) { + stream->bufDone(frameIndex); + } + return; + } + LOGI("bufIndex: %u recvd from post proc", + (uint32_t)frameIndex); + *frame = *super_frame; + + stream->getFrameDimension(dim); + stream->getFrameOffset(offset); + dumpYUV(frame->bufs[0], dim, offset, QCAMERA_DUMP_FRM_SNAPSHOT); + /* Since reprocessing is done, send the callback to release the input buffer */ + if (mChannelCB) { + mChannelCB(NULL, NULL, resultFrameNumber, true, mUserData); + } + obj->m_postprocessor.processPPData(frame); + } else { + buffer_handle_t *resultBuffer; + frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx; + resultBuffer = (buffer_handle_t *)mGrallocMemory.getBufferHandle(frameIndex); + resultFrameNumber = mGrallocMemory.getFrameNumber(frameIndex); + int32_t rc = stream->bufRelease(frameIndex); + if (NO_ERROR != rc) { + LOGE("Error %d releasing stream buffer %d", + rc, frameIndex); + } + rc = mGrallocMemory.unregisterBuffer(frameIndex); + if (NO_ERROR != rc) { + LOGE("Error %d unregistering stream buffer %d", + rc, frameIndex); + } + 
obj->reprocessCbRoutine(resultBuffer, resultFrameNumber); + + obj->m_postprocessor.releaseOfflineBuffers(false); + qcamera_hal3_pp_data_t *pp_job = obj->m_postprocessor.dequeuePPJob(resultFrameNumber); + if (pp_job != NULL) { + obj->m_postprocessor.releasePPJobData(pp_job); + } + free(pp_job); + resetToCamPerfNormal(resultFrameNumber); + } + free(super_frame); + return; +} + +/*=========================================================================== + * FUNCTION : resetToCamPerfNormal + * + * DESCRIPTION: Set the perf mode to normal if all the priority frames + * have been reprocessed + * + * PARAMETERS : + * @frameNumber: Frame number of the reprocess completed frame + * + * RETURN : QCamera3StreamMem * + *==========================================================================*/ +int32_t QCamera3ReprocessChannel::resetToCamPerfNormal(uint32_t frameNumber) +{ + int32_t rc = NO_ERROR; + bool resetToPerfNormal = false; + { + Mutex::Autolock lock(mPriorityFramesLock); + /* remove the priority frame number from the list */ + for (size_t i = 0; i < mPriorityFrames.size(); i++) { + if (mPriorityFrames[i] == frameNumber) { + mPriorityFrames.removeAt(i); + } + } + /* reset the perf mode if pending priority frame list is empty */ + if (mReprocessPerfMode && mPriorityFrames.empty()) { + resetToPerfNormal = true; + } + } + if (resetToPerfNormal) { + QCamera3Stream *pStream = mStreams[0]; + cam_stream_parm_buffer_t param; + memset(¶m, 0, sizeof(cam_stream_parm_buffer_t)); + + param.type = CAM_STREAM_PARAM_TYPE_REQUEST_OPS_MODE; + param.perf_mode = CAM_PERF_NORMAL; + rc = pStream->setParameter(param); + { + Mutex::Autolock lock(mPriorityFramesLock); + mReprocessPerfMode = false; + } + } + return rc; +} + +/*=========================================================================== + * FUNCTION : getStreamBufs + * + * DESCRIPTION: register the buffers of the reprocess channel + * + * PARAMETERS : none + * + * RETURN : QCamera3StreamMem * + 
*==========================================================================*/ +QCamera3StreamMem* QCamera3ReprocessChannel::getStreamBufs(uint32_t len) +{ + if (mReprocessType == REPROCESS_TYPE_JPEG) { + mMemory = new QCamera3StreamMem(mNumBuffers, false); + if (!mMemory) { + LOGE("unable to create reproc memory"); + return NULL; + } + mFrameLen = len; + return mMemory; + } + return &mGrallocMemory; +} + +/*=========================================================================== + * FUNCTION : putStreamBufs + * + * DESCRIPTION: release the reprocess channel buffers + * + * PARAMETERS : none + * + * RETURN : + *==========================================================================*/ +void QCamera3ReprocessChannel::putStreamBufs() +{ + if (mReprocessType == REPROCESS_TYPE_JPEG) { + mMemory->deallocate(); + delete mMemory; + mMemory = NULL; + mFreeBufferList.clear(); + } else { + mGrallocMemory.unregisterBuffers(); + } +} + +/*=========================================================================== + * FUNCTION : ~QCamera3ReprocessChannel + * + * DESCRIPTION: destructor of QCamera3ReprocessChannel + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +QCamera3ReprocessChannel::~QCamera3ReprocessChannel() +{ + destroy(); + + if (m_handle) { + m_camOps->delete_channel(m_camHandle, m_handle); + LOGD("deleting channel %d", m_handle); + m_handle = 0; + } +} + +/*=========================================================================== + * FUNCTION : start + * + * DESCRIPTION: start reprocess channel. 
 *
 * PARAMETERS :
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ReprocessChannel::start()
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;

    rc = QCamera3Channel::start();

    if (rc == NO_ERROR) {
        rc = m_camOps->start_channel(m_camHandle, m_handle);

        // Check failure
        if (rc != NO_ERROR) {
            LOGE("start_channel failed %d", rc);
            // Roll back the base-class start so the channel state stays consistent
            QCamera3Channel::stop();
        }
    }
    return rc;
}

/*===========================================================================
 * FUNCTION   : stop
 *
 * DESCRIPTION: stop reprocess channel.
 *
 * PARAMETERS : none
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ReprocessChannel::stop()
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;

    rc = QCamera3Channel::stop();
    // Unmap all remaining offline input/meta buffers before stopping the channel
    unmapOfflineBuffers(true);

    rc |= m_camOps->stop_channel(m_camHandle, m_handle);

    return rc;
}

/*===========================================================================
 * FUNCTION   : getStreamBySrcHandle
 *
 * DESCRIPTION: find reprocess stream by its source stream handle
 *
 * PARAMETERS :
 *   @srcHandle : source stream handle
 *
 * RETURN     : ptr to reprocess stream if found. NULL if not found
 *==========================================================================*/
QCamera3Stream * QCamera3ReprocessChannel::getStreamBySrcHandle(uint32_t srcHandle)
{
    QCamera3Stream *pStream = NULL;

    for (uint32_t i = 0; i < m_numStreams; i++) {
        if (mSrcStreamHandles[i] == srcHandle) {
            pStream = mStreams[i];
            break;
        }
    }
    return pStream;
}

/*===========================================================================
 * FUNCTION   : getSrcStreamBySrcHandle
 *
 * DESCRIPTION: find source stream by source stream handle
 *
 * PARAMETERS :
 *   @srcHandle : source stream handle
 *
 * RETURN     : ptr to reprocess stream if found. NULL if not found
 *==========================================================================*/
QCamera3Stream * QCamera3ReprocessChannel::getSrcStreamBySrcHandle(uint32_t srcHandle)
{
    QCamera3Stream *pStream = NULL;

    if (NULL == m_pSrcChannel) {
        return NULL;
    }

    for (uint32_t i = 0; i < m_numStreams; i++) {
        if (mSrcStreamHandles[i] == srcHandle) {
            pStream = m_pSrcChannel->getStreamByIndex(i);
            break;
        }
    }
    return pStream;
}

/*===========================================================================
 * FUNCTION   : unmapOfflineBuffers
 *
 * DESCRIPTION: Unmaps offline buffers
 *
 * PARAMETERS :
 *   @all : true to unmap every mapped offline input/meta buffer,
 *          false to unmap only the oldest one of each kind
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ReprocessChannel::unmapOfflineBuffers(bool all)
{
    int rc = NO_ERROR;
    if (!mOfflineBuffers.empty()) {
        QCamera3Stream *stream = NULL;
        List<OfflineBuffer>::iterator it = mOfflineBuffers.begin();
        for (; it != mOfflineBuffers.end(); it++) {
            stream = (*it).stream;
            if (NULL != stream) {
                rc = stream->unmapBuf((*it).type,
                                      (*it).index,
                                      -1);
                if (NO_ERROR != rc) {
                    LOGE("Error during offline buffer unmap %d",
                             rc);
                }
                LOGD("Unmapped buffer with index %d", (*it).index);
            }
            if (!all) {
                // Only the first (oldest) entry is unmapped; break keeps the
                // iterator valid after erase
                mOfflineBuffers.erase(it);
                break;
            }
        }
        if (all) {
            mOfflineBuffers.clear();
        }
    }

    if (!mOfflineMetaBuffers.empty()) {
        QCamera3Stream *stream = NULL;
        List<OfflineBuffer>::iterator it = mOfflineMetaBuffers.begin();
        for (; it != mOfflineMetaBuffers.end(); it++) {
            stream = (*it).stream;
            if (NULL != stream) {
                rc = stream->unmapBuf((*it).type,
                                      (*it).index,
                                      -1);
                if (NO_ERROR != rc) {
                    LOGE("Error during offline buffer unmap %d",
                             rc);
                }
                LOGD("Unmapped meta buffer with index %d", (*it).index);
            }
            if (!all) {
                mOfflineMetaBuffers.erase(it);
                break;
            }
        }
        if (all) {
            mOfflineMetaBuffers.clear();
        }
    }
    return rc;
}

/*===========================================================================
 * FUNCTION   : bufDone
 *
 * DESCRIPTION: Return reprocess stream buffer to free buffer list.
 *              Note that this function doesn't queue buffer back to kernel.
 *              It's up to doReprocessOffline to do that instead.
 * PARAMETERS :
 *   @recvd_frame  : stream buf frame to be returned
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ReprocessChannel::bufDone(mm_camera_super_buf_t *recvd_frame)
{
    int rc = NO_ERROR;
    if (recvd_frame && recvd_frame->num_bufs == 1) {
        Mutex::Autolock lock(mFreeBuffersLock);
        uint32_t buf_idx = recvd_frame->bufs[0]->buf_idx;
        mFreeBufferList.push_back(buf_idx);

    } else {
        LOGE("Fatal. Not supposed to be here");
        rc = BAD_VALUE;
    }

    return rc;
}

/*===========================================================================
 * FUNCTION   : overrideMetadata
 *
 * DESCRIPTION: Override metadata entry such as rotation, crop, and CDS info.
 *
 * PARAMETERS :
 *   @frame     : input frame from source stream
 *   meta_buffer: metadata buffer
 *   @metadata  : corresponding metadata
 *   @fwk_frame :
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3ReprocessChannel::overrideMetadata(qcamera_hal3_pp_buffer_t *pp_buffer,
        mm_camera_buf_def_t *meta_buffer, jpeg_settings_t *jpeg_settings,
        qcamera_fwk_input_pp_data_t &fwk_frame)
{
    int32_t rc = NO_ERROR;
    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)mUserData;
    if ((NULL == meta_buffer) || (NULL == pp_buffer) || (NULL == pp_buffer->input) ||
            (NULL == hal_obj)) {
        return BAD_VALUE;
    }

    metadata_buffer_t *meta = (metadata_buffer_t *)meta_buffer->buffer;
    mm_camera_super_buf_t *frame = pp_buffer->input;
    if (NULL == meta) {
        return BAD_VALUE;
    }

    // Process only the first input buffer that maps to a known
    // reprocess/source stream pair (the loop breaks after handling it)
    for (uint32_t i = 0; i < frame->num_bufs; i++) {
        QCamera3Stream *pStream = getStreamBySrcHandle(frame->bufs[i]->stream_id);
        QCamera3Stream *pSrcStream = getSrcStreamBySrcHandle(frame->bufs[i]->stream_id);

        if (pStream != NULL && pSrcStream != NULL) {
            if (jpeg_settings) {
                // Find rotation info for reprocess stream
                cam_rotation_info_t rotation_info;
                memset(&rotation_info, 0, sizeof(rotation_info));
                if (jpeg_settings->jpeg_orientation == 0) {
                    rotation_info.rotation = ROTATE_0;
                } else if (jpeg_settings->jpeg_orientation == 90) {
                    rotation_info.rotation = ROTATE_90;
                } else if (jpeg_settings->jpeg_orientation == 180) {
                    rotation_info.rotation = ROTATE_180;
                } else if (jpeg_settings->jpeg_orientation == 270) {
                    rotation_info.rotation = ROTATE_270;
                }
                rotation_info.streamId = mStreams[0]->getMyServerID();
                ADD_SET_PARAM_ENTRY_TO_BATCH(meta, CAM_INTF_PARM_ROTATION, rotation_info);
            }

            // Find and insert crop info for reprocess stream
            IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, meta) {
                if (MAX_NUM_STREAMS > crop_data->num_of_streams) {
                    for (int j = 0; j < crop_data->num_of_streams; j++) {
                        if (crop_data->crop_info[j].stream_id ==
                                pSrcStream->getMyServerID()) {

                            // Store crop/roi information for offline reprocess
                            // in the reprocess stream slot
                            crop_data->crop_info[crop_data->num_of_streams].crop =
                                    crop_data->crop_info[j].crop;
                            crop_data->crop_info[crop_data->num_of_streams].roi_map =
                                    crop_data->crop_info[j].roi_map;
                            crop_data->crop_info[crop_data->num_of_streams].stream_id =
                                    mStreams[0]->getMyServerID();
                            crop_data->num_of_streams++;

                            LOGD("Reprocess stream server id: %d",
                                     mStreams[0]->getMyServerID());
                            LOGD("Found offline reprocess crop %dx%d %dx%d",
                                    crop_data->crop_info[j].crop.left,
                                    crop_data->crop_info[j].crop.top,
                                    crop_data->crop_info[j].crop.width,
                                    crop_data->crop_info[j].crop.height);
                            LOGD("Found offline reprocess roimap %dx%d %dx%d",
                                    crop_data->crop_info[j].roi_map.left,
                                    crop_data->crop_info[j].roi_map.top,
                                    crop_data->crop_info[j].roi_map.width,
                                    crop_data->crop_info[j].roi_map.height);

                            break;
                        }
                    }
                } else {
                    LOGE("No space to add reprocess stream crop/roi information");
                }
            }

            IF_META_AVAILABLE(cam_cds_data_t, cdsInfo, CAM_INTF_META_CDS_DATA, meta) {
                uint8_t cnt = cdsInfo->num_of_streams;
                if (cnt <= MAX_NUM_STREAMS) {
                    cam_stream_cds_info_t repro_cds_info;
                    memset(&repro_cds_info, 0, sizeof(repro_cds_info));
                    repro_cds_info.stream_id = mStreams[0]->getMyServerID();
                    // NOTE(review): this inner 'i' shadows the outer buffer
                    // loop index; harmless today because the outer loop breaks
                    // right after this section, but worth renaming.
                    for (size_t i = 0; i < cnt; i++) {
                        if (cdsInfo->cds_info[i].stream_id ==
                                pSrcStream->getMyServerID()) {
                            repro_cds_info.cds_enable =
                                    cdsInfo->cds_info[i].cds_enable;
                            break;
                        }
                    }
                    // Replace the whole CDS list with a single entry for the
                    // reprocess stream
                    cdsInfo->num_of_streams = 1;
                    cdsInfo->cds_info[0] = repro_cds_info;
                } else {
                    LOGE("No space to add reprocess stream cds information");
                }
            }

            fwk_frame.input_buffer = *frame->bufs[i];
            fwk_frame.metadata_buffer = *meta_buffer;
            fwk_frame.output_buffer = pp_buffer->output;
            break;
        } else {
            LOGE("Source/Re-process streams are invalid");
            rc |= BAD_VALUE;
        }
    }

    return rc;
}

/*===========================================================================
* FUNCTION : overrideFwkMetadata
*
* DESCRIPTION: Override frameworks metadata such as rotation, crop, and CDS data.
*
* PARAMETERS :
* @frame : input frame for reprocessing
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* none-zero failure code
*==========================================================================*/
int32_t QCamera3ReprocessChannel::overrideFwkMetadata(
        qcamera_fwk_input_pp_data_t *frame)
{
    if (NULL == frame) {
        LOGE("Incorrect input frame");
        return BAD_VALUE;
    }

    if (NULL == frame->metadata_buffer.buffer) {
        LOGE("No metadata available");
        return BAD_VALUE;
    }
    metadata_buffer_t *meta = (metadata_buffer_t *) frame->metadata_buffer.buffer;

    // Not doing rotation at all for YUV to YUV reprocess
    if (mReprocessType != REPROCESS_TYPE_JPEG) {
        LOGD("Override rotation to 0 for channel reprocess type %d",
                mReprocessType);
        cam_rotation_info_t rotation_info;
        memset(&rotation_info, 0, sizeof(rotation_info));
        rotation_info.rotation = ROTATE_0;
        rotation_info.streamId = mStreams[0]->getMyServerID();
        ADD_SET_PARAM_ENTRY_TO_BATCH(meta, CAM_INTF_PARM_ROTATION, rotation_info);
    }

    // Find and insert crop info for reprocess stream
    IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, meta) {
        if (1 == crop_data->num_of_streams) {
            // Store crop/roi information for offline reprocess
            // in the reprocess stream slot
            crop_data->crop_info[crop_data->num_of_streams].crop =
                    crop_data->crop_info[0].crop;
            crop_data->crop_info[crop_data->num_of_streams].roi_map =
                    crop_data->crop_info[0].roi_map;
            crop_data->crop_info[crop_data->num_of_streams].stream_id =
                    mStreams[0]->getMyServerID();
            crop_data->num_of_streams++;

            LOGD("Reprocess stream server id: %d",
                     mStreams[0]->getMyServerID());
            LOGD("Found offline reprocess crop %dx%d %dx%d",
                    crop_data->crop_info[0].crop.left,
                    crop_data->crop_info[0].crop.top,
                    crop_data->crop_info[0].crop.width,
                    crop_data->crop_info[0].crop.height);
            LOGD("Found offline reprocess roi map %dx%d %dx%d",
                    crop_data->crop_info[0].roi_map.left,
                    crop_data->crop_info[0].roi_map.top,
                    crop_data->crop_info[0].roi_map.width,
                    crop_data->crop_info[0].roi_map.height);
        } else {
            LOGE("Incorrect number of offline crop data entries %d",
                    crop_data->num_of_streams);
            return BAD_VALUE;
        }
    } else {
        LOGW("Crop data not present");
    }

    IF_META_AVAILABLE(cam_cds_data_t, cdsInfo, CAM_INTF_META_CDS_DATA, meta) {
        if (1 == cdsInfo->num_of_streams) {
            cdsInfo->cds_info[0].stream_id = mStreams[0]->getMyServerID();
        } else {
            LOGE("Incorrect number of offline cds info entries %d",
                    cdsInfo->num_of_streams);
            return BAD_VALUE;
        }
    }

    return NO_ERROR;
}

/*===========================================================================
 * FUNCTION   : doReprocessOffline
 *
 * DESCRIPTION: request to do a reprocess on the frame
 *
 * PARAMETERS :
 *   @frame : input frame for reprocessing
 *   @isPriorityFrame: Hint that this frame is of priority, equivalent to
 *                     real time, even though it is processed in offline mechanism
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
 int32_t QCamera3ReprocessChannel::doReprocessOffline(
        qcamera_fwk_input_pp_data_t *frame, bool isPriorityFrame)
{
    int32_t rc = 0;
    int index;
    OfflineBuffer mappedBuffer;

    if (m_numStreams < 1) {
        LOGE("No reprocess stream is created");
        return -1;
    }

    if (NULL == frame) {
        LOGE("Incorrect input frame");
        return BAD_VALUE;
    }

    if (NULL == frame->metadata_buffer.buffer) {
        LOGE("No metadata available");
        return BAD_VALUE;
    }

    if (NULL == frame->input_buffer.buffer) {
        LOGE("No input buffer available");
        return BAD_VALUE;
    }

    if ((0 == m_numStreams) || (NULL == mStreams[0])) {
        LOGE("Reprocess stream not initialized!");
        return NO_INIT;
    }

    QCamera3Stream *pStream = mStreams[0];

    //qbuf the output buffer if it was allocated by the framework
    if (mReprocessType != REPROCESS_TYPE_JPEG && frame->output_buffer != NULL) {
        if(!m_bIsActive) {
            // First request: register the framework buffer and start the channel
            rc = registerBuffer(frame->output_buffer, mIsType);
            if (NO_ERROR != rc) {
                LOGE("On-the-fly buffer registration failed %d",
                         rc);
                return rc;
            }

            rc = start();
            if (NO_ERROR != rc) {
                return rc;
            }
        }
        index = mGrallocMemory.getMatchBufIndex((void*)frame->output_buffer);
        if(index < 0) {
            rc = registerBuffer(frame->output_buffer, mIsType);
            if (NO_ERROR != rc) {
                LOGE("On-the-fly buffer registration failed %d",
                         rc);
                return rc;
            }

            index = mGrallocMemory.getMatchBufIndex((void*)frame->output_buffer);
            if (index < 0) {
                LOGE("Could not find object among registered buffers");
                return DEAD_OBJECT;
            }
        }
        rc = pStream->bufDone(index);
        if(rc != NO_ERROR) {
            LOGE("Failed to Q new buffer to stream");
            return rc;
        }
        rc = mGrallocMemory.markFrameNumber(index, frame->frameNumber);

    } else if (mReprocessType == REPROCESS_TYPE_JPEG) {
        // Heap-backed output: reuse a free buffer or lazily allocate one
        Mutex::Autolock lock(mFreeBuffersLock);
        uint32_t bufIdx;
        if (mFreeBufferList.empty()) {
            rc = mMemory->allocateOne(mFrameLen);
            if (rc < 0) {
                LOGE("Failed allocating heap buffer. Fatal");
                return BAD_VALUE;
            } else {
                bufIdx = (uint32_t)rc;
            }
        } else {
            bufIdx = *(mFreeBufferList.begin());
            mFreeBufferList.erase(mFreeBufferList.begin());
        }

        mMemory->markFrameNumber(bufIdx, frame->frameNumber);
        rc = pStream->bufDone(bufIdx);
        if (rc != NO_ERROR) {
            LOGE("Failed to queue new buffer to stream");
            return rc;
        }
    }

    // Offline input buffers cycle through map slots [0, mNumBuffers)
    int32_t max_idx = (int32_t) (mNumBuffers - 1);
    //loop back the indices if max burst count reached
    if (mOfflineBuffersIndex == max_idx) {
        mOfflineBuffersIndex = -1;
    }
    uint32_t buf_idx = (uint32_t)(mOfflineBuffersIndex + 1);
    rc = pStream->mapBuf(
            CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
            buf_idx, -1,
            frame->input_buffer.fd, frame->input_buffer.frame_len);
    if (NO_ERROR == rc) {
        mappedBuffer.index = buf_idx;
        mappedBuffer.stream = pStream;
        mappedBuffer.type = CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF;
        mOfflineBuffers.push_back(mappedBuffer);
        mOfflineBuffersIndex = (int32_t)buf_idx;
        LOGD("Mapped buffer with index %d", mOfflineBuffersIndex);
    }

    // Offline meta buffers cycle through map slots [mNumBuffers, 2*mNumBuffers)
    max_idx = (int32_t) ((mNumBuffers * 2) - 1);
    //loop back the indices if max burst count reached
    if (mOfflineMetaIndex == max_idx) {
        mOfflineMetaIndex = (int32_t) (mNumBuffers - 1);
    }
    uint32_t meta_buf_idx = (uint32_t)(mOfflineMetaIndex + 1);
    rc |= pStream->mapBuf(
            CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF,
            meta_buf_idx, -1,
            frame->metadata_buffer.fd, frame->metadata_buffer.frame_len);
    if (NO_ERROR == rc) {
        mappedBuffer.index = meta_buf_idx;
        mappedBuffer.stream = pStream;
        mappedBuffer.type = CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF;
        mOfflineMetaBuffers.push_back(mappedBuffer);
        mOfflineMetaIndex = (int32_t)meta_buf_idx;
        LOGD("Mapped meta buffer with index %d", mOfflineMetaIndex);
    }

    if (rc == NO_ERROR) {
        cam_stream_parm_buffer_t param;
        uint32_t numPendingPriorityFrames = 0;

        if(isPriorityFrame && (mReprocessType != REPROCESS_TYPE_JPEG)) {
            Mutex::Autolock lock(mPriorityFramesLock);
            /* read the length before pushing the frame number to check if
             * vector is empty */
            numPendingPriorityFrames = mPriorityFrames.size();
            mPriorityFrames.push(frame->frameNumber);
        }

        if(isPriorityFrame && !numPendingPriorityFrames &&
                (mReprocessType != REPROCESS_TYPE_JPEG)) {
            // First pending priority frame: bump the stream to high perf mode
            memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
            param.type = CAM_STREAM_PARAM_TYPE_REQUEST_OPS_MODE;
            param.perf_mode = CAM_PERF_HIGH_PERFORMANCE;
            rc = pStream->setParameter(param);
            if (rc != NO_ERROR) {
                ALOGE("%s: setParameter for CAM_PERF_HIGH_PERFORMANCE failed",
                        __func__);
            }
            {
                Mutex::Autolock lock(mPriorityFramesLock);
                mReprocessPerfMode = true;
            }
        }

        memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
        param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
        param.reprocess.buf_index = buf_idx;
        param.reprocess.frame_idx = frame->input_buffer.frame_idx;
        param.reprocess.meta_present = 1;
        param.reprocess.meta_buf_index = meta_buf_idx;

        LOGI("Offline reprocessing id = %d buf Id = %d meta index = %d",
                param.reprocess.frame_idx, param.reprocess.buf_index,
                param.reprocess.meta_buf_index);
        rc = pStream->setParameter(param);
        if (rc != NO_ERROR) {
            LOGE("stream setParameter for reprocess failed");
            resetToCamPerfNormal(frame->frameNumber);
        }
    } else {
        LOGE("Input buffer memory map failed: %d", rc);
    }

    return rc;
}

/*===========================================================================
 * FUNCTION   : doReprocess
 *
 * DESCRIPTION: request to do a reprocess on the frame
 *
 * PARAMETERS :
 *   @buf_fd     : fd to the input buffer that needs reprocess
 *   @buf_lenght : length of the input buffer
 *   @ret_val    : result of reprocess.
 *                 Example: Could be faceID in case of register face image.
 *   @meta_frame : metadata frame.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::doReprocess(int buf_fd, size_t buf_length,
+        int32_t &ret_val, mm_camera_super_buf_t *meta_frame)
+{
+    int32_t rc = 0;
+    if (m_numStreams < 1) {
+        LOGE("No reprocess stream is created");
+        return -1;
+    }
+    if (meta_frame == NULL) {
+        LOGE("Did not get corresponding metadata in time");
+        return -1;
+    }
+
+    // The same map slot 0 is used for every stream: the input buffer is
+    // mapped, the reprocess request is issued synchronously via
+    // setParameter(), and the buffer is unmapped again per iteration.
+    uint8_t buf_idx = 0;
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        rc = mStreams[i]->mapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                buf_idx, -1,
+                buf_fd, buf_length);
+
+        if (rc == NO_ERROR) {
+            cam_stream_parm_buffer_t param;
+            memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+            param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+            param.reprocess.buf_index = buf_idx;
+            param.reprocess.meta_present = 1;
+            // NOTE(review): assumes m_pMetaChannel was set by
+            // addReprocStreamsFromSource() before this is called; no NULL
+            // check here.
+            param.reprocess.meta_stream_handle = m_pMetaChannel->mStreams[0]->getMyServerID();
+            param.reprocess.meta_buf_index = meta_frame->bufs[0]->buf_idx;
+
+            // NOTE(review): frame_idx is never assigned on this (online)
+            // path, so the memset above makes this log always print id = 0.
+            LOGI("Online reprocessing id = %d buf Id = %d meta index = %d",
+                    param.reprocess.frame_idx, param.reprocess.buf_index,
+                    param.reprocess.meta_buf_index);
+            rc = mStreams[i]->setParameter(param);
+            if (rc == NO_ERROR) {
+                ret_val = param.reprocess.ret_val;
+            }
+            mStreams[i]->unmapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                    buf_idx, -1);
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addReprocStreamsFromSource
+ *
+ * DESCRIPTION: add reprocess streams from input source channel
+ *
+ * PARAMETERS :
+ *   @config       : pp feature configuration
+ *   @src_config   : source reprocess configuration
+ *   @isType       : type of image stabilization required on this stream
+ *   @pMetaChannel : ptr to metadata channel to get corresp.
 metadata
+ *
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::addReprocStreamsFromSource(cam_pp_feature_config_t &pp_config,
+        const reprocess_config_t &src_config , cam_is_type_t is_type,
+        QCamera3Channel *pMetaChannel)
+{
+    int32_t rc = 0;
+    cam_stream_reproc_config_t reprocess_config;
+    cam_stream_type_t streamType;
+
+    cam_dimension_t streamDim = src_config.output_stream_dim;
+
+    // Remember the source channel's first stream handle so results can be
+    // matched back (see getStreamBySrcHandle / getSrcStreamBySrcHandle).
+    if (NULL != src_config.src_channel) {
+        QCamera3Stream *pSrcStream = src_config.src_channel->getStreamByIndex(0);
+        if (pSrcStream == NULL) {
+            LOGE("source channel doesn't have a stream");
+            return BAD_VALUE;
+        }
+        mSrcStreamHandles[m_numStreams] = pSrcStream->getMyHandle();
+    }
+
+    // Build the offline reprocess configuration from the source stream's
+    // format/dimensions/plane layout.
+    streamType = CAM_STREAM_TYPE_OFFLINE_PROC;
+    reprocess_config.pp_type = CAM_OFFLINE_REPROCESS_TYPE;
+
+    reprocess_config.offline.input_fmt = src_config.stream_format;
+    reprocess_config.offline.input_dim = src_config.input_stream_dim;
+    reprocess_config.offline.input_buf_planes.plane_info =
+            src_config.input_stream_plane_info.plane_info;
+    reprocess_config.offline.num_of_bufs = (uint8_t)mNumBuffers;
+    reprocess_config.offline.input_type = src_config.stream_type;
+
+    reprocess_config.pp_feature_config = pp_config;
+    QCamera3Stream *pStream = new QCamera3Stream(m_camHandle,
+            m_handle,
+            m_camOps,
+            &mPaddingInfo,
+            (QCamera3Channel*)this);
+    if (pStream == NULL) {
+        LOGE("No mem for Stream");
+        return NO_MEMORY;
+    }
+
+    rc = pStream->init(streamType, src_config.stream_format,
+            streamDim, ROTATE_0, &reprocess_config,
+            (uint8_t)mNumBuffers,
+            reprocess_config.pp_feature_config.feature_mask,
+            is_type,
+            0,/* batchSize */
+            QCamera3Channel::streamCbRoutine, this);
+
+    if (rc == 0) {
+        mStreams[m_numStreams] = pStream;
+        m_numStreams++;
+    } else {
+        LOGE("failed to create reprocess stream");
+        delete pStream;
+    }
+
+    if (rc == NO_ERROR) {
+        m_pSrcChannel = src_config.src_channel;
+        m_pMetaChannel = pMetaChannel;
+        mReprocessType = src_config.reprocess_type;
+        LOGD("mReprocessType is %d", mReprocessType);
+    }
+    // NOTE(review): this super-buffer request is issued unconditionally,
+    // even when pStream->init() above failed (rc != 0).
+    mm_camera_req_buf_t buf;
+    memset(&buf, 0x0, sizeof(buf));
+    buf.type = MM_CAMERA_REQ_SUPER_BUF;
+    buf.num_buf_requested = 1;
+    if(m_camOps->request_super_buf(m_camHandle,m_handle, &buf) < 0) {
+        LOGE("Request for super buffer failed");
+    }
+    return rc;
+}
+
+/* QCamera3SupportChannel methods */
+
+cam_dimension_t QCamera3SupportChannel::kDim = {640, 480};
+
+/* Constructor: caches stream type/format/dimension for the internal
+ * (HAL-consumption-only) stream; no callback routine is registered with the
+ * base channel (NULL cb_routine). */
+QCamera3SupportChannel::QCamera3SupportChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_padding_info_t *paddingInfo,
+                    cam_feature_mask_t postprocess_mask,
+                    cam_stream_type_t streamType,
+                    cam_dimension_t *dim,
+                    cam_format_t streamFormat,
+                    uint8_t hw_analysis_supported,
+                    cam_color_filter_arrangement_t color_arrangement,
+                    void *userData, uint32_t numBuffers) :
+                        QCamera3Channel(cam_handle, channel_handle, cam_ops,
+                                NULL, paddingInfo, postprocess_mask,
+                                userData, numBuffers),
+                        mMemory(NULL)
+{
+    memcpy(&mDim, dim, sizeof(cam_dimension_t));
+    mStreamType = streamType;
+    mStreamFormat = streamFormat;
+   // Make Analysis same as Preview format
+   if (!hw_analysis_supported && mStreamType == CAM_STREAM_TYPE_ANALYSIS &&
+           color_arrangement != CAM_FILTER_ARRANGEMENT_Y) {
+       mStreamFormat = getStreamDefaultFormat(CAM_STREAM_TYPE_PREVIEW);
+   }
+}
+
+/* Destructor: tears down the channel and releases the internally allocated
+ * stream memory, if any. */
+QCamera3SupportChannel::~QCamera3SupportChannel()
+{
+    destroy();
+
+    if (mMemory) {
+        mMemory->deallocate();
+        delete mMemory;
+        mMemory = NULL;
+    }
+}
+
+/* Adds the single internal stream; returns negative on failure. */
+int32_t QCamera3SupportChannel::initialize(cam_is_type_t isType)
+{
+    int32_t rc;
+
+    if (mMemory || m_numStreams > 0) {
+        // NOTE(review): message says "metadata channel" but this is the
+        // support channel (copied log string).
+        LOGE("metadata channel already initialized");
+        return -EINVAL;
+    }
+
+    mIsType = isType;
+    rc = QCamera3Channel::addStream(mStreamType,
+            mStreamFormat, mDim, ROTATE_0, MIN_STREAMING_BUFFER_NUM,
+            mPostProcMask, mIsType);
+    if (rc < 0) {
+        LOGE("addStream failed");
+    }
+    return rc;
+}
+
+/* Support channel frames are never returned to the framework, so request()
+ * is a no-op. */
+int32_t QCamera3SupportChannel::request(buffer_handle_t * /*buffer*/,
+                                                uint32_t /*frameNumber*/)
+{
+    return NO_ERROR;
+}
+
+/* Immediately recycles every received frame back to the stream; the frame
+ * content is consumed by the backend only. */
+void QCamera3SupportChannel::streamCbRoutine(
+                        mm_camera_super_buf_t *super_frame,
+                        QCamera3Stream * /*stream*/)
+{
+    if (super_frame == NULL || super_frame->num_bufs != 1) {
+        LOGE("super_frame is not valid");
+        return;
+    }
+    bufDone(super_frame);
+    free(super_frame);
+}
+
+/* Allocates mNumBuffers heap buffers of 'len' bytes for the stream.
+ * Returns NULL on allocation failure. */
+QCamera3StreamMem* QCamera3SupportChannel::getStreamBufs(uint32_t len)
+{
+    int rc;
+    mMemory = new QCamera3StreamMem(mNumBuffers);
+    // NOTE(review): with a throwing operator new this NULL check is dead
+    // code; it only matters under -fno-exceptions builds.
+    if (!mMemory) {
+        LOGE("unable to create heap memory");
+        return NULL;
+    }
+    rc = mMemory->allocateAll(len);
+    if (rc < 0) {
+        LOGE("unable to allocate heap memory");
+        delete mMemory;
+        mMemory = NULL;
+        return NULL;
+    }
+    return mMemory;
+}
+
+/* Releases the buffers created by getStreamBufs().
+ * NOTE(review): assumes getStreamBufs() succeeded — mMemory is dereferenced
+ * without a NULL check. */
+void QCamera3SupportChannel::putStreamBufs()
+{
+    mMemory->deallocate();
+    delete mMemory;
+    mMemory = NULL;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3Channel.h b/camera/QCamera2/HAL3/QCamera3Channel.h
new file mode 100644
index 0000000..7338c18
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Channel.h
@@ -0,0 +1,628 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __QCAMERA3_CHANNEL_H__ +#define __QCAMERA3_CHANNEL_H__ + +// System dependencies +#include <utils/List.h> +#include <utils/Mutex.h> +#include <utils/Vector.h> +#include "gralloc_priv.h" + +// Camera dependencies +#include "cam_intf.h" +#include "cam_types.h" +#include "camera3.h" +#include "QCamera3HALHeader.h" +#include "QCamera3Mem.h" +#include "QCamera3PostProc.h" +#include "QCamera3Stream.h" +#include "QCamera3StreamMem.h" + +extern "C" { +#include "mm_camera_interface.h" +#include "mm_jpeg_interface.h" +} + +using namespace android; + +#define MIN_STREAMING_BUFFER_NUM 7+11 + +#define QCAMERA_DUMP_FRM_PREVIEW 1 +#define QCAMERA_DUMP_FRM_VIDEO (1<<1) +#define QCAMERA_DUMP_FRM_SNAPSHOT (1<<2) +#define QCAMERA_DUMP_FRM_CALLBACK (1<<3) +#define QCAMERA_DUMP_FRM_INPUT_REPROCESS (1<<6) + +typedef int64_t nsecs_t; + +namespace qcamera { + +typedef void (*channel_cb_routine)(mm_camera_super_buf_t *metadata, + camera3_stream_buffer_t *buffer, + uint32_t frame_number, bool isInputBuffer, + void *userdata); +class QCamera3Channel +{ +public: + QCamera3Channel(uint32_t cam_handle, + uint32_t channel_handle, + mm_camera_ops_t *cam_ops, + channel_cb_routine cb_routine, + cam_padding_info_t 
*paddingInfo, + cam_feature_mask_t postprocess_mask, + void *userData, uint32_t numBuffers); + virtual ~QCamera3Channel(); + + virtual int32_t start(); + virtual int32_t stop(); + virtual int32_t setBatchSize(uint32_t); + virtual int32_t queueBatchBuf(); + virtual int32_t setPerFrameMapUnmap(bool enable); + int32_t bufDone(mm_camera_super_buf_t *recvd_frame); + int32_t setBundleInfo(const cam_bundle_config_t &bundleInfo); + + virtual uint32_t getStreamTypeMask(); + uint32_t getStreamID(uint32_t streamMask); + void destroy(); + virtual int32_t initialize(cam_is_type_t isType) = 0; + virtual int32_t request(buffer_handle_t * /*buffer*/, + uint32_t /*frameNumber*/){ return 0;}; + virtual int32_t request(buffer_handle_t * /*buffer*/, + uint32_t /*frameNumber*/, + camera3_stream_buffer_t* /*pInputBuffer*/, + metadata_buffer_t* /*metadata*/){ return 0;}; + virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream) = 0; + + virtual int32_t registerBuffer(buffer_handle_t *buffer, cam_is_type_t isType) = 0; + virtual QCamera3StreamMem *getStreamBufs(uint32_t len) = 0; + virtual void putStreamBufs() = 0; + virtual int32_t flush(); + + QCamera3Stream *getStreamByHandle(uint32_t streamHandle); + uint32_t getMyHandle() const {return m_handle;}; + uint32_t getNumOfStreams() const {return m_numStreams;}; + uint32_t getNumBuffers() const {return mNumBuffers;}; + QCamera3Stream *getStreamByIndex(uint32_t index); + + static void streamCbRoutine(mm_camera_super_buf_t *super_frame, + QCamera3Stream *stream, void *userdata); + void dumpYUV(mm_camera_buf_def_t *frame, cam_dimension_t dim, + cam_frame_len_offset_t offset, uint8_t name); + bool isUBWCEnabled(); + cam_format_t getStreamDefaultFormat(cam_stream_type_t type); + + void *mUserData; + cam_padding_info_t mPaddingInfo; + QCamera3Stream *mStreams[MAX_STREAM_NUM_IN_BUNDLE]; + uint32_t m_numStreams; +protected: + + int32_t addStream(cam_stream_type_t streamType, + cam_format_t streamFormat, + 
cam_dimension_t streamDim, + cam_rotation_t streamRotation, + uint8_t minStreamBufnum, + cam_feature_mask_t postprocessMask, + cam_is_type_t isType, + uint32_t batchSize = 0); + + int32_t allocateStreamInfoBuf(camera3_stream_t *stream); + + uint32_t m_camHandle; + mm_camera_ops_t *m_camOps; + bool m_bIsActive; + + uint32_t m_handle; + + + mm_camera_buf_notify_t mDataCB; + + + QCamera3HeapMemory *mStreamInfoBuf; + channel_cb_routine mChannelCB; + //cam_padding_info_t *mPaddingInfo; + cam_feature_mask_t mPostProcMask; + uint32_t mYUVDump; + cam_is_type_t mIsType; + uint32_t mNumBuffers; + /* Enable unmapping of buffer before issuing buffer callback. Default value + * for this flag is true and is selectively set to false for the usecases + * such as HFR to avoid any performance hit due to mapping/unmapping */ + bool mPerFrameMapUnmapEnable; + uint32_t frm_num; + uint32_t dumpFrmCnt; + uint32_t skip_mode; + uint32_t mDumpSkipCnt; +}; + +/* QCamera3ProcessingChannel is used to handle all streams that are directly + * generated by hardware and given to frameworks without any postprocessing at HAL. + * It also handles input streams that require reprocessing by hardware and then + * returned to frameworks. 
 */
+class QCamera3ProcessingChannel : public QCamera3Channel
+{
+public:
+   QCamera3ProcessingChannel(uint32_t cam_handle,
+           uint32_t channel_handle,
+           mm_camera_ops_t *cam_ops,
+           channel_cb_routine cb_routine,
+           cam_padding_info_t *paddingInfo,
+           void *userData,
+           camera3_stream_t *stream,
+           cam_stream_type_t stream_type,
+           cam_feature_mask_t postprocess_mask,
+           QCamera3Channel *metadataChannel,
+           uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+
+   ~QCamera3ProcessingChannel();
+
+   virtual int32_t initialize(cam_is_type_t isType);
+   virtual int32_t request(buffer_handle_t *buffer,
+           uint32_t frameNumber,
+           camera3_stream_buffer_t* pInputBuffer,
+           metadata_buffer_t* metadata);
+   virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+           QCamera3Stream *stream);
+   virtual QCamera3StreamMem *getStreamBufs(uint32_t len);
+   virtual void putStreamBufs();
+   virtual int32_t registerBuffer(buffer_handle_t *buffer, cam_is_type_t isType);
+
+   virtual int32_t stop();
+
+   // Each concrete subclass declares how its frames are reprocessed.
+   virtual reprocess_type_t getReprocessType() = 0;
+
+   // Invoked by the postprocessor when an offline reprocess result is ready.
+   virtual void reprocessCbRoutine(buffer_handle_t *resultBuffer,
+           uint32_t resultFrameNumber);
+
+   int32_t queueReprocMetadata(mm_camera_super_buf_t *metadata);
+   int32_t metadataBufDone(mm_camera_super_buf_t *recvd_frame);
+   int32_t translateStreamTypeAndFormat(camera3_stream_t *stream,
+           cam_stream_type_t &streamType,
+           cam_format_t &streamFormat);
+   int32_t setReprocConfig(reprocess_config_t &reproc_cfg,
+           camera3_stream_buffer_t *pInputBuffer,
+           metadata_buffer_t *metadata,
+           cam_format_t streamFormat, cam_dimension_t dim);
+   int32_t setFwkInputPPData(qcamera_fwk_input_pp_data_t *src_frame,
+           camera3_stream_buffer_t *pInputBuffer,
+           reprocess_config_t *reproc_cfg,
+           metadata_buffer_t *metadata,
+           buffer_handle_t *output_buffer,
+           uint32_t frameNumber);
+   int32_t checkStreamCbErrors(mm_camera_super_buf_t *super_frame,
+           QCamera3Stream *stream);
+   int32_t getStreamSize(cam_dimension_t &dim);
+
+   QCamera3PostProcessor m_postprocessor; // post processor
+   void showDebugFPS(int32_t streamType);
+
+protected:
+   uint8_t mDebugFPS;
+   int mFrameCount;
+   int mLastFrameCount;
+   nsecs_t mLastFpsTime;
+   bool isWNREnabled() {return m_bWNROn;};
+   void startPostProc(const reprocess_config_t &reproc_cfg);
+   void issueChannelCb(buffer_handle_t *resultBuffer,
+           uint32_t resultFrameNumber);
+   int32_t releaseOfflineMemory(uint32_t resultFrameNumber);
+
+   QCamera3StreamMem mMemory; //output buffer allocated by fwk
+   camera3_stream_t *mCamera3Stream;
+   uint32_t mNumBufs;
+   cam_stream_type_t mStreamType;
+   cam_format_t mStreamFormat;
+   uint8_t mIntent;
+
+   bool mPostProcStarted;
+   bool mInputBufferConfig;   // Set when the processing channel is configured
+                              // for processing input(framework) buffers
+
+   QCamera3Channel *m_pMetaChannel;
+   mm_camera_super_buf_t *mMetaFrame;
+   QCamera3StreamMem mOfflineMemory;      //reprocessing input buffer
+   QCamera3StreamMem mOfflineMetaMemory;  //reprocessing metadata buffer
+   // Free-list of offline meta buffer indices, guarded by the lock below.
+   List<uint32_t> mFreeOfflineMetaBuffersList;
+   Mutex mFreeOfflineMetaBuffersLock;
+
+private:
+
+   bool m_bWNROn;
+};
+
+/* QCamera3RegularChannel is used to handle all streams that are directly
+ * generated by hardware and given to frameworks without any postprocessing at HAL.
+ * Examples are: all IMPLEMENTATION_DEFINED streams, CPU_READ streams.
 */
+class QCamera3RegularChannel : public QCamera3ProcessingChannel
+{
+public:
+    QCamera3RegularChannel(uint32_t cam_handle,
+            uint32_t channel_handle,
+            mm_camera_ops_t *cam_ops,
+            channel_cb_routine cb_routine,
+            cam_padding_info_t *paddingInfo,
+            void *userData,
+            camera3_stream_t *stream,
+            cam_stream_type_t stream_type,
+            cam_feature_mask_t postprocess_mask,
+            QCamera3Channel *metadataChannel,
+            uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+
+    virtual ~QCamera3RegularChannel();
+
+    virtual int32_t setBatchSize(uint32_t batchSize);
+    virtual uint32_t getStreamTypeMask();
+    virtual int32_t queueBatchBuf();
+    virtual int32_t initialize(cam_is_type_t isType);
+    // Pull in the 4-argument overload from the base class alongside the
+    // 2-argument override below.
+    using QCamera3ProcessingChannel::request;
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    virtual reprocess_type_t getReprocessType();
+
+private:
+    int32_t initialize(struct private_handle_t *priv_handle);
+
+    // Batch size for HFR-style batched capture (0 when batching is unused).
+    uint32_t mBatchSize;
+    cam_rotation_t mRotation;
+};
+
+/* QCamera3MetadataChannel is for metadata stream generated by camera daemon.
 */
+class QCamera3MetadataChannel : public QCamera3Channel
+{
+public:
+    QCamera3MetadataChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    cam_feature_mask_t postprocess_mask,
+                    void *userData,
+                    uint32_t numBuffers = MIN_STREAMING_BUFFER_NUM);
+    virtual ~QCamera3MetadataChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    // Metadata buffers are HAL-internal; framework registration is a no-op.
+    virtual int32_t registerBuffer(buffer_handle_t * /*buffer*/, cam_is_type_t /*isType*/)
+            { return NO_ERROR; };
+
+private:
+    QCamera3StreamMem *mMemory;
+};
+
+/* QCamera3RawChannel is for opaqueu/cross-platform raw stream containing
+ * vendor specific bayer data or 16-bit unpacked bayer data */
+class QCamera3RawChannel : public QCamera3RegularChannel
+{
+public:
+    QCamera3RawChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    camera3_stream_t *stream,
+                    cam_feature_mask_t postprocess_mask,
+                    QCamera3Channel *metadataChannel,
+                    bool raw_16 = false,
+                    uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+
+    virtual ~QCamera3RawChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+
+    virtual reprocess_type_t getReprocessType();
+
+private:
+    bool mRawDump;
+    bool mIsRaw16;   // true when the stream delivers RAW16 (see raw_16 ctor arg)
+
+    void dumpRawSnapshot(mm_camera_buf_def_t *frame);
+    // In-place conversions of the received frame to 16-bit unpacked bayer.
+    void convertLegacyToRaw16(mm_camera_buf_def_t *frame);
+    void convertMipiToRaw16(mm_camera_buf_def_t *frame);
+};
+
+/*
+ * QCamera3RawDumpChannel is for internal use only for Raw dump
+ */
+
+class QCamera3RawDumpChannel : public QCamera3Channel
+{
+public:
+    QCamera3RawDumpChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_dimension_t rawDumpSize,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    cam_feature_mask_t postprocess_mask, uint32_t numBuffers = 3U);
+    virtual ~QCamera3RawDumpChannel();
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    // Internal-only channel; framework buffer registration is a no-op.
+    virtual int32_t registerBuffer(buffer_handle_t * /*buffer*/, cam_is_type_t /*isType*/)
+            { return NO_ERROR; };
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    void dumpRawSnapshot(mm_camera_buf_def_t *frame);
+
+public:
+    cam_dimension_t mDim;
+
+private:
+    bool mRawDump;
+    QCamera3StreamMem *mMemory;
+};
+
+/* QCamera3YUVChannel is used to handle flexible YUV streams that are directly
+ * generated by hardware and given to frameworks without any postprocessing at HAL.
+ * It is also used to handle input buffers that generate YUV outputs */
+class QCamera3YUVChannel : public QCamera3ProcessingChannel
+{
+public:
+    QCamera3YUVChannel(uint32_t cam_handle,
+            uint32_t channel_handle,
+            mm_camera_ops_t *cam_ops,
+            channel_cb_routine cb_routine,
+            cam_padding_info_t *paddingInfo,
+            void *userData,
+            camera3_stream_t *stream,
+            cam_stream_type_t stream_type,
+            cam_feature_mask_t postprocess_mask,
+            QCamera3Channel *metadataChannel);
+    ~QCamera3YUVChannel();
+    virtual int32_t initialize(cam_is_type_t isType);
+    using QCamera3ProcessingChannel::request;
+    // needMetadata is set by the channel to tell the caller whether this
+    // request requires a metadata buffer for offline postprocessing.
+    virtual int32_t request(buffer_handle_t *buffer,
+            uint32_t frameNumber,
+            camera3_stream_buffer_t* pInputBuffer,
+            metadata_buffer_t* metadata, bool &needMetadata);
+    virtual reprocess_type_t getReprocessType();
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+    virtual void putStreamBufs();
+    virtual void reprocessCbRoutine(buffer_handle_t *resultBuffer,
+            uint32_t resultFrameNumber);
+
+private:
+    // Per-request bookkeeping: whether the frame goes through offline PP and
+    // which output/callback buffers belong to it.
+    typedef struct {
+        uint32_t frameNumber;
+        bool offlinePpFlag;
+        buffer_handle_t *output;
+        mm_camera_super_buf_t *callback_buffer;
+    } PpInfo;
+
+    // Whether offline postprocessing is required for this channel
+    bool mBypass;
+    uint32_t mFrameLen;
+
+    // Current edge, noise, and crop region setting
+    cam_edge_application_t mEdgeMode;
+    uint32_t mNoiseRedMode;
+    cam_crop_region_t mCropRegion;
+
+    // Mutex to protect mOfflinePpFlagMap and mFreeHeapBufferList
+    Mutex mOfflinePpLock;
+    // Map between free number and whether the request needs to be
+    // postprocessed.
+    List<PpInfo> mOfflinePpInfoList;
+    // Heap buffer index list
+    List<uint32_t> mFreeHeapBufferList;
+
+private:
+    bool needsFramePostprocessing(metadata_buffer_t* meta);
+    int32_t handleOfflinePpCallback(uint32_t resultFrameNumber,
+            Vector<mm_camera_super_buf_t *>& pendingCbs);
+};
+
+/* QCamera3PicChannel is for JPEG stream, which contains a YUV stream generated
+ * by the hardware, and encoded to a JPEG stream */
+class QCamera3PicChannel : public QCamera3ProcessingChannel
+{
+public:
+    QCamera3PicChannel(uint32_t cam_handle,
+            uint32_t channel_handle,
+            mm_camera_ops_t *cam_ops,
+            channel_cb_routine cb_routine,
+            cam_padding_info_t *paddingInfo,
+            void *userData,
+            camera3_stream_t *stream,
+            cam_feature_mask_t postprocess_mask,
+            bool is4KVideo,
+            bool isInputStreamConfigured,
+            QCamera3Channel *metadataChannel,
+            uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+    ~QCamera3PicChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual int32_t flush();
+    virtual int32_t request(buffer_handle_t *buffer,
+            uint32_t frameNumber,
+            camera3_stream_buffer_t* pInputBuffer,
+            metadata_buffer_t* metadata);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    virtual reprocess_type_t getReprocessType();
+
+    QCamera3Exif *getExifData(metadata_buffer_t *metadata,
+            jpeg_settings_t *jpeg_settings);
+    void overrideYuvSize(uint32_t width, uint32_t height);
+    // Static callbacks handed to the mm-jpeg / mm-camera interfaces;
+    // userdata carries the channel instance.
+    static void jpegEvtHandle(jpeg_job_status_t status,
+            uint32_t /*client_hdl*/,
+            uint32_t jobId,
+            mm_jpeg_output_t *p_output,
+            void *userdata);
+    static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+            void *userdata);
+
+private:
+    int32_t queueJpegSetting(uint32_t out_buf_index, metadata_buffer_t *metadata);
+
+public:
+    cam_dimension_t m_max_pic_dim;
+
+private:
+    uint32_t mNumSnapshotBufs;
+    uint32_t mYuvWidth, mYuvHeight;
+    int32_t mCurrentBufIndex;
+    bool mInputBufferHint;
+    QCamera3StreamMem *mYuvMemory;
+    // Keep a list of free buffers
+    Mutex mFreeBuffersLock;
+    List<uint32_t> mFreeBufferList;
+    uint32_t mFrameLen;
+};
+
+// reprocess channel class
+class QCamera3ReprocessChannel : public QCamera3Channel
+{
+public:
+    QCamera3ReprocessChannel(uint32_t cam_handle,
+            uint32_t channel_handle,
+            mm_camera_ops_t *cam_ops,
+            channel_cb_routine cb_routine,
+            cam_padding_info_t *paddingInfo,
+            cam_feature_mask_t postprocess_mask,
+            void *userData, void *ch_hdl);
+    QCamera3ReprocessChannel();
+    virtual ~QCamera3ReprocessChannel();
+    // offline reprocess
+    virtual int32_t start();
+    virtual int32_t stop();
+    int32_t doReprocessOffline(qcamera_fwk_input_pp_data_t *frame,
+            bool isPriorityFrame = false);
+    int32_t doReprocess(int buf_fd, size_t buf_length, int32_t &ret_val,
+            mm_camera_super_buf_t *meta_buf);
+    int32_t overrideMetadata(qcamera_hal3_pp_buffer_t *pp_buffer,
+            mm_camera_buf_def_t *meta_buffer,
+            jpeg_settings_t *jpeg_settings,
+            qcamera_fwk_input_pp_data_t &fwk_frame);
+    int32_t overrideFwkMetadata(qcamera_fwk_input_pp_data_t *frame);
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t len);
+    virtual void putStreamBufs();
+    virtual int32_t initialize(cam_is_type_t isType);
+    int32_t unmapOfflineBuffers(bool all);
+    int32_t bufDone(mm_camera_super_buf_t *recvd_frame);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+    static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+            void* userdata);
+    int32_t addReprocStreamsFromSource(cam_pp_feature_config_t &pp_config,
+            const reprocess_config_t &src_config,
+            cam_is_type_t is_type,
+            QCamera3Channel *pMetaChannel);
+    QCamera3Stream *getStreamBySrcHandle(uint32_t srcHandle);
+    QCamera3Stream *getSrcStreamBySrcHandle(uint32_t srcHandle);
+    virtual int32_t registerBuffer(buffer_handle_t * buffer, cam_is_type_t isType);
+
+public:
+    void *inputChHandle;
+
+private:
+    // Record of a buffer mapped to the backend for offline reprocess, so it
+    // can be unmapped later (see unmapOfflineBuffers).
+    typedef struct {
+        QCamera3Stream *stream;
+        cam_mapping_buf_type type;
+        uint32_t index;
+    } OfflineBuffer;
+
+    int32_t resetToCamPerfNormal(uint32_t frameNumber);
+    android::List<OfflineBuffer> mOfflineBuffers;
+    android::List<OfflineBuffer> mOfflineMetaBuffers;
+    int32_t mOfflineBuffersIndex;
+    int32_t mOfflineMetaIndex;
+    uint32_t mFrameLen;
+    Mutex mFreeBuffersLock; // Lock for free heap buffers
+    List<int32_t> mFreeBufferList; // Free heap buffers list
+    reprocess_type_t mReprocessType;
+    uint32_t mSrcStreamHandles[MAX_STREAM_NUM_IN_BUNDLE];
+    QCamera3ProcessingChannel *m_pSrcChannel; // ptr to source channel for reprocess
+    QCamera3Channel *m_pMetaChannel;
+    QCamera3StreamMem *mMemory;
+    QCamera3StreamMem mGrallocMemory;
+    // Frame numbers queued as priority, guarded by mPriorityFramesLock.
+    Vector<uint32_t> mPriorityFrames;
+    Mutex mPriorityFramesLock;
+    bool mReprocessPerfMode;
+};
+
+
+/* QCamera3SupportChannel is for HAL internal consumption only */
+class QCamera3SupportChannel : public QCamera3Channel
+{
+public:
+    QCamera3SupportChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_padding_info_t *paddingInfo,
+                    cam_feature_mask_t postprocess_mask,
+                    cam_stream_type_t streamType,
+                    cam_dimension_t *dim,
+                    cam_format_t streamFormat,
+                    uint8_t hw_analysis_supported,
+                    cam_color_filter_arrangement_t color_arrangement,
+                    void *userData,
+                    uint32_t numBuffers = MIN_STREAMING_BUFFER_NUM
+                    );
+    virtual ~QCamera3SupportChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    // Internal-only channel; framework buffer registration is a no-op.
+    virtual int32_t registerBuffer(buffer_handle_t * /*buffer*/, cam_is_type_t /*isType*/)
+            { return NO_ERROR; };
+
+    static cam_dimension_t kDim;
+private:
+    QCamera3StreamMem *mMemory;
+    cam_dimension_t mDim;
+    cam_stream_type_t mStreamType;
+    cam_format_t mStreamFormat;
+};
+
+}; // namespace qcamera
+ +#endif /* __QCAMERA_CHANNEL_H__ */ diff --git a/camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp new file mode 100644 index 0000000..94a398b --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp @@ -0,0 +1,272 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*
+*/
+
+
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+#define LOG_TAG "QCamera3CropRegionMapper"
+
+// Camera dependencies
+#include "QCamera3CropRegionMapper.h"
+#include "QCamera3HWI.h"
+
+extern "C" {
+#include "mm_camera_dbg.h"
+}
+
+using namespace android;
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : QCamera3CropRegionMapper
+ *
+ * DESCRIPTION: Constructor. All sizes start at 0; mapping is a no-op until
+ *              update() has been called with valid dimensions.
+ *==========================================================================*/
+QCamera3CropRegionMapper::QCamera3CropRegionMapper()
+        : mSensorW(0),
+          mSensorH(0),
+          mActiveArrayW(0),
+          mActiveArrayH(0)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3CropRegionMapper
+ *
+ * DESCRIPTION: destructor (no owned resources)
+ *==========================================================================*/
+
+QCamera3CropRegionMapper::~QCamera3CropRegionMapper()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : update
+ *
+ * DESCRIPTION: update sensor active array size and sensor output size.
+ *              Rejects zero sizes and a sensor output larger than the active
+ *              array (the active array must fully contain the sensor output).
+ *
+ * PARAMETERS :
+ *   @active_array_w : active array width
+ *   @active_array_h : active array height
+ *   @sensor_w       : sensor output width
+ *   @sensor_h       : sensor output height
+ *==========================================================================*/
+void QCamera3CropRegionMapper::update(uint32_t active_array_w,
+        uint32_t active_array_h, uint32_t sensor_w,
+        uint32_t sensor_h)
+{
+    // Sanity check
+    if (active_array_w == 0 || active_array_h == 0 ||
+            sensor_w == 0 || sensor_h == 0) {
+        LOGE("active_array size and sensor output size must be non zero");
+        return;
+    }
+    if (active_array_w < sensor_w || active_array_h < sensor_h) {
+        LOGE("invalid input: active_array [%d, %d], sensor size [%d, %d]",
+                active_array_w, active_array_h, sensor_w,
sensor_h);
+        return;
+    }
+    mSensorW = sensor_w;
+    mSensorH = sensor_h;
+    mActiveArrayW = active_array_w;
+    mActiveArrayH = active_array_h;
+
+    LOGH("active_array: %d x %d, sensor size %d x %d",
+            mActiveArrayW, mActiveArrayH, mSensorW, mSensorH);
+}
+
+/*===========================================================================
+ * FUNCTION   : toActiveArray
+ *
+ * DESCRIPTION: Map crop rectangle from sensor output space to active array
+ *              space. Integer scaling truncates toward zero; the result is
+ *              then clamped into the active array via boundToSize().
+ *
+ * PARAMETERS : @crop_left/@crop_top/@crop_width/@crop_height : rectangle,
+ *              updated in place
+ *==========================================================================*/
+void QCamera3CropRegionMapper::toActiveArray(int32_t& crop_left, int32_t& crop_top,
+        int32_t& crop_width, int32_t& crop_height)
+{
+    if (mSensorW == 0 || mSensorH == 0 ||
+            mActiveArrayW == 0 || mActiveArrayH == 0) {
+        LOGE("sensor/active array sizes are not initialized!");
+        return;
+    }
+
+    crop_left   = crop_left * mActiveArrayW / mSensorW;
+    crop_top    = crop_top * mActiveArrayH / mSensorH;
+    crop_width  = crop_width * mActiveArrayW / mSensorW;
+    crop_height = crop_height * mActiveArrayH / mSensorH;
+
+    boundToSize(crop_left, crop_top, crop_width, crop_height,
+            mActiveArrayW, mActiveArrayH);
+}
+
+/*===========================================================================
+ * FUNCTION   : toSensor
+ *
+ * DESCRIPTION: Map crop rectangle from active array space to sensor output
+ *              space (inverse of toActiveArray, with integer truncation).
+ *
+ * PARAMETERS : @crop_left/@crop_top/@crop_width/@crop_height : rectangle,
+ *              updated in place
+ *==========================================================================*/
+
+void QCamera3CropRegionMapper::toSensor(int32_t& crop_left, int32_t& crop_top,
+        int32_t& crop_width, int32_t&
crop_height)
+{
+    if (mSensorW == 0 || mSensorH == 0 ||
+            mActiveArrayW == 0 || mActiveArrayH == 0) {
+        LOGE("sensor/active array sizes are not initialized!");
+        return;
+    }
+
+    crop_left   = crop_left * mSensorW / mActiveArrayW;
+    crop_top    = crop_top * mSensorH / mActiveArrayH;
+    crop_width  = crop_width * mSensorW / mActiveArrayW;
+    crop_height = crop_height * mSensorH / mActiveArrayH;
+
+    LOGD("before bounding left %d, top %d, width %d, height %d",
+            crop_left, crop_top, crop_width, crop_height);
+    boundToSize(crop_left, crop_top, crop_width, crop_height,
+            mSensorW, mSensorH);
+    LOGD("after bounding left %d, top %d, width %d, height %d",
+            crop_left, crop_top, crop_width, crop_height);
+}
+
+/*===========================================================================
+ * FUNCTION   : boundToSize
+ *
+ * DESCRIPTION: Bound a rectangle inside a bounding box: clamp the origin to
+ *              (0,0), then shrink width/height so the far edge stays inside
+ *              (bound_w, bound_h). Note the extent is shrunk, not shifted.
+ *
+ * PARAMETERS :
+ *   @left/@top/@width/@height : rectangle, updated in place
+ *   @bound_w : width of bounding box
+ *   @bound_h : height of bounding box
+ *==========================================================================*/
+void QCamera3CropRegionMapper::boundToSize(int32_t& left, int32_t& top,
+        int32_t& width, int32_t& height, int32_t bound_w, int32_t bound_h)
+{
+    if (left < 0) {
+        left = 0;
+    }
+    if (top < 0) {
+        top = 0;
+    }
+
+    if ((left + width) > bound_w) {
+        width = bound_w - left;
+    }
+    if ((top + height) > bound_h) {
+        height = bound_h - top;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : toActiveArray
+ *
+ * DESCRIPTION: Map a single point from sensor output space to active array
+ *              space. Out-of-range points are rejected with a log and left
+ *              unmodified.
+ *
+ * PARAMETERS : @x/@y : point coordinates, updated in place
+ *==========================================================================*/
+void
QCamera3CropRegionMapper::toActiveArray(uint32_t& x, uint32_t& y) +{ + if (mSensorW == 0 || mSensorH == 0 || + mActiveArrayW == 0 || mActiveArrayH == 0) { + LOGE("sensor/active array sizes are not initialized!"); + return; + } + if ((x > static_cast<uint32_t>(mSensorW)) || + (y > static_cast<uint32_t>(mSensorH))) { + LOGE("invalid co-ordinate (%d, %d) in (0, 0, %d, %d) space", + x, y, mSensorW, mSensorH); + return; + } + x = x * mActiveArrayW / mSensorW; + y = y * mActiveArrayH / mSensorH; +} + +/*=========================================================================== + * FUNCTION : toSensor + * + * DESCRIPTION: Map co-ordinate from active array space to sensor output space + * + * PARAMETERS : + * @x : x coordinate + * @y : y coordinate + * + * RETURN : none + *==========================================================================*/ + +void QCamera3CropRegionMapper::toSensor(uint32_t& x, uint32_t& y) +{ + if (mSensorW == 0 || mSensorH == 0 || + mActiveArrayW == 0 || mActiveArrayH == 0) { + LOGE("sensor/active array sizes are not initialized!"); + return; + } + + if ((x > static_cast<uint32_t>(mActiveArrayW)) || + (y > static_cast<uint32_t>(mActiveArrayH))) { + LOGE("invalid co-ordinate (%d, %d) in (0, 0, %d, %d) space", + x, y, mSensorW, mSensorH); + return; + } + x = x * mSensorW / mActiveArrayW; + y = y * mSensorH / mActiveArrayH; +} + +}; //end namespace android diff --git a/camera/QCamera2/HAL3/QCamera3CropRegionMapper.h b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.h new file mode 100644 index 0000000..31c8578 --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*
+*/
+
+#ifndef __QCAMERA3CROPREGIONMAPPER_H__
+#define __QCAMERA3CROPREGIONMAPPER_H__
+
+// System dependencies
+#include <utils/Errors.h>
+
+using namespace android;
+
+namespace qcamera {
+
+// Translates crop rectangles and points between the sensor's output
+// coordinate space and the camera's active pixel array coordinate space
+// (the space framework metadata such as crop regions is expressed in).
+class QCamera3CropRegionMapper {
+public:
+    QCamera3CropRegionMapper();
+    virtual ~QCamera3CropRegionMapper();
+
+    // Record both coordinate spaces; all mapping is a no-op until called
+    // with non-zero sizes where the active array contains the sensor output.
+    void update(uint32_t active_array_w, uint32_t active_array_h,
+            uint32_t sensor_w, uint32_t sensor_h);
+    // Rectangle: sensor output space -> active array space (in place).
+    void toActiveArray(int32_t& crop_left, int32_t& crop_top,
+            int32_t& crop_width, int32_t& crop_height);
+    // Rectangle: active array space -> sensor output space (in place).
+    void toSensor(int32_t& crop_left, int32_t& crop_top,
+            int32_t& crop_width, int32_t& crop_height);
+    // Point overloads of the two mappings above.
+    void toActiveArray(uint32_t& x, uint32_t& y);
+    void toSensor(uint32_t& x, uint32_t& y);
+
+private:
+    /* sensor output size */
+    int32_t mSensorW, mSensorH;
+    /* active pixel array size */
+    int32_t mActiveArrayW, mActiveArrayH;
+
+    // Clamp origin to (0,0) and shrink extents to fit (bound_w, bound_h).
+    void boundToSize(int32_t& left, int32_t& top, int32_t& width,
+            int32_t& height, int32_t bound_w, int32_t bound_h);
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA3CROPREGIONMAPPER_H__ */
diff --git a/camera/QCamera2/HAL3/QCamera3HALHeader.h b/camera/QCamera2/HAL3/QCamera3HALHeader.h
new file mode 100644
index 0000000..ac68196
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3HALHeader.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#ifndef __QCAMERA_HALHEADER_H__
+#define __QCAMERA_HALHEADER_H__
+
+// System dependencies
+#include "gralloc.h"
+
+// Camera dependencies
+#include "cam_types.h"
+
+using namespace android;
+
+namespace qcamera {
+
+// NOTE: plain function-like macros; arguments are evaluated twice.
+#define MAX(a, b)  ((a) > (b) ? (a) : (b))
+#define MIN(a, b)  ((a) < (b) ?
(a) : (b))
+
+// True when every ZSL usage bit is set on the gralloc usage flags.
+#define IS_USAGE_ZSL(usage)  (((usage) & (GRALLOC_USAGE_HW_CAMERA_ZSL)) \
+        == (GRALLOC_USAGE_HW_CAMERA_ZSL))
+
+class QCamera3ProcessingChannel;
+
+    // Validity marker for a configured stream slot.
+    typedef enum {
+        INVALID,
+        VALID,
+    } stream_status_t;
+
+    // Which reprocess pipeline a channel feeds.
+    typedef enum {
+        REPROCESS_TYPE_NONE,
+        REPROCESS_TYPE_JPEG,
+        REPROCESS_TYPE_YUV,
+        REPROCESS_TYPE_PRIVATE,
+        REPROCESS_TYPE_RAW
+    } reprocess_type_t;
+
+    // Per-capture JPEG encode settings gathered from request metadata.
+    typedef struct {
+        uint32_t out_buf_index;         // index of the output buffer to encode into
+        int32_t jpeg_orientation;       // EXIF orientation in degrees
+        uint8_t jpeg_quality;
+        uint8_t jpeg_thumb_quality;
+        cam_dimension_t thumbnail_size;
+        uint8_t gps_timestamp_valid;    // flags below guard their optional fields
+        int64_t gps_timestamp;
+        uint8_t gps_coordinates_valid;
+        double gps_coordinates[3];      // presumably {lat, long, altitude} — verify against EXIF writer
+        char gps_processing_method[GPS_PROCESSING_METHOD_SIZE];
+        uint8_t image_desc_valid;
+        char image_desc[EXIF_IMAGE_DESCRIPTION_SIZE];
+    } jpeg_settings_t;
+
+    // Minimal exposure info returned alongside a capture.
+    typedef struct {
+        int32_t iso_speed;
+        int64_t exposure_time;
+    } metadata_response_t;
+
+    // Everything needed to configure a reprocess stream from its source.
+    typedef struct {
+        cam_stream_type_t stream_type;
+        cam_format_t stream_format;
+        cam_dimension_t input_stream_dim;
+        cam_stream_buf_plane_info_t input_stream_plane_info;
+        cam_dimension_t output_stream_dim;
+        cam_padding_info_t *padding;
+        reprocess_type_t reprocess_type;
+        QCamera3ProcessingChannel *src_channel;     // channel providing the input frames
+    } reprocess_config_t;
+
+};//namespace qcamera
+
+#endif
diff --git a/camera/QCamera2/HAL3/QCamera3HWI.cpp b/camera/QCamera2/HAL3/QCamera3HWI.cpp
new file mode 100644
index 0000000..1803b82
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3HWI.cpp
@@ -0,0 +1,10672 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*
+*/
+
+#define LOG_TAG "QCamera3HWI"
+//#define LOG_NDEBUG 0
+
+#define __STDC_LIMIT_MACROS
+
+// To remove
+#include <cutils/properties.h>
+
+// System dependencies
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sync/sync.h>
+#include "gralloc_priv.h"
+
+// Display dependencies
+#include "qdMetaData.h"
+
+// Camera dependencies
+#include "android/QCamera3External.h"
+#include "util/QCameraFlash.h"
+#include "QCamera3HWI.h"
+#include "QCamera3VendorTags.h"
+#include "QCameraTrace.h"
+
+extern "C" {
+#include "mm_camera_dbg.h"
+}
+
+using namespace android;
+
+namespace qcamera {
+
+#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX )
+
+// Pipeline/result pacing constants.
+#define EMPTY_PIPELINE_DELAY 2
+#define PARTIAL_RESULT_COUNT 2
+#define FRAME_SKIP_DELAY 0
+
+// Max pixel values for the supported raw bit depths.
+#define MAX_VALUE_8BIT ((1<<8)-1)
+#define MAX_VALUE_10BIT ((1<<10)-1)
+#define MAX_VALUE_12BIT ((1<<12)-1)
+
+#define VIDEO_4K_WIDTH  3840
+#define VIDEO_4K_HEIGHT 2160
+
+// Largest resolution at which EIS is supported.
+#define MAX_EIS_WIDTH 1920
+#define MAX_EIS_HEIGHT 1080
+
+// Stream-count limits used during stream configuration validation.
+#define MAX_RAW_STREAMS        1
+#define MAX_STALLING_STREAMS   1
+#define MAX_PROCESSED_STREAMS  3
+/* Batch mode is enabled only if FPS set is equal to or greater than this */
+#define MIN_FPS_FOR_BATCH_MODE (120)
+#define PREVIEW_FPS_FOR_HFR    (30)
+#define DEFAULT_VIDEO_FPS      (30.0)
+#define MAX_HFR_BATCH_SIZE     (8)
+#define REGIONS_TUPLE_COUNT    5
+#define HDR_PLUS_PERF_TIME_OUT  (7000) // milliseconds
+#define BURST_REPROCESS_PERF_TIME_OUT  (1000) // milliseconds
+// Set a threshold for detection of missing buffers //seconds
+#define MISSING_REQUEST_BUF_TIMEOUT 3
+#define FLUSH_TIMEOUT 3
+#define METADATA_MAP_SIZE(MAP) (sizeof(MAP)/sizeof(MAP[0]))
+
+// Post-processing feature superset advertised for HAL3 streams.
+#define CAM_QCOM_FEATURE_PP_SUPERSET_HAL3   ( CAM_QCOM_FEATURE_DENOISE2D |\
+                                              CAM_QCOM_FEATURE_CROP |\
+                                              CAM_QCOM_FEATURE_ROTATION |\
+                                              CAM_QCOM_FEATURE_SHARPNESS |\
+                                              CAM_QCOM_FEATURE_SCALE |\
+                                              CAM_QCOM_FEATURE_CAC |\
+                                              CAM_QCOM_FEATURE_CDS )
+
+#define TIMEOUT_NEVER -1
+
+cam_capability_t
*gCamCapability[MM_CAMERA_MAX_NUM_SENSORS];             // per-sensor capability, filled at module init
+const camera_metadata_t *gStaticMetadata[MM_CAMERA_MAX_NUM_SENSORS];    // cached static metadata per camera
+extern pthread_mutex_t gCamLock;                        // global lock shared across camera sessions
+volatile uint32_t gCamHal3LogLevel = 1;
+extern uint8_t gNumCameraSessions;
+
+// Translation tables between framework (camera_metadata) enums and
+// backend (cam_types) enums. Lookup helpers scan these linearly, so
+// entry order matters where multiple rows share a backend value.
+const QCamera3HardwareInterface::QCameraPropMap QCamera3HardwareInterface::CDS_MAP [] = {
+    {"On",  CAM_CDS_MODE_ON},
+    {"Off", CAM_CDS_MODE_OFF},
+    {"Auto",CAM_CDS_MODE_AUTO}
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_effect_mode_t,
+        cam_effect_mode_type> QCamera3HardwareInterface::EFFECT_MODES_MAP[] = {
+    { ANDROID_CONTROL_EFFECT_MODE_OFF,       CAM_EFFECT_MODE_OFF },
+    { ANDROID_CONTROL_EFFECT_MODE_MONO,       CAM_EFFECT_MODE_MONO },
+    { ANDROID_CONTROL_EFFECT_MODE_NEGATIVE,   CAM_EFFECT_MODE_NEGATIVE },
+    { ANDROID_CONTROL_EFFECT_MODE_SOLARIZE,   CAM_EFFECT_MODE_SOLARIZE },
+    { ANDROID_CONTROL_EFFECT_MODE_SEPIA,      CAM_EFFECT_MODE_SEPIA },
+    { ANDROID_CONTROL_EFFECT_MODE_POSTERIZE,  CAM_EFFECT_MODE_POSTERIZE },
+    { ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD, CAM_EFFECT_MODE_WHITEBOARD },
+    { ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD, CAM_EFFECT_MODE_BLACKBOARD },
+    { ANDROID_CONTROL_EFFECT_MODE_AQUA,       CAM_EFFECT_MODE_AQUA }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_awb_mode_t,
+        cam_wb_mode_type> QCamera3HardwareInterface::WHITE_BALANCE_MODES_MAP[] = {
+    { ANDROID_CONTROL_AWB_MODE_OFF,             CAM_WB_MODE_OFF },
+    { ANDROID_CONTROL_AWB_MODE_AUTO,            CAM_WB_MODE_AUTO },
+    { ANDROID_CONTROL_AWB_MODE_INCANDESCENT,    CAM_WB_MODE_INCANDESCENT },
+    { ANDROID_CONTROL_AWB_MODE_FLUORESCENT,     CAM_WB_MODE_FLUORESCENT },
+    { ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT,CAM_WB_MODE_WARM_FLUORESCENT},
+    { ANDROID_CONTROL_AWB_MODE_DAYLIGHT,        CAM_WB_MODE_DAYLIGHT },
+    { ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT, CAM_WB_MODE_CLOUDY_DAYLIGHT },
+    { ANDROID_CONTROL_AWB_MODE_TWILIGHT,        CAM_WB_MODE_TWILIGHT },
+    { ANDROID_CONTROL_AWB_MODE_SHADE,           CAM_WB_MODE_SHADE }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_scene_mode_t,
+        cam_scene_mode_type> QCamera3HardwareInterface::SCENE_MODES_MAP[] = {
+    { ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY,  CAM_SCENE_MODE_FACE_PRIORITY },
+    { ANDROID_CONTROL_SCENE_MODE_ACTION,         CAM_SCENE_MODE_ACTION },
+    { ANDROID_CONTROL_SCENE_MODE_PORTRAIT,       CAM_SCENE_MODE_PORTRAIT },
+    { ANDROID_CONTROL_SCENE_MODE_LANDSCAPE,      CAM_SCENE_MODE_LANDSCAPE },
+    { ANDROID_CONTROL_SCENE_MODE_NIGHT,          CAM_SCENE_MODE_NIGHT },
+    { ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT, CAM_SCENE_MODE_NIGHT_PORTRAIT },
+    { ANDROID_CONTROL_SCENE_MODE_THEATRE,        CAM_SCENE_MODE_THEATRE },
+    { ANDROID_CONTROL_SCENE_MODE_BEACH,          CAM_SCENE_MODE_BEACH },
+    { ANDROID_CONTROL_SCENE_MODE_SNOW,           CAM_SCENE_MODE_SNOW },
+    { ANDROID_CONTROL_SCENE_MODE_SUNSET,         CAM_SCENE_MODE_SUNSET },
+    { ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO,    CAM_SCENE_MODE_ANTISHAKE },
+    { ANDROID_CONTROL_SCENE_MODE_FIREWORKS ,     CAM_SCENE_MODE_FIREWORKS },
+    { ANDROID_CONTROL_SCENE_MODE_SPORTS ,        CAM_SCENE_MODE_SPORTS },
+    { ANDROID_CONTROL_SCENE_MODE_PARTY,          CAM_SCENE_MODE_PARTY },
+    { ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT,    CAM_SCENE_MODE_CANDLELIGHT },
+    { ANDROID_CONTROL_SCENE_MODE_BARCODE,        CAM_SCENE_MODE_BARCODE}
+};
+
+// Note: both OFF and FIXED backend focus modes report as AF_MODE_OFF.
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_af_mode_t,
+        cam_focus_mode_type> QCamera3HardwareInterface::FOCUS_MODES_MAP[] = {
+    { ANDROID_CONTROL_AF_MODE_OFF,                CAM_FOCUS_MODE_OFF },
+    { ANDROID_CONTROL_AF_MODE_OFF,                CAM_FOCUS_MODE_FIXED },
+    { ANDROID_CONTROL_AF_MODE_AUTO,               CAM_FOCUS_MODE_AUTO },
+    { ANDROID_CONTROL_AF_MODE_MACRO,              CAM_FOCUS_MODE_MACRO },
+    { ANDROID_CONTROL_AF_MODE_EDOF,               CAM_FOCUS_MODE_EDOF },
+    { ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE, CAM_FOCUS_MODE_CONTINOUS_PICTURE },
+    { ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,   CAM_FOCUS_MODE_CONTINOUS_VIDEO }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_color_correction_aberration_mode_t,
+        cam_aberration_mode_t> QCamera3HardwareInterface::COLOR_ABERRATION_MAP[] = {
+    { ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
+            CAM_COLOR_CORRECTION_ABERRATION_OFF },
+    { ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST,
+            CAM_COLOR_CORRECTION_ABERRATION_FAST },
+    { ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY,
+            CAM_COLOR_CORRECTION_ABERRATION_HIGH_QUALITY },
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_ae_antibanding_mode_t,
+        cam_antibanding_mode_type> QCamera3HardwareInterface::ANTIBANDING_MODES_MAP[] = {
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,  CAM_ANTIBANDING_MODE_OFF },
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ, CAM_ANTIBANDING_MODE_50HZ },
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ, CAM_ANTIBANDING_MODE_60HZ },
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO, CAM_ANTIBANDING_MODE_AUTO }
+};
+
+// AE mode -> flash behavior; plain ON and OFF both disable the flash.
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_ae_mode_t,
+        cam_flash_mode_t> QCamera3HardwareInterface::AE_FLASH_MODE_MAP[] = {
+    { ANDROID_CONTROL_AE_MODE_OFF,                  CAM_FLASH_MODE_OFF },
+    { ANDROID_CONTROL_AE_MODE_ON,                   CAM_FLASH_MODE_OFF },
+    { ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH,        CAM_FLASH_MODE_AUTO},
+    { ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH,      CAM_FLASH_MODE_ON  },
+    { ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE, CAM_FLASH_MODE_AUTO}
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_flash_mode_t,
+        cam_flash_mode_t> QCamera3HardwareInterface::FLASH_MODES_MAP[] = {
+    { ANDROID_FLASH_MODE_OFF,    CAM_FLASH_MODE_OFF  },
+    { ANDROID_FLASH_MODE_SINGLE, CAM_FLASH_MODE_SINGLE },
+    { ANDROID_FLASH_MODE_TORCH,  CAM_FLASH_MODE_TORCH }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_statistics_face_detect_mode_t,
+        cam_face_detect_mode_t> QCamera3HardwareInterface::FACEDETECT_MODES_MAP[] = {
+    { ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,    CAM_FACE_DETECT_MODE_OFF    },
+    { ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
CAM_FACE_DETECT_MODE_SIMPLE },
+    { ANDROID_STATISTICS_FACE_DETECT_MODE_FULL,   CAM_FACE_DETECT_MODE_FULL   }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_lens_info_focus_distance_calibration_t,
+        cam_focus_calibration_t> QCamera3HardwareInterface::FOCUS_CALIBRATION_MAP[] = {
+    { ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED,
+            CAM_FOCUS_UNCALIBRATED },
+    { ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE,
+            CAM_FOCUS_APPROXIMATE },
+    { ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED,
+            CAM_FOCUS_CALIBRATED }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_lens_state_t,
+        cam_af_lens_state_t> QCamera3HardwareInterface::LENS_STATE_MAP[] = {
+    { ANDROID_LENS_STATE_STATIONARY,    CAM_AF_LENS_STATE_STATIONARY},
+    { ANDROID_LENS_STATE_MOVING,        CAM_AF_LENS_STATE_MOVING}
+};
+
+// Flattened (width, height) pairs; (0, 0) means "no thumbnail".
+const int32_t available_thumbnail_sizes[] = {0, 0,
+                                             176, 144,
+                                             240, 144,
+                                             256, 144,
+                                             240, 160,
+                                             256, 154,
+                                             240, 240,
+                                             320, 240};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_sensor_test_pattern_mode_t,
+        cam_test_pattern_mode_t> QCamera3HardwareInterface::TEST_PATTERN_MAP[] = {
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_OFF,          CAM_TEST_PATTERN_OFF   },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR,  CAM_TEST_PATTERN_SOLID_COLOR },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS,   CAM_TEST_PATTERN_COLOR_BARS },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY, CAM_TEST_PATTERN_COLOR_BARS_FADE_TO_GRAY },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_PN9,          CAM_TEST_PATTERN_PN9 },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_CUSTOM1,      CAM_TEST_PATTERN_CUSTOM1},
+};
+
+/* Since there is no mapping for all the options some Android enum are not listed.
+ * Also, the order in this list is important because while mapping from HAL to Android it will
+ * traverse from lower to higher index which means that for HAL values that are map to different
+ * Android values, the traverse logic will select the first one found.
+ */
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_sensor_reference_illuminant1_t,
+        cam_illuminat_t> QCamera3HardwareInterface::REFERENCE_ILLUMINANT_MAP[] = {
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT, CAM_AWB_WARM_FLO},
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT, CAM_AWB_CUSTOM_DAYLIGHT },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT, CAM_AWB_COLD_FLO },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A, CAM_AWB_A },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D55, CAM_AWB_NOON },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D65, CAM_AWB_D65 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D75, CAM_AWB_D75 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D50, CAM_AWB_D50 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN, CAM_AWB_CUSTOM_A},
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT, CAM_AWB_D50 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN, CAM_AWB_A },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER, CAM_AWB_D50 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER, CAM_AWB_D65 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_SHADE, CAM_AWB_D75 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT, CAM_AWB_CUSTOM_DAYLIGHT },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT, CAM_AWB_COLD_FLO},
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        int32_t, cam_hfr_mode_t> QCamera3HardwareInterface::HFR_MODE_MAP[] = {
+    { 60, CAM_HFR_MODE_60FPS},
+    { 90, CAM_HFR_MODE_90FPS},
+    { 120, CAM_HFR_MODE_120FPS},
+    { 150, CAM_HFR_MODE_150FPS},
+    { 180, CAM_HFR_MODE_180FPS},
+    { 210, CAM_HFR_MODE_210FPS},
+    { 240, CAM_HFR_MODE_240FPS},
+    { 480, CAM_HFR_MODE_480FPS},
+};
+
+camera3_device_ops_t
QCamera3HardwareInterface::mCameraOps = {
+    // Framework entry points (camera3_device_ops); unsupported legacy
+    // hooks are left NULL.
+    .initialize                         = QCamera3HardwareInterface::initialize,
+    .configure_streams                  = QCamera3HardwareInterface::configure_streams,
+    .register_stream_buffers            = NULL,
+    .construct_default_request_settings = QCamera3HardwareInterface::construct_default_request_settings,
+    .process_capture_request            = QCamera3HardwareInterface::process_capture_request,
+    .get_metadata_vendor_tag_ops        = NULL,
+    .dump                               = QCamera3HardwareInterface::dump,
+    .flush                              = QCamera3HardwareInterface::flush,
+    .reserved                           = {0},
+};
+
+// initialise to some default value
+uint32_t QCamera3HardwareInterface::sessionId[] = {0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF};
+
+/*===========================================================================
+ * FUNCTION   : QCamera3HardwareInterface
+ *
+ * DESCRIPTION: constructor of QCamera3HardwareInterface. Initializes all
+ *              members, wires up the camera3_device, reads debug/system
+ *              properties, and probes the GPU library for surface alignment.
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera ID
+ *   @callbacks : framework callback table, stored for later notification
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3HardwareInterface::QCamera3HardwareInterface(uint32_t cameraId,
+        const camera_module_callbacks_t *callbacks)
+    : mCameraId(cameraId),
+      mCameraHandle(NULL),
+      mCameraInitialized(false),
+      mCallbackOps(NULL),
+      mMetadataChannel(NULL),
+      mPictureChannel(NULL),
+      mRawChannel(NULL),
+      mSupportChannel(NULL),
+      mAnalysisChannel(NULL),
+      mRawDumpChannel(NULL),
+      mDummyBatchChannel(NULL),
+      m_perfLock(),
+      mCommon(),
+      mChannelHandle(0),
+      mFirstConfiguration(true),
+      mFlush(false),
+      mFlushPerf(false),
+      mParamHeap(NULL),
+      mParameters(NULL),
+      mPrevParameters(NULL),
+      m_bIsVideo(false),
+      m_bIs4KVideo(false),
+      m_bEisSupportedSize(false),
+      m_bEisEnable(false),
+      m_MobicatMask(0),
+      mMinProcessedFrameDuration(0),
+      mMinJpegFrameDuration(0),
+      mMinRawFrameDuration(0),
+      mMetaFrameCount(0U),
+      mUpdateDebugLevel(false),
+      mCallbacks(callbacks),
+      mCaptureIntent(0),
+      mCacMode(0),
+      mBatchSize(0),
+      mToBeQueuedVidBufs(0),
+      mHFRVideoFps(DEFAULT_VIDEO_FPS),
mOpMode(CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE),
+      mFirstFrameNumberInBatch(0),
+      mNeedSensorRestart(false),
+      mLdafCalibExist(false),
+      mPowerHintEnabled(false),
+      mLastCustIntentFrmNum(-1),
+      mState(CLOSED),
+      mIsDeviceLinked(false),
+      mIsMainCamera(true),
+      mLinkedCameraId(0),
+      m_pRelCamSyncHeap(NULL),
+      m_pRelCamSyncBuf(NULL)
+{
+    getLogLevel();
+    m_perfLock.lock_init();
+    mCommon.init(gCamCapability[cameraId]);
+    // Expose this object as a camera3_device to the framework.
+    mCameraDevice.common.tag = HARDWARE_DEVICE_TAG;
+    mCameraDevice.common.version = CAMERA_DEVICE_API_VERSION_3_3;
+    mCameraDevice.common.close = close_camera_device;
+    mCameraDevice.ops = &mCameraOps;
+    mCameraDevice.priv = this;
+    gCamCapability[cameraId]->version = CAM_HAL_V3;
+    // TODO: hardcode for now until mctl add support for min_num_pp_bufs
+    //TBD - To see if this hardcoding is needed. Check by printing if this is filled by mctl to 3
+    gCamCapability[cameraId]->min_num_pp_bufs = 3;
+
+    pthread_cond_init(&mBuffersCond, NULL);
+
+    pthread_cond_init(&mRequestCond, NULL);
+    mPendingLiveRequest = 0;
+    mCurrentRequestId = -1;
+    pthread_mutex_init(&mMutex, NULL);
+
+    for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++)
+        mDefaultMetadata[i] = NULL;
+
+    // Getting system props of different kinds
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.raw.dump", prop, "0");
+    mEnableRawDump = atoi(prop);
+    if (mEnableRawDump)
+        LOGD("Raw dump from Camera HAL enabled");
+
+    memset(&mInputStreamInfo, 0, sizeof(mInputStreamInfo));
+    memset(mLdafCalib, 0, sizeof(mLdafCalib));
+
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.tnr.preview", prop, "0");
+    m_bTnrPreview = (uint8_t)atoi(prop);
+
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.tnr.video", prop, "0");
+    m_bTnrVideo = (uint8_t)atoi(prop);
+
+    //Load and read GPU library.
+    // Best-effort: fall back to CAM_PAD_TO_32 if the Adreno utils library
+    // or its alignment query symbol is unavailable.
+    lib_surface_utils = NULL;
+    LINK_get_surface_pixel_alignment = NULL;
+    mSurfaceStridePadding = CAM_PAD_TO_32;
+    lib_surface_utils = dlopen("libadreno_utils.so", RTLD_NOW);
+    if (lib_surface_utils) {
+        *(void **)&LINK_get_surface_pixel_alignment =
+                dlsym(lib_surface_utils, "get_gpu_pixel_alignment");
+         if (LINK_get_surface_pixel_alignment) {
+             mSurfaceStridePadding = LINK_get_surface_pixel_alignment();
+         }
+         dlclose(lib_surface_utils);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3HardwareInterface
+ *
+ * DESCRIPTION: destructor of QCamera3HardwareInterface. Unlinks dual-camera
+ *              sync if active, stops every stream before deleting any
+ *              channel, sends a final unconfigure, then tears down state.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3HardwareInterface::~QCamera3HardwareInterface()
+{
+    LOGD("E");
+
+    int32_t rc = 0;
+
+    /* Turn off current power hint before acquiring perfLock in case they
+     * conflict with each other */
+    disablePowerHint();
+
+    m_perfLock.lock_acq();
+
+    // unlink of dualcam
+    if (mIsDeviceLinked) {
+        m_pRelCamSyncBuf->sync_control = CAM_SYNC_RELATED_SENSORS_OFF;
+        pthread_mutex_lock(&gCamLock);
+
+        if (mIsMainCamera == 1) {
+            m_pRelCamSyncBuf->mode = CAM_MODE_PRIMARY;
+            m_pRelCamSyncBuf->type = CAM_TYPE_MAIN;
+            // related session id should be session id of linked session
+            m_pRelCamSyncBuf->related_sensor_session_id = sessionId[mLinkedCameraId];
+        } else {
+            m_pRelCamSyncBuf->mode = CAM_MODE_SECONDARY;
+            m_pRelCamSyncBuf->type = CAM_TYPE_AUX;
+            m_pRelCamSyncBuf->related_sensor_session_id = sessionId[mLinkedCameraId];
+        }
+        pthread_mutex_unlock(&gCamLock);
+
+        rc = mCameraHandle->ops->sync_related_sensors(
+                mCameraHandle->camera_handle, m_pRelCamSyncBuf);
+        if (rc < 0) {
+            LOGE("Dualcam: Unlink failed, but still proceed to close");
+        }
+        mIsDeviceLinked = false;
+    }
+
+    /* We need to stop all streams before deleting any stream */
+    if (mRawDumpChannel) {
+        mRawDumpChannel->stop();
+    }
+
+    // NOTE: 'camera3_stream_t *'
objects are already freed at + // this stage by the framework + for (List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3ProcessingChannel *channel = (*it)->channel; + if (channel) { + channel->stop(); + } + } + if (mSupportChannel) + mSupportChannel->stop(); + + if (mAnalysisChannel) { + mAnalysisChannel->stop(); + } + if (mMetadataChannel) { + mMetadataChannel->stop(); + } + if (mChannelHandle) { + mCameraHandle->ops->stop_channel(mCameraHandle->camera_handle, + mChannelHandle); + LOGD("stopping channel %d", mChannelHandle); + } + + for (List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3ProcessingChannel *channel = (*it)->channel; + if (channel) + delete channel; + free (*it); + } + if (mSupportChannel) { + delete mSupportChannel; + mSupportChannel = NULL; + } + + if (mAnalysisChannel) { + delete mAnalysisChannel; + mAnalysisChannel = NULL; + } + if (mRawDumpChannel) { + delete mRawDumpChannel; + mRawDumpChannel = NULL; + } + if (mDummyBatchChannel) { + delete mDummyBatchChannel; + mDummyBatchChannel = NULL; + } + + mPictureChannel = NULL; + + if (mMetadataChannel) { + delete mMetadataChannel; + mMetadataChannel = NULL; + } + + /* Clean up all channels */ + if (mCameraInitialized) { + if(!mFirstConfiguration){ + //send the last unconfigure + cam_stream_size_info_t stream_config_info; + memset(&stream_config_info, 0, sizeof(cam_stream_size_info_t)); + stream_config_info.buffer_info.min_buffers = MIN_INFLIGHT_REQUESTS; + stream_config_info.buffer_info.max_buffers = + m_bIs4KVideo ? 
0 : MAX_INFLIGHT_REQUESTS; + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_STREAM_INFO, + stream_config_info); + int rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters); + if (rc < 0) { + LOGE("set_parms failed for unconfigure"); + } + } + deinitParameters(); + } + + if (mChannelHandle) { + mCameraHandle->ops->delete_channel(mCameraHandle->camera_handle, + mChannelHandle); + LOGH("deleting channel %d", mChannelHandle); + mChannelHandle = 0; + } + + if (mState != CLOSED) + closeCamera(); + + for (auto &req : mPendingBuffersMap.mPendingBuffersInRequest) { + req.mPendingBufferList.clear(); + } + mPendingBuffersMap.mPendingBuffersInRequest.clear(); + mPendingReprocessResultList.clear(); + for (pendingRequestIterator i = mPendingRequestsList.begin(); + i != mPendingRequestsList.end();) { + i = erasePendingRequest(i); + } + for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) + if (mDefaultMetadata[i]) + free_camera_metadata(mDefaultMetadata[i]); + + m_perfLock.lock_rel(); + m_perfLock.lock_deinit(); + + pthread_cond_destroy(&mRequestCond); + + pthread_cond_destroy(&mBuffersCond); + + pthread_mutex_destroy(&mMutex); + LOGD("X"); +} + +/*=========================================================================== + * FUNCTION : erasePendingRequest + * + * DESCRIPTION: function to erase a desired pending request after freeing any + * allocated memory + * + * PARAMETERS : + * @i : iterator pointing to pending request to be erased + * + * RETURN : iterator pointing to the next request + *==========================================================================*/ +QCamera3HardwareInterface::pendingRequestIterator + QCamera3HardwareInterface::erasePendingRequest (pendingRequestIterator i) +{ + if (i->input_buffer != NULL) { + free(i->input_buffer); + i->input_buffer = NULL; + } + if (i->settings != NULL) + free_camera_metadata((camera_metadata_t*)i->settings); + return mPendingRequestsList.erase(i); +} + 
+/*=========================================================================== + * FUNCTION : camEvtHandle + * + * DESCRIPTION: Function registered to mm-camera-interface to handle events + * + * PARAMETERS : + * @camera_handle : interface layer camera handle + * @evt : ptr to event + * @user_data : user data ptr + * + * RETURN : none + *==========================================================================*/ +void QCamera3HardwareInterface::camEvtHandle(uint32_t /*camera_handle*/, + mm_camera_event_t *evt, + void *user_data) +{ + QCamera3HardwareInterface *obj = (QCamera3HardwareInterface *)user_data; + if (obj && evt) { + switch(evt->server_event_type) { + case CAM_EVENT_TYPE_DAEMON_DIED: + pthread_mutex_lock(&obj->mMutex); + obj->mState = ERROR; + pthread_mutex_unlock(&obj->mMutex); + LOGE("Fatal, camera daemon died"); + break; + + case CAM_EVENT_TYPE_DAEMON_PULL_REQ: + LOGD("HAL got request pull from Daemon"); + pthread_mutex_lock(&obj->mMutex); + obj->mWokenUpByDaemon = true; + obj->unblockRequestIfNecessary(); + pthread_mutex_unlock(&obj->mMutex); + break; + + default: + LOGW("Warning: Unhandled event %d", + evt->server_event_type); + break; + } + } else { + LOGE("NULL user_data/evt"); + } +} + +/*=========================================================================== + * FUNCTION : openCamera + * + * DESCRIPTION: open camera + * + * PARAMETERS : + * @hw_device : double ptr for camera device struct + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HardwareInterface::openCamera(struct hw_device_t **hw_device) +{ + int rc = 0; + if (mState != CLOSED) { + *hw_device = NULL; + return PERMISSION_DENIED; + } + + m_perfLock.lock_acq(); + LOGI("[KPI Perf]: E PROFILE_OPEN_CAMERA camera id %d", + mCameraId); + + rc = openCamera(); + if (rc == 0) { + *hw_device = &mCameraDevice.common; + } else + *hw_device = NULL; + + 
m_perfLock.lock_rel(); + LOGI("[KPI Perf]: X PROFILE_OPEN_CAMERA camera id %d, rc: %d", + mCameraId, rc); + + if (rc == NO_ERROR) { + mState = OPENED; + } + return rc; +} + +/*=========================================================================== + * FUNCTION : openCamera + * + * DESCRIPTION: open camera + * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HardwareInterface::openCamera() +{ + int rc = 0; + char value[PROPERTY_VALUE_MAX]; + + KPI_ATRACE_CALL(); + if (mCameraHandle) { + LOGE("Failure: Camera already opened"); + return ALREADY_EXISTS; + } + + rc = QCameraFlash::getInstance().reserveFlashForCamera(mCameraId); + if (rc < 0) { + LOGE("Failed to reserve flash for camera id: %d", + mCameraId); + return UNKNOWN_ERROR; + } + + rc = camera_open((uint8_t)mCameraId, &mCameraHandle); + if (rc) { + LOGE("camera_open failed. rc = %d, mCameraHandle = %p", rc, mCameraHandle); + return rc; + } + + if (!mCameraHandle) { + LOGE("camera_open failed. mCameraHandle = %p", mCameraHandle); + return -ENODEV; + } + + rc = mCameraHandle->ops->register_event_notify(mCameraHandle->camera_handle, + camEvtHandle, (void *)this); + + if (rc < 0) { + LOGE("Error, failed to register event callback"); + /* Not closing camera here since it is already handled in destructor */ + return FAILED_TRANSACTION; + } + + mExifParams.debug_params = + (mm_jpeg_debug_exif_params_t *) malloc (sizeof(mm_jpeg_debug_exif_params_t)); + if (mExifParams.debug_params) { + memset(mExifParams.debug_params, 0, sizeof(mm_jpeg_debug_exif_params_t)); + } else { + LOGE("Out of Memory. Allocation failed for 3A debug exif params"); + return NO_MEMORY; + } + mFirstConfiguration = true; + + //Notify display HAL that a camera session is active. 
+ //But avoid calling the same during bootup because camera service might open/close + //cameras at boot time during its initialization and display service will also internally + //wait for camera service to initialize first while calling this display API, resulting in a + //deadlock situation. Since boot time camera open/close calls are made only to fetch + //capabilities, no need of this display bw optimization. + //Use "service.bootanim.exit" property to know boot status. + property_get("service.bootanim.exit", value, "0"); + if (atoi(value) == 1) { + pthread_mutex_lock(&gCamLock); + if (gNumCameraSessions++ == 0) { + setCameraLaunchStatus(true); + } + pthread_mutex_unlock(&gCamLock); + } + + //fill the session id needed while linking dual cam + pthread_mutex_lock(&gCamLock); + rc = mCameraHandle->ops->get_session_id(mCameraHandle->camera_handle, + &sessionId[mCameraId]); + pthread_mutex_unlock(&gCamLock); + + if (rc < 0) { + LOGE("Error, failed to get sessiion id"); + return UNKNOWN_ERROR; + } else { + //Allocate related cam sync buffer + //this is needed for the payload that goes along with bundling cmd for related + //camera use cases + m_pRelCamSyncHeap = new QCamera3HeapMemory(1); + rc = m_pRelCamSyncHeap->allocate(sizeof(cam_sync_related_sensors_event_info_t)); + if(rc != OK) { + rc = NO_MEMORY; + LOGE("Dualcam: Failed to allocate Related cam sync Heap memory"); + return NO_MEMORY; + } + + //Map memory for related cam sync buffer + rc = mCameraHandle->ops->map_buf(mCameraHandle->camera_handle, + CAM_MAPPING_BUF_TYPE_SYNC_RELATED_SENSORS_BUF, + m_pRelCamSyncHeap->getFd(0), + sizeof(cam_sync_related_sensors_event_info_t)); + if(rc < 0) { + LOGE("Dualcam: failed to map Related cam sync buffer"); + rc = FAILED_TRANSACTION; + return NO_MEMORY; + } + m_pRelCamSyncBuf = + (cam_sync_related_sensors_event_info_t*) DATA_PTR(m_pRelCamSyncHeap,0); + } + + LOGH("mCameraId=%d",mCameraId); + + return NO_ERROR; +} + 
/*===========================================================================
 * FUNCTION   : closeCamera
 *
 * DESCRIPTION: close camera: unmap/free the dual-cam sync buffer, close the
 *              mm-camera backend, invalidate the session id, notify display
 *              HAL, free exif debug params and release the flash unit.
 *              Always transitions mState to CLOSED.
 *
 * PARAMETERS : none
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int QCamera3HardwareInterface::closeCamera()
{
    KPI_ATRACE_CALL();
    int rc = NO_ERROR;
    char value[PROPERTY_VALUE_MAX];

    LOGI("[KPI Perf]: E PROFILE_CLOSE_CAMERA camera id %d",
             mCameraId);

    // unmap memory for related cam sync buffer
    mCameraHandle->ops->unmap_buf(mCameraHandle->camera_handle,
            CAM_MAPPING_BUF_TYPE_SYNC_RELATED_SENSORS_BUF);
    if (NULL != m_pRelCamSyncHeap) {
        m_pRelCamSyncHeap->deallocate();
        delete m_pRelCamSyncHeap;
        m_pRelCamSyncHeap = NULL;
        m_pRelCamSyncBuf = NULL;
    }

    rc = mCameraHandle->ops->close_camera(mCameraHandle->camera_handle);
    mCameraHandle = NULL;

    //reset session id to some invalid id
    pthread_mutex_lock(&gCamLock);
    sessionId[mCameraId] = 0xDEADBEEF;
    pthread_mutex_unlock(&gCamLock);

    //Notify display HAL that there is no active camera session
    //but avoid calling the same during bootup. Refer to openCamera
    //for more details.
    property_get("service.bootanim.exit", value, "0");
    if (atoi(value) == 1) {
        pthread_mutex_lock(&gCamLock);
        if (--gNumCameraSessions == 0) {
            setCameraLaunchStatus(false);
        }
        pthread_mutex_unlock(&gCamLock);
    }

    if (mExifParams.debug_params) {
        free(mExifParams.debug_params);
        mExifParams.debug_params = NULL;
    }
    if (QCameraFlash::getInstance().releaseFlashFromCamera(mCameraId) != 0) {
        LOGW("Failed to release flash for camera id: %d",
                mCameraId);
    }
    mState = CLOSED;
    LOGI("[KPI Perf]: X PROFILE_CLOSE_CAMERA camera id %d, rc: %d",
         mCameraId, rc);
    return rc;
}

/*===========================================================================
 * FUNCTION   : initialize
 *
 * DESCRIPTION: Initialize frameworks callback functions, init parameters and
 *              add the backend channel. Valid only from OPENED state; moves
 *              the device to INITIALIZED on success.
 *
 * PARAMETERS :
 *   @callback_ops : callback function to frameworks
 *
 * RETURN     : 0 on success, negative errno-style code on failure
 *
 *==========================================================================*/
int QCamera3HardwareInterface::initialize(
        const struct camera3_callback_ops *callback_ops)
{
    ATRACE_CALL();
    int rc;

    LOGI("E :mCameraId = %d mState = %d", mCameraId, mState);
    pthread_mutex_lock(&mMutex);

    // Validate current state
    switch (mState) {
        case OPENED:
            /* valid state */
            break;
        default:
            LOGE("Invalid state %d", mState);
            rc = -ENODEV;
            goto err1;
    }

    rc = initParameters();
    if (rc < 0) {
        LOGE("initParamters failed %d", rc);
        goto err1;
    }
    mCallbackOps = callback_ops;

    mChannelHandle = mCameraHandle->ops->add_channel(
            mCameraHandle->camera_handle, NULL, NULL, this);
    if (mChannelHandle == 0) {
        LOGE("add_channel failed");
        rc = -ENOMEM;
        pthread_mutex_unlock(&mMutex);
        return rc;
    }

    pthread_mutex_unlock(&mMutex);
    mCameraInitialized = true;
    mState = INITIALIZED;
    LOGI("X");
    return 0;

err1:
    pthread_mutex_unlock(&mMutex);
err2:
    return rc;
}

/*===========================================================================
 * FUNCTION   : validateStreamDimensions
 *
 * 
DESCRIPTION: Check if the configuration requested are those advertised
 *
 * PARAMETERS :
 *   @stream_list : streams to be configured
 *
 * RETURN     : NO_ERROR when every stream size is supported, -EINVAL otherwise
 *
 *==========================================================================*/
int QCamera3HardwareInterface::validateStreamDimensions(
        camera3_stream_configuration_t *streamList)
{
    int rc = NO_ERROR;
    size_t count = 0;

    camera3_stream_t *inputStream = NULL;
    /*
    * Loop through all streams to find input stream if it exists*
    */
    for (size_t i = 0; i< streamList->num_streams; i++) {
        if (streamList->streams[i]->stream_type == CAMERA3_STREAM_INPUT) {
            if (inputStream != NULL) {
                LOGE("Error, Multiple input streams requested");
                return -EINVAL;
            }
            inputStream = streamList->streams[i];
        }
    }
    /*
    * Loop through all streams requested in configuration
    * Check if unsupported sizes have been requested on any of them
    */
    for (size_t j = 0; j < streamList->num_streams; j++) {
        bool sizeFound = false;
        camera3_stream_t *newStream = streamList->streams[j];

        // For 90/270 rotation the buffer is validated against the swapped
        // dimensions, since the sensor-facing size is transposed.
        uint32_t rotatedHeight = newStream->height;
        uint32_t rotatedWidth = newStream->width;
        if ((newStream->rotation == CAMERA3_STREAM_ROTATION_90) ||
                (newStream->rotation == CAMERA3_STREAM_ROTATION_270)) {
            rotatedHeight = newStream->width;
            rotatedWidth = newStream->height;
        }

        /*
        * Sizes are different for each type of stream format check against
        * appropriate table.
        */
        switch (newStream->format) {
        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16:
        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE:
        case HAL_PIXEL_FORMAT_RAW10:
            // RAW streams must match a supported raw dimension exactly.
            count = MIN(gCamCapability[mCameraId]->supported_raw_dim_cnt, MAX_SIZES_CNT);
            for (size_t i = 0; i < count; i++) {
                if ((gCamCapability[mCameraId]->raw_dim[i].width == (int32_t)rotatedWidth) &&
                        (gCamCapability[mCameraId]->raw_dim[i].height == (int32_t)rotatedHeight)) {
                    sizeFound = true;
                    break;
                }
            }
            break;
        case HAL_PIXEL_FORMAT_BLOB:
            count = MIN(gCamCapability[mCameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT);
            /* Verify set size against generated sizes table */
            for (size_t i = 0; i < count; i++) {
                if (((int32_t)rotatedWidth ==
                        gCamCapability[mCameraId]->picture_sizes_tbl[i].width) &&
                        ((int32_t)rotatedHeight ==
                        gCamCapability[mCameraId]->picture_sizes_tbl[i].height)) {
                    sizeFound = true;
                    break;
                }
            }
            break;
        case HAL_PIXEL_FORMAT_YCbCr_420_888:
        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
        default:
            // ZSL / bidirectional / input streams may also match the full
            // active array size in addition to the picture sizes table.
            if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL
                    || newStream->stream_type == CAMERA3_STREAM_INPUT
                    || IS_USAGE_ZSL(newStream->usage)) {
                if (((int32_t)rotatedWidth ==
                        gCamCapability[mCameraId]->active_array_size.width) &&
                        ((int32_t)rotatedHeight ==
                        gCamCapability[mCameraId]->active_array_size.height)) {
                    sizeFound = true;
                    break;
                }
                /* We could potentially break here to enforce ZSL stream
                 * set from frameworks always is full active array size
                 * but it is not clear from the spc if framework will always
                 * follow that, also we have logic to override to full array
                 * size, so keeping the logic lenient at the moment
                 */
            }
            count = MIN(gCamCapability[mCameraId]->picture_sizes_tbl_cnt,
                    MAX_SIZES_CNT);
            for (size_t i = 0; i < count; i++) {
                if (((int32_t)rotatedWidth ==
                        gCamCapability[mCameraId]->picture_sizes_tbl[i].width) &&
                        ((int32_t)rotatedHeight ==
                        gCamCapability[mCameraId]->picture_sizes_tbl[i].height)) {
                    sizeFound = true;
                    break;
                }
            }
            break;
        } /* End of switch(newStream->format) */

        /* We error out even if a single stream has unsupported size set */
        if (!sizeFound) {
            LOGE("Error: Unsupported size: %d x %d type: %d array size: %d x %d",
                    rotatedWidth, rotatedHeight, newStream->format,
                    gCamCapability[mCameraId]->active_array_size.width,
                    gCamCapability[mCameraId]->active_array_size.height);
            rc = -EINVAL;
            break;
        }
    } /* End of for each stream */
    return rc;
}

/*==============================================================================
 * FUNCTION   : isSupportChannelNeeded
 *
 * DESCRIPTION: Simple heuristic func to determine if support channels is needed
 *
 * PARAMETERS :
 *   @stream_list        : streams to be configured
 *   @stream_config_info : the config info for streams to be configured
 *
 * RETURN     : Boolen true/false decision
 *
 *==========================================================================*/
bool QCamera3HardwareInterface::isSupportChannelNeeded(
        camera3_stream_configuration_t *streamList,
        cam_stream_size_info_t stream_config_info)
{
    uint32_t i;
    bool pprocRequested = false;
    /* Check for conditions where PProc pipeline does not have any streams*/
    for (i = 0; i < stream_config_info.num_streams; i++) {
        if (stream_config_info.type[i] != CAM_STREAM_TYPE_ANALYSIS &&
                stream_config_info.postprocess_mask[i] != CAM_QCOM_FEATURE_NONE) {
            pprocRequested = true;
            break;
        }
    }

    if (pprocRequested == false )
        return true;

    /* Dummy stream needed if only raw or jpeg streams present */
    for (i = 0; i < streamList->num_streams; i++) {
        switch(streamList->streams[i]->format) {
            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
            case HAL_PIXEL_FORMAT_RAW10:
            case HAL_PIXEL_FORMAT_RAW16:
            case HAL_PIXEL_FORMAT_BLOB:
                break;
            default:
                return false;
        }
    }
    return true;
}

/*==============================================================================
 * FUNCTION   : getSensorOutputSize
 *
 * DESCRIPTION: Get sensor output size based on current stream configuration.
 *              Pushes the max stream dimension to the backend via
 *              CAM_INTF_PARM_MAX_DIMENSION and reads back the resulting
 *              CAM_INTF_PARM_RAW_DIMENSION.
 *
 * PARAMETERS :
 *   @sensor_dim : sensor output dimension (output)
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *
 *==========================================================================*/
int32_t QCamera3HardwareInterface::getSensorOutputSize(cam_dimension_t &sensor_dim)
{
    int32_t rc = NO_ERROR;

    // Max dimension is taken per-axis across all configured streams.
    cam_dimension_t max_dim = {0, 0};
    for (uint32_t i = 0; i < mStreamConfigInfo.num_streams; i++) {
        if (mStreamConfigInfo.stream_sizes[i].width > max_dim.width)
            max_dim.width = mStreamConfigInfo.stream_sizes[i].width;
        if (mStreamConfigInfo.stream_sizes[i].height > max_dim.height)
            max_dim.height = mStreamConfigInfo.stream_sizes[i].height;
    }

    clear_metadata_buffer(mParameters);

    rc = ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_MAX_DIMENSION,
            max_dim);
    if (rc != NO_ERROR) {
        LOGE("Failed to update table for CAM_INTF_PARM_MAX_DIMENSION");
        return rc;
    }

    rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
    if (rc != NO_ERROR) {
        LOGE("Failed to set CAM_INTF_PARM_MAX_DIMENSION");
        return rc;
    }

    clear_metadata_buffer(mParameters);
    ADD_GET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_RAW_DIMENSION);

    rc = mCameraHandle->ops->get_parms(mCameraHandle->camera_handle,
            mParameters);
    if (rc != NO_ERROR) {
        LOGE("Failed to get CAM_INTF_PARM_RAW_DIMENSION");
        return rc;
    }

    READ_PARAM_ENTRY(mParameters, CAM_INTF_PARM_RAW_DIMENSION, sensor_dim);
    LOGH("sensor output dimension = %d x %d", sensor_dim.width, sensor_dim.height);

    return rc;
}

/*==============================================================================
 * FUNCTION   : enablePowerHint
 *
 * DESCRIPTION: enable single powerhint for preview and different video modes.
 *
 * PARAMETERS :
 *
 * RETURN     : NULL
 *
 *==========================================================================*/
void QCamera3HardwareInterface::enablePowerHint()
{
    // Idempotent: mPowerHintEnabled guards against double enable.
    if (!mPowerHintEnabled) {
        m_perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, true);
        mPowerHintEnabled = true;
    }
}

/*==============================================================================
 * FUNCTION   : disablePowerHint
 *
 * DESCRIPTION: disable current powerhint.
 *
 * PARAMETERS :
 *
 * RETURN     : NULL
 *
 *==========================================================================*/
void QCamera3HardwareInterface::disablePowerHint()
{
    if (mPowerHintEnabled) {
        m_perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, false);
        mPowerHintEnabled = false;
    }
}

/*==============================================================================
 * FUNCTION   : addToPPFeatureMask
 *
 * DESCRIPTION: add additional features to pp feature mask based on
 *              stream type and usecase
 *
 * PARAMETERS :
 *   @stream_format : stream type for feature mask
 *   @stream_idx    : stream idx within postprocess_mask list to change
 *
 * RETURN     : NULL
 *
 *==========================================================================*/
void QCamera3HardwareInterface::addToPPFeatureMask(int stream_format,
        uint32_t stream_idx)
{
    char feature_mask_value[PROPERTY_VALUE_MAX];
    cam_feature_mask_t feature_mask;
    int args_converted;
    int property_len;

    /* Get feature mask from property; accepts "0x<hex>" or decimal. */
    property_len = property_get("persist.camera.hal3.feature",
            feature_mask_value, "0");
    if ((property_len > 2) && (feature_mask_value[0] == '0') &&
            (feature_mask_value[1] == 'x')) {
        args_converted = sscanf(feature_mask_value, "0x%llx", &feature_mask);
    } else {
        args_converted = sscanf(feature_mask_value, "%lld", &feature_mask);
    }
    if (1 != args_converted) {
        feature_mask = 0;
        LOGE("Wrong feature mask %s", feature_mask_value);
        return;
    }

    switch (stream_format) {
    case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: {
        /* Add LLVD to pp feature mask only if video hint is enabled */
        if ((m_bIsVideo) && (feature_mask & CAM_QTI_FEATURE_SW_TNR)) {
            mStreamConfigInfo.postprocess_mask[stream_idx]
                    |= CAM_QTI_FEATURE_SW_TNR;
            LOGH("Added SW TNR to pp feature mask");
        } else if ((m_bIsVideo) && (feature_mask & CAM_QCOM_FEATURE_LLVD)) {
            mStreamConfigInfo.postprocess_mask[stream_idx]
                    |= CAM_QCOM_FEATURE_LLVD;
            LOGH("Added LLVD SeeMore to pp feature mask");
        }
        break;
    }
    default:
        break;
    }
    LOGD("PP feature mask %llx",
            mStreamConfigInfo.postprocess_mask[stream_idx]);
}

/*==============================================================================
 * FUNCTION   : updateFpsInPreviewBuffer
 *
 * DESCRIPTION: update FPS information in preview buffer.
 *
 * PARAMETERS :
 *   @metadata    : pointer to metadata buffer
 *   @frame_number: frame_number to look for in pending buffer list
 *
 * RETURN     : None
 *
 *==========================================================================*/
void QCamera3HardwareInterface::updateFpsInPreviewBuffer(metadata_buffer_t *metadata,
        uint32_t frame_number)
{
    // Mark all pending buffers for this particular request
    // with corresponding framerate information
    for (List<PendingBuffersInRequest>::iterator req =
            mPendingBuffersMap.mPendingBuffersInRequest.begin();
            req != mPendingBuffersMap.mPendingBuffersInRequest.end(); req++) {
        for(List<PendingBufferInfo>::iterator j =
                req->mPendingBufferList.begin();
                j != req->mPendingBufferList.end(); j++) {
            QCamera3Channel *channel = (QCamera3Channel *)j->stream->priv;
            // Only preview streams of the matching frame number are updated.
            if ((req->frame_number == frame_number) &&
                    (channel->getStreamTypeMask() &
                    (1U << CAM_STREAM_TYPE_PREVIEW))) {
                IF_META_AVAILABLE(cam_fps_range_t, float_range,
                        CAM_INTF_PARM_FPS_RANGE, metadata) {
                    int32_t cameraFps = float_range->max_fps;
                    struct private_handle_t *priv_handle =
                            (struct private_handle_t *)(*(j->buffer));
                    setMetaData(priv_handle, 
UPDATE_REFRESH_RATE, &cameraFps);
                }
            }
        }
    }
}

/*===========================================================================
 * FUNCTION   : configureStreams
 *
 * DESCRIPTION: Reset HAL camera device processing pipeline and set up new input
 *              and output streams. Thin wrapper that holds the perf lock
 *              around configureStreamsPerfLocked().
 *
 * PARAMETERS :
 *   @stream_list : streams to be configured
 *
 * RETURN     : status from configureStreamsPerfLocked()
 *
 *==========================================================================*/
int QCamera3HardwareInterface::configureStreams(
        camera3_stream_configuration_t *streamList)
{
    ATRACE_CALL();
    int rc = 0;

    // Acquire perfLock before configure streams
    m_perfLock.lock_acq();
    rc = configureStreamsPerfLocked(streamList);
    m_perfLock.lock_rel();

    return rc;
}

/*===========================================================================
 * FUNCTION   : configureStreamsPerfLocked
 *
 * DESCRIPTION: configureStreams while perfLock is held.
 *
 * PARAMETERS :
 *   @stream_list : streams to be configured
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int QCamera3HardwareInterface::configureStreamsPerfLocked(
        camera3_stream_configuration_t *streamList)
{
    ATRACE_CALL();
    int rc = 0;

    // Sanity check stream_list
    if (streamList == NULL) {
        LOGE("NULL stream configuration");
        return BAD_VALUE;
    }
    if (streamList->streams == NULL) {
        LOGE("NULL stream list");
        return BAD_VALUE;
    }

    if (streamList->num_streams < 1) {
        LOGE("Bad number of streams requested: %d",
                streamList->num_streams);
        return BAD_VALUE;
    }

    if (streamList->num_streams >= MAX_NUM_STREAMS) {
        LOGE("Maximum number of streams %d exceeded: %d",
                MAX_NUM_STREAMS, streamList->num_streams);
        return BAD_VALUE;
    }

    mOpMode = streamList->operation_mode;
    LOGD("mOpMode: %d", mOpMode);

    /* first invalidate all the steams in the mStreamList
     * if they appear again, they will be validated
*/ + for (List<stream_info_t*>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3ProcessingChannel *channel = (QCamera3ProcessingChannel*)(*it)->stream->priv; + channel->stop(); + (*it)->status = INVALID; + } + + if (mRawDumpChannel) { + mRawDumpChannel->stop(); + delete mRawDumpChannel; + mRawDumpChannel = NULL; + } + + if (mSupportChannel) + mSupportChannel->stop(); + + if (mAnalysisChannel) { + mAnalysisChannel->stop(); + } + if (mMetadataChannel) { + /* If content of mStreamInfo is not 0, there is metadata stream */ + mMetadataChannel->stop(); + } + if (mChannelHandle) { + mCameraHandle->ops->stop_channel(mCameraHandle->camera_handle, + mChannelHandle); + LOGD("stopping channel %d", mChannelHandle); + } + + pthread_mutex_lock(&mMutex); + + // Check state + switch (mState) { + case INITIALIZED: + case CONFIGURED: + case STARTED: + /* valid state */ + break; + default: + LOGE("Invalid state %d", mState); + pthread_mutex_unlock(&mMutex); + return -ENODEV; + } + + /* Check whether we have video stream */ + m_bIs4KVideo = false; + m_bIsVideo = false; + m_bEisSupportedSize = false; + m_bTnrEnabled = false; + bool isZsl = false; + uint32_t videoWidth = 0U; + uint32_t videoHeight = 0U; + size_t rawStreamCnt = 0; + size_t stallStreamCnt = 0; + size_t processedStreamCnt = 0; + // Number of streams on ISP encoder path + size_t numStreamsOnEncoder = 0; + size_t numYuv888OnEncoder = 0; + bool bYuv888OverrideJpeg = false; + cam_dimension_t largeYuv888Size = {0, 0}; + cam_dimension_t maxViewfinderSize = {0, 0}; + bool bJpegExceeds4K = false; + bool bUseCommonFeatureMask = false; + cam_feature_mask_t commonFeatureMask = 0; + bool bSmallJpegSize = false; + uint32_t width_ratio; + uint32_t height_ratio; + maxViewfinderSize = gCamCapability[mCameraId]->max_viewfinder_size; + camera3_stream_t *inputStream = NULL; + bool isJpeg = false; + cam_dimension_t jpegSize = {0, 0}; + + cam_padding_info_t padding_info = gCamCapability[mCameraId]->padding_info; 
+ + /*EIS configuration*/ + bool eisSupported = false; + bool oisSupported = false; + int32_t margin_index = -1; + uint8_t eis_prop_set; + uint32_t maxEisWidth = 0; + uint32_t maxEisHeight = 0; + + memset(&mInputStreamInfo, 0, sizeof(mInputStreamInfo)); + + size_t count = IS_TYPE_MAX; + count = MIN(gCamCapability[mCameraId]->supported_is_types_cnt, count); + for (size_t i = 0; i < count; i++) { + if ((gCamCapability[mCameraId]->supported_is_types[i] == IS_TYPE_EIS_2_0) || + (gCamCapability[mCameraId]->supported_is_types[i] == IS_TYPE_EIS_3_0)) + { + eisSupported = true; + margin_index = (int32_t)i; + break; + } + } + + count = CAM_OPT_STAB_MAX; + count = MIN(gCamCapability[mCameraId]->optical_stab_modes_count, count); + for (size_t i = 0; i < count; i++) { + if (gCamCapability[mCameraId]->optical_stab_modes[i] == CAM_OPT_STAB_ON) { + oisSupported = true; + break; + } + } + + if (eisSupported) { + maxEisWidth = MAX_EIS_WIDTH; + maxEisHeight = MAX_EIS_HEIGHT; + } + + /* EIS setprop control */ + char eis_prop[PROPERTY_VALUE_MAX]; + memset(eis_prop, 0, sizeof(eis_prop)); + property_get("persist.camera.eis.enable", eis_prop, "0"); + eis_prop_set = (uint8_t)atoi(eis_prop); + + m_bEisEnable = eis_prop_set && (!oisSupported && eisSupported) && + (mOpMode != CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE); + + /* stream configurations */ + for (size_t i = 0; i < streamList->num_streams; i++) { + camera3_stream_t *newStream = streamList->streams[i]; + LOGI("stream[%d] type = %d, format = %d, width = %d, " + "height = %d, rotation = %d, usage = 0x%x", + i, newStream->stream_type, newStream->format, + newStream->width, newStream->height, newStream->rotation, + newStream->usage); + if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL || + newStream->stream_type == CAMERA3_STREAM_INPUT){ + isZsl = true; + } + if (newStream->stream_type == CAMERA3_STREAM_INPUT){ + inputStream = newStream; + } + + if (newStream->format == HAL_PIXEL_FORMAT_BLOB) { + isJpeg = true; + 
jpegSize.width = newStream->width; + jpegSize.height = newStream->height; + if (newStream->width > VIDEO_4K_WIDTH || + newStream->height > VIDEO_4K_HEIGHT) + bJpegExceeds4K = true; + } + + if ((HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED == newStream->format) && + (newStream->usage & private_handle_t::PRIV_FLAGS_VIDEO_ENCODER)) { + m_bIsVideo = true; + videoWidth = newStream->width; + videoHeight = newStream->height; + if ((VIDEO_4K_WIDTH <= newStream->width) && + (VIDEO_4K_HEIGHT <= newStream->height)) { + m_bIs4KVideo = true; + } + m_bEisSupportedSize = (newStream->width <= maxEisWidth) && + (newStream->height <= maxEisHeight); + } + if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL || + newStream->stream_type == CAMERA3_STREAM_OUTPUT) { + switch (newStream->format) { + case HAL_PIXEL_FORMAT_BLOB: + stallStreamCnt++; + if (isOnEncoder(maxViewfinderSize, newStream->width, + newStream->height)) { + numStreamsOnEncoder++; + } + width_ratio = CEIL_DIVISION(gCamCapability[mCameraId]->active_array_size.width, + newStream->width); + height_ratio = CEIL_DIVISION(gCamCapability[mCameraId]->active_array_size.height, + newStream->height);; + FATAL_IF(gCamCapability[mCameraId]->max_downscale_factor == 0, + "FATAL: max_downscale_factor cannot be zero and so assert"); + if ( (width_ratio > gCamCapability[mCameraId]->max_downscale_factor) || + (height_ratio > gCamCapability[mCameraId]->max_downscale_factor)) { + LOGH("Setting small jpeg size flag to true"); + bSmallJpegSize = true; + } + break; + case HAL_PIXEL_FORMAT_RAW10: + case HAL_PIXEL_FORMAT_RAW_OPAQUE: + case HAL_PIXEL_FORMAT_RAW16: + rawStreamCnt++; + break; + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + processedStreamCnt++; + if (isOnEncoder(maxViewfinderSize, newStream->width, + newStream->height)) { + if (newStream->stream_type != CAMERA3_STREAM_BIDIRECTIONAL && + !IS_USAGE_ZSL(newStream->usage)) { + commonFeatureMask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + } + numStreamsOnEncoder++; + } + break; + case 
HAL_PIXEL_FORMAT_YCbCr_420_888: + processedStreamCnt++; + if (isOnEncoder(maxViewfinderSize, newStream->width, + newStream->height)) { + // If Yuv888 size is not greater than 4K, set feature mask + // to SUPERSET so that it support concurrent request on + // YUV and JPEG. + if (newStream->width <= VIDEO_4K_WIDTH && + newStream->height <= VIDEO_4K_HEIGHT) { + commonFeatureMask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + } + numStreamsOnEncoder++; + numYuv888OnEncoder++; + largeYuv888Size.width = newStream->width; + largeYuv888Size.height = newStream->height; + } + break; + default: + processedStreamCnt++; + if (isOnEncoder(maxViewfinderSize, newStream->width, + newStream->height)) { + commonFeatureMask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + numStreamsOnEncoder++; + } + break; + } + + } + } + + if (gCamCapability[mCameraId]->position == CAM_POSITION_FRONT || + gCamCapability[mCameraId]->position == CAM_POSITION_FRONT_AUX || + !m_bIsVideo) { + m_bEisEnable = false; + } + + /* Logic to enable/disable TNR based on specific config size/etc.*/ + if ((m_bTnrPreview || m_bTnrVideo) && m_bIsVideo && + ((videoWidth == 1920 && videoHeight == 1080) || + (videoWidth == 1280 && videoHeight == 720)) && + (mOpMode != CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE)) + m_bTnrEnabled = true; + + /* Check if num_streams is sane */ + if (stallStreamCnt > MAX_STALLING_STREAMS || + rawStreamCnt > MAX_RAW_STREAMS || + processedStreamCnt > MAX_PROCESSED_STREAMS) { + LOGE("Invalid stream configu: stall: %d, raw: %d, processed %d", + stallStreamCnt, rawStreamCnt, processedStreamCnt); + pthread_mutex_unlock(&mMutex); + return -EINVAL; + } + /* Check whether we have zsl stream or 4k video case */ + if (isZsl && m_bIsVideo) { + LOGE("Currently invalid configuration ZSL&Video!"); + pthread_mutex_unlock(&mMutex); + return -EINVAL; + } + /* Check if stream sizes are sane */ + if (numStreamsOnEncoder > 2) { + LOGE("Number of streams on ISP encoder path exceeds limits of 2"); + 
pthread_mutex_unlock(&mMutex); + return -EINVAL; + } else if (1 < numStreamsOnEncoder){ + bUseCommonFeatureMask = true; + LOGH("Multiple streams above max viewfinder size, common mask needed"); + } + + /* Check if BLOB size is greater than 4k in 4k recording case */ + if (m_bIs4KVideo && bJpegExceeds4K) { + LOGE("HAL doesn't support Blob size greater than 4k in 4k recording"); + pthread_mutex_unlock(&mMutex); + return -EINVAL; + } + + // If jpeg stream is available, and a YUV 888 stream is on Encoder path, and + // the YUV stream's size is greater or equal to the JPEG size, set common + // postprocess mask to NONE, so that we can take advantage of postproc bypass. + if (numYuv888OnEncoder && isOnEncoder(maxViewfinderSize, + jpegSize.width, jpegSize.height) && + largeYuv888Size.width > jpegSize.width && + largeYuv888Size.height > jpegSize.height) { + bYuv888OverrideJpeg = true; + } else if (!isJpeg && numStreamsOnEncoder > 1) { + commonFeatureMask = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + } + + LOGH("max viewfinder width %d height %d isZsl %d bUseCommonFeature %x commonFeatureMask %llx", + maxViewfinderSize.width, maxViewfinderSize.height, isZsl, bUseCommonFeatureMask, + commonFeatureMask); + LOGH("numStreamsOnEncoder %d, processedStreamCnt %d, stallcnt %d bSmallJpegSize %d", + numStreamsOnEncoder, processedStreamCnt, stallStreamCnt, bSmallJpegSize); + + rc = validateStreamDimensions(streamList); + if (rc == NO_ERROR) { + rc = validateStreamRotations(streamList); + } + if (rc != NO_ERROR) { + LOGE("Invalid stream configuration requested!"); + pthread_mutex_unlock(&mMutex); + return rc; + } + + camera3_stream_t *zslStream = NULL; //Only use this for size and not actual handle! 
+ for (size_t i = 0; i < streamList->num_streams; i++) { + camera3_stream_t *newStream = streamList->streams[i]; + LOGH("newStream type = %d, stream format = %d " + "stream size : %d x %d, stream rotation = %d", + newStream->stream_type, newStream->format, + newStream->width, newStream->height, newStream->rotation); + //if the stream is in the mStreamList validate it + bool stream_exists = false; + for (List<stream_info_t*>::iterator it=mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + if ((*it)->stream == newStream) { + QCamera3ProcessingChannel *channel = + (QCamera3ProcessingChannel*)(*it)->stream->priv; + stream_exists = true; + if (channel) + delete channel; + (*it)->status = VALID; + (*it)->stream->priv = NULL; + (*it)->channel = NULL; + } + } + if (!stream_exists && newStream->stream_type != CAMERA3_STREAM_INPUT) { + //new stream + stream_info_t* stream_info; + stream_info = (stream_info_t* )malloc(sizeof(stream_info_t)); + if (!stream_info) { + LOGE("Could not allocate stream info"); + rc = -ENOMEM; + pthread_mutex_unlock(&mMutex); + return rc; + } + stream_info->stream = newStream; + stream_info->status = VALID; + stream_info->channel = NULL; + mStreamInfo.push_back(stream_info); + } + /* Covers Opaque ZSL and API1 F/W ZSL */ + if (IS_USAGE_ZSL(newStream->usage) + || newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ) { + if (zslStream != NULL) { + LOGE("Multiple input/reprocess streams requested!"); + pthread_mutex_unlock(&mMutex); + return BAD_VALUE; + } + zslStream = newStream; + } + /* Covers YUV reprocess */ + if (inputStream != NULL) { + if (newStream->stream_type == CAMERA3_STREAM_OUTPUT + && newStream->format == HAL_PIXEL_FORMAT_YCbCr_420_888 + && inputStream->format == HAL_PIXEL_FORMAT_YCbCr_420_888 + && inputStream->width == newStream->width + && inputStream->height == newStream->height) { + if (zslStream != NULL) { + /* This scenario indicates multiple YUV streams with same size + * as input stream have been requested, since zsl 
stream handle + * is solely use for the purpose of overriding the size of streams + * which share h/w streams we will just make a guess here as to + * which of the stream is a ZSL stream, this will be refactored + * once we make generic logic for streams sharing encoder output + */ + LOGH("Warning, Multiple ip/reprocess streams requested!"); + } + zslStream = newStream; + } + } + } + + /* If a zsl stream is set, we know that we have configured at least one input or + bidirectional stream */ + if (NULL != zslStream) { + mInputStreamInfo.dim.width = (int32_t)zslStream->width; + mInputStreamInfo.dim.height = (int32_t)zslStream->height; + mInputStreamInfo.format = zslStream->format; + mInputStreamInfo.usage = zslStream->usage; + LOGD("Input stream configured! %d x %d, format %d, usage %d", + mInputStreamInfo.dim.width, + mInputStreamInfo.dim.height, + mInputStreamInfo.format, mInputStreamInfo.usage); + } + + cleanAndSortStreamInfo(); + if (mMetadataChannel) { + delete mMetadataChannel; + mMetadataChannel = NULL; + } + if (mSupportChannel) { + delete mSupportChannel; + mSupportChannel = NULL; + } + + if (mAnalysisChannel) { + delete mAnalysisChannel; + mAnalysisChannel = NULL; + } + + if (mDummyBatchChannel) { + delete mDummyBatchChannel; + mDummyBatchChannel = NULL; + } + + //Create metadata channel and initialize it + cam_feature_mask_t metadataFeatureMask = CAM_QCOM_FEATURE_NONE; + setPAAFSupport(metadataFeatureMask, CAM_STREAM_TYPE_METADATA, + gCamCapability[mCameraId]->color_arrangement); + mMetadataChannel = new QCamera3MetadataChannel(mCameraHandle->camera_handle, + mChannelHandle, mCameraHandle->ops, captureResultCb, + &padding_info, metadataFeatureMask, this); + if (mMetadataChannel == NULL) { + LOGE("failed to allocate metadata channel"); + rc = -ENOMEM; + pthread_mutex_unlock(&mMutex); + return rc; + } + rc = mMetadataChannel->initialize(IS_TYPE_NONE); + if (rc < 0) { + LOGE("metadata channel initialization failed"); + delete mMetadataChannel; + 
mMetadataChannel = NULL; + pthread_mutex_unlock(&mMutex); + return rc; + } + + // Create analysis stream all the time, even when h/w support is not available + { + cam_feature_mask_t analysisFeatureMask = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + setPAAFSupport(analysisFeatureMask, CAM_STREAM_TYPE_ANALYSIS, + gCamCapability[mCameraId]->color_arrangement); + cam_analysis_info_t analysisInfo; + rc = mCommon.getAnalysisInfo( + FALSE, + TRUE, + analysisFeatureMask, + &analysisInfo); + if (rc != NO_ERROR) { + LOGE("getAnalysisInfo failed, ret = %d", rc); + pthread_mutex_unlock(&mMutex); + return rc; + } + + mAnalysisChannel = new QCamera3SupportChannel( + mCameraHandle->camera_handle, + mChannelHandle, + mCameraHandle->ops, + &analysisInfo.analysis_padding_info, + analysisFeatureMask, + CAM_STREAM_TYPE_ANALYSIS, + &analysisInfo.analysis_max_res, + (analysisInfo.analysis_format + == CAM_FORMAT_Y_ONLY ? CAM_FORMAT_Y_ONLY + : CAM_FORMAT_YUV_420_NV21), + analysisInfo.hw_analysis_supported, + gCamCapability[mCameraId]->color_arrangement, + this, + 0); // force buffer count to 0 + if (!mAnalysisChannel) { + LOGE("H/W Analysis channel cannot be created"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + } + + bool isRawStreamRequested = false; + memset(&mStreamConfigInfo, 0, sizeof(cam_stream_size_info_t)); + /* Allocate channel objects for the requested streams */ + for (size_t i = 0; i < streamList->num_streams; i++) { + camera3_stream_t *newStream = streamList->streams[i]; + uint32_t stream_usage = newStream->usage; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = (int32_t)newStream->width; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = (int32_t)newStream->height; + struct camera_info *p_info = NULL; + pthread_mutex_lock(&gCamLock); + p_info = get_cam_info(mCameraId, &mStreamConfigInfo.sync_type); + pthread_mutex_unlock(&gCamLock); + if ((newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL + || 
IS_USAGE_ZSL(newStream->usage)) && + newStream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED){ + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_SNAPSHOT; + if (bUseCommonFeatureMask) { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + commonFeatureMask; + } else { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_NONE; + } + + } else if(newStream->stream_type == CAMERA3_STREAM_INPUT) { + LOGH("Input stream configured, reprocess config"); + } else { + //for non zsl streams find out the format + switch (newStream->format) { + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED : + { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + /* add additional features to pp feature mask */ + addToPPFeatureMask(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, + mStreamConfigInfo.num_streams); + + if (stream_usage & private_handle_t::PRIV_FLAGS_VIDEO_ENCODER) { + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = + CAM_STREAM_TYPE_VIDEO; + if (m_bTnrEnabled && m_bTnrVideo) { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] |= + CAM_QCOM_FEATURE_CPP_TNR; + } + } else { + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = + CAM_STREAM_TYPE_PREVIEW; + if (m_bTnrEnabled && m_bTnrPreview) { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] |= + CAM_QCOM_FEATURE_CPP_TNR; + } + padding_info.width_padding = mSurfaceStridePadding; + padding_info.height_padding = CAM_PAD_TO_2; + } + if ((newStream->rotation == CAMERA3_STREAM_ROTATION_90) || + (newStream->rotation == CAMERA3_STREAM_ROTATION_270)) { + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = + newStream->height; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = + newStream->width; + } + } + break; + case HAL_PIXEL_FORMAT_YCbCr_420_888: + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = 
CAM_STREAM_TYPE_CALLBACK; + if (isOnEncoder(maxViewfinderSize, newStream->width, newStream->height)) { + if (bUseCommonFeatureMask) + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + commonFeatureMask; + else + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_NONE; + } else { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + } + break; + case HAL_PIXEL_FORMAT_BLOB: + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_SNAPSHOT; + // No need to check bSmallJpegSize if ZSL is present since JPEG uses ZSL stream + if ((m_bIs4KVideo && !isZsl) || (bSmallJpegSize && !isZsl)) { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + } else { + if (bUseCommonFeatureMask && + isOnEncoder(maxViewfinderSize, newStream->width, + newStream->height)) { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = commonFeatureMask; + } else { + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = CAM_QCOM_FEATURE_NONE; + } + } + if (isZsl) { + if (zslStream) { + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = + (int32_t)zslStream->width; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = + (int32_t)zslStream->height; + } else { + LOGE("Error, No ZSL stream identified"); + pthread_mutex_unlock(&mMutex); + return -EINVAL; + } + } else if (m_bIs4KVideo) { + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = (int32_t)videoWidth; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = (int32_t)videoHeight; + } else if (bYuv888OverrideJpeg) { + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = + (int32_t)largeYuv888Size.width; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = + (int32_t)largeYuv888Size.height; + } + break; + case 
HAL_PIXEL_FORMAT_RAW_OPAQUE: + case HAL_PIXEL_FORMAT_RAW16: + case HAL_PIXEL_FORMAT_RAW10: + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_RAW; + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = CAM_QCOM_FEATURE_NONE; + isRawStreamRequested = true; + break; + default: + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_DEFAULT; + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = CAM_QCOM_FEATURE_NONE; + break; + } + } + + setPAAFSupport(mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + (cam_stream_type_t) mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + gCamCapability[mCameraId]->color_arrangement); + + if (newStream->priv == NULL) { + //New stream, construct channel + switch (newStream->stream_type) { + case CAMERA3_STREAM_INPUT: + newStream->usage |= GRALLOC_USAGE_HW_CAMERA_READ; + newStream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE;//WR for inplace algo's + break; + case CAMERA3_STREAM_BIDIRECTIONAL: + newStream->usage |= GRALLOC_USAGE_HW_CAMERA_READ | + GRALLOC_USAGE_HW_CAMERA_WRITE; + break; + case CAMERA3_STREAM_OUTPUT: + /* For video encoding stream, set read/write rarely + * flag so that they may be set to un-cached */ + if (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) + newStream->usage |= + (GRALLOC_USAGE_SW_READ_RARELY | + GRALLOC_USAGE_SW_WRITE_RARELY | + GRALLOC_USAGE_HW_CAMERA_WRITE); + else if (IS_USAGE_ZSL(newStream->usage)) + { + LOGD("ZSL usage flag skipping"); + } + else if (newStream == zslStream + || newStream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) { + newStream->usage |= GRALLOC_USAGE_HW_CAMERA_ZSL; + } else + newStream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE; + break; + default: + LOGE("Invalid stream_type %d", newStream->stream_type); + break; + } + + if (newStream->stream_type == CAMERA3_STREAM_OUTPUT || + newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) { + QCamera3ProcessingChannel *channel = NULL; + switch 
(newStream->format) { + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + if ((newStream->usage & + private_handle_t::PRIV_FLAGS_VIDEO_ENCODER) && + (streamList->operation_mode == + CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE) + ) { + channel = new QCamera3RegularChannel(mCameraHandle->camera_handle, + mChannelHandle, mCameraHandle->ops, captureResultCb, + &gCamCapability[mCameraId]->padding_info, + this, + newStream, + (cam_stream_type_t) + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mMetadataChannel, + 0); //heap buffers are not required for HFR video channel + if (channel == NULL) { + LOGE("allocation of channel failed"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + //channel->getNumBuffers() will return 0 here so use + //MAX_INFLIGH_HFR_REQUESTS + newStream->max_buffers = MAX_INFLIGHT_HFR_REQUESTS; + newStream->priv = channel; + LOGI("num video buffers in HFR mode: %d", + MAX_INFLIGHT_HFR_REQUESTS); + } else { + /* Copy stream contents in HFR preview only case to create + * dummy batch channel so that sensor streaming is in + * HFR mode */ + if (!m_bIsVideo && (streamList->operation_mode == + CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE)) { + mDummyBatchStream = *newStream; + } + channel = new QCamera3RegularChannel(mCameraHandle->camera_handle, + mChannelHandle, mCameraHandle->ops, captureResultCb, + &gCamCapability[mCameraId]->padding_info, + this, + newStream, + (cam_stream_type_t) + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mMetadataChannel, + MAX_INFLIGHT_REQUESTS); + if (channel == NULL) { + LOGE("allocation of channel failed"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + newStream->max_buffers = channel->getNumBuffers(); + newStream->priv = channel; + } + break; + case HAL_PIXEL_FORMAT_YCbCr_420_888: { + channel = new 
QCamera3YUVChannel(mCameraHandle->camera_handle, + mChannelHandle, + mCameraHandle->ops, captureResultCb, + &padding_info, + this, + newStream, + (cam_stream_type_t) + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mMetadataChannel); + if (channel == NULL) { + LOGE("allocation of YUV channel failed"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + newStream->max_buffers = channel->getNumBuffers(); + newStream->priv = channel; + break; + } + case HAL_PIXEL_FORMAT_RAW_OPAQUE: + case HAL_PIXEL_FORMAT_RAW16: + case HAL_PIXEL_FORMAT_RAW10: + mRawChannel = new QCamera3RawChannel( + mCameraHandle->camera_handle, mChannelHandle, + mCameraHandle->ops, captureResultCb, + &padding_info, + this, newStream, + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mMetadataChannel, + (newStream->format == HAL_PIXEL_FORMAT_RAW16)); + if (mRawChannel == NULL) { + LOGE("allocation of raw channel failed"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + newStream->max_buffers = mRawChannel->getNumBuffers(); + newStream->priv = (QCamera3ProcessingChannel*)mRawChannel; + break; + case HAL_PIXEL_FORMAT_BLOB: + // Max live snapshot inflight buffer is 1. This is to mitigate + // frame drop issues for video snapshot. The more buffers being + // allocated, the more frame drops there are. + mPictureChannel = new QCamera3PicChannel( + mCameraHandle->camera_handle, mChannelHandle, + mCameraHandle->ops, captureResultCb, + &padding_info, this, newStream, + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + m_bIs4KVideo, isZsl, mMetadataChannel, + (m_bIsVideo ? 
1 : MAX_INFLIGHT_BLOB)); + if (mPictureChannel == NULL) { + LOGE("allocation of channel failed"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + newStream->priv = (QCamera3ProcessingChannel*)mPictureChannel; + newStream->max_buffers = mPictureChannel->getNumBuffers(); + mPictureChannel->overrideYuvSize( + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width, + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height); + break; + + default: + LOGE("not a supported format 0x%x", newStream->format); + break; + } + } else if (newStream->stream_type == CAMERA3_STREAM_INPUT) { + newStream->max_buffers = MAX_INFLIGHT_REPROCESS_REQUESTS; + } else { + LOGE("Error, Unknown stream type"); + pthread_mutex_unlock(&mMutex); + return -EINVAL; + } + + QCamera3Channel *channel = (QCamera3Channel*) newStream->priv; + if (channel != NULL && channel->isUBWCEnabled()) { + cam_format_t fmt = channel->getStreamDefaultFormat( + mStreamConfigInfo.type[mStreamConfigInfo.num_streams]); + if(fmt == CAM_FORMAT_YUV_420_NV12_UBWC) { + newStream->usage |= GRALLOC_USAGE_PRIVATE_ALLOC_UBWC; + } + } + + for (List<stream_info_t*>::iterator it=mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + if ((*it)->stream == newStream) { + (*it)->channel = (QCamera3ProcessingChannel*) newStream->priv; + break; + } + } + } else { + // Channel already exists for this stream + // Do nothing for now + } + padding_info = gCamCapability[mCameraId]->padding_info; + + /* Do not add entries for input stream in metastream info + * since there is no real stream associated with it + */ + if (newStream->stream_type != CAMERA3_STREAM_INPUT) + mStreamConfigInfo.num_streams++; + } + + //RAW DUMP channel + if (mEnableRawDump && isRawStreamRequested == false){ + cam_dimension_t rawDumpSize; + rawDumpSize = getMaxRawSize(mCameraId); + cam_feature_mask_t rawDumpFeatureMask = CAM_QCOM_FEATURE_NONE; + setPAAFSupport(rawDumpFeatureMask, + CAM_STREAM_TYPE_RAW, + 
gCamCapability[mCameraId]->color_arrangement); + mRawDumpChannel = new QCamera3RawDumpChannel(mCameraHandle->camera_handle, + mChannelHandle, + mCameraHandle->ops, + rawDumpSize, + &padding_info, + this, rawDumpFeatureMask); + if (!mRawDumpChannel) { + LOGE("Raw Dump channel cannot be created"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + } + + + if (mAnalysisChannel) { + cam_analysis_info_t analysisInfo; + memset(&analysisInfo, 0, sizeof(cam_analysis_info_t)); + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = + CAM_STREAM_TYPE_ANALYSIS; + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + setPAAFSupport(mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + gCamCapability[mCameraId]->color_arrangement); + rc = mCommon.getAnalysisInfo(FALSE, TRUE, + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + &analysisInfo); + if (rc != NO_ERROR) { + LOGE("getAnalysisInfo failed, ret = %d", rc); + pthread_mutex_unlock(&mMutex); + return rc; + } + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams] = + analysisInfo.analysis_max_res; + mStreamConfigInfo.num_streams++; + } + + if (isSupportChannelNeeded(streamList, mStreamConfigInfo)) { + cam_analysis_info_t supportInfo; + memset(&supportInfo, 0, sizeof(cam_analysis_info_t)); + cam_feature_mask_t callbackFeatureMask = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + setPAAFSupport(callbackFeatureMask, + CAM_STREAM_TYPE_CALLBACK, + gCamCapability[mCameraId]->color_arrangement); + rc = mCommon.getAnalysisInfo(FALSE, TRUE, callbackFeatureMask, &supportInfo); + if (rc != NO_ERROR) { + /* Ignore the error for Mono camera + * because the PAAF bit mask is only set + * for CAM_STREAM_TYPE_ANALYSIS stream type + */ + if (gCamCapability[mCameraId]->color_arrangement == CAM_FILTER_ARRANGEMENT_Y) { + rc = NO_ERROR; + } else { + LOGE("getAnalysisInfo failed, ret = %d", rc); + 
pthread_mutex_unlock(&mMutex); + return rc; + } + } + mSupportChannel = new QCamera3SupportChannel( + mCameraHandle->camera_handle, + mChannelHandle, + mCameraHandle->ops, + &gCamCapability[mCameraId]->padding_info, + callbackFeatureMask, + CAM_STREAM_TYPE_CALLBACK, + &QCamera3SupportChannel::kDim, + CAM_FORMAT_YUV_420_NV21, + supportInfo.hw_analysis_supported, + gCamCapability[mCameraId]->color_arrangement, + this); + if (!mSupportChannel) { + LOGE("dummy channel cannot be created"); + pthread_mutex_unlock(&mMutex); + return -ENOMEM; + } + } + + if (mSupportChannel) { + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams] = + QCamera3SupportChannel::kDim; + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = + CAM_STREAM_TYPE_CALLBACK; + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + setPAAFSupport(mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + gCamCapability[mCameraId]->color_arrangement); + mStreamConfigInfo.num_streams++; + } + + if (mRawDumpChannel) { + cam_dimension_t rawSize; + rawSize = getMaxRawSize(mCameraId); + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams] = + rawSize; + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = + CAM_STREAM_TYPE_RAW; + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_NONE; + setPAAFSupport(mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + gCamCapability[mCameraId]->color_arrangement); + mStreamConfigInfo.num_streams++; + } + /* In HFR mode, if video stream is not added, create a dummy channel so that + * ISP can create a batch mode even for preview only case. 
This channel is + * never 'start'ed (no stream-on), it is only 'initialized' */ + if ((mOpMode == CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE) && + !m_bIsVideo) { + cam_feature_mask_t dummyFeatureMask = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + setPAAFSupport(dummyFeatureMask, + CAM_STREAM_TYPE_VIDEO, + gCamCapability[mCameraId]->color_arrangement); + mDummyBatchChannel = new QCamera3RegularChannel(mCameraHandle->camera_handle, + mChannelHandle, + mCameraHandle->ops, captureResultCb, + &gCamCapability[mCameraId]->padding_info, + this, + &mDummyBatchStream, + CAM_STREAM_TYPE_VIDEO, + dummyFeatureMask, + mMetadataChannel); + if (NULL == mDummyBatchChannel) { + LOGE("creation of mDummyBatchChannel failed." + "Preview will use non-hfr sensor mode "); + } + } + if (mDummyBatchChannel) { + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = + mDummyBatchStream.width; + mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = + mDummyBatchStream.height; + mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = + CAM_STREAM_TYPE_VIDEO; + mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = + CAM_QCOM_FEATURE_PP_SUPERSET_HAL3; + setPAAFSupport(mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams], + mStreamConfigInfo.type[mStreamConfigInfo.num_streams], + gCamCapability[mCameraId]->color_arrangement); + mStreamConfigInfo.num_streams++; + } + + mStreamConfigInfo.buffer_info.min_buffers = MIN_INFLIGHT_REQUESTS; + mStreamConfigInfo.buffer_info.max_buffers = + m_bIs4KVideo ? 
0 : MAX_INFLIGHT_REQUESTS; + + /* Initialize mPendingRequestInfo and mPendingBuffersMap */ + for (pendingRequestIterator i = mPendingRequestsList.begin(); + i != mPendingRequestsList.end();) { + i = erasePendingRequest(i); + } + mPendingFrameDropList.clear(); + // Initialize/Reset the pending buffers list + for (auto &req : mPendingBuffersMap.mPendingBuffersInRequest) { + req.mPendingBufferList.clear(); + } + mPendingBuffersMap.mPendingBuffersInRequest.clear(); + + mPendingReprocessResultList.clear(); + + mCurJpegMeta.clear(); + //Get min frame duration for this streams configuration + deriveMinFrameDuration(); + + // Update state + mState = CONFIGURED; + + pthread_mutex_unlock(&mMutex); + + return rc; +} + +/*=========================================================================== + * FUNCTION : validateCaptureRequest + * + * DESCRIPTION: validate a capture request from camera service + * + * PARAMETERS : + * @request : request from framework to process + * + * RETURN : + * + *==========================================================================*/ +int QCamera3HardwareInterface::validateCaptureRequest( + camera3_capture_request_t *request) +{ + ssize_t idx = 0; + const camera3_stream_buffer_t *b; + CameraMetadata meta; + + /* Sanity check the request */ + if (request == NULL) { + LOGE("NULL capture request"); + return BAD_VALUE; + } + + if ((request->settings == NULL) && (mState == CONFIGURED)) { + /*settings cannot be null for the first request*/ + return BAD_VALUE; + } + + uint32_t frameNumber = request->frame_number; + if (request->num_output_buffers < 1 || request->output_buffers == NULL) { + LOGE("Request %d: No output buffers provided!", + __FUNCTION__, frameNumber); + return BAD_VALUE; + } + if (request->num_output_buffers >= MAX_NUM_STREAMS) { + LOGE("Number of buffers %d equals or is greater than maximum number of streams!", + request->num_output_buffers, MAX_NUM_STREAMS); + return BAD_VALUE; + } + if (request->input_buffer != NULL) { + b = 
request->input_buffer; + if (b->status != CAMERA3_BUFFER_STATUS_OK) { + LOGE("Request %d: Buffer %ld: Status not OK!", + frameNumber, (long)idx); + return BAD_VALUE; + } + if (b->release_fence != -1) { + LOGE("Request %d: Buffer %ld: Has a release fence!", + frameNumber, (long)idx); + return BAD_VALUE; + } + if (b->buffer == NULL) { + LOGE("Request %d: Buffer %ld: NULL buffer handle!", + frameNumber, (long)idx); + return BAD_VALUE; + } + } + + // Validate all buffers + b = request->output_buffers; + do { + QCamera3ProcessingChannel *channel = + static_cast<QCamera3ProcessingChannel*>(b->stream->priv); + if (channel == NULL) { + LOGE("Request %d: Buffer %ld: Unconfigured stream!", + frameNumber, (long)idx); + return BAD_VALUE; + } + if (b->status != CAMERA3_BUFFER_STATUS_OK) { + LOGE("Request %d: Buffer %ld: Status not OK!", + frameNumber, (long)idx); + return BAD_VALUE; + } + if (b->release_fence != -1) { + LOGE("Request %d: Buffer %ld: Has a release fence!", + frameNumber, (long)idx); + return BAD_VALUE; + } + if (b->buffer == NULL) { + LOGE("Request %d: Buffer %ld: NULL buffer handle!", + frameNumber, (long)idx); + return BAD_VALUE; + } + if (*(b->buffer) == NULL) { + LOGE("Request %d: Buffer %ld: NULL private handle!", + frameNumber, (long)idx); + return BAD_VALUE; + } + idx++; + b = request->output_buffers + idx; + } while (idx < (ssize_t)request->num_output_buffers); + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : deriveMinFrameDuration + * + * DESCRIPTION: derive mininum processed, jpeg, and raw frame durations based + * on currently configured streams. 
+ * + * PARAMETERS : NONE + * + * RETURN : NONE + * + *==========================================================================*/ +void QCamera3HardwareInterface::deriveMinFrameDuration() +{ + int32_t maxJpegDim, maxProcessedDim, maxRawDim; + + maxJpegDim = 0; + maxProcessedDim = 0; + maxRawDim = 0; + + // Figure out maximum jpeg, processed, and raw dimensions + for (List<stream_info_t*>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + + // Input stream doesn't have valid stream_type + if ((*it)->stream->stream_type == CAMERA3_STREAM_INPUT) + continue; + + int32_t dimension = (int32_t)((*it)->stream->width * (*it)->stream->height); + if ((*it)->stream->format == HAL_PIXEL_FORMAT_BLOB) { + if (dimension > maxJpegDim) + maxJpegDim = dimension; + } else if ((*it)->stream->format == HAL_PIXEL_FORMAT_RAW_OPAQUE || + (*it)->stream->format == HAL_PIXEL_FORMAT_RAW10 || + (*it)->stream->format == HAL_PIXEL_FORMAT_RAW16) { + if (dimension > maxRawDim) + maxRawDim = dimension; + } else { + if (dimension > maxProcessedDim) + maxProcessedDim = dimension; + } + } + + size_t count = MIN(gCamCapability[mCameraId]->supported_raw_dim_cnt, + MAX_SIZES_CNT); + + //Assume all jpeg dimensions are in processed dimensions. 
+ if (maxJpegDim > maxProcessedDim) + maxProcessedDim = maxJpegDim; + //Find the smallest raw dimension that is greater or equal to jpeg dimension + if (maxProcessedDim > maxRawDim) { + maxRawDim = INT32_MAX; + + for (size_t i = 0; i < count; i++) { + int32_t dimension = gCamCapability[mCameraId]->raw_dim[i].width * + gCamCapability[mCameraId]->raw_dim[i].height; + if (dimension >= maxProcessedDim && dimension < maxRawDim) + maxRawDim = dimension; + } + } + + //Find minimum durations for processed, jpeg, and raw + for (size_t i = 0; i < count; i++) { + if (maxRawDim == gCamCapability[mCameraId]->raw_dim[i].width * + gCamCapability[mCameraId]->raw_dim[i].height) { + mMinRawFrameDuration = gCamCapability[mCameraId]->raw_min_duration[i]; + break; + } + } + count = MIN(gCamCapability[mCameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT); + for (size_t i = 0; i < count; i++) { + if (maxProcessedDim == + gCamCapability[mCameraId]->picture_sizes_tbl[i].width * + gCamCapability[mCameraId]->picture_sizes_tbl[i].height) { + mMinProcessedFrameDuration = gCamCapability[mCameraId]->picture_min_duration[i]; + mMinJpegFrameDuration = gCamCapability[mCameraId]->picture_min_duration[i]; + break; + } + } +} + +/*=========================================================================== + * FUNCTION : getMinFrameDuration + * + * DESCRIPTION: get minimum frame draution based on the current maximum frame durations + * and current request configuration. 
+ * + * PARAMETERS : @request: requset sent by the frameworks + * + * RETURN : min farme duration for a particular request + * + *==========================================================================*/ +int64_t QCamera3HardwareInterface::getMinFrameDuration(const camera3_capture_request_t *request) +{ + bool hasJpegStream = false; + bool hasRawStream = false; + for (uint32_t i = 0; i < request->num_output_buffers; i ++) { + const camera3_stream_t *stream = request->output_buffers[i].stream; + if (stream->format == HAL_PIXEL_FORMAT_BLOB) + hasJpegStream = true; + else if (stream->format == HAL_PIXEL_FORMAT_RAW_OPAQUE || + stream->format == HAL_PIXEL_FORMAT_RAW10 || + stream->format == HAL_PIXEL_FORMAT_RAW16) + hasRawStream = true; + } + + if (!hasJpegStream) + return MAX(mMinRawFrameDuration, mMinProcessedFrameDuration); + else + return MAX(MAX(mMinRawFrameDuration, mMinProcessedFrameDuration), mMinJpegFrameDuration); +} + +/*=========================================================================== + * FUNCTION : handleBuffersDuringFlushLock + * + * DESCRIPTION: Account for buffers returned from back-end during flush + * This function is executed while mMutex is held by the caller. 
 *
 * PARAMETERS :
 * @buffer: image buffer for the callback
 *
 * RETURN :
 *==========================================================================*/
void QCamera3HardwareInterface::handleBuffersDuringFlushLock(camera3_stream_buffer_t *buffer)
{
    // Walk every pending request's buffer list looking for this exact
    // gralloc buffer handle; each match accounts for one buffer that the
    // back-end returned while a flush was in progress.
    bool buffer_found = false;
    for (List<PendingBuffersInRequest>::iterator req =
            mPendingBuffersMap.mPendingBuffersInRequest.begin();
            req != mPendingBuffersMap.mPendingBuffersInRequest.end(); req++) {
        for (List<PendingBufferInfo>::iterator i =
                req->mPendingBufferList.begin();
                i != req->mPendingBufferList.end(); i++) {
            if (i->buffer == buffer->buffer) {
                mPendingBuffersMap.numPendingBufsAtFlush--;
                LOGD("Found buffer %p for Frame %d, numPendingBufsAtFlush = %d",
                        buffer->buffer, req->frame_number,
                        mPendingBuffersMap.numPendingBufsAtFlush);
                buffer_found = true;
                break;
            }
        }
        if (buffer_found) {
            // A buffer handle is tracked at most once; stop at first match.
            break;
        }
    }
    if (mPendingBuffersMap.numPendingBufsAtFlush == 0) {
        //signal the flush()
        // All outstanding buffers are back — wake the thread blocked in
        // flush() waiting on mBuffersCond (mMutex is held by our caller).
        LOGD("All buffers returned to HAL. Continue flush");
        pthread_cond_signal(&mBuffersCond);
    }
}


/*===========================================================================
 * FUNCTION   : handlePendingReprocResults
 *
 * DESCRIPTION: check and notify on any pending reprocess results
 *
 * PARAMETERS :
 *   @frame_number   : Pending request frame number
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3HardwareInterface::handlePendingReprocResults(uint32_t frame_number)
{
    // A reprocess result may have been cached (by handleBufferWithLock) when
    // older live frames were still outstanding; once those drained, deliver
    // the deferred shutter notify and the capture result now.
    for (List<PendingReprocessResult>::iterator j = mPendingReprocessResultList.begin();
            j != mPendingReprocessResultList.end(); j++) {
        if (j->frame_number == frame_number) {
            mCallbackOps->notify(mCallbackOps, &j->notify_msg);

            LOGD("Delayed reprocess notify %d",
                    frame_number);

            for (pendingRequestIterator k = mPendingRequestsList.begin();
                    k != mPendingRequestsList.end(); k++) {

                if (k->frame_number == j->frame_number) {
                    LOGD("Found reprocess frame number %d in pending reprocess List "
                            "Take it out!!",
                            k->frame_number);

                    // Single output buffer plus the original input buffer and
                    // settings make up the complete reprocess result.
                    camera3_capture_result result;
                    memset(&result, 0, sizeof(camera3_capture_result));
                    result.frame_number = frame_number;
                    result.num_output_buffers = 1;
                    result.output_buffers = &j->buffer;
                    result.input_buffer = k->input_buffer;
                    result.result = k->settings;
                    result.partial_result = PARTIAL_RESULT_COUNT;
                    mCallbackOps->process_capture_result(mCallbackOps, &result);

                    erasePendingRequest(k);
                    break;
                }
            }
            // Erase invalidates j, but we break out immediately, so this is safe.
            mPendingReprocessResultList.erase(j);
            break;
        }
    }
    return NO_ERROR;
}

/*===========================================================================
 * FUNCTION   : handleBatchMetadata
 *
 * DESCRIPTION: Handles metadata buffer callback in batch mode
 *
 * PARAMETERS : @metadata_buf: metadata buffer
 *              @free_and_bufdone_meta_buf: Buf done on the meta buf and free
 *                 the meta buf in this method
 *
 * RETURN :
 *
 *==========================================================================*/
void QCamera3HardwareInterface::handleBatchMetadata(
        mm_camera_super_buf_t *metadata_buf, bool free_and_bufdone_meta_buf)
{
    ATRACE_CALL();

    if (NULL == metadata_buf) {
        LOGE("metadata_buf is NULL");
        return;
    }
    /* In batch mode, the metdata will contain the frame number and timestamp of
     * the last frame in the batch. Eg: a batch containing buffers from request
     * 5,6,7 and 8 will have frame number and timestamp corresponding to 8.
     * multiple process_capture_requests => 1 set_param => 1 handleBatchMetata =>
     * multiple process_capture_results */
    metadata_buffer_t *metadata =
            (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
    int32_t frame_number_valid = 0, urgent_frame_number_valid = 0;
    uint32_t last_frame_number = 0, last_urgent_frame_number = 0;
    uint32_t first_frame_number = 0, first_urgent_frame_number = 0;
    uint32_t frame_number = 0, urgent_frame_number = 0;
    int64_t last_frame_capture_time = 0, first_frame_capture_time, capture_time;
    bool invalid_metadata = false;
    size_t urgentFrameNumDiff = 0, frameNumDiff = 0;
    size_t loopCount = 1;

    int32_t *p_frame_number_valid =
            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
    uint32_t *p_frame_number =
            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);
    int64_t *p_capture_time =
            POINTER_OF_META(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
    int32_t *p_urgent_frame_number_valid =
            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
    uint32_t *p_urgent_frame_number =
            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);

    if ((NULL == p_frame_number_valid) || (NULL == p_frame_number) ||
            (NULL == p_capture_time) || (NULL == p_urgent_frame_number_valid) ||
            (NULL == p_urgent_frame_number)) {
        LOGE("Invalid metadata");
        // Still fall through: handleMetadataWithLock must run once anyway
        // for pipeline depth accounting (see the loop below).
        invalid_metadata = true;
    } else {
        frame_number_valid = *p_frame_number_valid;
        last_frame_number = *p_frame_number;
        last_frame_capture_time = *p_capture_time;
        urgent_frame_number_valid = *p_urgent_frame_number_valid;
        last_urgent_frame_number = *p_urgent_frame_number;
    }

    /* In batchmode, when no video buffers are requested, set_parms are sent
     * for every capture_request. The difference between consecutive urgent
     * frame numbers and frame numbers should be used to interpolate the
     * corresponding frame numbers and time stamps */
    pthread_mutex_lock(&mMutex);
    if (urgent_frame_number_valid) {
        // First frame of the batch, recorded when the batch was submitted.
        first_urgent_frame_number =
                mPendingBatchMap.valueFor(last_urgent_frame_number);
        urgentFrameNumDiff = last_urgent_frame_number + 1 -
                first_urgent_frame_number;

        LOGD("urgent_frm: valid: %d frm_num: %d - %d",
                urgent_frame_number_valid,
                first_urgent_frame_number, last_urgent_frame_number);
    }

    if (frame_number_valid) {
        first_frame_number = mPendingBatchMap.valueFor(last_frame_number);
        frameNumDiff = last_frame_number + 1 -
                first_frame_number;
        // The map entry is retired only here, on the (non-urgent) frame
        // number — the urgent pass above must still be able to look it up.
        mPendingBatchMap.removeItem(last_frame_number);

        LOGD("frm: valid: %d frm_num: %d - %d",
                frame_number_valid,
                first_frame_number, last_frame_number);

    }
    pthread_mutex_unlock(&mMutex);

    if (urgent_frame_number_valid || frame_number_valid) {
        loopCount = MAX(urgentFrameNumDiff, frameNumDiff);
        // NOTE(review): urgentFrameNumDiff/frameNumDiff are size_t but are
        // logged with %d — benign on 32-bit builds; verify for 64-bit.
        if (urgentFrameNumDiff > MAX_HFR_BATCH_SIZE)
            LOGE("urgentFrameNumDiff: %d urgentFrameNum: %d",
                    urgentFrameNumDiff, last_urgent_frame_number);
        if (frameNumDiff > MAX_HFR_BATCH_SIZE)
            LOGE("frameNumDiff: %d frameNum: %d",
                    frameNumDiff, last_frame_number);
    }

    // Replay the single batched metadata once per interpolated frame.
    for (size_t i = 0; i < loopCount; i++) {
        /* handleMetadataWithLock is called even for invalid_metadata for
         * pipeline depth calculation */
        if (!invalid_metadata) {
            /* Infer frame number. Batch metadata contains frame number of the
             * last frame */
            if (urgent_frame_number_valid) {
                if (i < urgentFrameNumDiff) {
                    urgent_frame_number =
                            first_urgent_frame_number + i;
                    LOGD("inferred urgent frame_number: %d",
                            urgent_frame_number);
                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
                            CAM_INTF_META_URGENT_FRAME_NUMBER, urgent_frame_number);
                } else {
                    /* This is to handle when urgentFrameNumDiff < frameNumDiff */
                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
                            CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, 0);
                }
            }

            /* Infer frame number. Batch metadata contains frame number of the
             * last frame */
            if (frame_number_valid) {
                if (i < frameNumDiff) {
                    frame_number = first_frame_number + i;
                    LOGD("inferred frame_number: %d", frame_number);
                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
                            CAM_INTF_META_FRAME_NUMBER, frame_number);
                } else {
                    /* This is to handle when urgentFrameNumDiff > frameNumDiff */
                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
                            CAM_INTF_META_FRAME_NUMBER_VALID, 0);
                }
            }

            if (last_frame_capture_time) {
                //Infer timestamp
                // Timestamps are spread evenly across the batch at the HFR
                // frame interval, ending at the last frame's capture time.
                first_frame_capture_time = last_frame_capture_time -
                        (((loopCount - 1) * NSEC_PER_SEC) / mHFRVideoFps);
                capture_time =
                        first_frame_capture_time + (i * NSEC_PER_SEC / mHFRVideoFps);
                ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
                        CAM_INTF_META_SENSOR_TIMESTAMP, capture_time);
                LOGD("batch capture_time: %lld, capture_time: %lld",
                        last_frame_capture_time, capture_time);
            }
        }
        pthread_mutex_lock(&mMutex);
        // Pass false so the shared metadata buffer survives every iteration;
        // it is buf-done exactly once below.
        handleMetadataWithLock(metadata_buf,
                false /* free_and_bufdone_meta_buf */);
        pthread_mutex_unlock(&mMutex);
    }

    /* BufDone metadata buffer */
    if (free_and_bufdone_meta_buf) {
        mMetadataChannel->bufDone(metadata_buf);
        free(metadata_buf);
    }
}

// Send a CAMERA3_MSG_ERROR notification for the given frame to the framework.
void QCamera3HardwareInterface::notifyError(uint32_t frameNumber,
        camera3_error_msg_code_t errorCode)
{
    camera3_notify_msg_t notify_msg;
    memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
    notify_msg.type = CAMERA3_MSG_ERROR;
    notify_msg.message.error.error_code = errorCode;
    notify_msg.message.error.error_stream = NULL;
    notify_msg.message.error.frame_number = frameNumber;
    mCallbackOps->notify(mCallbackOps, &notify_msg);

    return;
}
/*===========================================================================
 * FUNCTION   : handleMetadataWithLock
 *
 * DESCRIPTION: Handles metadata buffer callback with mMutex lock held.
 *
 * PARAMETERS : @metadata_buf: metadata buffer
 *              @free_and_bufdone_meta_buf: Buf done on the meta buf and free
 *                 the meta buf in this method
 *
 * RETURN :
 *
 *==========================================================================*/
void QCamera3HardwareInterface::handleMetadataWithLock(
    mm_camera_super_buf_t *metadata_buf, bool free_and_bufdone_meta_buf)
{
    ATRACE_CALL();
    if ((mFlushPerf) || (ERROR == mState) || (DEINIT == mState)) {
        //during flush do not send metadata from this thread
        LOGD("not sending metadata during flush or when mState is error");
        if (free_and_bufdone_meta_buf) {
            mMetadataChannel->bufDone(metadata_buf);
            free(metadata_buf);
        }
        return;
    }

    //not in flush
    metadata_buffer_t *metadata = (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
    int32_t frame_number_valid, urgent_frame_number_valid;
    uint32_t frame_number, urgent_frame_number;
    int64_t capture_time;
    nsecs_t currentSysTime;

    int32_t *p_frame_number_valid =
            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
    uint32_t *p_frame_number = POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);
    int64_t *p_capture_time = POINTER_OF_META(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
    int32_t *p_urgent_frame_number_valid =
            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
    uint32_t *p_urgent_frame_number =
            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);
    // p_cam_frame_drop is declared by this macro and stays in scope for the
    // whole function; it is consulted again in the per-request loop below.
    IF_META_AVAILABLE(cam_frame_dropped_t, p_cam_frame_drop, CAM_INTF_META_FRAME_DROPPED,
            metadata) {
        LOGD("Dropped frame info for frame_number_valid %d, frame_number %d",
                *p_frame_number_valid, *p_frame_number);
    }

    if ((NULL == p_frame_number_valid) || (NULL == p_frame_number) || (NULL == p_capture_time) ||
            (NULL == p_urgent_frame_number_valid) || (NULL == p_urgent_frame_number)) {
        LOGE("Invalid metadata");
        if (free_and_bufdone_meta_buf) {
            mMetadataChannel->bufDone(metadata_buf);
            free(metadata_buf);
        }
        // Even with bad metadata, pipeline depth must still be bumped.
        goto done_metadata;
    }
    frame_number_valid = *p_frame_number_valid;
    frame_number = *p_frame_number;
    capture_time = *p_capture_time;
    urgent_frame_number_valid = *p_urgent_frame_number_valid;
    urgent_frame_number = *p_urgent_frame_number;
    currentSysTime = systemTime(CLOCK_MONOTONIC);

    // Detect if buffers from any requests are overdue
    for (auto &req : mPendingBuffersMap.mPendingBuffersInRequest) {
        if ( (currentSysTime - req.timestamp) >
                s2ns(MISSING_REQUEST_BUF_TIMEOUT) ) {
            for (auto &missed : req.mPendingBufferList) {
                LOGE("Current frame: %d. Missing: frame = %d, buffer = %p,"
                        "stream type = %d, stream format = %d",
                        frame_number, req.frame_number, missed.buffer,
                        missed.stream->stream_type, missed.stream->format);
            }
        }
    }
    //Partial result on process_capture_result for timestamp
    if (urgent_frame_number_valid) {
        LOGD("valid urgent frame_number = %u, capture_time = %lld",
                urgent_frame_number, capture_time);

        //Recieved an urgent Frame Number, handle it
        //using partial results
        for (pendingRequestIterator i =
                mPendingRequestsList.begin(); i != mPendingRequestsList.end(); i++) {
            LOGD("Iterator Frame = %d urgent frame = %d",
                    i->frame_number, urgent_frame_number);

            if ((!i->input_buffer) && (i->frame_number < urgent_frame_number) &&
                    (i->partial_result_cnt == 0)) {
                LOGE("Error: HAL missed urgent metadata for frame number %d",
                        i->frame_number);
            }

            if (i->frame_number == urgent_frame_number &&
                    i->bUrgentReceived == 0) {

                camera3_capture_result_t result;
                memset(&result, 0, sizeof(camera3_capture_result_t));

                i->partial_result_cnt++;
                i->bUrgentReceived = 1;
                // Extract 3A metadata
                result.result =
                        translateCbUrgentMetadataToResultMetadata(metadata);
                // Populate metadata result
                result.frame_number = urgent_frame_number;
                result.num_output_buffers = 0;
                result.output_buffers = NULL;
                result.partial_result = i->partial_result_cnt;

                mCallbackOps->process_capture_result(mCallbackOps, &result);
                LOGD("urgent frame_number = %u, capture_time = %lld",
                        result.frame_number, capture_time);
                // The translated metadata was only borrowed by the callback.
                free_camera_metadata((camera_metadata_t *)result.result);
                break;
            }
        }
    }

    if (!frame_number_valid) {
        LOGD("Not a valid normal frame number, used as SOF only");
        if (free_and_bufdone_meta_buf) {
            mMetadataChannel->bufDone(metadata_buf);
            free(metadata_buf);
        }
        goto done_metadata;
    }
    LOGH("valid frame_number = %u, capture_time = %lld",
            frame_number, capture_time);

    for (pendingRequestIterator i = mPendingRequestsList.begin();
            i != mPendingRequestsList.end() && i->frame_number <= frame_number;) {
        // Flush out all entries with less or equal frame numbers.

        camera3_capture_result_t result;
        memset(&result, 0, sizeof(camera3_capture_result_t));

        LOGD("frame_number in the list is %u", i->frame_number);
        i->partial_result_cnt++;
        result.partial_result = i->partial_result_cnt;

        // Check whether any stream buffer corresponding to this is dropped or not
        // If dropped, then send the ERROR_BUFFER for the corresponding stream
        // The API does not expect a blob buffer to be dropped
        if (p_cam_frame_drop && p_cam_frame_drop->frame_dropped) {
            /* Clear notify_msg structure */
            camera3_notify_msg_t notify_msg;
            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
            for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
                    j != i->buffers.end(); j++) {
                if (j->stream->format != HAL_PIXEL_FORMAT_BLOB) {
                    QCamera3ProcessingChannel *channel = (QCamera3ProcessingChannel *)j->stream->priv;
                    uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
                    for (uint32_t k = 0; k < p_cam_frame_drop->cam_stream_ID.num_streams; k++) {
                        if (streamID == p_cam_frame_drop->cam_stream_ID.streamID[k]) {
                            // Send Error notify to frameworks with CAMERA3_MSG_ERROR_BUFFER
                            LOGE("Start of reporting error frame#=%u, streamID=%u",
                                    i->frame_number, streamID);
                            notify_msg.type = CAMERA3_MSG_ERROR;
                            notify_msg.message.error.frame_number = i->frame_number;
                            notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER ;
                            notify_msg.message.error.error_stream = j->stream;
                            mCallbackOps->notify(mCallbackOps, &notify_msg);
                            LOGE("End of reporting error frame#=%u, streamID=%u",
                                    i->frame_number, streamID);
                            // Remember the drop so the buffer itself is
                            // flagged STATUS_ERROR when it is returned later.
                            PendingFrameDropInfo PendingFrameDrop;
                            PendingFrameDrop.frame_number=i->frame_number;
                            PendingFrameDrop.stream_ID = streamID;
                            // Add the Frame drop info to mPendingFrameDropList
                            mPendingFrameDropList.push_back(PendingFrameDrop);
                        }
                    }
                } else {
                    LOGE("JPEG buffer dropped for frame number %d",
                            i->frame_number);
                }
            }
        }

        // Send empty metadata with already filled buffers for dropped metadata
        // and send valid metadata with already filled buffers for current metadata
        /* we could hit this case when we either
         * 1. have a pending reprocess request or
         * 2. miss a metadata buffer callback */
        if (i->frame_number < frame_number) {
            if (i->input_buffer) {
                /* this will be handled in handleInputBufferWithLock */
                i++;
                continue;
            } else {

                mPendingLiveRequest--;

                // Metadata for this frame was missed: return a minimal result
                // (request id only) and raise ERROR_RESULT to the framework.
                CameraMetadata dummyMetadata;
                dummyMetadata.update(ANDROID_REQUEST_ID, &(i->request_id), 1);
                result.result = dummyMetadata.release();

                notifyError(i->frame_number, CAMERA3_MSG_ERROR_RESULT);
            }
        } else {
            mPendingLiveRequest--;
            /* Clear notify_msg structure */
            camera3_notify_msg_t notify_msg;
            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));

            // Send shutter notify to frameworks
            notify_msg.type = CAMERA3_MSG_SHUTTER;
            notify_msg.message.shutter.frame_number = i->frame_number;
            notify_msg.message.shutter.timestamp = (uint64_t)capture_time;
            mCallbackOps->notify(mCallbackOps, &notify_msg);

            i->timestamp = capture_time;

            // Find channel requiring metadata, meaning internal offline postprocess
            // is needed.
            //TODO: for now, we don't support two streams requiring metadata at the same time.
            // (because we are not making copies, and metadata buffer is not reference counted.
            bool internalPproc = false;
            for (pendingBufferIterator iter = i->buffers.begin();
                    iter != i->buffers.end(); iter++) {
                if (iter->need_metadata) {
                    internalPproc = true;
                    QCamera3ProcessingChannel *channel =
                            (QCamera3ProcessingChannel *)iter->stream->priv;
                    // Ownership of metadata_buf passes to the channel here;
                    // that is why it must not be freed below in this case.
                    channel->queueReprocMetadata(metadata_buf);
                    break;
                }
            }

            result.result = translateFromHalMetadata(metadata,
                    i->timestamp, i->request_id, i->jpegMetadata, i->pipeline_depth,
                    i->capture_intent, internalPproc, i->fwkCacMode);

            saveExifParams(metadata);

            if (i->blob_request) {
                {
                    //Dump tuning metadata if enabled and available
                    char prop[PROPERTY_VALUE_MAX];
                    memset(prop, 0, sizeof(prop));
                    property_get("persist.camera.dumpmetadata", prop, "0");
                    int32_t enabled = atoi(prop);
                    if (enabled && metadata->is_tuning_params_valid) {
                        dumpMetadataToFile(metadata->tuning_params,
                                mMetaFrameCount,
                                enabled,
                                "Snapshot",
                                frame_number);
                    }
                }
            }

            if (!internalPproc) {
                LOGD("couldn't find need_metadata for this metadata");
                // Return metadata buffer
                if (free_and_bufdone_meta_buf) {
                    mMetadataChannel->bufDone(metadata_buf);
                    free(metadata_buf);
                }
            }
        }
        if (!result.result) {
            LOGE("metadata is NULL");
        }
        result.frame_number = i->frame_number;
        result.input_buffer = i->input_buffer;
        result.num_output_buffers = 0;
        result.output_buffers = NULL;
        // Count only buffers that have already come back from the streams;
        // the rest will be delivered later by handleBufferWithLock.
        for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
                j != i->buffers.end(); j++) {
            if (j->buffer) {
                result.num_output_buffers++;
            }
        }

        updateFpsInPreviewBuffer(metadata, i->frame_number);

        if (result.num_output_buffers > 0) {
            camera3_stream_buffer_t *result_buffers =
                    new camera3_stream_buffer_t[result.num_output_buffers];
            if (result_buffers != NULL) {
                size_t result_buffers_idx = 0;
                for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
                        j != i->buffers.end(); j++) {
                    if (j->buffer) {
                        // Mark the buffer STATUS_ERROR if this frame/stream
                        // was reported dropped earlier.
                        for (List<PendingFrameDropInfo>::iterator m = mPendingFrameDropList.begin();
                                m != mPendingFrameDropList.end(); m++) {
                            QCamera3Channel *channel = (QCamera3Channel *)j->buffer->stream->priv;
                            uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
                            if((m->stream_ID == streamID) && (m->frame_number==frame_number)) {
                                j->buffer->status=CAMERA3_BUFFER_STATUS_ERROR;
                                LOGE("Stream STATUS_ERROR frame_number=%u, streamID=%u",
                                        frame_number, streamID);
                                m = mPendingFrameDropList.erase(m);
                                break;
                            }
                        }
                        mPendingBuffersMap.removeBuf(j->buffer->buffer);
                        result_buffers[result_buffers_idx++] = *(j->buffer);
                        // j->buffer was malloc'd by handleBufferWithLock.
                        free(j->buffer);
                        j->buffer = NULL;
                    }
                }
                result.output_buffers = result_buffers;
                mCallbackOps->process_capture_result(mCallbackOps, &result);
                LOGD("meta frame_number = %u, capture_time = %lld",
                        result.frame_number, i->timestamp);
                free_camera_metadata((camera_metadata_t *)result.result);
                delete[] result_buffers;
            }else {
                LOGE("Fatal error: out of memory");
            }
        } else {
            mCallbackOps->process_capture_result(mCallbackOps, &result);
            LOGD("meta frame_number = %u, capture_time = %lld",
                    result.frame_number, i->timestamp);
            free_camera_metadata((camera_metadata_t *)result.result);
        }

        i = erasePendingRequest(i);

        if (!mPendingReprocessResultList.empty()) {
            handlePendingReprocResults(frame_number + 1);
        }
    }

done_metadata:
    // Every still-pending request has now seen one more metadata callback.
    for (pendingRequestIterator i = mPendingRequestsList.begin();
            i != mPendingRequestsList.end() ;i++) {
        i->pipeline_depth++;
    }
    LOGD("mPendingLiveRequest = %d", mPendingLiveRequest);
    unblockRequestIfNecessary();
}

/*===========================================================================
 * FUNCTION   : hdrPlusPerfLock
 *
 * DESCRIPTION: perf lock for HDR+ using custom intent
 *
 * PARAMETERS : @metadata_buf: Metadata super_buf pointer
 *
 * RETURN     : None
 *
 *==========================================================================*/
void QCamera3HardwareInterface::hdrPlusPerfLock(
        mm_camera_super_buf_t *metadata_buf)
{
    if (NULL ==
metadata_buf) {
        LOGE("metadata_buf is NULL");
        return;
    }
    metadata_buffer_t *metadata =
            (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
    int32_t *p_frame_number_valid =
            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
    uint32_t *p_frame_number =
            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);

    if (p_frame_number_valid == NULL || p_frame_number == NULL) {
        LOGE("%s: Invalid metadata", __func__);
        return;
    }

    //acquire perf lock for 5 sec after the last HDR frame is captured
    // NOTE(review): the NULL re-checks below are redundant — both pointers
    // were already validated (with early return) just above.
    if ((p_frame_number_valid != NULL) && *p_frame_number_valid) {
        if ((p_frame_number != NULL) &&
                (mLastCustIntentFrmNum == (int32_t)*p_frame_number)) {
            m_perfLock.lock_acq_timed(HDR_PLUS_PERF_TIME_OUT);
        }
    }

    //release lock after perf lock timer is expired. If lock is already released,
    //isTimerReset returns false
    if (m_perfLock.isTimerReset()) {
        mLastCustIntentFrmNum = -1;
        m_perfLock.lock_rel_timed();
    }
}

/*===========================================================================
 * FUNCTION   : handleInputBufferWithLock
 *
 * DESCRIPTION: Handles input buffer and shutter callback with mMutex lock held.
 *
 * PARAMETERS : @frame_number: frame number of the input buffer
 *
 * RETURN :
 *
 *==========================================================================*/
void QCamera3HardwareInterface::handleInputBufferWithLock(uint32_t frame_number)
{
    ATRACE_CALL();
    // Locate the pending (reprocess) request carrying this frame number.
    pendingRequestIterator i = mPendingRequestsList.begin();
    while (i != mPendingRequestsList.end() && i->frame_number != frame_number){
        i++;
    }
    if (i != mPendingRequestsList.end() && i->input_buffer) {
        //found the right request
        if (!i->shutter_notified) {
            CameraMetadata settings;
            camera3_notify_msg_t notify_msg;
            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
            // Prefer the sensor timestamp carried in the reprocess settings;
            // fall back to the current monotonic time if it is absent.
            nsecs_t capture_time = systemTime(CLOCK_MONOTONIC);
            if(i->settings) {
                settings = i->settings;
                if (settings.exists(ANDROID_SENSOR_TIMESTAMP)) {
                    capture_time = settings.find(ANDROID_SENSOR_TIMESTAMP).data.i64[0];
                } else {
                    LOGE("No timestamp in input settings! Using current one.");
                }
            } else {
                LOGE("Input settings missing!");
            }

            notify_msg.type = CAMERA3_MSG_SHUTTER;
            notify_msg.message.shutter.frame_number = frame_number;
            notify_msg.message.shutter.timestamp = (uint64_t)capture_time;
            mCallbackOps->notify(mCallbackOps, &notify_msg);
            i->shutter_notified = true;
            LOGD("Input request metadata notify frame_number = %u, capture_time = %llu",
                    i->frame_number, notify_msg.message.shutter.timestamp);
        }

        // Wait for (and then close) the input buffer's release fence before
        // reporting the result.
        if (i->input_buffer->release_fence != -1) {
            int32_t rc = sync_wait(i->input_buffer->release_fence, TIMEOUT_NEVER);
            close(i->input_buffer->release_fence);
            if (rc != OK) {
                LOGE("input buffer sync wait failed %d", rc);
            }
        }

        camera3_capture_result result;
        memset(&result, 0, sizeof(camera3_capture_result));
        result.frame_number = frame_number;
        result.result = i->settings;
        result.input_buffer = i->input_buffer;
        result.partial_result = PARTIAL_RESULT_COUNT;

        mCallbackOps->process_capture_result(mCallbackOps, &result);
        LOGD("Input request metadata and input buffer frame_number = %u",
                i->frame_number);
        i = erasePendingRequest(i);
    } else {
        LOGE("Could not find input request for frame number %d", frame_number);
    }
}

/*===========================================================================
 * FUNCTION   : handleBufferWithLock
 *
 * DESCRIPTION: Handles image buffer callback with mMutex lock held.
 *
 * PARAMETERS : @buffer: image buffer for the callback
 *              @frame_number: frame number of the image buffer
 *
 * RETURN :
 *
 *==========================================================================*/
void QCamera3HardwareInterface::handleBufferWithLock(
    camera3_stream_buffer_t *buffer, uint32_t frame_number)
{
    ATRACE_CALL();
    /* Nothing to be done during error state */
    if ((ERROR == mState) || (DEINIT == mState)) {
        return;
    }
    if (mFlushPerf) {
        // During a flush only account for the returned buffer.
        handleBuffersDuringFlushLock(buffer);
        return;
    }
    //not in flush
    // If the frame number doesn't exist in the pending request list,
    // directly send the buffer to the frameworks, and update pending buffers map
    // Otherwise, book-keep the buffer.
    pendingRequestIterator i = mPendingRequestsList.begin();
    while (i != mPendingRequestsList.end() && i->frame_number != frame_number){
        i++;
    }
    if (i == mPendingRequestsList.end()) {
        // Metadata already went out for this frame: deliver the buffer alone.
        // Verify all pending requests frame_numbers are greater
        for (pendingRequestIterator j = mPendingRequestsList.begin();
                j != mPendingRequestsList.end(); j++) {
            if ((j->frame_number < frame_number) && !(j->input_buffer)) {
                LOGW("Error: pending live frame number %d is smaller than %d",
                        j->frame_number, frame_number);
            }
        }
        camera3_capture_result_t result;
        memset(&result, 0, sizeof(camera3_capture_result_t));
        result.result = NULL;
        result.frame_number = frame_number;
        result.num_output_buffers = 1;
        result.partial_result = 0;
        // Flag the buffer STATUS_ERROR if this frame/stream was reported
        // dropped by the back-end.
        for (List<PendingFrameDropInfo>::iterator m = mPendingFrameDropList.begin();
                m != mPendingFrameDropList.end(); m++) {
            QCamera3Channel *channel = (QCamera3Channel *)buffer->stream->priv;
            uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
            if((m->stream_ID == streamID) && (m->frame_number==frame_number) ) {
                buffer->status=CAMERA3_BUFFER_STATUS_ERROR;
                LOGD("Stream STATUS_ERROR frame_number=%d, streamID=%d",
                        frame_number, streamID);
                m = mPendingFrameDropList.erase(m);
                break;
            }
        }
        result.output_buffers = buffer;
        LOGH("result frame_number = %d, buffer = %p",
                frame_number, buffer->buffer);

        mPendingBuffersMap.removeBuf(buffer->buffer);

        mCallbackOps->process_capture_result(mCallbackOps, &result);
    } else {
        if (i->input_buffer) {
            // Reprocess request: build the shutter message, honor the input
            // release fence, then either report now or defer until older
            // live frames have been returned.
            CameraMetadata settings;
            camera3_notify_msg_t notify_msg;
            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
            nsecs_t capture_time = systemTime(CLOCK_MONOTONIC);
            if(i->settings) {
                settings = i->settings;
                if (settings.exists(ANDROID_SENSOR_TIMESTAMP)) {
                    capture_time = settings.find(ANDROID_SENSOR_TIMESTAMP).data.i64[0];
                } else {
                    LOGW("No timestamp in input settings! Using current one.");
                }
            } else {
                LOGE("Input settings missing!");
            }

            notify_msg.type = CAMERA3_MSG_SHUTTER;
            notify_msg.message.shutter.frame_number = frame_number;
            notify_msg.message.shutter.timestamp = (uint64_t)capture_time;

            if (i->input_buffer->release_fence != -1) {
                int32_t rc = sync_wait(i->input_buffer->release_fence, TIMEOUT_NEVER);
                close(i->input_buffer->release_fence);
                if (rc != OK) {
                    LOGE("input buffer sync wait failed %d", rc);
                }
            }
            mPendingBuffersMap.removeBuf(buffer->buffer);

            // Results must go out in frame-number order: only notify now if
            // no older live request is still pending.
            bool notifyNow = true;
            for (pendingRequestIterator j = mPendingRequestsList.begin();
                    j != mPendingRequestsList.end(); j++) {
                if (j->frame_number < frame_number) {
                    notifyNow = false;
                    break;
                }
            }

            if (notifyNow) {
                camera3_capture_result result;
                memset(&result, 0, sizeof(camera3_capture_result));
                result.frame_number = frame_number;
                result.result = i->settings;
                result.input_buffer = i->input_buffer;
                result.num_output_buffers = 1;
                result.output_buffers = buffer;
                result.partial_result = PARTIAL_RESULT_COUNT;

                mCallbackOps->notify(mCallbackOps, &notify_msg);
                mCallbackOps->process_capture_result(mCallbackOps, &result);
                LOGD("Notify reprocess now %d!", frame_number);
                i = erasePendingRequest(i);
            } else {
                // Cache reprocess result for later
                // (drained by handlePendingReprocResults).
                PendingReprocessResult pendingResult;
                memset(&pendingResult, 0, sizeof(PendingReprocessResult));
                pendingResult.notify_msg = notify_msg;
                pendingResult.buffer = *buffer;
                pendingResult.frame_number = frame_number;
                mPendingReprocessResultList.push_back(pendingResult);
                LOGD("Cache reprocess result %d!", frame_number);
            }
        } else {
            // Live request whose metadata hasn't arrived yet: stash a copy of
            // the stream buffer; handleMetadataWithLock frees it when it
            // assembles and sends the full result.
            for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
                    j != i->buffers.end(); j++) {
                if (j->stream == buffer->stream) {
                    if (j->buffer != NULL) {
                        LOGE("Error: buffer is already set");
                    } else {
                        j->buffer = (camera3_stream_buffer_t *)malloc(
                                sizeof(camera3_stream_buffer_t));
                        *(j->buffer) = *buffer;
                        LOGH("cache buffer %p at result frame_number %u",
                                buffer->buffer, frame_number);
                    }
                }
            }
        }
    }
}

/*===========================================================================
 * FUNCTION   : unblockRequestIfNecessary
 *
 * DESCRIPTION: Unblock capture_request if max_buffer hasn't been reached. Note
 *              that mMutex is held when this function is called.
 *
 * PARAMETERS :
 *
 * RETURN :
 *
 *==========================================================================*/
void QCamera3HardwareInterface::unblockRequestIfNecessary()
{
    // Unblock process_capture_request
    pthread_cond_signal(&mRequestCond);
}


/*===========================================================================
 * FUNCTION   : processCaptureRequest
 *
 * DESCRIPTION: process a capture request from camera service
 *
 * PARAMETERS :
 *   @request : request from framework to process
 *
 * RETURN :
 *
 *==========================================================================*/
int QCamera3HardwareInterface::processCaptureRequest(
                    camera3_capture_request_t *request)
{
    ATRACE_CALL();
    int rc = NO_ERROR;
    int32_t request_id;
    CameraMetadata meta;
    uint32_t minInFlightRequests = MIN_INFLIGHT_REQUESTS;
    uint32_t maxInFlightRequests = MAX_INFLIGHT_REQUESTS;
    bool isVidBufRequested = false;
    camera3_stream_buffer_t *pInputBuffer = NULL;

    pthread_mutex_lock(&mMutex);

    // Validate current state
    switch (mState) {
        case CONFIGURED:
        case STARTED:
            /* valid state */
            break;

        case ERROR:
            pthread_mutex_unlock(&mMutex);
            handleCameraDeviceError();
            return -ENODEV;

        default:
            LOGE("Invalid state %d", mState);
            pthread_mutex_unlock(&mMutex);
            return -ENODEV;
    }

    rc = validateCaptureRequest(request);
    if (rc != NO_ERROR) {
        LOGE("incoming request is not valid");
        pthread_mutex_unlock(&mMutex);
        return rc;
    }

    meta = request->settings;

    // For first capture request, send capture intent, and
    // stream on all streams
    if (mState == CONFIGURED) {
+ // send an unconfigure to the backend so that the isp + // resources are deallocated + if (!mFirstConfiguration) { + cam_stream_size_info_t stream_config_info; + int32_t hal_version = CAM_HAL_V3; + memset(&stream_config_info, 0, sizeof(cam_stream_size_info_t)); + stream_config_info.buffer_info.min_buffers = + MIN_INFLIGHT_REQUESTS; + stream_config_info.buffer_info.max_buffers = + m_bIs4KVideo ? 0 : MAX_INFLIGHT_REQUESTS; + clear_metadata_buffer(mParameters); + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, + CAM_INTF_PARM_HAL_VERSION, hal_version); + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, + CAM_INTF_META_STREAM_INFO, stream_config_info); + rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, + mParameters); + if (rc < 0) { + LOGE("set_parms for unconfigure failed"); + pthread_mutex_unlock(&mMutex); + return rc; + } + } + m_perfLock.lock_acq(); + /* get eis information for stream configuration */ + cam_is_type_t is_type; + char is_type_value[PROPERTY_VALUE_MAX]; + property_get("persist.camera.is_type", is_type_value, "0"); + is_type = static_cast<cam_is_type_t>(atoi(is_type_value)); + + if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) { + int32_t hal_version = CAM_HAL_V3; + uint8_t captureIntent = + meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0]; + mCaptureIntent = captureIntent; + clear_metadata_buffer(mParameters); + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_HAL_VERSION, hal_version); + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_CAPTURE_INTENT, captureIntent); + } + + //If EIS is enabled, turn it on for video + bool setEis = m_bEisEnable && m_bEisSupportedSize; + int32_t vsMode; + vsMode = (setEis)? DIS_ENABLE: DIS_DISABLE; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_DIS_ENABLE, vsMode)) { + rc = BAD_VALUE; + } + + //IS type will be 0 unless EIS is supported. 
If EIS is supported + //it could either be 1 or 4 depending on the stream and video size + for (uint32_t i = 0; i < mStreamConfigInfo.num_streams; i++) { + if (setEis) { + if (!m_bEisSupportedSize) { + is_type = IS_TYPE_DIS; + } else { + if (mStreamConfigInfo.type[i] == CAM_STREAM_TYPE_PREVIEW) { + is_type = IS_TYPE_EIS_2_0; + }else if (mStreamConfigInfo.type[i] == CAM_STREAM_TYPE_VIDEO) { + is_type = IS_TYPE_EIS_3_0; + }else { + is_type = IS_TYPE_NONE; + } + } + mStreamConfigInfo.is_type[i] = is_type; + } + else { + mStreamConfigInfo.is_type[i] = IS_TYPE_NONE; + } + } + + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, + CAM_INTF_META_STREAM_INFO, mStreamConfigInfo); + + int32_t tintless_value = 1; + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, + CAM_INTF_PARM_TINTLESS, tintless_value); + //Disable CDS for HFR mode or if DIS/EIS is on. + //CDS is a session parameter in the backend/ISP, so need to be set/reset + //after every configure_stream + if ((CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE == mOpMode) || + (m_bIsVideo)) { + int32_t cds = CAM_CDS_MODE_OFF; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, + CAM_INTF_PARM_CDS_MODE, cds)) + LOGE("Failed to disable CDS for HFR mode"); + + } + setMobicat(); + + /* Set fps and hfr mode while sending meta stream info so that sensor + * can configure appropriate streaming mode */ + mHFRVideoFps = DEFAULT_VIDEO_FPS; + if (meta.exists(ANDROID_CONTROL_AE_TARGET_FPS_RANGE)) { + rc = setHalFpsRange(meta, mParameters); + if (rc != NO_ERROR) { + LOGE("setHalFpsRange failed"); + } + } + if (meta.exists(ANDROID_CONTROL_MODE)) { + uint8_t metaMode = meta.find(ANDROID_CONTROL_MODE).data.u8[0]; + rc = extractSceneMode(meta, metaMode, mParameters); + if (rc != NO_ERROR) { + LOGE("extractSceneMode failed"); + } + } + + //TODO: validate the arguments, HSV scenemode should have only the + //advertised fps ranges + + /*set the capture intent, hal version, tintless, stream info, + *and disenable parameters to the backend*/ + 
        LOGD("set_parms META_STREAM_INFO " );
        for (uint32_t i = 0; i < mStreamConfigInfo.num_streams; i++) {
            LOGI("STREAM INFO : type %d, wxh: %d x %d, pp_mask: 0x%x "
                    "Format:%d",
                    mStreamConfigInfo.type[i],
                    mStreamConfigInfo.stream_sizes[i].width,
                    mStreamConfigInfo.stream_sizes[i].height,
                    mStreamConfigInfo.postprocess_mask[i],
                    mStreamConfigInfo.format[i]);
        }
        rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
                mParameters);
        if (rc < 0) {
            LOGE("set_parms failed for hal version, stream info");
        }

        cam_dimension_t sensor_dim;
        memset(&sensor_dim, 0, sizeof(sensor_dim));
        rc = getSensorOutputSize(sensor_dim);
        if (rc != NO_ERROR) {
            LOGE("Failed to get sensor output size");
            pthread_mutex_unlock(&mMutex);
            goto error_exit;
        }

        // Map crop regions between the active pixel array and the actual
        // sensor output chosen for this configuration.
        mCropRegionMapper.update(gCamCapability[mCameraId]->active_array_size.width,
                gCamCapability[mCameraId]->active_array_size.height,
                sensor_dim.width, sensor_dim.height);

        /* Set batchmode before initializing channel. Since registerBuffer
         * internally initializes some of the channels, better set batchmode
         * even before first register buffer */
        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            if (((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask())
                    && mBatchSize) {
                rc = channel->setBatchSize(mBatchSize);
                //Disable per frame map unmap for HFR/batchmode case
                rc |= channel->setPerFrameMapUnmap(false);
                if (NO_ERROR != rc) {
                    LOGE("Channel init failed %d", rc);
                    pthread_mutex_unlock(&mMutex);
                    goto error_exit;
                }
            }
        }

        //First initialize all streams
        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            if ((((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask()) ||
               ((1U << CAM_STREAM_TYPE_PREVIEW) ==
               channel->getStreamTypeMask())) &&
               setEis)
                rc = channel->initialize(is_type);
            else {
                rc = channel->initialize(IS_TYPE_NONE);
            }
            if (NO_ERROR != rc) {
                LOGE("Channel initialization failed %d", rc);
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }

        if (mRawDumpChannel) {
            rc = mRawDumpChannel->initialize(IS_TYPE_NONE);
            if (rc != NO_ERROR) {
                LOGE("Error: Raw Dump Channel init failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }
        if (mSupportChannel) {
            rc = mSupportChannel->initialize(IS_TYPE_NONE);
            if (rc < 0) {
                LOGE("Support channel initialization failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }
        if (mAnalysisChannel) {
            rc = mAnalysisChannel->initialize(IS_TYPE_NONE);
            if (rc < 0) {
                LOGE("Analysis channel initialization failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }
        if (mDummyBatchChannel) {
            rc = mDummyBatchChannel->setBatchSize(mBatchSize);
            if (rc < 0) {
                LOGE("mDummyBatchChannel setBatchSize failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
            rc = mDummyBatchChannel->initialize(is_type);
            if (rc < 0) {
                LOGE("mDummyBatchChannel initialization failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }

        // Set bundle info
        rc = setBundleInfo();
        if (rc < 0) {
            LOGE("setBundleInfo failed %d", rc);
            pthread_mutex_unlock(&mMutex);
            goto error_exit;
        }

        //update settings from app here
        if (meta.exists(QCAMERA3_DUALCAM_LINK_ENABLE)) {
            mIsDeviceLinked = meta.find(QCAMERA3_DUALCAM_LINK_ENABLE).data.u8[0];
            LOGH("Dualcam: setting On=%d id =%d", mIsDeviceLinked, mCameraId);
        }
        if (meta.exists(QCAMERA3_DUALCAM_LINK_IS_MAIN)) {
            mIsMainCamera = meta.find(QCAMERA3_DUALCAM_LINK_IS_MAIN).data.u8[0];
            LOGH("Dualcam: Is this main camera = %d id =%d", mIsMainCamera, mCameraId);
        }
        if (meta.exists(QCAMERA3_DUALCAM_LINK_RELATED_CAMERA_ID)) {
            mLinkedCameraId = meta.find(QCAMERA3_DUALCAM_LINK_RELATED_CAMERA_ID).data.u8[0];
            LOGH("Dualcam: Linked camera Id %d id =%d", mLinkedCameraId, mCameraId);

            // NOTE(review): the dualcam error paths below jump to error_exit
            // WITHOUT releasing mMutex, unlike every other error path in this
            // section — verify whether this is intentional or a lock leak.
            if ( (mLinkedCameraId >= MM_CAMERA_MAX_NUM_SENSORS) &&
                (mLinkedCameraId != mCameraId) ) {
                LOGE("Dualcam: mLinkedCameraId %d is invalid, current cam id = %d",
                    mLinkedCameraId, mCameraId);
                goto error_exit;
            }
        }

        // add bundle related cameras
        LOGH("%s: Dualcam: id =%d, mIsDeviceLinked=%d", __func__,mCameraId, mIsDeviceLinked);
        if (meta.exists(QCAMERA3_DUALCAM_LINK_ENABLE)) {
            if (mIsDeviceLinked)
                m_pRelCamSyncBuf->sync_control = CAM_SYNC_RELATED_SENSORS_ON;
            else
                m_pRelCamSyncBuf->sync_control = CAM_SYNC_RELATED_SENSORS_OFF;

            pthread_mutex_lock(&gCamLock);

            if (sessionId[mLinkedCameraId] == 0xDEADBEEF) {
                LOGE("Dualcam: Invalid Session Id ");
                pthread_mutex_unlock(&gCamLock);
                goto error_exit;
            }

            if (mIsMainCamera == 1) {
                m_pRelCamSyncBuf->mode = CAM_MODE_PRIMARY;
                m_pRelCamSyncBuf->type = CAM_TYPE_MAIN;
                // related session id should be session id of linked session
                m_pRelCamSyncBuf->related_sensor_session_id = sessionId[mLinkedCameraId];
            } else {
                m_pRelCamSyncBuf->mode = CAM_MODE_SECONDARY;
                m_pRelCamSyncBuf->type = CAM_TYPE_AUX;
                m_pRelCamSyncBuf->related_sensor_session_id = sessionId[mLinkedCameraId];
            }
            pthread_mutex_unlock(&gCamLock);

            rc = mCameraHandle->ops->sync_related_sensors(
                    mCameraHandle->camera_handle, m_pRelCamSyncBuf);
            if (rc < 0) {
                LOGE("Dualcam: link failed");
                goto error_exit;
            }
        }

        //Then start them.
        // Metadata channel must start first: buffer/metadata matching below
        // depends on it being live before the processing channels.
        LOGH("Start META Channel");
        rc = mMetadataChannel->start();
        if (rc < 0) {
            LOGE("META channel start failed");
            pthread_mutex_unlock(&mMutex);
            goto error_exit;
        }

        if (mAnalysisChannel) {
            rc = mAnalysisChannel->start();
            if (rc < 0) {
                LOGE("Analysis channel start failed");
                mMetadataChannel->stop();
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }

        if (mSupportChannel) {
            rc = mSupportChannel->start();
            if (rc < 0) {
                LOGE("Support channel start failed");
                mMetadataChannel->stop();
                /* Although support and analysis are mutually exclusive today
                   adding it in anycase for future proofing */
                if (mAnalysisChannel) {
                    mAnalysisChannel->stop();
                }
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }
        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end(); it++) {
            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
            LOGH("Start Processing Channel mask=%d",
                    channel->getStreamTypeMask());
            rc = channel->start();
            if (rc < 0) {
                LOGE("channel start failed");
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }

        if (mRawDumpChannel) {
            LOGD("Starting raw dump stream");
            rc = mRawDumpChannel->start();
            if (rc != NO_ERROR) {
                LOGE("Error Starting Raw Dump Channel");
                // Unwind: stop everything started above before bailing out.
                for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
                      it != mStreamInfo.end(); it++) {
                    QCamera3Channel *channel =
                        (QCamera3Channel *)(*it)->stream->priv;
                    LOGH("Stopping Processing Channel mask=%d",
                        channel->getStreamTypeMask());
                    channel->stop();
                }
                if (mSupportChannel)
                    mSupportChannel->stop();
                if (mAnalysisChannel) {
                    mAnalysisChannel->stop();
                }
                mMetadataChannel->stop();
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }

        if (mChannelHandle) {

            rc = mCameraHandle->ops->start_channel(mCameraHandle->camera_handle,
                    mChannelHandle);
            if (rc != NO_ERROR) {
                LOGE("start_channel failed %d", rc);
                pthread_mutex_unlock(&mMutex);
                goto error_exit;
            }
        }

        goto no_error;
error_exit:
        m_perfLock.lock_rel();
        return rc;
no_error:
        m_perfLock.lock_rel();

        mWokenUpByDaemon = false;
        mPendingLiveRequest = 0;
        mFirstConfiguration = false;
        enablePowerHint();
    }

    uint32_t frameNumber = request->frame_number;
    cam_stream_ID_t streamID;

    if (mFlushPerf) {
        //we cannot accept any requests during flush
        LOGE("process_capture_request cannot proceed during flush");
        pthread_mutex_unlock(&mMutex);
        return NO_ERROR; //should return an error
    }

    if (meta.exists(ANDROID_REQUEST_ID)) {
        request_id = meta.find(ANDROID_REQUEST_ID).data.i32[0];
        mCurrentRequestId = request_id;
        LOGD("Received request with id: %d", request_id);
    } else if (mState == CONFIGURED || mCurrentRequestId == -1){
        LOGE("Unable to find request id field, \
                & no previous id available");
        pthread_mutex_unlock(&mMutex);
        return NAME_NOT_FOUND;
    } else {
        LOGD("Re-using old request id");
        request_id = mCurrentRequestId;
    }

    LOGH("num_output_buffers = %d input_buffer = %p frame_number = %d",
                                    request->num_output_buffers,
                                    request->input_buffer,
                                    frameNumber);
    // Acquire all request buffers first
    streamID.num_streams = 0;
    int blob_request = 0;
    uint32_t snapshotStreamId = 0;
    for (size_t i = 0; i < request->num_output_buffers; i++) {
        const camera3_stream_buffer_t& output = request->output_buffers[i];
        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;

        if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
            //Call function to store local copy of jpeg data for encode params.
            blob_request = 1;
            snapshotStreamId = channel->getStreamID(channel->getStreamTypeMask());
        }

        // Wait for (and consume) the framework's acquire fence before the
        // buffer may be filled.
        if (output.acquire_fence != -1) {
           rc = sync_wait(output.acquire_fence, TIMEOUT_NEVER);
           close(output.acquire_fence);
           if (rc != OK) {
              LOGE("sync wait failed %d", rc);
              pthread_mutex_unlock(&mMutex);
              return rc;
           }
        }

        streamID.streamID[streamID.num_streams] =
            channel->getStreamID(channel->getStreamTypeMask());
        streamID.num_streams++;

        if ((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask()) {
            isVidBufRequested = true;
        }
    }

    if (blob_request) {
        KPI_ATRACE_INT("SNAPSHOT", 1);
    }
    if (blob_request && mRawDumpChannel) {
        LOGD("Trigger Raw based on blob request if Raw dump is enabled");
        streamID.streamID[streamID.num_streams] =
            mRawDumpChannel->getStreamID(mRawDumpChannel->getStreamTypeMask());
        streamID.num_streams++;
    }

    if(request->input_buffer == NULL) {
        /* Parse the settings:
         *  - For every request in NORMAL MODE
         *  - For every request in HFR mode during preview only case
         *  - For first request of every batch in HFR mode during video
         *  recording. In batchmode the same settings except frame number is
         *  repeated in each request of the batch.
         */
        if (!mBatchSize ||
           (mBatchSize && !isVidBufRequested) ||
           (mBatchSize && isVidBufRequested && !mToBeQueuedVidBufs)) {
            rc = setFrameParameters(request, streamID, blob_request, snapshotStreamId);
            if (rc < 0) {
                LOGE("fail to set frame parameters");
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }
        /* For batchMode HFR, setFrameParameters is not called for every
         * request. But only frame number of the latest request is parsed.
         * Keep track of first and last frame numbers in a batch so that
         * metadata for the frame numbers of batch can be duplicated in
         * handleBatchMetadta */
        if (mBatchSize) {
            if (!mToBeQueuedVidBufs) {
                //start of the batch
                mFirstFrameNumberInBatch = request->frame_number;
            }
            // NOTE(review): this error path returns while mMutex is still
            // held (no pthread_mutex_unlock before return) — verify.
            if(ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
                CAM_INTF_META_FRAME_NUMBER, request->frame_number)) {
                LOGE("Failed to set the frame number in the parameters");
                return BAD_VALUE;
            }
        }
        if (mNeedSensorRestart) {
            /* Unlock the mutex as restartSensor waits on the channels to be
             * stopped, which in turn calls stream callback functions -
             * handleBufferWithLock and handleMetadataWithLock */
            pthread_mutex_unlock(&mMutex);
            rc = dynamicUpdateMetaStreamInfo();
            if (rc != NO_ERROR) {
                LOGE("Restarting the sensor failed");
                return BAD_VALUE;
            }
            mNeedSensorRestart = false;
            pthread_mutex_lock(&mMutex);
        }
    } else {

        // Reprocess request: only the input buffer's acquire fence needs to
        // be waited on here.
        if (request->input_buffer->acquire_fence != -1) {
           rc = sync_wait(request->input_buffer->acquire_fence, TIMEOUT_NEVER);
           close(request->input_buffer->acquire_fence);
           if (rc != OK) {
              LOGE("input buffer sync wait failed %d", rc);
              pthread_mutex_unlock(&mMutex);
              return rc;
           }
        }
    }

    if (mCaptureIntent == ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM) {
        mLastCustIntentFrmNum = frameNumber;
    }
    /* Update pending request list and pending buffers map */
    PendingRequestInfo pendingRequest;
    pendingRequestIterator latestRequest;
    pendingRequest.frame_number = frameNumber;
    pendingRequest.num_buffers = request->num_output_buffers;
    pendingRequest.request_id = request_id;
    pendingRequest.blob_request = blob_request;
    pendingRequest.timestamp = 0;
    pendingRequest.bUrgentReceived = 0;
    if (request->input_buffer) {
        // Deep-copy the input buffer descriptor; freed when the request
        // completes.
        pendingRequest.input_buffer =
                (camera3_stream_buffer_t*)malloc(sizeof(camera3_stream_buffer_t));
        *(pendingRequest.input_buffer) = *(request->input_buffer);
        pInputBuffer = pendingRequest.input_buffer;
    } else {
        pendingRequest.input_buffer = NULL;
        pInputBuffer = NULL;
    }

    pendingRequest.pipeline_depth = 0;
    pendingRequest.partial_result_cnt = 0;
    extractJpegMetadata(mCurJpegMeta, request);
    pendingRequest.jpegMetadata = mCurJpegMeta;
    pendingRequest.settings = saveRequestSettings(mCurJpegMeta, request);
    pendingRequest.shutter_notified = false;

    //extract capture intent
    if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
        mCaptureIntent =
                meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
    }
    pendingRequest.capture_intent = mCaptureIntent;

    //extract CAC info
    if (meta.exists(ANDROID_COLOR_CORRECTION_ABERRATION_MODE)) {
        mCacMode =
                meta.find(ANDROID_COLOR_CORRECTION_ABERRATION_MODE).data.u8[0];
    }
    pendingRequest.fwkCacMode = mCacMode;

    PendingBuffersInRequest bufsForCurRequest;
    bufsForCurRequest.frame_number = frameNumber;
    // Mark current timestamp for the new request
    bufsForCurRequest.timestamp = systemTime(CLOCK_MONOTONIC);

    for (size_t i = 0; i < request->num_output_buffers; i++) {
        RequestedBufferInfo requestedBuf;
        memset(&requestedBuf, 0, sizeof(requestedBuf));
        requestedBuf.stream = request->output_buffers[i].stream;
        requestedBuf.buffer = NULL;
        pendingRequest.buffers.push_back(requestedBuf);

        // Add to buffer handle the pending buffers list
        PendingBufferInfo bufferInfo;
        bufferInfo.buffer = request->output_buffers[i].buffer;
        bufferInfo.stream = request->output_buffers[i].stream;
        bufsForCurRequest.mPendingBufferList.push_back(bufferInfo);
        QCamera3Channel *channel = (QCamera3Channel *)bufferInfo.stream->priv;
        LOGD("frame = %d, buffer = %p, streamTypeMask = %d, stream format = %d",
            frameNumber, bufferInfo.buffer,
            channel->getStreamTypeMask(), bufferInfo.stream->format);
    }
    // Add this request packet into mPendingBuffersMap
    mPendingBuffersMap.mPendingBuffersInRequest.push_back(bufsForCurRequest);
    LOGD("mPendingBuffersMap.num_overall_buffers = %d",
        mPendingBuffersMap.get_num_overall_buffers());

    latestRequest = mPendingRequestsList.insert(
            mPendingRequestsList.end(), pendingRequest);
    if(mFlush) {
        pthread_mutex_unlock(&mMutex);
        return NO_ERROR;
    }

    // Notify metadata channel we receive a request
    mMetadataChannel->request(NULL, frameNumber);

    if(request->input_buffer != NULL){
        LOGD("Input request, frame_number %d", frameNumber);
        rc = setReprocParameters(request, &mReprocMeta, snapshotStreamId);
        if (NO_ERROR != rc) {
            LOGE("fail to set reproc parameters");
            pthread_mutex_unlock(&mMutex);
            return rc;
        }
    }

    // Call request on other streams
    uint32_t streams_need_metadata = 0;
    pendingBufferIterator pendingBufferIter = latestRequest->buffers.begin();
    for (size_t i = 0; i < request->num_output_buffers; i++) {
        const camera3_stream_buffer_t& output = request->output_buffers[i];
        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;

        if (channel == NULL) {
            LOGW("invalid channel pointer for stream");
            continue;
        }

        if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
            LOGD("snapshot request with output buffer %p, input buffer %p, frame_number %d",
                      output.buffer, request->input_buffer, frameNumber);
            if(request->input_buffer != NULL){
                rc = channel->request(output.buffer, frameNumber,
                        pInputBuffer, &mReprocMeta);
                if (rc < 0) {
                    LOGE("Fail to request on picture channel");
                    pthread_mutex_unlock(&mMutex);
                    return rc;
                }
            } else {
                LOGD("snapshot request with buffer %p, frame_number %d",
                         output.buffer, frameNumber);
                if (!request->settings) {
                    rc = channel->request(output.buffer, frameNumber,
                            NULL, mPrevParameters);
                } else {
                    rc = channel->request(output.buffer, frameNumber,
                            NULL, mParameters);
                }
                if (rc < 0) {
                    LOGE("Fail to request on picture channel");
                    pthread_mutex_unlock(&mMutex);
                    return rc;
                }
                pendingBufferIter->need_metadata = true;
                streams_need_metadata++;
            }
        } else if (output.stream->format ==
                HAL_PIXEL_FORMAT_YCbCr_420_888) {
            bool needMetadata = false;

            // Re-arm the timed perf lock for burst reprocess if its timer
            // already fired, otherwise acquire it fresh.
            if (m_perfLock.isPerfLockTimedAcquired()) {
                if (m_perfLock.isTimerReset())
                {
                    m_perfLock.lock_rel_timed();
                    m_perfLock.lock_acq_timed(BURST_REPROCESS_PERF_TIME_OUT);
                }
            } else {
                m_perfLock.lock_acq_timed(BURST_REPROCESS_PERF_TIME_OUT);
            }

            QCamera3YUVChannel *yuvChannel = (QCamera3YUVChannel *)channel;
            rc = yuvChannel->request(output.buffer, frameNumber,
                    pInputBuffer,
                    (pInputBuffer ? &mReprocMeta : mParameters), needMetadata);
            if (rc < 0) {
                LOGE("Fail to request on YUV channel");
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
            pendingBufferIter->need_metadata = needMetadata;
            if (needMetadata)
                streams_need_metadata += 1;
            LOGD("calling YUV channel request, need_metadata is %d",
                     needMetadata);
        } else {
            LOGD("request with buffer %p, frame_number %d",
                  output.buffer, frameNumber);
            /* Set perf lock for API-2 zsl */
            if (IS_USAGE_ZSL(output.stream->usage)) {
                if (m_perfLock.isPerfLockTimedAcquired()) {
                    if (m_perfLock.isTimerReset())
                    {
                        m_perfLock.lock_rel_timed();
                        m_perfLock.lock_acq_timed(BURST_REPROCESS_PERF_TIME_OUT);
                    }
                } else {
                    m_perfLock.lock_acq_timed(BURST_REPROCESS_PERF_TIME_OUT);
                }
            }

            rc = channel->request(output.buffer, frameNumber);
            if (((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask())
                    && mBatchSize) {
                mToBeQueuedVidBufs++;
                if (mToBeQueuedVidBufs == mBatchSize) {
                    channel->queueBatchBuf();
                }
            }
            if (rc < 0) {
                LOGE("request failed");
                pthread_mutex_unlock(&mMutex);
                return rc;
            }
        }
        pendingBufferIter++;
    }

    //If 2 streams have need_metadata set to true, fail the request, unless
    //we copy/reference count the metadata buffer
    if (streams_need_metadata > 1) {
        LOGE("not supporting request in which two streams requires"
                " 2 HAL metadata for reprocessing");
        pthread_mutex_unlock(&mMutex);
        return -EINVAL;
    }

    if(request->input_buffer == NULL) {
        /* Set the parameters to backend:
         *  - For every request in NORMAL MODE
         *  - For every request in HFR mode during preview only case
         *  - Once every batch in HFR mode during video recording
         */
        if (!mBatchSize ||
           (mBatchSize && !isVidBufRequested) ||
           (mBatchSize && isVidBufRequested && (mToBeQueuedVidBufs == mBatchSize))) {
            LOGD("set_parms  batchSz: %d IsVidBufReq: %d vidBufTobeQd: %d ",
                     mBatchSize, isVidBufRequested,
                     mToBeQueuedVidBufs);
            rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
                    mParameters);
            if (rc < 0) {
                LOGE("set_parms failed");
            }
            /* reset to zero coz, the batch is queued */
            mToBeQueuedVidBufs = 0;
            mPendingBatchMap.add(frameNumber, mFirstFrameNumberInBatch);
        }
        mPendingLiveRequest++;
    }

    LOGD("mPendingLiveRequest = %d", mPendingLiveRequest);

    mState = STARTED;
    // Added a timed condition wait
    struct timespec ts;
    uint8_t isValidTimeout = 1;
    rc = clock_gettime(CLOCK_REALTIME, &ts);
    if (rc < 0) {
      isValidTimeout = 0;
      LOGE("Error reading the real time clock!!");
    }
    else {
      // Make timeout as 5 sec for request to be honored
      ts.tv_sec += 5;
    }
    //Block on conditional variable
    if (mBatchSize) {
        /* For HFR, more buffers are dequeued upfront to improve the performance */
        minInFlightRequests = MIN_INFLIGHT_HFR_REQUESTS;
        maxInFlightRequests = MAX_INFLIGHT_HFR_REQUESTS;
    }
    if (m_perfLock.isPerfLockTimedAcquired() && m_perfLock.isTimerReset())
        m_perfLock.lock_rel_timed();

    // Throttle: block until in-flight requests drop below the watermark.
    // The 5-second timed wait guards against the daemon never signaling;
    // ETIMEDOUT is mapped to -ENODEV (device error).
    while ((mPendingLiveRequest >= minInFlightRequests) && !pInputBuffer &&
            (mState != ERROR) && (mState != DEINIT)) {
        if (!isValidTimeout) {
            LOGD("Blocking on conditional wait");
            pthread_cond_wait(&mRequestCond, &mMutex);
        }
        else {
            LOGD("Blocking on timed conditional wait");
            rc = pthread_cond_timedwait(&mRequestCond, &mMutex, &ts);
            if (rc == ETIMEDOUT) {
                rc = -ENODEV;
                LOGE("Unblocked on timeout!!!!");
                break;
            }
        }
        LOGD("Unblocked");
        if (mWokenUpByDaemon) {
            mWokenUpByDaemon = false;
            if (mPendingLiveRequest <
                    maxInFlightRequests)
                break;
        }
    }
    pthread_mutex_unlock(&mMutex);

    return rc;
}

/*===========================================================================
 * FUNCTION   : dump
 *
 * DESCRIPTION: Dump HAL3 state (pending requests, pending buffer map and
 *              pending frame-drop list) to the given file descriptor for
 *              dumpsys. Also arms mUpdateDebugLevel so the next request
 *              refreshes the debug log level.
 *
 * PARAMETERS :
 *   @fd : file descriptor to write the dump to
 *
 * RETURN     : None
 *==========================================================================*/
void QCamera3HardwareInterface::dump(int fd)
{
    pthread_mutex_lock(&mMutex);
    dprintf(fd, "\n Camera HAL3 information Begin \n");

    dprintf(fd, "\nNumber of pending requests: %zu \n",
        mPendingRequestsList.size());
    dprintf(fd, "-------+-------------------+-------------+----------+---------------------\n");
    dprintf(fd, " Frame | Number of Buffers |   Req Id:   | Blob Req | Input buffer present\n");
    dprintf(fd, "-------+-------------------+-------------+----------+---------------------\n");
    for(pendingRequestIterator i = mPendingRequestsList.begin();
            i != mPendingRequestsList.end(); i++) {
        dprintf(fd, " %5d | %17d | %11d | %8d | %p \n",
        i->frame_number, i->num_buffers, i->request_id, i->blob_request,
        i->input_buffer);
    }
    dprintf(fd, "\nPending buffer map: Number of buffers: %u\n",
            mPendingBuffersMap.get_num_overall_buffers());
    dprintf(fd, "-------+------------------\n");
    dprintf(fd, " Frame | Stream type mask \n");
    dprintf(fd, "-------+------------------\n");
    for(auto &req : mPendingBuffersMap.mPendingBuffersInRequest) {
        for(auto &j : req.mPendingBufferList) {
            QCamera3Channel *channel = (QCamera3Channel *)(j.stream->priv);
            dprintf(fd, " %5d | %11d \n",
                    req.frame_number, channel->getStreamTypeMask());
        }
    }
    dprintf(fd, "-------+------------------\n");

    dprintf(fd, "\nPending frame drop list: %zu\n",
        mPendingFrameDropList.size());
    dprintf(fd, "-------+-----------\n");
    dprintf(fd, " Frame | Stream ID \n");
    dprintf(fd, "-------+-----------\n");
    for(List<PendingFrameDropInfo>::iterator i = mPendingFrameDropList.begin();
        i != mPendingFrameDropList.end(); i++) {
        dprintf(fd, " %5d | %9d \n",
            i->frame_number, i->stream_ID);
    }
    dprintf(fd, "-------+-----------\n");

    dprintf(fd, "\n Camera HAL3 information End \n");

    /* use dumpsys media.camera as trigger to send update debug level event */
    mUpdateDebugLevel = true;
    pthread_mutex_unlock(&mMutex);
    return;
}

/*===========================================================================
 * FUNCTION   : flush
 *
 * DESCRIPTION: Calls stopAllChannels, notifyErrorForPendingRequests and
 *              conditionally restarts channels
 *
 * PARAMETERS :
 *  @ restartChannels: re-start all channels
 *
 *
 * RETURN     :
 *          0 on success
 *          Error code on failure
 *==========================================================================*/
int QCamera3HardwareInterface::flush(bool restartChannels)
{
    KPI_ATRACE_CALL();
    int32_t rc = NO_ERROR;

    LOGD("Unblocking Process Capture Request");
    pthread_mutex_lock(&mMutex);
    mFlush = true;
    pthread_mutex_unlock(&mMutex);

    rc = stopAllChannels();
    // unlink of dualcam
    if (mIsDeviceLinked) {
        m_pRelCamSyncBuf->sync_control = CAM_SYNC_RELATED_SENSORS_OFF;
        pthread_mutex_lock(&gCamLock);

        if (mIsMainCamera == 1) {
            m_pRelCamSyncBuf->mode = CAM_MODE_PRIMARY;
            m_pRelCamSyncBuf->type = CAM_TYPE_MAIN;
            // related session id should be session id of linked session
            m_pRelCamSyncBuf->related_sensor_session_id = sessionId[mLinkedCameraId];
        } else {
            m_pRelCamSyncBuf->mode = CAM_MODE_SECONDARY;
            m_pRelCamSyncBuf->type = CAM_TYPE_AUX;
            m_pRelCamSyncBuf->related_sensor_session_id = sessionId[mLinkedCameraId];
        }
        pthread_mutex_unlock(&gCamLock);

        // NOTE(review): rc from stopAllChannels is overwritten here; an
        // unlink failure is deliberately non-fatal, but it also masks a
        // stopAllChannels failure — verify intended.
        rc = mCameraHandle->ops->sync_related_sensors(
                mCameraHandle->camera_handle, m_pRelCamSyncBuf);
        if (rc < 0) {
            LOGE("Dualcam: Unlink failed, but still proceed to close");
        }
    }

    if (rc < 0) {
        LOGE("stopAllChannels failed");
        return rc;
    }
    if (mChannelHandle) {
        mCameraHandle->ops->stop_channel(mCameraHandle->camera_handle,
                mChannelHandle);
    }

    // Reset bundle info
    rc =
setBundleInfo(); + if (rc < 0) { + LOGE("setBundleInfo failed %d", rc); + return rc; + } + + // Mutex Lock + pthread_mutex_lock(&mMutex); + + // Unblock process_capture_request + mPendingLiveRequest = 0; + pthread_cond_signal(&mRequestCond); + + rc = notifyErrorForPendingRequests(); + if (rc < 0) { + LOGE("notifyErrorForPendingRequests failed"); + pthread_mutex_unlock(&mMutex); + return rc; + } + + mFlush = false; + + // Start the Streams/Channels + if (restartChannels) { + rc = startAllChannels(); + if (rc < 0) { + LOGE("startAllChannels failed"); + pthread_mutex_unlock(&mMutex); + return rc; + } + } + + if (mChannelHandle) { + mCameraHandle->ops->start_channel(mCameraHandle->camera_handle, + mChannelHandle); + if (rc < 0) { + LOGE("start_channel failed"); + pthread_mutex_unlock(&mMutex); + return rc; + } + } + + pthread_mutex_unlock(&mMutex); + + return 0; +} + +/*=========================================================================== + * FUNCTION : flushPerf + * + * DESCRIPTION: This is the performance optimization version of flush that does + * not use stream off, rather flushes the system + * + * PARAMETERS : + * + * + * RETURN : 0 : success + * -EINVAL: input is malformed (device is not valid) + * -ENODEV: if the device has encountered a serious error + *==========================================================================*/ +int QCamera3HardwareInterface::flushPerf() +{ + ATRACE_CALL(); + int32_t rc = 0; + struct timespec timeout; + bool timed_wait = false; + + pthread_mutex_lock(&mMutex); + mFlushPerf = true; + mPendingBuffersMap.numPendingBufsAtFlush = + mPendingBuffersMap.get_num_overall_buffers(); + LOGD("Calling flush. 
Wait for %d buffers to return", + mPendingBuffersMap.numPendingBufsAtFlush); + + /* send the flush event to the backend */ + rc = mCameraHandle->ops->flush(mCameraHandle->camera_handle); + if (rc < 0) { + LOGE("Error in flush: IOCTL failure"); + mFlushPerf = false; + pthread_mutex_unlock(&mMutex); + return -ENODEV; + } + + if (mPendingBuffersMap.numPendingBufsAtFlush == 0) { + LOGD("No pending buffers in HAL, return flush"); + mFlushPerf = false; + pthread_mutex_unlock(&mMutex); + return rc; + } + + /* wait on a signal that buffers were received */ + rc = clock_gettime(CLOCK_REALTIME, &timeout); + if (rc < 0) { + LOGE("Error reading the real time clock, cannot use timed wait"); + } else { + timeout.tv_sec += FLUSH_TIMEOUT; + timed_wait = true; + } + + //Block on conditional variable + while (mPendingBuffersMap.numPendingBufsAtFlush != 0) { + LOGD("Waiting on mBuffersCond"); + if (!timed_wait) { + rc = pthread_cond_wait(&mBuffersCond, &mMutex); + if (rc != 0) { + LOGE("pthread_cond_wait failed due to rc = %s", + strerror(rc)); + break; + } + } else { + rc = pthread_cond_timedwait(&mBuffersCond, &mMutex, &timeout); + if (rc != 0) { + LOGE("pthread_cond_timedwait failed due to rc = %s", + strerror(rc)); + break; + } + } + } + if (rc != 0) { + mFlushPerf = false; + pthread_mutex_unlock(&mMutex); + return -ENODEV; + } + + LOGD("Received buffers, now safe to return them"); + + //make sure the channels handle flush + //currently only required for the picture channel to release snapshot resources + for (List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3Channel *channel = (*it)->channel; + if (channel) { + rc = channel->flush(); + if (rc) { + LOGE("Flushing the channels failed with error %d", rc); + // even though the channel flush failed we need to continue and + // return the buffers we have to the framework, however the return + // value will be an error + rc = -ENODEV; + } + } + } + + /* notify the frameworks and send 
errored results */ + rc = notifyErrorForPendingRequests(); + if (rc < 0) { + LOGE("notifyErrorForPendingRequests failed"); + pthread_mutex_unlock(&mMutex); + return rc; + } + + //unblock process_capture_request + mPendingLiveRequest = 0; + unblockRequestIfNecessary(); + + mFlushPerf = false; + pthread_mutex_unlock(&mMutex); + LOGD ("Flush Operation complete. rc = %d", rc); + return rc; +} + +/*=========================================================================== + * FUNCTION : handleCameraDeviceError + * + * DESCRIPTION: This function calls internal flush and notifies the error to + * framework and updates the state variable. + * + * PARAMETERS : None + * + * RETURN : NO_ERROR on Success + * Error code on failure + *==========================================================================*/ +int32_t QCamera3HardwareInterface::handleCameraDeviceError() +{ + int32_t rc = NO_ERROR; + + pthread_mutex_lock(&mMutex); + if (mState != ERROR) { + //if mState != ERROR, nothing to be done + pthread_mutex_unlock(&mMutex); + return NO_ERROR; + } + pthread_mutex_unlock(&mMutex); + + rc = flush(false /* restart channels */); + if (NO_ERROR != rc) { + LOGE("internal flush to handle mState = ERROR failed"); + } + + pthread_mutex_lock(&mMutex); + mState = DEINIT; + pthread_mutex_unlock(&mMutex); + + camera3_notify_msg_t notify_msg; + memset(¬ify_msg, 0, sizeof(camera3_notify_msg_t)); + notify_msg.type = CAMERA3_MSG_ERROR; + notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_DEVICE; + notify_msg.message.error.error_stream = NULL; + notify_msg.message.error.frame_number = 0; + mCallbackOps->notify(mCallbackOps, ¬ify_msg); + + return rc; +} + +/*=========================================================================== + * FUNCTION : captureResultCb + * + * DESCRIPTION: Callback handler for all capture result + * (streams, as well as metadata) + * + * PARAMETERS : + * @metadata : metadata information + * @buffer : actual gralloc buffer to be returned to frameworks. 
+ * NULL if metadata. + * + * RETURN : NONE + *==========================================================================*/ +void QCamera3HardwareInterface::captureResultCb(mm_camera_super_buf_t *metadata_buf, + camera3_stream_buffer_t *buffer, uint32_t frame_number, bool isInputBuffer) +{ + if (metadata_buf) { + if (mBatchSize) { + handleBatchMetadata(metadata_buf, + true /* free_and_bufdone_meta_buf */); + } else { /* mBatchSize = 0 */ + hdrPlusPerfLock(metadata_buf); + pthread_mutex_lock(&mMutex); + handleMetadataWithLock(metadata_buf, + true /* free_and_bufdone_meta_buf */); + pthread_mutex_unlock(&mMutex); + } + } else if (isInputBuffer) { + pthread_mutex_lock(&mMutex); + handleInputBufferWithLock(frame_number); + pthread_mutex_unlock(&mMutex); + } else { + pthread_mutex_lock(&mMutex); + handleBufferWithLock(buffer, frame_number); + pthread_mutex_unlock(&mMutex); + } + return; +} + +/*=========================================================================== + * FUNCTION : getReprocessibleOutputStreamId + * + * DESCRIPTION: Get source output stream id for the input reprocess stream + * based on size and format, which would be the largest + * output stream if an input stream exists. 
+ * + * PARAMETERS : + * @id : return the stream id if found + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3HardwareInterface::getReprocessibleOutputStreamId(uint32_t &id) +{ + /* check if any output or bidirectional stream with the same size and format + and return that stream */ + if ((mInputStreamInfo.dim.width > 0) && + (mInputStreamInfo.dim.height > 0)) { + for (List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + + camera3_stream_t *stream = (*it)->stream; + if ((stream->width == (uint32_t)mInputStreamInfo.dim.width) && + (stream->height == (uint32_t)mInputStreamInfo.dim.height) && + (stream->format == mInputStreamInfo.format)) { + // Usage flag for an input stream and the source output stream + // may be different. + LOGD("Found reprocessible output stream! %p", *it); + LOGD("input stream usage 0x%x, current stream usage 0x%x", + stream->usage, mInputStreamInfo.usage); + + QCamera3Channel *channel = (QCamera3Channel *)stream->priv; + if (channel != NULL && channel->mStreams[0]) { + id = channel->mStreams[0]->getMyServerID(); + return NO_ERROR; + } + } + } + } else { + LOGD("No input stream, so no reprocessible output stream"); + } + return NAME_NOT_FOUND; +} + +/*=========================================================================== + * FUNCTION : lookupFwkName + * + * DESCRIPTION: In case the enum is not same in fwk and backend + * make sure the parameter is correctly propogated + * + * PARAMETERS : + * @arr : map between the two enums + * @len : len of the map + * @hal_name : name of the hal_parm to map + * + * RETURN : int type of status + * fwk_name -- success + * none-zero failure code + *==========================================================================*/ +template <typename halType, class mapType> int lookupFwkName(const mapType *arr, + size_t len, halType 
hal_name) +{ + + for (size_t i = 0; i < len; i++) { + if (arr[i].hal_name == hal_name) { + return arr[i].fwk_name; + } + } + + /* Not able to find matching framework type is not necessarily + * an error case. This happens when mm-camera supports more attributes + * than the frameworks do */ + LOGH("Cannot find matching framework type"); + return NAME_NOT_FOUND; +} + +/*=========================================================================== + * FUNCTION : lookupHalName + * + * DESCRIPTION: In case the enum is not same in fwk and backend + * make sure the parameter is correctly propogated + * + * PARAMETERS : + * @arr : map between the two enums + * @len : len of the map + * @fwk_name : name of the hal_parm to map + * + * RETURN : int32_t type of status + * hal_name -- success + * none-zero failure code + *==========================================================================*/ +template <typename fwkType, class mapType> int lookupHalName(const mapType *arr, + size_t len, fwkType fwk_name) +{ + for (size_t i = 0; i < len; i++) { + if (arr[i].fwk_name == fwk_name) { + return arr[i].hal_name; + } + } + + LOGE("Cannot find matching hal type fwk_name=%d", fwk_name); + return NAME_NOT_FOUND; +} + +/*=========================================================================== + * FUNCTION : lookupProp + * + * DESCRIPTION: lookup a value by its name + * + * PARAMETERS : + * @arr : map between the two enums + * @len : size of the map + * @name : name to be looked up + * + * RETURN : Value if found + * CAM_CDS_MODE_MAX if not found + *==========================================================================*/ +template <class mapType> cam_cds_mode_type_t lookupProp(const mapType *arr, + size_t len, const char *name) +{ + if (name) { + for (size_t i = 0; i < len; i++) { + if (!strcmp(arr[i].desc, name)) { + return arr[i].val; + } + } + } + return CAM_CDS_MODE_MAX; +} + +/*=========================================================================== + * + * DESCRIPTION: + 
* + * PARAMETERS : + * @metadata : metadata information from callback + * @timestamp: metadata buffer timestamp + * @request_id: request id + * @jpegMetadata: additional jpeg metadata + * @pprocDone: whether internal offline postprocsesing is done + * + * RETURN : camera_metadata_t* + * metadata in a format specified by fwk + *==========================================================================*/ +camera_metadata_t* +QCamera3HardwareInterface::translateFromHalMetadata( + metadata_buffer_t *metadata, + nsecs_t timestamp, + int32_t request_id, + const CameraMetadata& jpegMetadata, + uint8_t pipeline_depth, + uint8_t capture_intent, + bool pprocDone, + uint8_t fwk_cacMode) +{ + CameraMetadata camMetadata; + camera_metadata_t *resultMetadata; + + if (jpegMetadata.entryCount()) + camMetadata.append(jpegMetadata); + + camMetadata.update(ANDROID_SENSOR_TIMESTAMP, ×tamp, 1); + camMetadata.update(ANDROID_REQUEST_ID, &request_id, 1); + camMetadata.update(ANDROID_REQUEST_PIPELINE_DEPTH, &pipeline_depth, 1); + camMetadata.update(ANDROID_CONTROL_CAPTURE_INTENT, &capture_intent, 1); + + IF_META_AVAILABLE(uint32_t, frame_number, CAM_INTF_META_FRAME_NUMBER, metadata) { + int64_t fwk_frame_number = *frame_number; + camMetadata.update(ANDROID_SYNC_FRAME_NUMBER, &fwk_frame_number, 1); + } + + IF_META_AVAILABLE(cam_fps_range_t, float_range, CAM_INTF_PARM_FPS_RANGE, metadata) { + int32_t fps_range[2]; + fps_range[0] = (int32_t)float_range->min_fps; + fps_range[1] = (int32_t)float_range->max_fps; + camMetadata.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + fps_range, 2); + LOGD("urgent Metadata : ANDROID_CONTROL_AE_TARGET_FPS_RANGE [%d, %d]", + fps_range[0], fps_range[1]); + } + + IF_META_AVAILABLE(int32_t, expCompensation, CAM_INTF_PARM_EXPOSURE_COMPENSATION, metadata) { + camMetadata.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, expCompensation, 1); + } + + IF_META_AVAILABLE(uint32_t, sceneMode, CAM_INTF_PARM_BESTSHOT_MODE, metadata) { + int val = 
(uint8_t)lookupFwkName(SCENE_MODES_MAP, + METADATA_MAP_SIZE(SCENE_MODES_MAP), + *sceneMode); + if (NAME_NOT_FOUND != val) { + uint8_t fwkSceneMode = (uint8_t)val; + camMetadata.update(ANDROID_CONTROL_SCENE_MODE, &fwkSceneMode, 1); + LOGD("urgent Metadata : ANDROID_CONTROL_SCENE_MODE: %d", + fwkSceneMode); + } + } + + IF_META_AVAILABLE(uint32_t, ae_lock, CAM_INTF_PARM_AEC_LOCK, metadata) { + uint8_t fwk_ae_lock = (uint8_t) *ae_lock; + camMetadata.update(ANDROID_CONTROL_AE_LOCK, &fwk_ae_lock, 1); + } + + IF_META_AVAILABLE(uint32_t, awb_lock, CAM_INTF_PARM_AWB_LOCK, metadata) { + uint8_t fwk_awb_lock = (uint8_t) *awb_lock; + camMetadata.update(ANDROID_CONTROL_AWB_LOCK, &fwk_awb_lock, 1); + } + + IF_META_AVAILABLE(uint32_t, color_correct_mode, CAM_INTF_META_COLOR_CORRECT_MODE, metadata) { + uint8_t fwk_color_correct_mode = (uint8_t) *color_correct_mode; + camMetadata.update(ANDROID_COLOR_CORRECTION_MODE, &fwk_color_correct_mode, 1); + } + + IF_META_AVAILABLE(cam_edge_application_t, edgeApplication, + CAM_INTF_META_EDGE_MODE, metadata) { + camMetadata.update(ANDROID_EDGE_MODE, &(edgeApplication->edge_mode), 1); + } + + IF_META_AVAILABLE(uint32_t, flashPower, CAM_INTF_META_FLASH_POWER, metadata) { + uint8_t fwk_flashPower = (uint8_t) *flashPower; + camMetadata.update(ANDROID_FLASH_FIRING_POWER, &fwk_flashPower, 1); + } + + IF_META_AVAILABLE(int64_t, flashFiringTime, CAM_INTF_META_FLASH_FIRING_TIME, metadata) { + camMetadata.update(ANDROID_FLASH_FIRING_TIME, flashFiringTime, 1); + } + + IF_META_AVAILABLE(int32_t, flashState, CAM_INTF_META_FLASH_STATE, metadata) { + if (0 <= *flashState) { + uint8_t fwk_flashState = (uint8_t) *flashState; + if (!gCamCapability[mCameraId]->flash_available) { + fwk_flashState = ANDROID_FLASH_STATE_UNAVAILABLE; + } + camMetadata.update(ANDROID_FLASH_STATE, &fwk_flashState, 1); + } + } + + IF_META_AVAILABLE(uint32_t, flashMode, CAM_INTF_META_FLASH_MODE, metadata) { + int val = lookupFwkName(FLASH_MODES_MAP, METADATA_MAP_SIZE(FLASH_MODES_MAP), 
*flashMode); + if (NAME_NOT_FOUND != val) { + uint8_t fwk_flashMode = (uint8_t)val; + camMetadata.update(ANDROID_FLASH_MODE, &fwk_flashMode, 1); + } + } + + IF_META_AVAILABLE(uint32_t, hotPixelMode, CAM_INTF_META_HOTPIXEL_MODE, metadata) { + uint8_t fwk_hotPixelMode = (uint8_t) *hotPixelMode; + camMetadata.update(ANDROID_HOT_PIXEL_MODE, &fwk_hotPixelMode, 1); + } + + IF_META_AVAILABLE(float, lensAperture, CAM_INTF_META_LENS_APERTURE, metadata) { + camMetadata.update(ANDROID_LENS_APERTURE , lensAperture, 1); + } + + IF_META_AVAILABLE(float, filterDensity, CAM_INTF_META_LENS_FILTERDENSITY, metadata) { + camMetadata.update(ANDROID_LENS_FILTER_DENSITY , filterDensity, 1); + } + + IF_META_AVAILABLE(float, focalLength, CAM_INTF_META_LENS_FOCAL_LENGTH, metadata) { + camMetadata.update(ANDROID_LENS_FOCAL_LENGTH, focalLength, 1); + } + + IF_META_AVAILABLE(uint32_t, opticalStab, CAM_INTF_META_LENS_OPT_STAB_MODE, metadata) { + uint8_t fwk_opticalStab = (uint8_t) *opticalStab; + camMetadata.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, &fwk_opticalStab, 1); + } + + IF_META_AVAILABLE(uint32_t, videoStab, CAM_INTF_META_VIDEO_STAB_MODE, metadata) { + uint8_t fwk_videoStab = (uint8_t) *videoStab; + LOGD("fwk_videoStab = %d", fwk_videoStab); + camMetadata.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &fwk_videoStab, 1); + } else { + // Regardless of Video stab supports or not, CTS is expecting the EIS result to be non NULL + // and so hardcoding the Video Stab result to OFF mode. 
+ uint8_t fwkVideoStabMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF; + camMetadata.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &fwkVideoStabMode, 1); + LOGD("%s: EIS result default to OFF mode", __func__); + } + + IF_META_AVAILABLE(uint32_t, noiseRedMode, CAM_INTF_META_NOISE_REDUCTION_MODE, metadata) { + uint8_t fwk_noiseRedMode = (uint8_t) *noiseRedMode; + camMetadata.update(ANDROID_NOISE_REDUCTION_MODE, &fwk_noiseRedMode, 1); + } + + IF_META_AVAILABLE(float, effectiveExposureFactor, CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR, metadata) { + camMetadata.update(ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR, effectiveExposureFactor, 1); + } + + IF_META_AVAILABLE(cam_black_level_metadata_t, blackLevelSourcePattern, + CAM_INTF_META_BLACK_LEVEL_SOURCE_PATTERN, metadata) { + + LOGD("dynamicblackLevel = %f %f %f %f", + blackLevelSourcePattern->cam_black_level[0], + blackLevelSourcePattern->cam_black_level[1], + blackLevelSourcePattern->cam_black_level[2], + blackLevelSourcePattern->cam_black_level[3]); + } + + IF_META_AVAILABLE(cam_black_level_metadata_t, blackLevelAppliedPattern, + CAM_INTF_META_BLACK_LEVEL_APPLIED_PATTERN, metadata) { + float fwk_blackLevelInd[4]; + + fwk_blackLevelInd[0] = blackLevelAppliedPattern->cam_black_level[0]; + fwk_blackLevelInd[1] = blackLevelAppliedPattern->cam_black_level[1]; + fwk_blackLevelInd[2] = blackLevelAppliedPattern->cam_black_level[2]; + fwk_blackLevelInd[3] = blackLevelAppliedPattern->cam_black_level[3]; + + LOGD("applied dynamicblackLevel = %f %f %f %f", + blackLevelAppliedPattern->cam_black_level[0], + blackLevelAppliedPattern->cam_black_level[1], + blackLevelAppliedPattern->cam_black_level[2], + blackLevelAppliedPattern->cam_black_level[3]); + camMetadata.update(QCAMERA3_SENSOR_DYNAMIC_BLACK_LEVEL_PATTERN, fwk_blackLevelInd, 4); + camMetadata.update(NEXUS_EXPERIMENTAL_2015_SENSOR_DYNAMIC_BLACK_LEVEL, fwk_blackLevelInd, 4); + } + + + if (gCamCapability[mCameraId]->optical_black_region_count != 0 && + 
gCamCapability[mCameraId]->optical_black_region_count <= MAX_OPTICAL_BLACK_REGIONS) { + int32_t opticalBlackRegions[MAX_OPTICAL_BLACK_REGIONS * 4]; + for (size_t i = 0; i < gCamCapability[mCameraId]->optical_black_region_count * 4; i++) { + opticalBlackRegions[i] = gCamCapability[mCameraId]->optical_black_regions[i]; + } + camMetadata.update(NEXUS_EXPERIMENTAL_2015_SENSOR_INFO_OPTICALLY_SHIELDED_REGIONS, + opticalBlackRegions, gCamCapability[mCameraId]->optical_black_region_count * 4); + } + + IF_META_AVAILABLE(cam_crop_region_t, hScalerCropRegion, + CAM_INTF_META_SCALER_CROP_REGION, metadata) { + int32_t scalerCropRegion[4]; + scalerCropRegion[0] = hScalerCropRegion->left; + scalerCropRegion[1] = hScalerCropRegion->top; + scalerCropRegion[2] = hScalerCropRegion->width; + scalerCropRegion[3] = hScalerCropRegion->height; + + // Adjust crop region from sensor output coordinate system to active + // array coordinate system. + mCropRegionMapper.toActiveArray(scalerCropRegion[0], scalerCropRegion[1], + scalerCropRegion[2], scalerCropRegion[3]); + + camMetadata.update(ANDROID_SCALER_CROP_REGION, scalerCropRegion, 4); + } + + IF_META_AVAILABLE(int64_t, sensorExpTime, CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata) { + LOGD("sensorExpTime = %lld", *sensorExpTime); + camMetadata.update(ANDROID_SENSOR_EXPOSURE_TIME , sensorExpTime, 1); + } + + IF_META_AVAILABLE(int64_t, sensorFameDuration, + CAM_INTF_META_SENSOR_FRAME_DURATION, metadata) { + LOGD("sensorFameDuration = %lld", *sensorFameDuration); + camMetadata.update(ANDROID_SENSOR_FRAME_DURATION, sensorFameDuration, 1); + } + + IF_META_AVAILABLE(int64_t, sensorRollingShutterSkew, + CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW, metadata) { + LOGD("sensorRollingShutterSkew = %lld", *sensorRollingShutterSkew); + camMetadata.update(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW, + sensorRollingShutterSkew, 1); + } + + IF_META_AVAILABLE(int32_t, sensorSensitivity, CAM_INTF_META_SENSOR_SENSITIVITY, metadata) { + LOGD("sensorSensitivity = %d", 
*sensorSensitivity); + camMetadata.update(ANDROID_SENSOR_SENSITIVITY, sensorSensitivity, 1); + + //calculate the noise profile based on sensitivity + double noise_profile_S = computeNoiseModelEntryS(*sensorSensitivity); + double noise_profile_O = computeNoiseModelEntryO(*sensorSensitivity); + double noise_profile[2 * gCamCapability[mCameraId]->num_color_channels]; + for (int i = 0; i < 2 * gCamCapability[mCameraId]->num_color_channels; i += 2) { + noise_profile[i] = noise_profile_S; + noise_profile[i+1] = noise_profile_O; + } + LOGD("noise model entry (S, O) is (%f, %f)", + noise_profile_S, noise_profile_O); + camMetadata.update(ANDROID_SENSOR_NOISE_PROFILE, noise_profile, + (size_t) (2 * gCamCapability[mCameraId]->num_color_channels)); + } + + IF_META_AVAILABLE(uint32_t, shadingMode, CAM_INTF_META_SHADING_MODE, metadata) { + uint8_t fwk_shadingMode = (uint8_t) *shadingMode; + camMetadata.update(ANDROID_SHADING_MODE, &fwk_shadingMode, 1); + } + + IF_META_AVAILABLE(uint32_t, faceDetectMode, CAM_INTF_META_STATS_FACEDETECT_MODE, metadata) { + int val = lookupFwkName(FACEDETECT_MODES_MAP, METADATA_MAP_SIZE(FACEDETECT_MODES_MAP), + *faceDetectMode); + if (NAME_NOT_FOUND != val) { + uint8_t fwk_faceDetectMode = (uint8_t)val; + camMetadata.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &fwk_faceDetectMode, 1); + + if (fwk_faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) { + IF_META_AVAILABLE(cam_face_detection_data_t, faceDetectionInfo, + CAM_INTF_META_FACE_DETECTION, metadata) { + uint8_t numFaces = MIN( + faceDetectionInfo->num_faces_detected, MAX_ROI); + int32_t faceIds[MAX_ROI]; + uint8_t faceScores[MAX_ROI]; + int32_t faceRectangles[MAX_ROI * 4]; + int32_t faceLandmarks[MAX_ROI * 6]; + size_t j = 0, k = 0; + + for (size_t i = 0; i < numFaces; i++) { + faceScores[i] = (uint8_t)faceDetectionInfo->faces[i].score; + // Adjust crop region from sensor output coordinate system to active + // array coordinate system. 
+ cam_rect_t& rect = faceDetectionInfo->faces[i].face_boundary; + mCropRegionMapper.toActiveArray(rect.left, rect.top, + rect.width, rect.height); + + convertToRegions(faceDetectionInfo->faces[i].face_boundary, + faceRectangles+j, -1); + + j+= 4; + } + if (numFaces <= 0) { + memset(faceIds, 0, sizeof(int32_t) * MAX_ROI); + memset(faceScores, 0, sizeof(uint8_t) * MAX_ROI); + memset(faceRectangles, 0, sizeof(int32_t) * MAX_ROI * 4); + memset(faceLandmarks, 0, sizeof(int32_t) * MAX_ROI * 6); + } + + camMetadata.update(ANDROID_STATISTICS_FACE_SCORES, faceScores, + numFaces); + camMetadata.update(ANDROID_STATISTICS_FACE_RECTANGLES, + faceRectangles, numFaces * 4U); + if (fwk_faceDetectMode == + ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) { + IF_META_AVAILABLE(cam_face_landmarks_data_t, landmarks, + CAM_INTF_META_FACE_LANDMARK, metadata) { + + for (size_t i = 0; i < numFaces; i++) { + // Map the co-ordinate sensor output coordinate system to active + // array coordinate system. + mCropRegionMapper.toActiveArray( + landmarks->face_landmarks[i].left_eye_center.x, + landmarks->face_landmarks[i].left_eye_center.y); + mCropRegionMapper.toActiveArray( + landmarks->face_landmarks[i].right_eye_center.x, + landmarks->face_landmarks[i].right_eye_center.y); + mCropRegionMapper.toActiveArray( + landmarks->face_landmarks[i].mouth_center.x, + landmarks->face_landmarks[i].mouth_center.y); + + convertLandmarks(landmarks->face_landmarks[i], faceLandmarks+k); + k+= 6; + } + } + + camMetadata.update(ANDROID_STATISTICS_FACE_IDS, faceIds, numFaces); + camMetadata.update(ANDROID_STATISTICS_FACE_LANDMARKS, + faceLandmarks, numFaces * 6U); + } + } + } + } + } + + IF_META_AVAILABLE(uint32_t, histogramMode, CAM_INTF_META_STATS_HISTOGRAM_MODE, metadata) { + uint8_t fwk_histogramMode = (uint8_t) *histogramMode; + camMetadata.update(ANDROID_STATISTICS_HISTOGRAM_MODE, &fwk_histogramMode, 1); + } + + IF_META_AVAILABLE(uint32_t, sharpnessMapMode, + CAM_INTF_META_STATS_SHARPNESS_MAP_MODE, metadata) { + 
uint8_t fwk_sharpnessMapMode = (uint8_t) *sharpnessMapMode; + camMetadata.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &fwk_sharpnessMapMode, 1); + } + + IF_META_AVAILABLE(cam_sharpness_map_t, sharpnessMap, + CAM_INTF_META_STATS_SHARPNESS_MAP, metadata) { + camMetadata.update(ANDROID_STATISTICS_SHARPNESS_MAP, (int32_t *)sharpnessMap->sharpness, + CAM_MAX_MAP_WIDTH * CAM_MAX_MAP_HEIGHT * 3); + } + + IF_META_AVAILABLE(cam_lens_shading_map_t, lensShadingMap, + CAM_INTF_META_LENS_SHADING_MAP, metadata) { + size_t map_height = MIN((size_t)gCamCapability[mCameraId]->lens_shading_map_size.height, + CAM_MAX_SHADING_MAP_HEIGHT); + size_t map_width = MIN((size_t)gCamCapability[mCameraId]->lens_shading_map_size.width, + CAM_MAX_SHADING_MAP_WIDTH); + camMetadata.update(ANDROID_STATISTICS_LENS_SHADING_MAP, + lensShadingMap->lens_shading, 4U * map_width * map_height); + } + + IF_META_AVAILABLE(uint32_t, toneMapMode, CAM_INTF_META_TONEMAP_MODE, metadata) { + uint8_t fwk_toneMapMode = (uint8_t) *toneMapMode; + camMetadata.update(ANDROID_TONEMAP_MODE, &fwk_toneMapMode, 1); + } + + IF_META_AVAILABLE(cam_rgb_tonemap_curves, tonemap, CAM_INTF_META_TONEMAP_CURVES, metadata) { + //Populate CAM_INTF_META_TONEMAP_CURVES + /* ch0 = G, ch 1 = B, ch 2 = R*/ + if (tonemap->tonemap_points_cnt > CAM_MAX_TONEMAP_CURVE_SIZE) { + LOGE("Fatal: tonemap_points_cnt %d exceeds max value of %d", + tonemap->tonemap_points_cnt, + CAM_MAX_TONEMAP_CURVE_SIZE); + tonemap->tonemap_points_cnt = CAM_MAX_TONEMAP_CURVE_SIZE; + } + + camMetadata.update(ANDROID_TONEMAP_CURVE_GREEN, + &tonemap->curves[0].tonemap_points[0][0], + tonemap->tonemap_points_cnt * 2); + + camMetadata.update(ANDROID_TONEMAP_CURVE_BLUE, + &tonemap->curves[1].tonemap_points[0][0], + tonemap->tonemap_points_cnt * 2); + + camMetadata.update(ANDROID_TONEMAP_CURVE_RED, + &tonemap->curves[2].tonemap_points[0][0], + tonemap->tonemap_points_cnt * 2); + } + + IF_META_AVAILABLE(cam_color_correct_gains_t, colorCorrectionGains, + 
CAM_INTF_META_COLOR_CORRECT_GAINS, metadata) { + camMetadata.update(ANDROID_COLOR_CORRECTION_GAINS, colorCorrectionGains->gains, + CC_GAINS_COUNT); + } + + IF_META_AVAILABLE(cam_color_correct_matrix_t, colorCorrectionMatrix, + CAM_INTF_META_COLOR_CORRECT_TRANSFORM, metadata) { + camMetadata.update(ANDROID_COLOR_CORRECTION_TRANSFORM, + (camera_metadata_rational_t *)(void *)colorCorrectionMatrix->transform_matrix, + CC_MATRIX_COLS * CC_MATRIX_ROWS); + } + + IF_META_AVAILABLE(cam_profile_tone_curve, toneCurve, + CAM_INTF_META_PROFILE_TONE_CURVE, metadata) { + if (toneCurve->tonemap_points_cnt > CAM_MAX_TONEMAP_CURVE_SIZE) { + LOGE("Fatal: tonemap_points_cnt %d exceeds max value of %d", + toneCurve->tonemap_points_cnt, + CAM_MAX_TONEMAP_CURVE_SIZE); + toneCurve->tonemap_points_cnt = CAM_MAX_TONEMAP_CURVE_SIZE; + } + camMetadata.update(ANDROID_SENSOR_PROFILE_TONE_CURVE, + (float*)toneCurve->curve.tonemap_points, + toneCurve->tonemap_points_cnt * 2); + } + + IF_META_AVAILABLE(cam_color_correct_gains_t, predColorCorrectionGains, + CAM_INTF_META_PRED_COLOR_CORRECT_GAINS, metadata) { + camMetadata.update(ANDROID_STATISTICS_PREDICTED_COLOR_GAINS, + predColorCorrectionGains->gains, 4); + } + + IF_META_AVAILABLE(cam_color_correct_matrix_t, predColorCorrectionMatrix, + CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM, metadata) { + camMetadata.update(ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM, + (camera_metadata_rational_t *)(void *)predColorCorrectionMatrix->transform_matrix, + CC_MATRIX_ROWS * CC_MATRIX_COLS); + } + + IF_META_AVAILABLE(float, otpWbGrGb, CAM_INTF_META_OTP_WB_GRGB, metadata) { + camMetadata.update(ANDROID_SENSOR_GREEN_SPLIT, otpWbGrGb, 1); + } + + IF_META_AVAILABLE(uint32_t, blackLevelLock, CAM_INTF_META_BLACK_LEVEL_LOCK, metadata) { + uint8_t fwk_blackLevelLock = (uint8_t) *blackLevelLock; + camMetadata.update(ANDROID_BLACK_LEVEL_LOCK, &fwk_blackLevelLock, 1); + } + + IF_META_AVAILABLE(uint32_t, sceneFlicker, CAM_INTF_META_SCENE_FLICKER, metadata) { + uint8_t 
fwk_sceneFlicker = (uint8_t) *sceneFlicker; + camMetadata.update(ANDROID_STATISTICS_SCENE_FLICKER, &fwk_sceneFlicker, 1); + } + + IF_META_AVAILABLE(uint32_t, effectMode, CAM_INTF_PARM_EFFECT, metadata) { + int val = lookupFwkName(EFFECT_MODES_MAP, METADATA_MAP_SIZE(EFFECT_MODES_MAP), + *effectMode); + if (NAME_NOT_FOUND != val) { + uint8_t fwk_effectMode = (uint8_t)val; + camMetadata.update(ANDROID_CONTROL_EFFECT_MODE, &fwk_effectMode, 1); + } + } + + IF_META_AVAILABLE(cam_test_pattern_data_t, testPatternData, + CAM_INTF_META_TEST_PATTERN_DATA, metadata) { + int32_t fwk_testPatternMode = lookupFwkName(TEST_PATTERN_MAP, + METADATA_MAP_SIZE(TEST_PATTERN_MAP), testPatternData->mode); + if (NAME_NOT_FOUND != fwk_testPatternMode) { + camMetadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &fwk_testPatternMode, 1); + } + int32_t fwk_testPatternData[4]; + fwk_testPatternData[0] = testPatternData->r; + fwk_testPatternData[3] = testPatternData->b; + switch (gCamCapability[mCameraId]->color_arrangement) { + case CAM_FILTER_ARRANGEMENT_RGGB: + case CAM_FILTER_ARRANGEMENT_GRBG: + fwk_testPatternData[1] = testPatternData->gr; + fwk_testPatternData[2] = testPatternData->gb; + break; + case CAM_FILTER_ARRANGEMENT_GBRG: + case CAM_FILTER_ARRANGEMENT_BGGR: + fwk_testPatternData[2] = testPatternData->gr; + fwk_testPatternData[1] = testPatternData->gb; + break; + default: + LOGE("color arrangement %d is not supported", + gCamCapability[mCameraId]->color_arrangement); + break; + } + camMetadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA, fwk_testPatternData, 4); + } + + IF_META_AVAILABLE(double, gps_coords, CAM_INTF_META_JPEG_GPS_COORDINATES, metadata) { + camMetadata.update(ANDROID_JPEG_GPS_COORDINATES, gps_coords, 3); + } + + IF_META_AVAILABLE(uint8_t, gps_methods, CAM_INTF_META_JPEG_GPS_PROC_METHODS, metadata) { + String8 str((const char *)gps_methods); + camMetadata.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, str); + } + + IF_META_AVAILABLE(int64_t, gps_timestamp, 
CAM_INTF_META_JPEG_GPS_TIMESTAMP, metadata) { + camMetadata.update(ANDROID_JPEG_GPS_TIMESTAMP, gps_timestamp, 1); + } + + IF_META_AVAILABLE(int32_t, jpeg_orientation, CAM_INTF_META_JPEG_ORIENTATION, metadata) { + camMetadata.update(ANDROID_JPEG_ORIENTATION, jpeg_orientation, 1); + } + + IF_META_AVAILABLE(uint32_t, jpeg_quality, CAM_INTF_META_JPEG_QUALITY, metadata) { + uint8_t fwk_jpeg_quality = (uint8_t) *jpeg_quality; + camMetadata.update(ANDROID_JPEG_QUALITY, &fwk_jpeg_quality, 1); + } + + IF_META_AVAILABLE(uint32_t, thumb_quality, CAM_INTF_META_JPEG_THUMB_QUALITY, metadata) { + uint8_t fwk_thumb_quality = (uint8_t) *thumb_quality; + camMetadata.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &fwk_thumb_quality, 1); + } + + IF_META_AVAILABLE(cam_dimension_t, thumb_size, CAM_INTF_META_JPEG_THUMB_SIZE, metadata) { + int32_t fwk_thumb_size[2]; + fwk_thumb_size[0] = thumb_size->width; + fwk_thumb_size[1] = thumb_size->height; + camMetadata.update(ANDROID_JPEG_THUMBNAIL_SIZE, fwk_thumb_size, 2); + } + + IF_META_AVAILABLE(int32_t, privateData, CAM_INTF_META_PRIVATE_DATA, metadata) { + camMetadata.update(QCAMERA3_PRIVATEDATA_REPROCESS, + privateData, + MAX_METADATA_PRIVATE_PAYLOAD_SIZE_IN_BYTES / sizeof(int32_t)); + } + + if (metadata->is_tuning_params_valid) { + uint8_t tuning_meta_data_blob[sizeof(tuning_params_t)]; + uint8_t *data = (uint8_t *)&tuning_meta_data_blob[0]; + metadata->tuning_params.tuning_data_version = TUNING_DATA_VERSION; + + + memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_data_version), + sizeof(uint32_t)); + data += sizeof(uint32_t); + + memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_sensor_data_size), + sizeof(uint32_t)); + LOGD("tuning_sensor_data_size %d",(int)(*(int *)data)); + data += sizeof(uint32_t); + + memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_vfe_data_size), + sizeof(uint32_t)); + LOGD("tuning_vfe_data_size %d",(int)(*(int *)data)); + data += sizeof(uint32_t); + + memcpy(data, ((uint8_t 
*)&metadata->tuning_params.tuning_cpp_data_size), + sizeof(uint32_t)); + LOGD("tuning_cpp_data_size %d",(int)(*(int *)data)); + data += sizeof(uint32_t); + + memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_cac_data_size), + sizeof(uint32_t)); + LOGD("tuning_cac_data_size %d",(int)(*(int *)data)); + data += sizeof(uint32_t); + + metadata->tuning_params.tuning_mod3_data_size = 0; + memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_mod3_data_size), + sizeof(uint32_t)); + LOGD("tuning_mod3_data_size %d",(int)(*(int *)data)); + data += sizeof(uint32_t); + + size_t count = MIN(metadata->tuning_params.tuning_sensor_data_size, + TUNING_SENSOR_DATA_MAX); + memcpy(data, ((uint8_t *)&metadata->tuning_params.data), + count); + data += count; + + count = MIN(metadata->tuning_params.tuning_vfe_data_size, + TUNING_VFE_DATA_MAX); + memcpy(data, ((uint8_t *)&metadata->tuning_params.data[TUNING_VFE_DATA_OFFSET]), + count); + data += count; + + count = MIN(metadata->tuning_params.tuning_cpp_data_size, + TUNING_CPP_DATA_MAX); + memcpy(data, ((uint8_t *)&metadata->tuning_params.data[TUNING_CPP_DATA_OFFSET]), + count); + data += count; + + count = MIN(metadata->tuning_params.tuning_cac_data_size, + TUNING_CAC_DATA_MAX); + memcpy(data, ((uint8_t *)&metadata->tuning_params.data[TUNING_CAC_DATA_OFFSET]), + count); + data += count; + + camMetadata.update(QCAMERA3_TUNING_META_DATA_BLOB, + (int32_t *)(void *)tuning_meta_data_blob, + (size_t)(data-tuning_meta_data_blob) / sizeof(uint32_t)); + } + + IF_META_AVAILABLE(cam_neutral_col_point_t, neuColPoint, + CAM_INTF_META_NEUTRAL_COL_POINT, metadata) { + camMetadata.update(ANDROID_SENSOR_NEUTRAL_COLOR_POINT, + (camera_metadata_rational_t *)(void *)neuColPoint->neutral_col_point, + NEUTRAL_COL_POINTS); + } + + IF_META_AVAILABLE(uint32_t, shadingMapMode, CAM_INTF_META_LENS_SHADING_MAP_MODE, metadata) { + uint8_t fwk_shadingMapMode = (uint8_t) *shadingMapMode; + camMetadata.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, 
&fwk_shadingMapMode, 1); + } + + IF_META_AVAILABLE(cam_area_t, hAeRegions, CAM_INTF_META_AEC_ROI, metadata) { + int32_t aeRegions[REGIONS_TUPLE_COUNT]; + // Adjust crop region from sensor output coordinate system to active + // array coordinate system. + mCropRegionMapper.toActiveArray(hAeRegions->rect.left, hAeRegions->rect.top, + hAeRegions->rect.width, hAeRegions->rect.height); + + convertToRegions(hAeRegions->rect, aeRegions, hAeRegions->weight); + camMetadata.update(ANDROID_CONTROL_AE_REGIONS, aeRegions, + REGIONS_TUPLE_COUNT); + LOGD("Metadata : ANDROID_CONTROL_AE_REGIONS: FWK: [%d,%d,%d,%d] HAL: [%d,%d,%d,%d]", + aeRegions[0], aeRegions[1], aeRegions[2], aeRegions[3], + hAeRegions->rect.left, hAeRegions->rect.top, hAeRegions->rect.width, + hAeRegions->rect.height); + } + + IF_META_AVAILABLE(uint32_t, afState, CAM_INTF_META_AF_STATE, metadata) { + uint8_t fwk_afState = (uint8_t) *afState; + camMetadata.update(ANDROID_CONTROL_AF_STATE, &fwk_afState, 1); + LOGD("urgent Metadata : ANDROID_CONTROL_AF_STATE %u", *afState); + } + + IF_META_AVAILABLE(float, focusDistance, CAM_INTF_META_LENS_FOCUS_DISTANCE, metadata) { + camMetadata.update(ANDROID_LENS_FOCUS_DISTANCE , focusDistance, 1); + } + + IF_META_AVAILABLE(float, focusRange, CAM_INTF_META_LENS_FOCUS_RANGE, metadata) { + camMetadata.update(ANDROID_LENS_FOCUS_RANGE , focusRange, 2); + } + + IF_META_AVAILABLE(cam_af_lens_state_t, lensState, CAM_INTF_META_LENS_STATE, metadata) { + uint8_t fwk_lensState = *lensState; + camMetadata.update(ANDROID_LENS_STATE , &fwk_lensState, 1); + } + + IF_META_AVAILABLE(cam_area_t, hAfRegions, CAM_INTF_META_AF_ROI, metadata) { + /*af regions*/ + int32_t afRegions[REGIONS_TUPLE_COUNT]; + // Adjust crop region from sensor output coordinate system to active + // array coordinate system. 
+ mCropRegionMapper.toActiveArray(hAfRegions->rect.left, hAfRegions->rect.top, + hAfRegions->rect.width, hAfRegions->rect.height); + + convertToRegions(hAfRegions->rect, afRegions, hAfRegions->weight); + camMetadata.update(ANDROID_CONTROL_AF_REGIONS, afRegions, + REGIONS_TUPLE_COUNT); + LOGD("Metadata : ANDROID_CONTROL_AF_REGIONS: FWK: [%d,%d,%d,%d] HAL: [%d,%d,%d,%d]", + afRegions[0], afRegions[1], afRegions[2], afRegions[3], + hAfRegions->rect.left, hAfRegions->rect.top, hAfRegions->rect.width, + hAfRegions->rect.height); + } + + IF_META_AVAILABLE(uint32_t, hal_ab_mode, CAM_INTF_PARM_ANTIBANDING, metadata) { + int val = lookupFwkName(ANTIBANDING_MODES_MAP, METADATA_MAP_SIZE(ANTIBANDING_MODES_MAP), + *hal_ab_mode); + if (NAME_NOT_FOUND != val) { + uint8_t fwk_ab_mode = (uint8_t)val; + camMetadata.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &fwk_ab_mode, 1); + } + } + + IF_META_AVAILABLE(uint32_t, bestshotMode, CAM_INTF_PARM_BESTSHOT_MODE, metadata) { + int val = lookupFwkName(SCENE_MODES_MAP, + METADATA_MAP_SIZE(SCENE_MODES_MAP), *bestshotMode); + if (NAME_NOT_FOUND != val) { + uint8_t fwkBestshotMode = (uint8_t)val; + camMetadata.update(ANDROID_CONTROL_SCENE_MODE, &fwkBestshotMode, 1); + LOGD("Metadata : ANDROID_CONTROL_SCENE_MODE"); + } else { + LOGH("Metadata not found : ANDROID_CONTROL_SCENE_MODE"); + } + } + + IF_META_AVAILABLE(uint32_t, mode, CAM_INTF_META_MODE, metadata) { + uint8_t fwk_mode = (uint8_t) *mode; + camMetadata.update(ANDROID_CONTROL_MODE, &fwk_mode, 1); + } + + /* Constant metadata values to be update*/ + uint8_t hotPixelModeFast = ANDROID_HOT_PIXEL_MODE_FAST; + camMetadata.update(ANDROID_HOT_PIXEL_MODE, &hotPixelModeFast, 1); + + uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF; + camMetadata.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1); + + int32_t hotPixelMap[2]; + camMetadata.update(ANDROID_STATISTICS_HOT_PIXEL_MAP, &hotPixelMap[0], 0); + + // CDS + IF_META_AVAILABLE(int32_t, cds, 
CAM_INTF_PARM_CDS_MODE, metadata) { + camMetadata.update(QCAMERA3_CDS_MODE, cds, 1); + } + + // TNR + IF_META_AVAILABLE(cam_denoise_param_t, tnr, CAM_INTF_PARM_TEMPORAL_DENOISE, metadata) { + uint8_t tnr_enable = tnr->denoise_enable; + int32_t tnr_process_type = (int32_t)tnr->process_plates; + + camMetadata.update(QCAMERA3_TEMPORAL_DENOISE_ENABLE, &tnr_enable, 1); + camMetadata.update(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE, &tnr_process_type, 1); + } + + // Reprocess crop data + IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, metadata) { + uint8_t cnt = crop_data->num_of_streams; + if ( (0 >= cnt) || (cnt > MAX_NUM_STREAMS)) { + // mm-qcamera-daemon only posts crop_data for streams + // not linked to pproc. So no valid crop metadata is not + // necessarily an error case. + LOGD("No valid crop metadata entries"); + } else { + uint32_t reproc_stream_id; + if ( NO_ERROR != getReprocessibleOutputStreamId(reproc_stream_id)) { + LOGD("No reprocessible stream found, ignore crop data"); + } else { + int rc = NO_ERROR; + Vector<int32_t> roi_map; + int32_t *crop = new int32_t[cnt*4]; + if (NULL == crop) { + rc = NO_MEMORY; + } + if (NO_ERROR == rc) { + int32_t streams_found = 0; + for (size_t i = 0; i < cnt; i++) { + if (crop_data->crop_info[i].stream_id == reproc_stream_id) { + if (pprocDone) { + // HAL already does internal reprocessing, + // either via reprocessing before JPEG encoding, + // or offline postprocessing for pproc bypass case. 
+ crop[0] = 0; + crop[1] = 0; + crop[2] = mInputStreamInfo.dim.width; + crop[3] = mInputStreamInfo.dim.height; + } else { + crop[0] = crop_data->crop_info[i].crop.left; + crop[1] = crop_data->crop_info[i].crop.top; + crop[2] = crop_data->crop_info[i].crop.width; + crop[3] = crop_data->crop_info[i].crop.height; + } + roi_map.add(crop_data->crop_info[i].roi_map.left); + roi_map.add(crop_data->crop_info[i].roi_map.top); + roi_map.add(crop_data->crop_info[i].roi_map.width); + roi_map.add(crop_data->crop_info[i].roi_map.height); + streams_found++; + LOGD("Adding reprocess crop data for stream %dx%d, %dx%d", + crop[0], crop[1], crop[2], crop[3]); + LOGD("Adding reprocess crop roi map for stream %dx%d, %dx%d", + crop_data->crop_info[i].roi_map.left, + crop_data->crop_info[i].roi_map.top, + crop_data->crop_info[i].roi_map.width, + crop_data->crop_info[i].roi_map.height); + break; + + } + } + camMetadata.update(QCAMERA3_CROP_COUNT_REPROCESS, + &streams_found, 1); + camMetadata.update(QCAMERA3_CROP_REPROCESS, + crop, (size_t)(streams_found * 4)); + if (roi_map.array()) { + camMetadata.update(QCAMERA3_CROP_ROI_MAP_REPROCESS, + roi_map.array(), roi_map.size()); + } + } + if (crop) { + delete [] crop; + } + } + } + } + + if (gCamCapability[mCameraId]->aberration_modes_count == 0) { + // Regardless of CAC supports or not, CTS is expecting the CAC result to be non NULL and + // so hardcoding the CAC result to OFF mode. 
+ uint8_t fwkCacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF; + camMetadata.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &fwkCacMode, 1); + } else { + IF_META_AVAILABLE(cam_aberration_mode_t, cacMode, CAM_INTF_PARM_CAC, metadata) { + int val = lookupFwkName(COLOR_ABERRATION_MAP, METADATA_MAP_SIZE(COLOR_ABERRATION_MAP), + *cacMode); + if (NAME_NOT_FOUND != val) { + uint8_t resultCacMode = (uint8_t)val; + // check whether CAC result from CB is equal to Framework set CAC mode + // If not equal then set the CAC mode came in corresponding request + if (fwk_cacMode != resultCacMode) { + resultCacMode = fwk_cacMode; + } + LOGD("fwk_cacMode=%d resultCacMode=%d", fwk_cacMode, resultCacMode); + camMetadata.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &resultCacMode, 1); + } else { + LOGE("Invalid CAC camera parameter: %d", *cacMode); + } + } + } + + // Post blob of cam_cds_data through vendor tag. + IF_META_AVAILABLE(cam_cds_data_t, cdsInfo, CAM_INTF_META_CDS_DATA, metadata) { + uint8_t cnt = cdsInfo->num_of_streams; + cam_cds_data_t cdsDataOverride; + memset(&cdsDataOverride, 0, sizeof(cdsDataOverride)); + cdsDataOverride.session_cds_enable = cdsInfo->session_cds_enable; + cdsDataOverride.num_of_streams = 1; + if ((0 < cnt) && (cnt <= MAX_NUM_STREAMS)) { + uint32_t reproc_stream_id; + if ( NO_ERROR != getReprocessibleOutputStreamId(reproc_stream_id)) { + LOGD("No reprocessible stream found, ignore cds data"); + } else { + for (size_t i = 0; i < cnt; i++) { + if (cdsInfo->cds_info[i].stream_id == + reproc_stream_id) { + cdsDataOverride.cds_info[0].cds_enable = + cdsInfo->cds_info[i].cds_enable; + break; + } + } + } + } else { + LOGD("Invalid stream count %d in CDS_DATA", cnt); + } + camMetadata.update(QCAMERA3_CDS_INFO, + (uint8_t *)&cdsDataOverride, + sizeof(cam_cds_data_t)); + } + + // Ldaf calibration data + if (!mLdafCalibExist) { + IF_META_AVAILABLE(uint32_t, ldafCalib, + CAM_INTF_META_LDAF_EXIF, metadata) { + mLdafCalibExist = true; + mLdafCalib[0] = 
ldafCalib[0]; + mLdafCalib[1] = ldafCalib[1]; + LOGD("ldafCalib[0] is %d, ldafCalib[1] is %d", + ldafCalib[0], ldafCalib[1]); + } + } + + // Reprocess and DDM debug data through vendor tag + cam_reprocess_info_t repro_info; + memset(&repro_info, 0, sizeof(cam_reprocess_info_t)); + IF_META_AVAILABLE(cam_stream_crop_info_t, sensorCropInfo, + CAM_INTF_META_SNAP_CROP_INFO_SENSOR, metadata) { + memcpy(&(repro_info.sensor_crop_info), sensorCropInfo, sizeof(cam_stream_crop_info_t)); + } + IF_META_AVAILABLE(cam_stream_crop_info_t, camifCropInfo, + CAM_INTF_META_SNAP_CROP_INFO_CAMIF, metadata) { + memcpy(&(repro_info.camif_crop_info), camifCropInfo, sizeof(cam_stream_crop_info_t)); + } + IF_META_AVAILABLE(cam_stream_crop_info_t, ispCropInfo, + CAM_INTF_META_SNAP_CROP_INFO_ISP, metadata) { + memcpy(&(repro_info.isp_crop_info), ispCropInfo, sizeof(cam_stream_crop_info_t)); + } + IF_META_AVAILABLE(cam_stream_crop_info_t, cppCropInfo, + CAM_INTF_META_SNAP_CROP_INFO_CPP, metadata) { + memcpy(&(repro_info.cpp_crop_info), cppCropInfo, sizeof(cam_stream_crop_info_t)); + } + IF_META_AVAILABLE(cam_focal_length_ratio_t, ratio, + CAM_INTF_META_AF_FOCAL_LENGTH_RATIO, metadata) { + memcpy(&(repro_info.af_focal_length_ratio), ratio, sizeof(cam_focal_length_ratio_t)); + } + IF_META_AVAILABLE(int32_t, flip, CAM_INTF_PARM_FLIP, metadata) { + memcpy(&(repro_info.pipeline_flip), flip, sizeof(int32_t)); + } + IF_META_AVAILABLE(cam_rotation_info_t, rotationInfo, + CAM_INTF_PARM_ROTATION, metadata) { + memcpy(&(repro_info.rotation_info), rotationInfo, sizeof(cam_rotation_info_t)); + } + IF_META_AVAILABLE(cam_area_t, afRoi, CAM_INTF_META_AF_ROI, metadata) { + memcpy(&(repro_info.af_roi), afRoi, sizeof(cam_area_t)); + } + IF_META_AVAILABLE(cam_dyn_img_data_t, dynMask, CAM_INTF_META_IMG_DYN_FEAT, metadata) { + memcpy(&(repro_info.dyn_mask), dynMask, sizeof(cam_dyn_img_data_t)); + } + camMetadata.update(QCAMERA3_HAL_PRIVATEDATA_REPROCESS_DATA_BLOB, + (uint8_t *)&repro_info, 
sizeof(cam_reprocess_info_t));

    // Hand ownership of the assembled metadata buffer to the caller.
    resultMetadata = camMetadata.release();
    return resultMetadata;
}

/*===========================================================================
 * FUNCTION   : saveExifParams
 *
 * DESCRIPTION: caches the per-frame 3A/stats EXIF debug blobs from the
 *              metadata callback into mExifParams so they can be attached
 *              to the next JPEG encode
 *
 * PARAMETERS :
 *   @metadata : metadata information from callback
 *
 * RETURN     : none
 *
 *==========================================================================*/
void QCamera3HardwareInterface::saveExifParams(metadata_buffer_t *metadata)
{
    // mExifParams.debug_params may be NULL when debug data was never
    // allocated, hence the NULL check inside every block below.
    IF_META_AVAILABLE(cam_ae_exif_debug_t, ae_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_AE, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->ae_debug_params = *ae_exif_debug_params;
            mExifParams.debug_params->ae_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_awb_exif_debug_t,awb_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_AWB, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->awb_debug_params = *awb_exif_debug_params;
            mExifParams.debug_params->awb_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_af_exif_debug_t,af_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_AF, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->af_debug_params = *af_exif_debug_params;
            mExifParams.debug_params->af_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_asd_exif_debug_t, asd_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_ASD, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->asd_debug_params = *asd_exif_debug_params;
            mExifParams.debug_params->asd_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_stats_buffer_exif_debug_t,stats_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_STATS, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->stats_debug_params = *stats_exif_debug_params;
            mExifParams.debug_params->stats_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_bestats_buffer_exif_debug_t,bestats_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_BESTATS, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->bestats_debug_params = *bestats_exif_debug_params;
            mExifParams.debug_params->bestats_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_bhist_buffer_exif_debug_t, bhist_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_BHIST, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->bhist_debug_params = *bhist_exif_debug_params;
            mExifParams.debug_params->bhist_debug_params_valid = TRUE;
        }
    }
    IF_META_AVAILABLE(cam_q3a_tuning_info_t, q3a_tuning_exif_debug_params,
            CAM_INTF_META_EXIF_DEBUG_3A_TUNING, metadata) {
        if (mExifParams.debug_params) {
            mExifParams.debug_params->q3a_tuning_debug_params = *q3a_tuning_exif_debug_params;
            mExifParams.debug_params->q3a_tuning_debug_params_valid = TRUE;
        }
    }
}

/*===========================================================================
 * FUNCTION   : get3AExifParams
 *
 * DESCRIPTION: returns the cached EXIF parameters (see saveExifParams)
 *
 * PARAMETERS : none
 *
 *
 * RETURN     : mm_jpeg_exif_params_t
 *
 *==========================================================================*/
mm_jpeg_exif_params_t QCamera3HardwareInterface::get3AExifParams()
{
    return mExifParams;
}

/*===========================================================================
 * FUNCTION   : translateCbUrgentMetadataToResultMetadata
 *
 * DESCRIPTION: translates the "urgent" (partial) HAL metadata callback into
 *              framework result metadata (3A states, triggers and modes)
 *
 * PARAMETERS :
 *   @metadata : metadata information from callback
 *
 * RETURN     : camera_metadata_t*
 *              metadata in a format specified by fwk
 *==========================================================================*/
camera_metadata_t*
QCamera3HardwareInterface::translateCbUrgentMetadataToResultMetadata
                                (metadata_buffer_t *metadata)
{
    CameraMetadata camMetadata;
    camera_metadata_t *resultMetadata;


    IF_META_AVAILABLE(uint32_t, whiteBalanceState, CAM_INTF_META_AWB_STATE, metadata) {
        uint8_t fwk_whiteBalanceState = (uint8_t) *whiteBalanceState;
        camMetadata.update(ANDROID_CONTROL_AWB_STATE, &fwk_whiteBalanceState, 1);
        LOGD("urgent Metadata : ANDROID_CONTROL_AWB_STATE %u", *whiteBalanceState);
    }

    IF_META_AVAILABLE(cam_trigger_t, aecTrigger, CAM_INTF_META_AEC_PRECAPTURE_TRIGGER, metadata) {
        camMetadata.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
                &aecTrigger->trigger, 1);
        camMetadata.update(ANDROID_CONTROL_AE_PRECAPTURE_ID,
                &aecTrigger->trigger_id, 1);
        LOGD("urgent Metadata : CAM_INTF_META_AEC_PRECAPTURE_TRIGGER: %d",
                aecTrigger->trigger);
        LOGD("urgent Metadata : ANDROID_CONTROL_AE_PRECAPTURE_ID: %d",
                aecTrigger->trigger_id);
    }

    IF_META_AVAILABLE(uint32_t, ae_state, CAM_INTF_META_AEC_STATE, metadata) {
        uint8_t fwk_ae_state = (uint8_t) *ae_state;
        camMetadata.update(ANDROID_CONTROL_AE_STATE, &fwk_ae_state, 1);
        LOGD("urgent Metadata : ANDROID_CONTROL_AE_STATE %u", *ae_state);
    }

    IF_META_AVAILABLE(uint32_t, focusMode, CAM_INTF_PARM_FOCUS_MODE, metadata) {
        int val = lookupFwkName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP), *focusMode);
        if (NAME_NOT_FOUND != val) {
            uint8_t fwkAfMode = (uint8_t)val;
            camMetadata.update(ANDROID_CONTROL_AF_MODE, &fwkAfMode, 1);
            LOGD("urgent Metadata : ANDROID_CONTROL_AF_MODE %d", val);
        } else {
            LOGH("urgent Metadata not found : ANDROID_CONTROL_AF_MODE %d",
                    val);
        }
    }

    IF_META_AVAILABLE(cam_trigger_t, af_trigger, CAM_INTF_META_AF_TRIGGER, metadata) {
        camMetadata.update(ANDROID_CONTROL_AF_TRIGGER,
                &af_trigger->trigger, 1);
        LOGD("urgent Metadata : CAM_INTF_META_AF_TRIGGER = %d",
                af_trigger->trigger);
        camMetadata.update(ANDROID_CONTROL_AF_TRIGGER_ID, &af_trigger->trigger_id, 1);
        LOGD("urgent Metadata : ANDROID_CONTROL_AF_TRIGGER_ID = %d",
                af_trigger->trigger_id);
    }

    IF_META_AVAILABLE(int32_t, whiteBalance, CAM_INTF_PARM_WHITE_BALANCE, metadata) {
        int val = lookupFwkName(WHITE_BALANCE_MODES_MAP,
                METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP), *whiteBalance);
        if (NAME_NOT_FOUND != val) {
            uint8_t fwkWhiteBalanceMode = (uint8_t)val;
            camMetadata.update(ANDROID_CONTROL_AWB_MODE, &fwkWhiteBalanceMode, 1);
            LOGD("urgent Metadata : ANDROID_CONTROL_AWB_MODE %d", val);
        } else {
            LOGH("urgent Metadata not found : ANDROID_CONTROL_AWB_MODE");
        }
    }

    // ANDROID_CONTROL_AE_MODE is not reported directly by the HAL; it is
    // deduced from the red-eye, LED flash and AE mode entries below, in
    // that priority order.
    uint8_t fwk_aeMode = ANDROID_CONTROL_AE_MODE_OFF;
    uint32_t aeMode = CAM_AE_MODE_MAX;
    int32_t flashMode = CAM_FLASH_MODE_MAX;
    int32_t redeye = -1;
    IF_META_AVAILABLE(uint32_t, pAeMode, CAM_INTF_META_AEC_MODE, metadata) {
        aeMode = *pAeMode;
    }
    IF_META_AVAILABLE(int32_t, pFlashMode, CAM_INTF_PARM_LED_MODE, metadata) {
        flashMode = *pFlashMode;
    }
    IF_META_AVAILABLE(int32_t, pRedeye, CAM_INTF_PARM_REDEYE_REDUCTION, metadata) {
        redeye = *pRedeye;
    }

    if (1 == redeye) {
        fwk_aeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE;
        camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
    } else if ((CAM_FLASH_MODE_AUTO == flashMode) || (CAM_FLASH_MODE_ON == flashMode)) {
        int val = lookupFwkName(AE_FLASH_MODE_MAP, METADATA_MAP_SIZE(AE_FLASH_MODE_MAP),
                flashMode);
        if (NAME_NOT_FOUND != val) {
            fwk_aeMode = (uint8_t)val;
            camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
        } else {
            LOGE("Unsupported flash mode %d", flashMode);
        }
    } else if (aeMode == CAM_AE_MODE_ON) {
        fwk_aeMode = ANDROID_CONTROL_AE_MODE_ON;
        camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
    } else if (aeMode == CAM_AE_MODE_OFF) {
        fwk_aeMode = ANDROID_CONTROL_AE_MODE_OFF;
        camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
    } else {
        LOGE("Not enough info to deduce ANDROID_CONTROL_AE_MODE redeye:%d, "
              "flashMode:%d, aeMode:%u!!!",
                redeye, flashMode, aeMode);
    }

    resultMetadata = camMetadata.release();
    return resultMetadata;
}

/*===========================================================================
 * FUNCTION   : dumpMetadataToFile
 *
 * DESCRIPTION: Dumps tuning metadata to file system
 *
 * PARAMETERS :
 *   @meta : tuning metadata
 *   @dumpFrameCount
: current dump frame count
 *   @enabled        : Enable mask
 *
 *==========================================================================*/
void QCamera3HardwareInterface::dumpMetadataToFile(tuning_params_t &meta,
                                                   uint32_t &dumpFrameCount,
                                                   bool enabled,
                                                   const char *type,
                                                   uint32_t frameNumber)
{
    // Some sanity checks: refuse to dump if any section claims to be larger
    // than the fixed-size buffers in tuning_params_t.
    if (meta.tuning_sensor_data_size > TUNING_SENSOR_DATA_MAX) {
        LOGE("Tuning sensor data size bigger than expected %d: %d",
              meta.tuning_sensor_data_size,
              TUNING_SENSOR_DATA_MAX);
        return;
    }

    if (meta.tuning_vfe_data_size > TUNING_VFE_DATA_MAX) {
        LOGE("Tuning VFE data size bigger than expected %d: %d",
              meta.tuning_vfe_data_size,
              TUNING_VFE_DATA_MAX);
        return;
    }

    if (meta.tuning_cpp_data_size > TUNING_CPP_DATA_MAX) {
        LOGE("Tuning CPP data size bigger than expected %d: %d",
              meta.tuning_cpp_data_size,
              TUNING_CPP_DATA_MAX);
        return;
    }

    if (meta.tuning_cac_data_size > TUNING_CAC_DATA_MAX) {
        LOGE("Tuning CAC data size bigger than expected %d: %d",
              meta.tuning_cac_data_size,
              TUNING_CAC_DATA_MAX);
        return;
    }
    //

    if(enabled){
        char timeBuf[FILENAME_MAX];
        char buf[FILENAME_MAX];
        memset(buf, 0, sizeof(buf));
        memset(timeBuf, 0, sizeof(timeBuf));
        time_t current_time;
        struct tm * timeinfo;
        // BUGFIX: these two calls were corrupted to "time (¤t_time)" —
        // HTML-entity mojibake of "&current_time" — which does not compile.
        // Restored the address-of expressions.
        time (&current_time);
        timeinfo = localtime (&current_time);
        if (timeinfo != NULL) {
            // A NULL timeinfo leaves timeBuf empty; the dump then lands in
            // the current directory rather than QCAMERA_DUMP_FRM_LOCATION.
            strftime (timeBuf, sizeof(timeBuf),
                    QCAMERA_DUMP_FRM_LOCATION"%Y%m%d%H%M%S", timeinfo);
        }
        String8 filePath(timeBuf);
        snprintf(buf,
                sizeof(buf),
                "%dm_%s_%d.bin",
                dumpFrameCount,
                type,
                frameNumber);
        filePath.append(buf);
        int file_fd = open(filePath.string(), O_RDWR | O_CREAT, 0777);
        if (file_fd >= 0) {
            // File layout: version word, five section-size words, then the
            // sensor/VFE/CPP/CAC payloads at their fixed offsets in meta.data.
            ssize_t written_len = 0;
            meta.tuning_data_version = TUNING_DATA_VERSION;
            void *data = (void *)((uint8_t *)&meta.tuning_data_version);
            written_len += write(file_fd, data, sizeof(uint32_t));
            data = (void *)((uint8_t *)&meta.tuning_sensor_data_size);
            LOGD("tuning_sensor_data_size %d",(int)(*(int *)data));
            written_len += write(file_fd, data, sizeof(uint32_t));
            data = (void *)((uint8_t *)&meta.tuning_vfe_data_size);
            LOGD("tuning_vfe_data_size %d",(int)(*(int *)data));
            written_len += write(file_fd, data, sizeof(uint32_t));
            data = (void *)((uint8_t *)&meta.tuning_cpp_data_size);
            LOGD("tuning_cpp_data_size %d",(int)(*(int *)data));
            written_len += write(file_fd, data, sizeof(uint32_t));
            data = (void *)((uint8_t *)&meta.tuning_cac_data_size);
            LOGD("tuning_cac_data_size %d",(int)(*(int *)data));
            written_len += write(file_fd, data, sizeof(uint32_t));
            meta.tuning_mod3_data_size = 0;
            data = (void *)((uint8_t *)&meta.tuning_mod3_data_size);
            LOGD("tuning_mod3_data_size %d",(int)(*(int *)data));
            written_len += write(file_fd, data, sizeof(uint32_t));
            size_t total_size = meta.tuning_sensor_data_size;
            data = (void *)((uint8_t *)&meta.data);
            written_len += write(file_fd, data, total_size);
            total_size = meta.tuning_vfe_data_size;
            data = (void *)((uint8_t *)&meta.data[TUNING_VFE_DATA_OFFSET]);
            written_len += write(file_fd, data, total_size);
            total_size = meta.tuning_cpp_data_size;
            data = (void *)((uint8_t *)&meta.data[TUNING_CPP_DATA_OFFSET]);
            written_len += write(file_fd, data, total_size);
            total_size = meta.tuning_cac_data_size;
            data = (void *)((uint8_t *)&meta.data[TUNING_CAC_DATA_OFFSET]);
            written_len += write(file_fd, data, total_size);
            close(file_fd);
        }else {
            LOGE("fail to open file for metadata dumping");
        }
    }
}

/*===========================================================================
 * FUNCTION   : cleanAndSortStreamInfo
 *
 * DESCRIPTION: helper method to clean up invalid streams in stream_info,
 *              and sort them such that raw stream is at the end of the list
 *              This is a workaround for camera daemon constraint.
 *
 * PARAMETERS : None
 *
 *==========================================================================*/
void QCamera3HardwareInterface::cleanAndSortStreamInfo()
{
    List<stream_info_t *> newStreamInfo;

    /*clean up invalid streams*/
    for (List<stream_info_t*>::iterator it=mStreamInfo.begin();
            it != mStreamInfo.end();) {
        if(((*it)->status) == INVALID){
            // An invalid stream still owns a QCamera3Channel via
            // stream->priv; release channel and record before unlinking.
            QCamera3Channel *channel = (QCamera3Channel*)(*it)->stream->priv;
            delete channel;
            free(*it);
            it = mStreamInfo.erase(it);
        } else {
            it++;
        }
    }

    // Move preview/video/callback/snapshot streams into newList
    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end();) {
        if ((*it)->stream->format != HAL_PIXEL_FORMAT_RAW_OPAQUE &&
                (*it)->stream->format != HAL_PIXEL_FORMAT_RAW10 &&
                (*it)->stream->format != HAL_PIXEL_FORMAT_RAW16) {
            newStreamInfo.push_back(*it);
            it = mStreamInfo.erase(it);
        } else
            it++;
    }
    // Move raw streams into newList
    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
            it != mStreamInfo.end();) {
        newStreamInfo.push_back(*it);
        it = mStreamInfo.erase(it);
    }

    mStreamInfo = newStreamInfo;
}

/*===========================================================================
 * FUNCTION   : extractJpegMetadata
 *
 * DESCRIPTION: helper method to extract Jpeg metadata from capture request.
 *              JPEG metadata is cached in HAL, and return as part of capture
 *              result when metadata is returned from camera daemon.
 *
 * PARAMETERS : @jpegMetadata: jpeg metadata to be extracted
 *              @request:      capture request
 *
 *==========================================================================*/
void QCamera3HardwareInterface::extractJpegMetadata(
        CameraMetadata& jpegMetadata,
        const camera3_capture_request_t *request)
{
    CameraMetadata frame_settings;
    frame_settings = request->settings;

    if (frame_settings.exists(ANDROID_JPEG_GPS_COORDINATES))
        jpegMetadata.update(ANDROID_JPEG_GPS_COORDINATES,
                frame_settings.find(ANDROID_JPEG_GPS_COORDINATES).data.d,
                frame_settings.find(ANDROID_JPEG_GPS_COORDINATES).count);

    if (frame_settings.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD))
        jpegMetadata.update(ANDROID_JPEG_GPS_PROCESSING_METHOD,
                frame_settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).data.u8,
                frame_settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).count);

    if (frame_settings.exists(ANDROID_JPEG_GPS_TIMESTAMP))
        jpegMetadata.update(ANDROID_JPEG_GPS_TIMESTAMP,
                frame_settings.find(ANDROID_JPEG_GPS_TIMESTAMP).data.i64,
                frame_settings.find(ANDROID_JPEG_GPS_TIMESTAMP).count);

    if (frame_settings.exists(ANDROID_JPEG_ORIENTATION))
        jpegMetadata.update(ANDROID_JPEG_ORIENTATION,
                frame_settings.find(ANDROID_JPEG_ORIENTATION).data.i32,
                frame_settings.find(ANDROID_JPEG_ORIENTATION).count);

    if (frame_settings.exists(ANDROID_JPEG_QUALITY))
        jpegMetadata.update(ANDROID_JPEG_QUALITY,
                frame_settings.find(ANDROID_JPEG_QUALITY).data.u8,
                frame_settings.find(ANDROID_JPEG_QUALITY).count);

    if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_QUALITY))
        jpegMetadata.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
                frame_settings.find(ANDROID_JPEG_THUMBNAIL_QUALITY).data.u8,
                frame_settings.find(ANDROID_JPEG_THUMBNAIL_QUALITY).count);

    if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
        int32_t thumbnail_size[2];
        thumbnail_size[0] = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0];
        thumbnail_size[1] = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1];
        if (frame_settings.exists(ANDROID_JPEG_ORIENTATION)) {
            int32_t orientation =
                  frame_settings.find(ANDROID_JPEG_ORIENTATION).data.i32[0];
            if ((orientation == 90) || (orientation == 270)) {
               //swap thumbnail dimensions for rotations 90 and 270 in jpeg metadata.
               int32_t temp;
               temp = thumbnail_size[0];
               thumbnail_size[0] = thumbnail_size[1];
               thumbnail_size[1] = temp;
            }
        }
        jpegMetadata.update(ANDROID_JPEG_THUMBNAIL_SIZE,
                thumbnail_size,
                frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).count);
    }

}

/*===========================================================================
 * FUNCTION   : convertToRegions
 *
 * DESCRIPTION: helper method to convert from cam_rect_t into int32_t array
 *
 * PARAMETERS :
 *   @rect   : cam_rect_t struct to convert
 *   @region : int32_t destination array
 *   @weight : if we are converting from cam_area_t, weight is valid
 *             else weight = -1
 *
 *==========================================================================*/
void QCamera3HardwareInterface::convertToRegions(cam_rect_t rect,
        int32_t *region, int weight)
{
    // Framework region layout: [xmin, ymin, xmax, ymax, (weight)].
    region[0] = rect.left;
    region[1] = rect.top;
    region[2] = rect.left + rect.width;
    region[3] = rect.top + rect.height;
    if (weight > -1) {
        region[4] = weight;
    }
}

/*===========================================================================
 * FUNCTION   : convertFromRegions
 *
 * DESCRIPTION: helper method to convert from array to cam_rect_t
 *
 * PARAMETERS :
 *   @rect   : cam_rect_t struct to convert
 *   @region : int32_t destination array
 *   @weight : if we are converting from cam_area_t, weight is valid
 *             else weight = -1
 *
 *==========================================================================*/
void QCamera3HardwareInterface::convertFromRegions(cam_area_t &roi,
        const camera_metadata_t *settings, uint32_t tag)
{
    CameraMetadata frame_settings;
    frame_settings = settings;
    int32_t x_min =
frame_settings.find(tag).data.i32[0];
    int32_t y_min = frame_settings.find(tag).data.i32[1];
    int32_t x_max = frame_settings.find(tag).data.i32[2];
    int32_t y_max = frame_settings.find(tag).data.i32[3];
    roi.weight = frame_settings.find(tag).data.i32[4];
    // Framework regions are [xmin, ymin, xmax, ymax, weight]; the HAL side
    // uses left/top/width/height, so convert the max coords to extents.
    roi.rect.left = x_min;
    roi.rect.top = y_min;
    roi.rect.width = x_max - x_min;
    roi.rect.height = y_max - y_min;
}

/*===========================================================================
 * FUNCTION   : resetIfNeededROI
 *
 * DESCRIPTION: helper method to reset the roi if it is greater than scaler
 *              crop region
 *
 * PARAMETERS :
 *   @roi              : cam_area_t struct to resize
 *   @scalerCropRegion : cam_crop_region_t region to compare against
 *
 *
 *==========================================================================*/
bool QCamera3HardwareInterface::resetIfNeededROI(cam_area_t* roi,
        const cam_crop_region_t* scalerCropRegion)
{
    int32_t roi_x_max = roi->rect.width + roi->rect.left;
    int32_t roi_y_max = roi->rect.height + roi->rect.top;
    int32_t crop_x_max = scalerCropRegion->width + scalerCropRegion->left;
    int32_t crop_y_max = scalerCropRegion->height + scalerCropRegion->top;

    /* According to spec weight = 0 is used to indicate roi needs to be disabled
     * without having this check the calculations below to validate if the roi
     * is inside scalar crop region will fail resulting in the roi not being
     * reset causing algorithm to continue to use stale roi window
     */
    if (roi->weight == 0) {
        return true;
    }

    // Reject (return false) when the ROI lies entirely outside the crop
    // region; otherwise clamp it to the crop region below.
    if ((roi_x_max < scalerCropRegion->left) ||
        // right edge of roi window is left of scalar crop's left edge
        (roi_y_max < scalerCropRegion->top)  ||
        // bottom edge of roi window is above scalar crop's top edge
        (roi->rect.left > crop_x_max) ||
        // left edge of roi window is beyond(right) of scalar crop's right edge
        (roi->rect.top > crop_y_max)){
        // top edge of roi windo is above scalar crop's top edge
        return false;
    }
    if (roi->rect.left <
scalerCropRegion->left) { + roi->rect.left = scalerCropRegion->left; + } + if (roi->rect.top < scalerCropRegion->top) { + roi->rect.top = scalerCropRegion->top; + } + if (roi_x_max > crop_x_max) { + roi_x_max = crop_x_max; + } + if (roi_y_max > crop_y_max) { + roi_y_max = crop_y_max; + } + roi->rect.width = roi_x_max - roi->rect.left; + roi->rect.height = roi_y_max - roi->rect.top; + return true; +} + +/*=========================================================================== + * FUNCTION : convertLandmarks + * + * DESCRIPTION: helper method to extract the landmarks from face detection info + * + * PARAMETERS : + * @landmark_data : input landmark data to be converted + * @landmarks : int32_t destination array + * + * + *==========================================================================*/ +void QCamera3HardwareInterface::convertLandmarks( + cam_face_landmarks_info_t landmark_data, + int32_t *landmarks) +{ + landmarks[0] = (int32_t)landmark_data.left_eye_center.x; + landmarks[1] = (int32_t)landmark_data.left_eye_center.y; + landmarks[2] = (int32_t)landmark_data.right_eye_center.x; + landmarks[3] = (int32_t)landmark_data.right_eye_center.y; + landmarks[4] = (int32_t)landmark_data.mouth_center.x; + landmarks[5] = (int32_t)landmark_data.mouth_center.y; +} + +#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX ) +/*=========================================================================== + * FUNCTION : initCapabilities + * + * DESCRIPTION: initialize camera capabilities in static data struct + * + * PARAMETERS : + * @cameraId : camera Id + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HardwareInterface::initCapabilities(uint32_t cameraId) +{ + int rc = 0; + mm_camera_vtbl_t *cameraHandle = NULL; + QCamera3HeapMemory *capabilityHeap = NULL; + + rc = camera_open((uint8_t)cameraId, &cameraHandle); + if (rc) { + 
LOGE("camera_open failed. rc = %d", rc); + goto open_failed; + } + if (!cameraHandle) { + LOGE("camera_open failed. cameraHandle = %p", cameraHandle); + goto open_failed; + } + + capabilityHeap = new QCamera3HeapMemory(1); + if (capabilityHeap == NULL) { + LOGE("creation of capabilityHeap failed"); + goto heap_creation_failed; + } + /* Allocate memory for capability buffer */ + rc = capabilityHeap->allocate(sizeof(cam_capability_t)); + if(rc != OK) { + LOGE("No memory for cappability"); + goto allocate_failed; + } + + /* Map memory for capability buffer */ + memset(DATA_PTR(capabilityHeap,0), 0, sizeof(cam_capability_t)); + rc = cameraHandle->ops->map_buf(cameraHandle->camera_handle, + CAM_MAPPING_BUF_TYPE_CAPABILITY, + capabilityHeap->getFd(0), + sizeof(cam_capability_t)); + if(rc < 0) { + LOGE("failed to map capability buffer"); + goto map_failed; + } + + /* Query Capability */ + rc = cameraHandle->ops->query_capability(cameraHandle->camera_handle); + if(rc < 0) { + LOGE("failed to query capability"); + goto query_failed; + } + gCamCapability[cameraId] = (cam_capability_t *)malloc(sizeof(cam_capability_t)); + if (!gCamCapability[cameraId]) { + LOGE("out of memory"); + goto query_failed; + } + memcpy(gCamCapability[cameraId], DATA_PTR(capabilityHeap,0), + sizeof(cam_capability_t)); + + int index; + for (index = 0; index < CAM_ANALYSIS_INFO_MAX; index++) { + cam_analysis_info_t *p_analysis_info = + &gCamCapability[cameraId]->analysis_info[index]; + p_analysis_info->analysis_padding_info.offset_info.offset_x = 0; + p_analysis_info->analysis_padding_info.offset_info.offset_y = 0; + } + rc = 0; + +query_failed: + cameraHandle->ops->unmap_buf(cameraHandle->camera_handle, + CAM_MAPPING_BUF_TYPE_CAPABILITY); +map_failed: + capabilityHeap->deallocate(); +allocate_failed: + delete capabilityHeap; +heap_creation_failed: + cameraHandle->ops->close_camera(cameraHandle->camera_handle); + cameraHandle = NULL; +open_failed: + return rc; +} + 
+/*========================================================================== + * FUNCTION : get3Aversion + * + * DESCRIPTION: get the Q3A S/W version + * + * PARAMETERS : + * @sw_version: Reference of Q3A structure which will hold version info upon + * return + * + * RETURN : None + * + *==========================================================================*/ +void QCamera3HardwareInterface::get3AVersion(cam_q3a_version_t &sw_version) +{ + if(gCamCapability[mCameraId]) + sw_version = gCamCapability[mCameraId]->q3a_version; + else + LOGE("Capability structure NULL!"); +} + + +/*=========================================================================== + * FUNCTION : initParameters + * + * DESCRIPTION: initialize camera parameters + * + * PARAMETERS : + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HardwareInterface::initParameters() +{ + int rc = 0; + + //Allocate Set Param Buffer + mParamHeap = new QCamera3HeapMemory(1); + rc = mParamHeap->allocate(sizeof(metadata_buffer_t)); + if(rc != OK) { + rc = NO_MEMORY; + LOGE("Failed to allocate SETPARM Heap memory"); + delete mParamHeap; + mParamHeap = NULL; + return rc; + } + + //Map memory for parameters buffer + rc = mCameraHandle->ops->map_buf(mCameraHandle->camera_handle, + CAM_MAPPING_BUF_TYPE_PARM_BUF, + mParamHeap->getFd(0), + sizeof(metadata_buffer_t)); + if(rc < 0) { + LOGE("failed to map SETPARM buffer"); + rc = FAILED_TRANSACTION; + mParamHeap->deallocate(); + delete mParamHeap; + mParamHeap = NULL; + return rc; + } + + mParameters = (metadata_buffer_t *) DATA_PTR(mParamHeap,0); + + mPrevParameters = (metadata_buffer_t *)malloc(sizeof(metadata_buffer_t)); + return rc; +} + +/*=========================================================================== + * FUNCTION : deinitParameters + * + * DESCRIPTION: de-initialize camera parameters + * + * PARAMETERS : + * + * RETURN : 
NONE + *==========================================================================*/ +void QCamera3HardwareInterface::deinitParameters() +{ + mCameraHandle->ops->unmap_buf(mCameraHandle->camera_handle, + CAM_MAPPING_BUF_TYPE_PARM_BUF); + + mParamHeap->deallocate(); + delete mParamHeap; + mParamHeap = NULL; + + mParameters = NULL; + + free(mPrevParameters); + mPrevParameters = NULL; +} + +/*=========================================================================== + * FUNCTION : calcMaxJpegSize + * + * DESCRIPTION: Calculates maximum jpeg size supported by the cameraId + * + * PARAMETERS : + * + * RETURN : max_jpeg_size + *==========================================================================*/ +size_t QCamera3HardwareInterface::calcMaxJpegSize(uint32_t camera_id) +{ + size_t max_jpeg_size = 0; + size_t temp_width, temp_height; + size_t count = MIN(gCamCapability[camera_id]->picture_sizes_tbl_cnt, + MAX_SIZES_CNT); + for (size_t i = 0; i < count; i++) { + temp_width = (size_t)gCamCapability[camera_id]->picture_sizes_tbl[i].width; + temp_height = (size_t)gCamCapability[camera_id]->picture_sizes_tbl[i].height; + if (temp_width * temp_height > max_jpeg_size ) { + max_jpeg_size = temp_width * temp_height; + } + } + max_jpeg_size = max_jpeg_size * 3/2 + sizeof(camera3_jpeg_blob_t); + return max_jpeg_size; +} + +/*=========================================================================== + * FUNCTION : getMaxRawSize + * + * DESCRIPTION: Fetches maximum raw size supported by the cameraId + * + * PARAMETERS : + * + * RETURN : Largest supported Raw Dimension + *==========================================================================*/ +cam_dimension_t QCamera3HardwareInterface::getMaxRawSize(uint32_t camera_id) +{ + int max_width = 0; + cam_dimension_t maxRawSize; + + memset(&maxRawSize, 0, sizeof(cam_dimension_t)); + for (size_t i = 0; i < gCamCapability[camera_id]->supported_raw_dim_cnt; i++) { + if (max_width < gCamCapability[camera_id]->raw_dim[i].width) { + 
max_width = gCamCapability[camera_id]->raw_dim[i].width; + maxRawSize = gCamCapability[camera_id]->raw_dim[i]; + } + } + return maxRawSize; +} + + +/*=========================================================================== + * FUNCTION : calcMaxJpegDim + * + * DESCRIPTION: Calculates maximum jpeg dimension supported by the cameraId + * + * PARAMETERS : + * + * RETURN : max_jpeg_dim + *==========================================================================*/ +cam_dimension_t QCamera3HardwareInterface::calcMaxJpegDim() +{ + cam_dimension_t max_jpeg_dim; + cam_dimension_t curr_jpeg_dim; + max_jpeg_dim.width = 0; + max_jpeg_dim.height = 0; + curr_jpeg_dim.width = 0; + curr_jpeg_dim.height = 0; + for (size_t i = 0; i < gCamCapability[mCameraId]->picture_sizes_tbl_cnt; i++) { + curr_jpeg_dim.width = gCamCapability[mCameraId]->picture_sizes_tbl[i].width; + curr_jpeg_dim.height = gCamCapability[mCameraId]->picture_sizes_tbl[i].height; + if (curr_jpeg_dim.width * curr_jpeg_dim.height > + max_jpeg_dim.width * max_jpeg_dim.height ) { + max_jpeg_dim.width = curr_jpeg_dim.width; + max_jpeg_dim.height = curr_jpeg_dim.height; + } + } + return max_jpeg_dim; +} + +/*=========================================================================== + * FUNCTION : addStreamConfig + * + * DESCRIPTION: adds the stream configuration to the array + * + * PARAMETERS : + * @available_stream_configs : pointer to stream configuration array + * @scalar_format : scalar format + * @dim : configuration dimension + * @config_type : input or output configuration type + * + * RETURN : NONE + *==========================================================================*/ +void QCamera3HardwareInterface::addStreamConfig(Vector<int32_t> &available_stream_configs, + int32_t scalar_format, const cam_dimension_t &dim, int32_t config_type) +{ + available_stream_configs.add(scalar_format); + available_stream_configs.add(dim.width); + available_stream_configs.add(dim.height); + 
available_stream_configs.add(config_type); +} + +/*=========================================================================== + * FUNCTION : suppportBurstCapture + * + * DESCRIPTION: Whether a particular camera supports BURST_CAPTURE + * + * PARAMETERS : + * @cameraId : camera Id + * + * RETURN : true if camera supports BURST_CAPTURE + * false otherwise + *==========================================================================*/ +bool QCamera3HardwareInterface::supportBurstCapture(uint32_t cameraId) +{ + const int64_t highResDurationBound = 50000000; // 50 ms, 20 fps + const int64_t fullResDurationBound = 100000000; // 100 ms, 10 fps + const int32_t highResWidth = 3264; + const int32_t highResHeight = 2448; + + if (gCamCapability[cameraId]->picture_min_duration[0] > fullResDurationBound) { + // Maximum resolution images cannot be captured at >= 10fps + // -> not supporting BURST_CAPTURE + return false; + } + + if (gCamCapability[cameraId]->picture_min_duration[0] <= highResDurationBound) { + // Maximum resolution images can be captured at >= 20fps + // --> supporting BURST_CAPTURE + return true; + } + + // Find the smallest highRes resolution, or largest resolution if there is none + size_t totalCnt = MIN(gCamCapability[cameraId]->picture_sizes_tbl_cnt, + MAX_SIZES_CNT); + size_t highRes = 0; + while ((highRes + 1 < totalCnt) && + (gCamCapability[cameraId]->picture_sizes_tbl[highRes+1].width * + gCamCapability[cameraId]->picture_sizes_tbl[highRes+1].height >= + highResWidth * highResHeight)) { + highRes++; + } + if (gCamCapability[cameraId]->picture_min_duration[highRes] <= highResDurationBound) { + return true; + } else { + return false; + } +} + +/*=========================================================================== + * FUNCTION : initStaticMetadata + * + * DESCRIPTION: initialize the static metadata + * + * PARAMETERS : + * @cameraId : camera Id + * + * RETURN : int32_t type of status + * 0 -- success + * non-zero failure code + 
*==========================================================================*/ +int QCamera3HardwareInterface::initStaticMetadata(uint32_t cameraId) +{ + int rc = 0; + CameraMetadata staticInfo; + size_t count = 0; + bool limitedDevice = false; + char prop[PROPERTY_VALUE_MAX]; + bool supportBurst = false; + + supportBurst = supportBurstCapture(cameraId); + + /* If sensor is YUV sensor (no raw support) or if per-frame control is not + * guaranteed or if min fps of max resolution is less than 20 fps, its + * advertised as limited device*/ + limitedDevice = gCamCapability[cameraId]->no_per_frame_control_support || + (CAM_SENSOR_YUV == gCamCapability[cameraId]->sensor_type.sens_type) || + (CAM_SENSOR_MONO == gCamCapability[cameraId]->sensor_type.sens_type) || + !supportBurst; + + uint8_t supportedHwLvl = limitedDevice ? + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED : + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL; + + staticInfo.update(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL, + &supportedHwLvl, 1); + + bool facingBack = false; + if ((gCamCapability[cameraId]->position == CAM_POSITION_BACK) || + (gCamCapability[cameraId]->position == CAM_POSITION_BACK_AUX)) { + facingBack = true; + } + /*HAL 3 only*/ + staticInfo.update(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, + &gCamCapability[cameraId]->min_focus_distance, 1); + + staticInfo.update(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, + &gCamCapability[cameraId]->hyper_focal_distance, 1); + + /*should be using focal lengths but sensor doesn't provide that info now*/ + staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, + &gCamCapability[cameraId]->focal_length, + 1); + + staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_APERTURES, + gCamCapability[cameraId]->apertures, + MIN(CAM_APERTURES_MAX, gCamCapability[cameraId]->apertures_count)); + + staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES, + gCamCapability[cameraId]->filter_densities, + MIN(CAM_FILTER_DENSITIES_MAX, 
gCamCapability[cameraId]->filter_densities_count)); + + + staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION, + (uint8_t *)gCamCapability[cameraId]->optical_stab_modes, + MIN((size_t)CAM_OPT_STAB_MAX, gCamCapability[cameraId]->optical_stab_modes_count)); + + int32_t lens_shading_map_size[] = { + MIN(CAM_MAX_SHADING_MAP_WIDTH, gCamCapability[cameraId]->lens_shading_map_size.width), + MIN(CAM_MAX_SHADING_MAP_HEIGHT, gCamCapability[cameraId]->lens_shading_map_size.height)}; + staticInfo.update(ANDROID_LENS_INFO_SHADING_MAP_SIZE, + lens_shading_map_size, + sizeof(lens_shading_map_size)/sizeof(int32_t)); + + staticInfo.update(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, + gCamCapability[cameraId]->sensor_physical_size, SENSOR_PHYSICAL_SIZE_CNT); + + staticInfo.update(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, + gCamCapability[cameraId]->exposure_time_range, EXPOSURE_TIME_RANGE_CNT); + + staticInfo.update(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, + &gCamCapability[cameraId]->max_frame_duration, 1); + + camera_metadata_rational baseGainFactor = { + gCamCapability[cameraId]->base_gain_factor.numerator, + gCamCapability[cameraId]->base_gain_factor.denominator}; + staticInfo.update(ANDROID_SENSOR_BASE_GAIN_FACTOR, + &baseGainFactor, 1); + + staticInfo.update(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, + (uint8_t *)&gCamCapability[cameraId]->color_arrangement, 1); + + int32_t pixel_array_size[] = {gCamCapability[cameraId]->pixel_array_size.width, + gCamCapability[cameraId]->pixel_array_size.height}; + staticInfo.update(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, + pixel_array_size, sizeof(pixel_array_size)/sizeof(pixel_array_size[0])); + + int32_t active_array_size[] = {gCamCapability[cameraId]->active_array_size.left, + gCamCapability[cameraId]->active_array_size.top, + gCamCapability[cameraId]->active_array_size.width, + gCamCapability[cameraId]->active_array_size.height}; + staticInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, + active_array_size, 
sizeof(active_array_size)/sizeof(active_array_size[0])); + + staticInfo.update(ANDROID_SENSOR_INFO_WHITE_LEVEL, + &gCamCapability[cameraId]->white_level, 1); + + staticInfo.update(ANDROID_SENSOR_BLACK_LEVEL_PATTERN, + gCamCapability[cameraId]->black_level_pattern, BLACK_LEVEL_PATTERN_CNT); + + staticInfo.update(ANDROID_FLASH_INFO_CHARGE_DURATION, + &gCamCapability[cameraId]->flash_charge_duration, 1); + + staticInfo.update(ANDROID_TONEMAP_MAX_CURVE_POINTS, + &gCamCapability[cameraId]->max_tone_map_curve_points, 1); + + uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN; + staticInfo.update(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, + ×tampSource, 1); + + staticInfo.update(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT, + &gCamCapability[cameraId]->histogram_size, 1); + + staticInfo.update(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT, + &gCamCapability[cameraId]->max_histogram_count, 1); + + int32_t sharpness_map_size[] = { + gCamCapability[cameraId]->sharpness_map_size.width, + gCamCapability[cameraId]->sharpness_map_size.height}; + + staticInfo.update(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE, + sharpness_map_size, sizeof(sharpness_map_size)/sizeof(int32_t)); + + staticInfo.update(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE, + &gCamCapability[cameraId]->max_sharpness_map_value, 1); + + int32_t scalar_formats[] = { + ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE, + ANDROID_SCALER_AVAILABLE_FORMATS_RAW16, + ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888, + ANDROID_SCALER_AVAILABLE_FORMATS_BLOB, + HAL_PIXEL_FORMAT_RAW10, + HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED}; + size_t scalar_formats_count = sizeof(scalar_formats) / sizeof(int32_t); + staticInfo.update(ANDROID_SCALER_AVAILABLE_FORMATS, + scalar_formats, + scalar_formats_count); + + int32_t available_processed_sizes[MAX_SIZES_CNT * 2]; + count = MIN(gCamCapability[cameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT); + makeTable(gCamCapability[cameraId]->picture_sizes_tbl, + count, MAX_SIZES_CNT, 
available_processed_sizes); + staticInfo.update(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES, + available_processed_sizes, count * 2); + + int32_t available_raw_sizes[MAX_SIZES_CNT * 2]; + count = MIN(gCamCapability[cameraId]->supported_raw_dim_cnt, MAX_SIZES_CNT); + makeTable(gCamCapability[cameraId]->raw_dim, + count, MAX_SIZES_CNT, available_raw_sizes); + staticInfo.update(ANDROID_SCALER_AVAILABLE_RAW_SIZES, + available_raw_sizes, count * 2); + + int32_t available_fps_ranges[MAX_SIZES_CNT * 2]; + count = MIN(gCamCapability[cameraId]->fps_ranges_tbl_cnt, MAX_SIZES_CNT); + makeFPSTable(gCamCapability[cameraId]->fps_ranges_tbl, + count, MAX_SIZES_CNT, available_fps_ranges); + staticInfo.update(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + available_fps_ranges, count * 2); + + camera_metadata_rational exposureCompensationStep = { + gCamCapability[cameraId]->exp_compensation_step.numerator, + gCamCapability[cameraId]->exp_compensation_step.denominator}; + staticInfo.update(ANDROID_CONTROL_AE_COMPENSATION_STEP, + &exposureCompensationStep, 1); + + Vector<uint8_t> availableVstabModes; + availableVstabModes.add(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF); + char eis_prop[PROPERTY_VALUE_MAX]; + memset(eis_prop, 0, sizeof(eis_prop)); + property_get("persist.camera.eis.enable", eis_prop, "0"); + uint8_t eis_prop_set = (uint8_t)atoi(eis_prop); + if (facingBack && eis_prop_set) { + availableVstabModes.add(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON); + } + staticInfo.update(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, + availableVstabModes.array(), availableVstabModes.size()); + + /*HAL 1 and HAL 3 common*/ + float maxZoom = 4; + staticInfo.update(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, + &maxZoom, 1); + + uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_CENTER_ONLY; + staticInfo.update(ANDROID_SCALER_CROPPING_TYPE, &croppingType, 1); + + int32_t max3aRegions[3] = {/*AE*/1,/*AWB*/ 0,/*AF*/ 1}; + if (gCamCapability[cameraId]->supported_focus_modes_cnt == 1) + 
max3aRegions[2] = 0; /* AF not supported */ + staticInfo.update(ANDROID_CONTROL_MAX_REGIONS, + max3aRegions, 3); + + /* 0: OFF, 1: OFF+SIMPLE, 2: OFF+FULL, 3: OFF+SIMPLE+FULL */ + memset(prop, 0, sizeof(prop)); + property_get("persist.camera.facedetect", prop, "1"); + uint8_t supportedFaceDetectMode = (uint8_t)atoi(prop); + LOGD("Support face detection mode: %d", + supportedFaceDetectMode); + + int32_t maxFaces = gCamCapability[cameraId]->max_num_roi; + /* support mode should be OFF if max number of face is 0 */ + if (maxFaces <= 0) { + supportedFaceDetectMode = 0; + } + Vector<uint8_t> availableFaceDetectModes; + availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF); + if (supportedFaceDetectMode == 1) { + availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE); + } else if (supportedFaceDetectMode == 2) { + availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_FULL); + } else if (supportedFaceDetectMode == 3) { + availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE); + availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_FULL); + } else { + maxFaces = 0; + } + staticInfo.update(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, + availableFaceDetectModes.array(), + availableFaceDetectModes.size()); + staticInfo.update(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, + (int32_t *)&maxFaces, 1); + + int32_t exposureCompensationRange[] = { + gCamCapability[cameraId]->exposure_compensation_min, + gCamCapability[cameraId]->exposure_compensation_max}; + staticInfo.update(ANDROID_CONTROL_AE_COMPENSATION_RANGE, + exposureCompensationRange, + sizeof(exposureCompensationRange)/sizeof(int32_t)); + + uint8_t lensFacing = (facingBack) ? 
+ ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT; + staticInfo.update(ANDROID_LENS_FACING, &lensFacing, 1); + + staticInfo.update(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, + available_thumbnail_sizes, + sizeof(available_thumbnail_sizes)/sizeof(int32_t)); + + /*all sizes will be clubbed into this tag*/ + count = MIN(gCamCapability[cameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT); + /*android.scaler.availableStreamConfigurations*/ + Vector<int32_t> available_stream_configs; + cam_dimension_t active_array_dim; + active_array_dim.width = gCamCapability[cameraId]->active_array_size.width; + active_array_dim.height = gCamCapability[cameraId]->active_array_size.height; + /* Add input/output stream configurations for each scalar formats*/ + for (size_t j = 0; j < scalar_formats_count; j++) { + switch (scalar_formats[j]) { + case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16: + case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE: + case HAL_PIXEL_FORMAT_RAW10: + for (size_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->supported_raw_dim_cnt); i++) { + addStreamConfig(available_stream_configs, scalar_formats[j], + gCamCapability[cameraId]->raw_dim[i], + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT); + } + break; + case HAL_PIXEL_FORMAT_BLOB: + for (size_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->picture_sizes_tbl_cnt); i++) { + addStreamConfig(available_stream_configs, scalar_formats[j], + gCamCapability[cameraId]->picture_sizes_tbl[i], + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT); + } + break; + case HAL_PIXEL_FORMAT_YCbCr_420_888: + case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED: + default: + cam_dimension_t largest_picture_size; + memset(&largest_picture_size, 0, sizeof(cam_dimension_t)); + for (size_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->picture_sizes_tbl_cnt); i++) { + addStreamConfig(available_stream_configs, scalar_formats[j], + gCamCapability[cameraId]->picture_sizes_tbl[i], + 
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT); + /* Book keep largest */ + if (gCamCapability[cameraId]->picture_sizes_tbl[i].width + >= largest_picture_size.width && + gCamCapability[cameraId]->picture_sizes_tbl[i].height + >= largest_picture_size.height) + largest_picture_size = gCamCapability[cameraId]->picture_sizes_tbl[i]; + } + /*For below 2 formats we also support i/p streams for reprocessing advertise those*/ + if (scalar_formats[j] == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED || + scalar_formats[j] == HAL_PIXEL_FORMAT_YCbCr_420_888) { + addStreamConfig(available_stream_configs, scalar_formats[j], + largest_picture_size, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT); + } + break; + } + } + + staticInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, + available_stream_configs.array(), available_stream_configs.size()); + static const uint8_t hotpixelMode = ANDROID_HOT_PIXEL_MODE_FAST; + staticInfo.update(ANDROID_HOT_PIXEL_MODE, &hotpixelMode, 1); + + static const uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF; + staticInfo.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1); + + /* android.scaler.availableMinFrameDurations */ + Vector<int64_t> available_min_durations; + for (size_t j = 0; j < scalar_formats_count; j++) { + switch (scalar_formats[j]) { + case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16: + case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE: + case HAL_PIXEL_FORMAT_RAW10: + for (size_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->supported_raw_dim_cnt); i++) { + available_min_durations.add(scalar_formats[j]); + available_min_durations.add(gCamCapability[cameraId]->raw_dim[i].width); + available_min_durations.add(gCamCapability[cameraId]->raw_dim[i].height); + available_min_durations.add(gCamCapability[cameraId]->raw_min_duration[i]); + } + break; + default: + for (size_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->picture_sizes_tbl_cnt); i++) { + 
available_min_durations.add(scalar_formats[j]); + available_min_durations.add(gCamCapability[cameraId]->picture_sizes_tbl[i].width); + available_min_durations.add(gCamCapability[cameraId]->picture_sizes_tbl[i].height); + available_min_durations.add(gCamCapability[cameraId]->picture_min_duration[i]); + } + break; + } + } + staticInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, + available_min_durations.array(), available_min_durations.size()); + + Vector<int32_t> available_hfr_configs; + for (size_t i = 0; i < gCamCapability[cameraId]->hfr_tbl_cnt; i++) { + int32_t fps = 0; + switch (gCamCapability[cameraId]->hfr_tbl[i].mode) { + case CAM_HFR_MODE_60FPS: + fps = 60; + break; + case CAM_HFR_MODE_90FPS: + fps = 90; + break; + case CAM_HFR_MODE_120FPS: + fps = 120; + break; + case CAM_HFR_MODE_150FPS: + fps = 150; + break; + case CAM_HFR_MODE_180FPS: + fps = 180; + break; + case CAM_HFR_MODE_210FPS: + fps = 210; + break; + case CAM_HFR_MODE_240FPS: + fps = 240; + break; + case CAM_HFR_MODE_480FPS: + fps = 480; + break; + case CAM_HFR_MODE_OFF: + case CAM_HFR_MODE_MAX: + default: + break; + } + + /* Advertise only MIN_FPS_FOR_BATCH_MODE or above as HIGH_SPEED_CONFIGS */ + if (fps >= MIN_FPS_FOR_BATCH_MODE) { + /* For each HFR frame rate, need to advertise one variable fps range + * and one fixed fps range per dimension. Eg: for 120 FPS, advertise [30, 120] + * and [120, 120]. While camcorder preview alone is running [30, 120] is + * set by the app. When video recording is started, [120, 120] is + * set. 
This way sensor configuration does not change when recording + * is started */ + + /* (width, height, fps_min, fps_max, batch_size_max) */ + for (size_t j = 0; j < gCamCapability[cameraId]->hfr_tbl[i].dim_cnt && + j < MAX_SIZES_CNT; j++) { + available_hfr_configs.add( + gCamCapability[cameraId]->hfr_tbl[i].dim[j].width); + available_hfr_configs.add( + gCamCapability[cameraId]->hfr_tbl[i].dim[j].height); + available_hfr_configs.add(PREVIEW_FPS_FOR_HFR); + available_hfr_configs.add(fps); + available_hfr_configs.add(fps / PREVIEW_FPS_FOR_HFR); + + /* (width, height, fps_min, fps_max, batch_size_max) */ + available_hfr_configs.add( + gCamCapability[cameraId]->hfr_tbl[i].dim[j].width); + available_hfr_configs.add( + gCamCapability[cameraId]->hfr_tbl[i].dim[j].height); + available_hfr_configs.add(fps); + available_hfr_configs.add(fps); + available_hfr_configs.add(fps / PREVIEW_FPS_FOR_HFR); + } + } + } + //Advertise HFR capability only if the property is set + memset(prop, 0, sizeof(prop)); + property_get("persist.camera.hal3hfr.enable", prop, "1"); + uint8_t hfrEnable = (uint8_t)atoi(prop); + + if(hfrEnable && available_hfr_configs.array()) { + staticInfo.update( + ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS, + available_hfr_configs.array(), available_hfr_configs.size()); + } + + int32_t max_jpeg_size = (int32_t)calcMaxJpegSize(cameraId); + staticInfo.update(ANDROID_JPEG_MAX_SIZE, + &max_jpeg_size, 1); + + uint8_t avail_effects[CAM_EFFECT_MODE_MAX]; + size_t size = 0; + count = CAM_EFFECT_MODE_MAX; + count = MIN(gCamCapability[cameraId]->supported_effects_cnt, count); + for (size_t i = 0; i < count; i++) { + int val = lookupFwkName(EFFECT_MODES_MAP, METADATA_MAP_SIZE(EFFECT_MODES_MAP), + gCamCapability[cameraId]->supported_effects[i]); + if (NAME_NOT_FOUND != val) { + avail_effects[size] = (uint8_t)val; + size++; + } + } + staticInfo.update(ANDROID_CONTROL_AVAILABLE_EFFECTS, + avail_effects, + size); + + uint8_t avail_scene_modes[CAM_SCENE_MODE_MAX]; + 
uint8_t supported_indexes[CAM_SCENE_MODE_MAX]; + size_t supported_scene_modes_cnt = 0; + count = CAM_SCENE_MODE_MAX; + count = MIN(gCamCapability[cameraId]->supported_scene_modes_cnt, count); + for (size_t i = 0; i < count; i++) { + if (gCamCapability[cameraId]->supported_scene_modes[i] != + CAM_SCENE_MODE_OFF) { + int val = lookupFwkName(SCENE_MODES_MAP, + METADATA_MAP_SIZE(SCENE_MODES_MAP), + gCamCapability[cameraId]->supported_scene_modes[i]); + if (NAME_NOT_FOUND != val) { + avail_scene_modes[supported_scene_modes_cnt] = (uint8_t)val; + supported_indexes[supported_scene_modes_cnt] = (uint8_t)i; + supported_scene_modes_cnt++; + } + } + } + staticInfo.update(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, + avail_scene_modes, + supported_scene_modes_cnt); + + uint8_t scene_mode_overrides[CAM_SCENE_MODE_MAX * 3]; + makeOverridesList(gCamCapability[cameraId]->scene_mode_overrides, + supported_scene_modes_cnt, + CAM_SCENE_MODE_MAX, + scene_mode_overrides, + supported_indexes, + cameraId); + + if (supported_scene_modes_cnt == 0) { + supported_scene_modes_cnt = 1; + avail_scene_modes[0] = ANDROID_CONTROL_SCENE_MODE_DISABLED; + } + + staticInfo.update(ANDROID_CONTROL_SCENE_MODE_OVERRIDES, + scene_mode_overrides, supported_scene_modes_cnt * 3); + + uint8_t available_control_modes[] = {ANDROID_CONTROL_MODE_OFF, + ANDROID_CONTROL_MODE_AUTO, + ANDROID_CONTROL_MODE_USE_SCENE_MODE}; + staticInfo.update(ANDROID_CONTROL_AVAILABLE_MODES, + available_control_modes, + 3); + + uint8_t avail_antibanding_modes[CAM_ANTIBANDING_MODE_MAX]; + size = 0; + count = CAM_ANTIBANDING_MODE_MAX; + count = MIN(gCamCapability[cameraId]->supported_antibandings_cnt, count); + for (size_t i = 0; i < count; i++) { + int val = lookupFwkName(ANTIBANDING_MODES_MAP, METADATA_MAP_SIZE(ANTIBANDING_MODES_MAP), + gCamCapability[cameraId]->supported_antibandings[i]); + if (NAME_NOT_FOUND != val) { + avail_antibanding_modes[size] = (uint8_t)val; + size++; + } + + } + 
staticInfo.update(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, + avail_antibanding_modes, + size); + + uint8_t avail_abberation_modes[] = { + ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF, + ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST, + ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY}; + count = CAM_COLOR_CORRECTION_ABERRATION_MAX; + count = MIN(gCamCapability[cameraId]->aberration_modes_count, count); + if (0 == count) { + // If no aberration correction modes are available for a device, this advertise OFF mode + size = 1; + } else { + // If count is not zero then atleast one among the FAST or HIGH quality is supported + // So, advertize all 3 modes if atleast any one mode is supported as per the + // new M requirement + size = 3; + } + staticInfo.update(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, + avail_abberation_modes, + size); + + uint8_t avail_af_modes[CAM_FOCUS_MODE_MAX]; + size = 0; + count = CAM_FOCUS_MODE_MAX; + count = MIN(gCamCapability[cameraId]->supported_focus_modes_cnt, count); + for (size_t i = 0; i < count; i++) { + int val = lookupFwkName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP), + gCamCapability[cameraId]->supported_focus_modes[i]); + if (NAME_NOT_FOUND != val) { + avail_af_modes[size] = (uint8_t)val; + size++; + } + } + staticInfo.update(ANDROID_CONTROL_AF_AVAILABLE_MODES, + avail_af_modes, + size); + + uint8_t avail_awb_modes[CAM_WB_MODE_MAX]; + size = 0; + count = CAM_WB_MODE_MAX; + count = MIN(gCamCapability[cameraId]->supported_white_balances_cnt, count); + for (size_t i = 0; i < count; i++) { + int val = lookupFwkName(WHITE_BALANCE_MODES_MAP, + METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP), + gCamCapability[cameraId]->supported_white_balances[i]); + if (NAME_NOT_FOUND != val) { + avail_awb_modes[size] = (uint8_t)val; + size++; + } + } + staticInfo.update(ANDROID_CONTROL_AWB_AVAILABLE_MODES, + avail_awb_modes, + size); + + uint8_t available_flash_levels[CAM_FLASH_FIRING_LEVEL_MAX]; + count = 
CAM_FLASH_FIRING_LEVEL_MAX; + count = MIN(gCamCapability[cameraId]->supported_flash_firing_level_cnt, + count); + for (size_t i = 0; i < count; i++) { + available_flash_levels[i] = + gCamCapability[cameraId]->supported_firing_levels[i]; + } + staticInfo.update(ANDROID_FLASH_FIRING_POWER, + available_flash_levels, count); + + uint8_t flashAvailable; + if (gCamCapability[cameraId]->flash_available) + flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_TRUE; + else + flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE; + staticInfo.update(ANDROID_FLASH_INFO_AVAILABLE, + &flashAvailable, 1); + + Vector<uint8_t> avail_ae_modes; + count = CAM_AE_MODE_MAX; + count = MIN(gCamCapability[cameraId]->supported_ae_modes_cnt, count); + for (size_t i = 0; i < count; i++) { + avail_ae_modes.add(gCamCapability[cameraId]->supported_ae_modes[i]); + } + if (flashAvailable) { + avail_ae_modes.add(ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH); + avail_ae_modes.add(ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH); + } + staticInfo.update(ANDROID_CONTROL_AE_AVAILABLE_MODES, + avail_ae_modes.array(), + avail_ae_modes.size()); + + int32_t sensitivity_range[2]; + sensitivity_range[0] = gCamCapability[cameraId]->sensitivity_range.min_sensitivity; + sensitivity_range[1] = gCamCapability[cameraId]->sensitivity_range.max_sensitivity; + staticInfo.update(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, + sensitivity_range, + sizeof(sensitivity_range) / sizeof(int32_t)); + + staticInfo.update(ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY, + &gCamCapability[cameraId]->max_analog_sensitivity, + 1); + + int32_t sensor_orientation = (int32_t)gCamCapability[cameraId]->sensor_mount_angle; + staticInfo.update(ANDROID_SENSOR_ORIENTATION, + &sensor_orientation, + 1); + + int32_t max_output_streams[] = { + MAX_STALLING_STREAMS, + MAX_PROCESSED_STREAMS, + MAX_RAW_STREAMS}; + staticInfo.update(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, + max_output_streams, + sizeof(max_output_streams)/sizeof(max_output_streams[0])); + + uint8_t avail_leds = 0; + 
staticInfo.update(ANDROID_LED_AVAILABLE_LEDS, + &avail_leds, 0); + + uint8_t focus_dist_calibrated; + int val = lookupFwkName(FOCUS_CALIBRATION_MAP, METADATA_MAP_SIZE(FOCUS_CALIBRATION_MAP), + gCamCapability[cameraId]->focus_dist_calibrated); + if (NAME_NOT_FOUND != val) { + focus_dist_calibrated = (uint8_t)val; + staticInfo.update(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, + &focus_dist_calibrated, 1); + } + + int32_t avail_testpattern_modes[MAX_TEST_PATTERN_CNT]; + size = 0; + count = MIN(gCamCapability[cameraId]->supported_test_pattern_modes_cnt, + MAX_TEST_PATTERN_CNT); + for (size_t i = 0; i < count; i++) { + int testpatternMode = lookupFwkName(TEST_PATTERN_MAP, METADATA_MAP_SIZE(TEST_PATTERN_MAP), + gCamCapability[cameraId]->supported_test_pattern_modes[i]); + if (NAME_NOT_FOUND != testpatternMode) { + avail_testpattern_modes[size] = testpatternMode; + size++; + } + } + staticInfo.update(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES, + avail_testpattern_modes, + size); + + uint8_t max_pipeline_depth = (uint8_t)(MAX_INFLIGHT_REQUESTS + EMPTY_PIPELINE_DELAY + FRAME_SKIP_DELAY); + staticInfo.update(ANDROID_REQUEST_PIPELINE_MAX_DEPTH, + &max_pipeline_depth, + 1); + + int32_t partial_result_count = PARTIAL_RESULT_COUNT; + staticInfo.update(ANDROID_REQUEST_PARTIAL_RESULT_COUNT, + &partial_result_count, + 1); + + int32_t max_stall_duration = MAX_REPROCESS_STALL; + staticInfo.update(ANDROID_REPROCESS_MAX_CAPTURE_STALL, &max_stall_duration, 1); + + Vector<uint8_t> available_capabilities; + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE); + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR); + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING); + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS); + if (supportBurst) { + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE); + } + 
available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING); + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING); + if (hfrEnable && available_hfr_configs.array()) { + available_capabilities.add( + ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO); + } + + if (CAM_SENSOR_YUV != gCamCapability[cameraId]->sensor_type.sens_type) { + available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW); + } + staticInfo.update(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, + available_capabilities.array(), + available_capabilities.size()); + + //aeLockAvailable to be set to true if capabilities has MANUAL_SENSOR or BURST_CAPTURE + //Assumption is that all bayer cameras support MANUAL_SENSOR. + uint8_t aeLockAvailable = (gCamCapability[cameraId]->sensor_type.sens_type == CAM_SENSOR_RAW) ? + ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE : ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE; + + staticInfo.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, + &aeLockAvailable, 1); + + //awbLockAvailable to be set to true if capabilities has MANUAL_POST_PROCESSING or + //BURST_CAPTURE. Assumption is that all bayer cameras support MANUAL_POST_PROCESSING. + uint8_t awbLockAvailable = (gCamCapability[cameraId]->sensor_type.sens_type == CAM_SENSOR_RAW) ? 
+ ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE : ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE; + + staticInfo.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, + &awbLockAvailable, 1); + + int32_t max_input_streams = 1; + staticInfo.update(ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS, + &max_input_streams, + 1); + + /* format of the map is : input format, num_output_formats, outputFormat1,..,outputFormatN */ + int32_t io_format_map[] = {HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 2, + HAL_PIXEL_FORMAT_BLOB, HAL_PIXEL_FORMAT_YCbCr_420_888, + HAL_PIXEL_FORMAT_YCbCr_420_888, 2, HAL_PIXEL_FORMAT_BLOB, + HAL_PIXEL_FORMAT_YCbCr_420_888}; + staticInfo.update(ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP, + io_format_map, sizeof(io_format_map)/sizeof(io_format_map[0])); + + int32_t max_latency = ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL; + staticInfo.update(ANDROID_SYNC_MAX_LATENCY, + &max_latency, + 1); + + uint8_t available_hot_pixel_modes[] = {ANDROID_HOT_PIXEL_MODE_FAST, + ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY}; + staticInfo.update(ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES, + available_hot_pixel_modes, + sizeof(available_hot_pixel_modes)/sizeof(available_hot_pixel_modes[0])); + + uint8_t available_shading_modes[] = {ANDROID_SHADING_MODE_OFF, + ANDROID_SHADING_MODE_FAST, + ANDROID_SHADING_MODE_HIGH_QUALITY}; + staticInfo.update(ANDROID_SHADING_AVAILABLE_MODES, + available_shading_modes, + 3); + + uint8_t available_lens_shading_map_modes[] = {ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF, + ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON}; + staticInfo.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, + available_lens_shading_map_modes, + 2); + + uint8_t available_edge_modes[] = {ANDROID_EDGE_MODE_OFF, + ANDROID_EDGE_MODE_FAST, + ANDROID_EDGE_MODE_HIGH_QUALITY, + ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG}; + staticInfo.update(ANDROID_EDGE_AVAILABLE_EDGE_MODES, + available_edge_modes, + sizeof(available_edge_modes)/sizeof(available_edge_modes[0])); + + uint8_t available_noise_red_modes[] = 
{ANDROID_NOISE_REDUCTION_MODE_OFF, + ANDROID_NOISE_REDUCTION_MODE_FAST, + ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY, + ANDROID_NOISE_REDUCTION_MODE_MINIMAL, + ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG}; + staticInfo.update(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES, + available_noise_red_modes, + sizeof(available_noise_red_modes)/sizeof(available_noise_red_modes[0])); + + uint8_t available_tonemap_modes[] = {ANDROID_TONEMAP_MODE_CONTRAST_CURVE, + ANDROID_TONEMAP_MODE_FAST, + ANDROID_TONEMAP_MODE_HIGH_QUALITY}; + staticInfo.update(ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES, + available_tonemap_modes, + sizeof(available_tonemap_modes)/sizeof(available_tonemap_modes[0])); + + uint8_t available_hot_pixel_map_modes[] = {ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF}; + staticInfo.update(ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES, + available_hot_pixel_map_modes, + sizeof(available_hot_pixel_map_modes)/sizeof(available_hot_pixel_map_modes[0])); + + val = lookupFwkName(REFERENCE_ILLUMINANT_MAP, METADATA_MAP_SIZE(REFERENCE_ILLUMINANT_MAP), + gCamCapability[cameraId]->reference_illuminant1); + if (NAME_NOT_FOUND != val) { + uint8_t fwkReferenceIlluminant = (uint8_t)val; + staticInfo.update(ANDROID_SENSOR_REFERENCE_ILLUMINANT1, &fwkReferenceIlluminant, 1); + } + + val = lookupFwkName(REFERENCE_ILLUMINANT_MAP, METADATA_MAP_SIZE(REFERENCE_ILLUMINANT_MAP), + gCamCapability[cameraId]->reference_illuminant2); + if (NAME_NOT_FOUND != val) { + uint8_t fwkReferenceIlluminant = (uint8_t)val; + staticInfo.update(ANDROID_SENSOR_REFERENCE_ILLUMINANT2, &fwkReferenceIlluminant, 1); + } + + staticInfo.update(ANDROID_SENSOR_FORWARD_MATRIX1, (camera_metadata_rational_t *) + (void *)gCamCapability[cameraId]->forward_matrix1, + FORWARD_MATRIX_COLS * FORWARD_MATRIX_ROWS); + + staticInfo.update(ANDROID_SENSOR_FORWARD_MATRIX2, (camera_metadata_rational_t *) + (void *)gCamCapability[cameraId]->forward_matrix2, + FORWARD_MATRIX_COLS * FORWARD_MATRIX_ROWS); + + 
staticInfo.update(ANDROID_SENSOR_COLOR_TRANSFORM1, (camera_metadata_rational_t *) + (void *)gCamCapability[cameraId]->color_transform1, + COLOR_TRANSFORM_COLS * COLOR_TRANSFORM_ROWS); + + staticInfo.update(ANDROID_SENSOR_COLOR_TRANSFORM2, (camera_metadata_rational_t *) + (void *)gCamCapability[cameraId]->color_transform2, + COLOR_TRANSFORM_COLS * COLOR_TRANSFORM_ROWS); + + staticInfo.update(ANDROID_SENSOR_CALIBRATION_TRANSFORM1, (camera_metadata_rational_t *) + (void *)gCamCapability[cameraId]->calibration_transform1, + CAL_TRANSFORM_COLS * CAL_TRANSFORM_ROWS); + + staticInfo.update(ANDROID_SENSOR_CALIBRATION_TRANSFORM2, (camera_metadata_rational_t *) + (void *)gCamCapability[cameraId]->calibration_transform2, + CAL_TRANSFORM_COLS * CAL_TRANSFORM_ROWS); + + int32_t request_keys_basic[] = {ANDROID_COLOR_CORRECTION_MODE, + ANDROID_COLOR_CORRECTION_TRANSFORM, ANDROID_COLOR_CORRECTION_GAINS, + ANDROID_COLOR_CORRECTION_ABERRATION_MODE, + ANDROID_CONTROL_AE_ANTIBANDING_MODE, ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + ANDROID_CONTROL_AE_LOCK, ANDROID_CONTROL_AE_MODE, + ANDROID_CONTROL_AE_REGIONS, ANDROID_CONTROL_AE_TARGET_FPS_RANGE, + ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, ANDROID_CONTROL_AF_MODE, + ANDROID_CONTROL_AF_TRIGGER, ANDROID_CONTROL_AWB_LOCK, + ANDROID_CONTROL_AWB_MODE, ANDROID_CONTROL_CAPTURE_INTENT, + ANDROID_CONTROL_EFFECT_MODE, ANDROID_CONTROL_MODE, + ANDROID_CONTROL_SCENE_MODE, ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, + ANDROID_DEMOSAIC_MODE, ANDROID_EDGE_MODE, + ANDROID_FLASH_FIRING_POWER, ANDROID_FLASH_FIRING_TIME, ANDROID_FLASH_MODE, + ANDROID_JPEG_GPS_COORDINATES, + ANDROID_JPEG_GPS_PROCESSING_METHOD, ANDROID_JPEG_GPS_TIMESTAMP, + ANDROID_JPEG_ORIENTATION, ANDROID_JPEG_QUALITY, ANDROID_JPEG_THUMBNAIL_QUALITY, + ANDROID_JPEG_THUMBNAIL_SIZE, ANDROID_LENS_APERTURE, ANDROID_LENS_FILTER_DENSITY, + ANDROID_LENS_FOCAL_LENGTH, ANDROID_LENS_FOCUS_DISTANCE, + ANDROID_LENS_OPTICAL_STABILIZATION_MODE, ANDROID_NOISE_REDUCTION_MODE, + ANDROID_REQUEST_ID, 
ANDROID_REQUEST_TYPE, + ANDROID_SCALER_CROP_REGION, ANDROID_SENSOR_EXPOSURE_TIME, + ANDROID_SENSOR_FRAME_DURATION, ANDROID_HOT_PIXEL_MODE, + ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, + ANDROID_SENSOR_SENSITIVITY, ANDROID_SHADING_MODE, + ANDROID_STATISTICS_FACE_DETECT_MODE, + ANDROID_STATISTICS_HISTOGRAM_MODE, ANDROID_STATISTICS_SHARPNESS_MAP_MODE, + ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, ANDROID_TONEMAP_CURVE_BLUE, + ANDROID_TONEMAP_CURVE_GREEN, ANDROID_TONEMAP_CURVE_RED, ANDROID_TONEMAP_MODE, + ANDROID_BLACK_LEVEL_LOCK }; + + size_t request_keys_cnt = + sizeof(request_keys_basic)/sizeof(request_keys_basic[0]); + Vector<int32_t> available_request_keys; + available_request_keys.appendArray(request_keys_basic, request_keys_cnt); + if (gCamCapability[cameraId]->supported_focus_modes_cnt > 1) { + available_request_keys.add(ANDROID_CONTROL_AF_REGIONS); + } + + staticInfo.update(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, + available_request_keys.array(), available_request_keys.size()); + + int32_t result_keys_basic[] = {ANDROID_COLOR_CORRECTION_TRANSFORM, + ANDROID_COLOR_CORRECTION_GAINS, ANDROID_CONTROL_AE_MODE, ANDROID_CONTROL_AE_REGIONS, + ANDROID_CONTROL_AE_STATE, ANDROID_CONTROL_AF_MODE, + ANDROID_CONTROL_AF_STATE, ANDROID_CONTROL_AWB_MODE, + ANDROID_CONTROL_AWB_STATE, ANDROID_CONTROL_MODE, ANDROID_EDGE_MODE, + ANDROID_FLASH_FIRING_POWER, ANDROID_FLASH_FIRING_TIME, ANDROID_FLASH_MODE, + ANDROID_FLASH_STATE, ANDROID_JPEG_GPS_COORDINATES, ANDROID_JPEG_GPS_PROCESSING_METHOD, + ANDROID_JPEG_GPS_TIMESTAMP, ANDROID_JPEG_ORIENTATION, ANDROID_JPEG_QUALITY, + ANDROID_JPEG_THUMBNAIL_QUALITY, ANDROID_JPEG_THUMBNAIL_SIZE, ANDROID_LENS_APERTURE, + ANDROID_LENS_FILTER_DENSITY, ANDROID_LENS_FOCAL_LENGTH, ANDROID_LENS_FOCUS_DISTANCE, + ANDROID_LENS_FOCUS_RANGE, ANDROID_LENS_STATE, ANDROID_LENS_OPTICAL_STABILIZATION_MODE, + ANDROID_NOISE_REDUCTION_MODE, ANDROID_REQUEST_ID, + ANDROID_SCALER_CROP_REGION, ANDROID_SHADING_MODE, ANDROID_SENSOR_EXPOSURE_TIME, + ANDROID_SENSOR_FRAME_DURATION, 
ANDROID_SENSOR_SENSITIVITY, + ANDROID_SENSOR_TIMESTAMP, ANDROID_SENSOR_NEUTRAL_COLOR_POINT, + ANDROID_SENSOR_PROFILE_TONE_CURVE, ANDROID_BLACK_LEVEL_LOCK, ANDROID_TONEMAP_CURVE_BLUE, + ANDROID_TONEMAP_CURVE_GREEN, ANDROID_TONEMAP_CURVE_RED, ANDROID_TONEMAP_MODE, + ANDROID_STATISTICS_FACE_DETECT_MODE, ANDROID_STATISTICS_HISTOGRAM_MODE, + ANDROID_STATISTICS_SHARPNESS_MAP, ANDROID_STATISTICS_SHARPNESS_MAP_MODE, + ANDROID_STATISTICS_PREDICTED_COLOR_GAINS, ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM, + ANDROID_STATISTICS_SCENE_FLICKER, ANDROID_STATISTICS_FACE_RECTANGLES, + ANDROID_STATISTICS_FACE_SCORES}; + size_t result_keys_cnt = + sizeof(result_keys_basic)/sizeof(result_keys_basic[0]); + + Vector<int32_t> available_result_keys; + available_result_keys.appendArray(result_keys_basic, result_keys_cnt); + if (gCamCapability[cameraId]->supported_focus_modes_cnt > 1) { + available_result_keys.add(ANDROID_CONTROL_AF_REGIONS); + } + if (CAM_SENSOR_RAW == gCamCapability[cameraId]->sensor_type.sens_type) { + available_result_keys.add(ANDROID_SENSOR_NOISE_PROFILE); + available_result_keys.add(ANDROID_SENSOR_GREEN_SPLIT); + } + if (supportedFaceDetectMode == 1) { + available_result_keys.add(ANDROID_STATISTICS_FACE_RECTANGLES); + available_result_keys.add(ANDROID_STATISTICS_FACE_SCORES); + } else if ((supportedFaceDetectMode == 2) || + (supportedFaceDetectMode == 3)) { + available_result_keys.add(ANDROID_STATISTICS_FACE_IDS); + available_result_keys.add(ANDROID_STATISTICS_FACE_LANDMARKS); + } + staticInfo.update(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, + available_result_keys.array(), available_result_keys.size()); + + int32_t available_characteristics_keys[] = {ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, + ANDROID_CONTROL_AE_AVAILABLE_MODES, ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, + ANDROID_CONTROL_AE_COMPENSATION_RANGE, ANDROID_CONTROL_AE_COMPENSATION_STEP, + ANDROID_CONTROL_AF_AVAILABLE_MODES, ANDROID_CONTROL_AVAILABLE_EFFECTS, + 
ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, + ANDROID_SCALER_CROPPING_TYPE, + ANDROID_SYNC_MAX_LATENCY, + ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, + ANDROID_CONTROL_AVAILABLE_SCENE_MODES, + ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, + ANDROID_CONTROL_AWB_AVAILABLE_MODES, ANDROID_CONTROL_MAX_REGIONS, + ANDROID_CONTROL_SCENE_MODE_OVERRIDES,ANDROID_FLASH_INFO_AVAILABLE, + ANDROID_FLASH_INFO_CHARGE_DURATION, ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, + ANDROID_JPEG_MAX_SIZE, ANDROID_LENS_INFO_AVAILABLE_APERTURES, + ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES, + ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, + ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION, + ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, + ANDROID_LENS_INFO_SHADING_MAP_SIZE, ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, + ANDROID_LENS_FACING, + ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS, + ANDROID_REQUEST_PIPELINE_MAX_DEPTH, ANDROID_REQUEST_AVAILABLE_CAPABILITIES, + ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, + ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, ANDROID_REQUEST_PARTIAL_RESULT_COUNT, + ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, + ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP, + ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, + /*ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,*/ + ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, ANDROID_SENSOR_FORWARD_MATRIX1, + ANDROID_SENSOR_REFERENCE_ILLUMINANT1, ANDROID_SENSOR_REFERENCE_ILLUMINANT2, + ANDROID_SENSOR_FORWARD_MATRIX2, ANDROID_SENSOR_COLOR_TRANSFORM1, + ANDROID_SENSOR_COLOR_TRANSFORM2, ANDROID_SENSOR_CALIBRATION_TRANSFORM1, + ANDROID_SENSOR_CALIBRATION_TRANSFORM2, ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, + ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, + ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, ANDROID_SENSOR_INFO_MAX_FRAME_DURATION, + ANDROID_SENSOR_INFO_PHYSICAL_SIZE, 
ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, + ANDROID_SENSOR_INFO_WHITE_LEVEL, ANDROID_SENSOR_BASE_GAIN_FACTOR, + ANDROID_SENSOR_BLACK_LEVEL_PATTERN, ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY, + ANDROID_SENSOR_ORIENTATION, ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES, + ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES, + ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT, + ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT, + ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE, + ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE, ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES, + ANDROID_EDGE_AVAILABLE_EDGE_MODES, + ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES, + ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES, + ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES, + ANDROID_TONEMAP_MAX_CURVE_POINTS, + ANDROID_CONTROL_AVAILABLE_MODES, + ANDROID_CONTROL_AE_LOCK_AVAILABLE, + ANDROID_CONTROL_AWB_LOCK_AVAILABLE, + ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, + ANDROID_SHADING_AVAILABLE_MODES, + ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL }; + staticInfo.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, + available_characteristics_keys, + sizeof(available_characteristics_keys)/sizeof(int32_t)); + + /*available stall durations depend on the hw + sw and will be different for different devices */ + /*have to add for raw after implementation*/ + int32_t stall_formats[] = {HAL_PIXEL_FORMAT_BLOB, ANDROID_SCALER_AVAILABLE_FORMATS_RAW16}; + size_t stall_formats_count = sizeof(stall_formats)/sizeof(int32_t); + + Vector<int64_t> available_stall_durations; + for (uint32_t j = 0; j < stall_formats_count; j++) { + if (stall_formats[j] == HAL_PIXEL_FORMAT_BLOB) { + for (uint32_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->picture_sizes_tbl_cnt); i++) { + available_stall_durations.add(stall_formats[j]); + available_stall_durations.add(gCamCapability[cameraId]->picture_sizes_tbl[i].width); + 
available_stall_durations.add(gCamCapability[cameraId]->picture_sizes_tbl[i].height); + available_stall_durations.add(gCamCapability[cameraId]->jpeg_stall_durations[i]); + } + } else { + for (uint32_t i = 0; i < MIN(MAX_SIZES_CNT, + gCamCapability[cameraId]->supported_raw_dim_cnt); i++) { + available_stall_durations.add(stall_formats[j]); + available_stall_durations.add(gCamCapability[cameraId]->raw_dim[i].width); + available_stall_durations.add(gCamCapability[cameraId]->raw_dim[i].height); + available_stall_durations.add(gCamCapability[cameraId]->raw16_stall_durations[i]); + } + } + } + staticInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS, + available_stall_durations.array(), + available_stall_durations.size()); + + //QCAMERA3_OPAQUE_RAW + uint8_t raw_format = QCAMERA3_OPAQUE_RAW_FORMAT_LEGACY; + cam_format_t fmt = CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG; + switch (gCamCapability[cameraId]->opaque_raw_fmt) { + case LEGACY_RAW: + if (gCamCapability[cameraId]->white_level == MAX_VALUE_8BIT) + fmt = CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GBRG; + else if (gCamCapability[cameraId]->white_level == MAX_VALUE_10BIT) + fmt = CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG; + else if (gCamCapability[cameraId]->white_level == MAX_VALUE_12BIT) + fmt = CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GBRG; + raw_format = QCAMERA3_OPAQUE_RAW_FORMAT_LEGACY; + break; + case MIPI_RAW: + if (gCamCapability[cameraId]->white_level == MAX_VALUE_8BIT) + fmt = CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GBRG; + else if (gCamCapability[cameraId]->white_level == MAX_VALUE_10BIT) + fmt = CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG; + else if (gCamCapability[cameraId]->white_level == MAX_VALUE_12BIT) + fmt = CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GBRG; + raw_format = QCAMERA3_OPAQUE_RAW_FORMAT_MIPI; + break; + default: + LOGE("unknown opaque_raw_format %d", + gCamCapability[cameraId]->opaque_raw_fmt); + break; + } + staticInfo.update(QCAMERA3_OPAQUE_RAW_FORMAT, &raw_format, 1); + + Vector<int32_t> strides; + for (size_t i = 0; i < MIN(MAX_SIZES_CNT, 
+ gCamCapability[cameraId]->supported_raw_dim_cnt); i++) { + cam_stream_buf_plane_info_t buf_planes; + strides.add(gCamCapability[cameraId]->raw_dim[i].width); + strides.add(gCamCapability[cameraId]->raw_dim[i].height); + mm_stream_calc_offset_raw(fmt, &gCamCapability[cameraId]->raw_dim[i], + &gCamCapability[cameraId]->padding_info, &buf_planes); + strides.add(buf_planes.plane_info.mp[0].stride); + } + staticInfo.update(QCAMERA3_OPAQUE_RAW_STRIDES, strides.array(), + strides.size()); + + staticInfo.update(QCAMERA3_DUALCAM_CALIB_META_DATA_BLOB, + (const uint8_t*)&gCamCapability[cameraId]->related_cam_calibration, + sizeof(gCamCapability[cameraId]->related_cam_calibration)); + + uint8_t isMonoOnly = + (gCamCapability[cameraId]->color_arrangement == CAM_FILTER_ARRANGEMENT_Y); + staticInfo.update(QCAMERA3_SENSOR_IS_MONO_ONLY, + &isMonoOnly, 1); + + gStaticMetadata[cameraId] = staticInfo.release(); + return rc; +} + +/*=========================================================================== + * FUNCTION : makeTable + * + * DESCRIPTION: make a table of sizes + * + * PARAMETERS : + * + * + *==========================================================================*/ +void QCamera3HardwareInterface::makeTable(cam_dimension_t* dimTable, size_t size, + size_t max_size, int32_t *sizeTable) +{ + size_t j = 0; + if (size > max_size) { + size = max_size; + } + for (size_t i = 0; i < size; i++) { + sizeTable[j] = dimTable[i].width; + sizeTable[j+1] = dimTable[i].height; + j+=2; + } +} + +/*=========================================================================== + * FUNCTION : makeFPSTable + * + * DESCRIPTION: make a table of fps ranges + * + * PARAMETERS : + * + *==========================================================================*/ +void QCamera3HardwareInterface::makeFPSTable(cam_fps_range_t* fpsTable, size_t size, + size_t max_size, int32_t *fpsRangesTable) +{ + size_t j = 0; + if (size > max_size) { + size = max_size; + } + for (size_t i = 0; i < size; i++) { + 
fpsRangesTable[j] = (int32_t)fpsTable[i].min_fps; + fpsRangesTable[j+1] = (int32_t)fpsTable[i].max_fps; + j+=2; + } +} + +/*=========================================================================== + * FUNCTION : makeOverridesList + * + * DESCRIPTION: make a list of scene mode overrides + * + * PARAMETERS : + * + * + *==========================================================================*/ +void QCamera3HardwareInterface::makeOverridesList( + cam_scene_mode_overrides_t* overridesTable, size_t size, size_t max_size, + uint8_t *overridesList, uint8_t *supported_indexes, uint32_t camera_id) +{ + /*daemon will give a list of overrides for all scene modes. + However we should send the fwk only the overrides for the scene modes + supported by the framework*/ + size_t j = 0; + if (size > max_size) { + size = max_size; + } + size_t focus_count = CAM_FOCUS_MODE_MAX; + focus_count = MIN(gCamCapability[camera_id]->supported_focus_modes_cnt, + focus_count); + for (size_t i = 0; i < size; i++) { + bool supt = false; + size_t index = supported_indexes[i]; + overridesList[j] = gCamCapability[camera_id]->flash_available ? 
+ ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH : ANDROID_CONTROL_AE_MODE_ON; + int val = lookupFwkName(WHITE_BALANCE_MODES_MAP, + METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP), + overridesTable[index].awb_mode); + if (NAME_NOT_FOUND != val) { + overridesList[j+1] = (uint8_t)val; + } + uint8_t focus_override = overridesTable[index].af_mode; + for (size_t k = 0; k < focus_count; k++) { + if (gCamCapability[camera_id]->supported_focus_modes[k] == focus_override) { + supt = true; + break; + } + } + if (supt) { + val = lookupFwkName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP), + focus_override); + if (NAME_NOT_FOUND != val) { + overridesList[j+2] = (uint8_t)val; + } + } else { + overridesList[j+2] = ANDROID_CONTROL_AF_MODE_OFF; + } + j+=3; + } +} + +/*=========================================================================== + * FUNCTION : filterJpegSizes + * + * DESCRIPTION: Returns the supported jpeg sizes based on the max dimension that + * could be downscaled to + * + * PARAMETERS : + * + * RETURN : length of jpegSizes array + *==========================================================================*/ + +size_t QCamera3HardwareInterface::filterJpegSizes(int32_t *jpegSizes, int32_t *processedSizes, + size_t processedSizesCnt, size_t maxCount, cam_rect_t active_array_size, + uint8_t downscale_factor) +{ + if (0 == downscale_factor) { + downscale_factor = 1; + } + + int32_t min_width = active_array_size.width / downscale_factor; + int32_t min_height = active_array_size.height / downscale_factor; + size_t jpegSizesCnt = 0; + if (processedSizesCnt > maxCount) { + processedSizesCnt = maxCount; + } + for (size_t i = 0; i < processedSizesCnt; i+=2) { + if (processedSizes[i] >= min_width && processedSizes[i+1] >= min_height) { + jpegSizes[jpegSizesCnt] = processedSizes[i]; + jpegSizes[jpegSizesCnt+1] = processedSizes[i+1]; + jpegSizesCnt += 2; + } + } + return jpegSizesCnt; +} + +/*=========================================================================== + * FUNCTION : 
computeNoiseModelEntryS + * + * DESCRIPTION: function to map a given sensitivity to the S noise + * model parameters in the DNG noise model. + * + * PARAMETERS : sens : the sensor sensitivity + * + ** RETURN : S (sensor amplification) noise + * + *==========================================================================*/ +double QCamera3HardwareInterface::computeNoiseModelEntryS(int32_t sens) { + double s = gCamCapability[mCameraId]->gradient_S * sens + + gCamCapability[mCameraId]->offset_S; + return ((s < 0.0) ? 0.0 : s); +} + +/*=========================================================================== + * FUNCTION : computeNoiseModelEntryO + * + * DESCRIPTION: function to map a given sensitivity to the O noise + * model parameters in the DNG noise model. + * + * PARAMETERS : sens : the sensor sensitivity + * + ** RETURN : O (sensor readout) noise + * + *==========================================================================*/ +double QCamera3HardwareInterface::computeNoiseModelEntryO(int32_t sens) { + int32_t max_analog_sens = gCamCapability[mCameraId]->max_analog_sensitivity; + double digital_gain = (1.0 * sens / max_analog_sens) < 1.0 ? + 1.0 : (1.0 * sens / max_analog_sens); + double o = gCamCapability[mCameraId]->gradient_O * sens * sens + + gCamCapability[mCameraId]->offset_O * digital_gain * digital_gain; + return ((o < 0.0) ? 
0.0 : o); +} + +/*=========================================================================== + * FUNCTION : getSensorSensitivity + * + * DESCRIPTION: convert iso_mode to an integer value + * + * PARAMETERS : iso_mode : the iso_mode supported by sensor + * + ** RETURN : sensitivity supported by sensor + * + *==========================================================================*/ +int32_t QCamera3HardwareInterface::getSensorSensitivity(int32_t iso_mode) +{ + int32_t sensitivity; + + switch (iso_mode) { + case CAM_ISO_MODE_100: + sensitivity = 100; + break; + case CAM_ISO_MODE_200: + sensitivity = 200; + break; + case CAM_ISO_MODE_400: + sensitivity = 400; + break; + case CAM_ISO_MODE_800: + sensitivity = 800; + break; + case CAM_ISO_MODE_1600: + sensitivity = 1600; + break; + default: + sensitivity = -1; + break; + } + return sensitivity; +} + +/*=========================================================================== + * FUNCTION : getCamInfo + * + * DESCRIPTION: query camera capabilities + * + * PARAMETERS : + * @cameraId : camera Id + * @info : camera info struct to be filled in with camera capabilities + * + * RETURN : int type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HardwareInterface::getCamInfo(uint32_t cameraId, + struct camera_info *info) +{ + ATRACE_CALL(); + int rc = 0; + + pthread_mutex_lock(&gCamLock); + if (NULL == gCamCapability[cameraId]) { + rc = initCapabilities(cameraId); + if (rc < 0) { + pthread_mutex_unlock(&gCamLock); + return rc; + } + } + + if (NULL == gStaticMetadata[cameraId]) { + rc = initStaticMetadata(cameraId); + if (rc < 0) { + pthread_mutex_unlock(&gCamLock); + return rc; + } + } + + switch(gCamCapability[cameraId]->position) { + case CAM_POSITION_BACK: + case CAM_POSITION_BACK_AUX: + info->facing = CAMERA_FACING_BACK; + break; + + case CAM_POSITION_FRONT: + case CAM_POSITION_FRONT_AUX: + info->facing = 
CAMERA_FACING_FRONT; + break; + + default: + LOGE("Unknown position type %d for camera id:%d", + gCamCapability[cameraId]->position, cameraId); + rc = -1; + break; + } + + + info->orientation = (int)gCamCapability[cameraId]->sensor_mount_angle; + info->device_version = CAMERA_DEVICE_API_VERSION_3_3; + info->static_camera_characteristics = gStaticMetadata[cameraId]; + + //For now assume both cameras can operate independently. + info->conflicting_devices = NULL; + info->conflicting_devices_length = 0; + + //resource cost is 100 * MIN(1.0, m/M), + //where m is throughput requirement with maximum stream configuration + //and M is CPP maximum throughput. + float max_fps = 0.0; + for (uint32_t i = 0; + i < gCamCapability[cameraId]->fps_ranges_tbl_cnt; i++) { + if (max_fps < gCamCapability[cameraId]->fps_ranges_tbl[i].max_fps) + max_fps = gCamCapability[cameraId]->fps_ranges_tbl[i].max_fps; + } + float ratio = 1.0 * MAX_PROCESSED_STREAMS * + gCamCapability[cameraId]->active_array_size.width * + gCamCapability[cameraId]->active_array_size.height * max_fps / + gCamCapability[cameraId]->max_pixel_bandwidth; + info->resource_cost = 100 * MIN(1.0, ratio); + LOGI("camera %d resource cost is %d", cameraId, + info->resource_cost); + + pthread_mutex_unlock(&gCamLock); + return rc; +} + +/*=========================================================================== + * FUNCTION : translateCapabilityToMetadata + * + * DESCRIPTION: translate the capability into camera_metadata_t + * + * PARAMETERS : type of the request + * + * + * RETURN : success: camera_metadata_t* + * failure: NULL + * + *==========================================================================*/ +camera_metadata_t* QCamera3HardwareInterface::translateCapabilityToMetadata(int type) +{ + if (mDefaultMetadata[type] != NULL) { + return mDefaultMetadata[type]; + } + //first time we are handling this request + //fill up the metadata structure using the wrapper class + CameraMetadata settings; + //translate from 
cam_capability_t to camera_metadata_tag_t + static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE; + settings.update(ANDROID_REQUEST_TYPE, &requestType, 1); + int32_t defaultRequestID = 0; + settings.update(ANDROID_REQUEST_ID, &defaultRequestID, 1); + + /* OIS disable */ + char ois_prop[PROPERTY_VALUE_MAX]; + memset(ois_prop, 0, sizeof(ois_prop)); + property_get("persist.camera.ois.disable", ois_prop, "0"); + uint8_t ois_disable = (uint8_t)atoi(ois_prop); + + /* Force video to use OIS */ + char videoOisProp[PROPERTY_VALUE_MAX]; + memset(videoOisProp, 0, sizeof(videoOisProp)); + property_get("persist.camera.ois.video", videoOisProp, "1"); + uint8_t forceVideoOis = (uint8_t)atoi(videoOisProp); + + // EIS enable/disable + char eis_prop[PROPERTY_VALUE_MAX]; + memset(eis_prop, 0, sizeof(eis_prop)); + property_get("persist.camera.eis.enable", eis_prop, "0"); + const uint8_t eis_prop_set = (uint8_t)atoi(eis_prop); + + const bool facingBack = ((gCamCapability[mCameraId]->position == CAM_POSITION_BACK) || + (gCamCapability[mCameraId]->position == CAM_POSITION_BACK_AUX)); + // This is a bit hacky. EIS is enabled only when the above setprop + // is set to non-zero value and on back camera (for 2015 Nexus). + // Ideally, we should rely on m_bEisEnable, but we cannot guarantee + // configureStream is called before this function. In other words, + // we cannot guarantee the app will call configureStream before + // calling createDefaultRequest. 
+ const bool eisEnabled = facingBack && eis_prop_set; + + uint8_t controlIntent = 0; + uint8_t focusMode; + uint8_t vsMode; + uint8_t optStabMode; + uint8_t cacMode; + uint8_t edge_mode; + uint8_t noise_red_mode; + uint8_t tonemap_mode; + bool highQualityModeEntryAvailable = FALSE; + bool fastModeEntryAvailable = FALSE; + vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + switch (type) { + case CAMERA3_TEMPLATE_PREVIEW: + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW; + focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON; + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + edge_mode = ANDROID_EDGE_MODE_FAST; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST; + tonemap_mode = ANDROID_TONEMAP_MODE_FAST; + break; + case CAMERA3_TEMPLATE_STILL_CAPTURE: + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE; + focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON; + edge_mode = ANDROID_EDGE_MODE_HIGH_QUALITY; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY; + tonemap_mode = ANDROID_TONEMAP_MODE_HIGH_QUALITY; + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF; + // Order of priority for default CAC is HIGH Quality -> FAST -> OFF + for (size_t i = 0; i < gCamCapability[mCameraId]->aberration_modes_count; i++) { + if (gCamCapability[mCameraId]->aberration_modes[i] == + CAM_COLOR_CORRECTION_ABERRATION_HIGH_QUALITY) { + highQualityModeEntryAvailable = TRUE; + } else if (gCamCapability[mCameraId]->aberration_modes[i] == + CAM_COLOR_CORRECTION_ABERRATION_FAST) { + fastModeEntryAvailable = TRUE; + } + } + if (highQualityModeEntryAvailable) { + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY; + } else if (fastModeEntryAvailable) { + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + } + break; + case 
CAMERA3_TEMPLATE_VIDEO_RECORD: + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD; + focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + if (eisEnabled) { + vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON; + } + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + edge_mode = ANDROID_EDGE_MODE_FAST; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST; + tonemap_mode = ANDROID_TONEMAP_MODE_FAST; + if (forceVideoOis) + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON; + break; + case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT: + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT; + focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + if (eisEnabled) { + vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON; + } + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + edge_mode = ANDROID_EDGE_MODE_FAST; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST; + tonemap_mode = ANDROID_TONEMAP_MODE_FAST; + if (forceVideoOis) + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON; + break; + case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG: + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG; + focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON; + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + edge_mode = ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG; + tonemap_mode = ANDROID_TONEMAP_MODE_FAST; + break; + case CAMERA3_TEMPLATE_MANUAL: + edge_mode = ANDROID_EDGE_MODE_FAST; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST; + tonemap_mode = ANDROID_TONEMAP_MODE_FAST; + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL; + focusMode = ANDROID_CONTROL_AF_MODE_OFF; + optStabMode = 
ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + break; + default: + edge_mode = ANDROID_EDGE_MODE_FAST; + noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST; + tonemap_mode = ANDROID_TONEMAP_MODE_FAST; + cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST; + controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM; + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + break; + } + settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &cacMode, 1); + settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1); + settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vsMode, 1); + if (gCamCapability[mCameraId]->supported_focus_modes_cnt == 1) { + focusMode = ANDROID_CONTROL_AF_MODE_OFF; + } + settings.update(ANDROID_CONTROL_AF_MODE, &focusMode, 1); + + if (gCamCapability[mCameraId]->optical_stab_modes_count == 1 && + gCamCapability[mCameraId]->optical_stab_modes[0] == CAM_OPT_STAB_ON) + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON; + else if ((gCamCapability[mCameraId]->optical_stab_modes_count == 1 && + gCamCapability[mCameraId]->optical_stab_modes[0] == CAM_OPT_STAB_OFF) + || ois_disable) + optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF; + settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, &optStabMode, 1); + + settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, + &gCamCapability[mCameraId]->exposure_compensation_default, 1); + + static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF; + settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1); + + static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF; + settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1); + + static const uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO; + settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1); + + static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO; + settings.update(ANDROID_CONTROL_MODE, &controlMode, 1); + + static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF; + 
settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1); + + static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY; + settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1); + + static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON; + settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1); + + /*flash*/ + static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF; + settings.update(ANDROID_FLASH_MODE, &flashMode, 1); + + static const uint8_t flashFiringLevel = CAM_FLASH_FIRING_LEVEL_4; + settings.update(ANDROID_FLASH_FIRING_POWER, + &flashFiringLevel, 1); + + /* lens */ + float default_aperture = gCamCapability[mCameraId]->apertures[0]; + settings.update(ANDROID_LENS_APERTURE, &default_aperture, 1); + + if (gCamCapability[mCameraId]->filter_densities_count) { + float default_filter_density = gCamCapability[mCameraId]->filter_densities[0]; + settings.update(ANDROID_LENS_FILTER_DENSITY, &default_filter_density, + gCamCapability[mCameraId]->filter_densities_count); + } + + float default_focal_length = gCamCapability[mCameraId]->focal_length; + settings.update(ANDROID_LENS_FOCAL_LENGTH, &default_focal_length, 1); + + float default_focus_distance = 0; + settings.update(ANDROID_LENS_FOCUS_DISTANCE, &default_focus_distance, 1); + + static const uint8_t demosaicMode = ANDROID_DEMOSAIC_MODE_FAST; + settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1); + + static const uint8_t hotpixelMode = ANDROID_HOT_PIXEL_MODE_FAST; + settings.update(ANDROID_HOT_PIXEL_MODE, &hotpixelMode, 1); + + static const int32_t testpatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF; + settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testpatternMode, 1); + + /* face detection (default to OFF) */ + static const uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF; + settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1); + + static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF; + 
settings.update(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1); + + static const uint8_t sharpnessMapMode = ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF; + settings.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1); + + static const uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF; + settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1); + + static const uint8_t lensShadingMode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF; + settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &lensShadingMode, 1); + + static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF; + settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1); + + /* Exposure time(Update the Min Exposure Time)*/ + int64_t default_exposure_time = gCamCapability[mCameraId]->exposure_time_range[0]; + settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &default_exposure_time, 1); + + /* frame duration */ + static const int64_t default_frame_duration = NSEC_PER_33MSEC; + settings.update(ANDROID_SENSOR_FRAME_DURATION, &default_frame_duration, 1); + + /* sensitivity */ + static const int32_t default_sensitivity = 100; + settings.update(ANDROID_SENSOR_SENSITIVITY, &default_sensitivity, 1); + + /*edge mode*/ + settings.update(ANDROID_EDGE_MODE, &edge_mode, 1); + + /*noise reduction mode*/ + settings.update(ANDROID_NOISE_REDUCTION_MODE, &noise_red_mode, 1); + + /*color correction mode*/ + static const uint8_t color_correct_mode = ANDROID_COLOR_CORRECTION_MODE_FAST; + settings.update(ANDROID_COLOR_CORRECTION_MODE, &color_correct_mode, 1); + + /*transform matrix mode*/ + settings.update(ANDROID_TONEMAP_MODE, &tonemap_mode, 1); + + int32_t scaler_crop_region[4]; + scaler_crop_region[0] = 0; + scaler_crop_region[1] = 0; + scaler_crop_region[2] = gCamCapability[mCameraId]->active_array_size.width; + scaler_crop_region[3] = gCamCapability[mCameraId]->active_array_size.height; + settings.update(ANDROID_SCALER_CROP_REGION, scaler_crop_region, 4); 
+ + static const uint8_t antibanding_mode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO; + settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &antibanding_mode, 1); + + /*focus distance*/ + float focus_distance = 0.0; + settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focus_distance, 1); + + /*target fps range: use maximum range for picture, and maximum fixed range for video*/ + float max_range = 0.0; + float max_fixed_fps = 0.0; + int32_t fps_range[2] = {0, 0}; + for (uint32_t i = 0; i < gCamCapability[mCameraId]->fps_ranges_tbl_cnt; + i++) { + float range = gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps - + gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps; + if (type == CAMERA3_TEMPLATE_PREVIEW || + type == CAMERA3_TEMPLATE_STILL_CAPTURE || + type == CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG) { + if (range > max_range) { + fps_range[0] = + (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps; + fps_range[1] = + (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps; + max_range = range; + } + } else { + if (range < 0.01 && max_fixed_fps < + gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps) { + fps_range[0] = + (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps; + fps_range[1] = + (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps; + max_fixed_fps = gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps; + } + } + } + settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, fps_range, 2); + + /*precapture trigger*/ + uint8_t precapture_trigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE; + settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &precapture_trigger, 1); + + /*af trigger*/ + uint8_t af_trigger = ANDROID_CONTROL_AF_TRIGGER_IDLE; + settings.update(ANDROID_CONTROL_AF_TRIGGER, &af_trigger, 1); + + /* ae & af regions */ + int32_t active_region[] = { + gCamCapability[mCameraId]->active_array_size.left, + gCamCapability[mCameraId]->active_array_size.top, + gCamCapability[mCameraId]->active_array_size.left + + 
gCamCapability[mCameraId]->active_array_size.width, + gCamCapability[mCameraId]->active_array_size.top + + gCamCapability[mCameraId]->active_array_size.height, + 0}; + settings.update(ANDROID_CONTROL_AE_REGIONS, active_region, + sizeof(active_region) / sizeof(active_region[0])); + settings.update(ANDROID_CONTROL_AF_REGIONS, active_region, + sizeof(active_region) / sizeof(active_region[0])); + + /* black level lock */ + uint8_t blacklevel_lock = ANDROID_BLACK_LEVEL_LOCK_OFF; + settings.update(ANDROID_BLACK_LEVEL_LOCK, &blacklevel_lock, 1); + + /* lens shading map mode */ + uint8_t shadingmap_mode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF; + if (CAM_SENSOR_RAW == gCamCapability[mCameraId]->sensor_type.sens_type) { + shadingmap_mode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON; + } + settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &shadingmap_mode, 1); + + //special defaults for manual template + if (type == CAMERA3_TEMPLATE_MANUAL) { + static const uint8_t manualControlMode = ANDROID_CONTROL_MODE_OFF; + settings.update(ANDROID_CONTROL_MODE, &manualControlMode, 1); + + static const uint8_t manualFocusMode = ANDROID_CONTROL_AF_MODE_OFF; + settings.update(ANDROID_CONTROL_AF_MODE, &manualFocusMode, 1); + + static const uint8_t manualAeMode = ANDROID_CONTROL_AE_MODE_OFF; + settings.update(ANDROID_CONTROL_AE_MODE, &manualAeMode, 1); + + static const uint8_t manualAwbMode = ANDROID_CONTROL_AWB_MODE_OFF; + settings.update(ANDROID_CONTROL_AWB_MODE, &manualAwbMode, 1); + + static const uint8_t manualTonemapMode = ANDROID_TONEMAP_MODE_FAST; + settings.update(ANDROID_TONEMAP_MODE, &manualTonemapMode, 1); + + static const uint8_t manualColorCorrectMode = ANDROID_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX; + settings.update(ANDROID_COLOR_CORRECTION_MODE, &manualColorCorrectMode, 1); + } + + + /* TNR + * We'll use this location to determine which modes TNR will be set. 
+ * We will enable TNR to be on if either of the Preview/Video stream requires TNR + * This is not to be confused with linking on a per stream basis that decision + * is still on per-session basis and will be handled as part of config stream + */ + uint8_t tnr_enable = 0; + + if (m_bTnrPreview || m_bTnrVideo) { + + switch (type) { + case CAMERA3_TEMPLATE_VIDEO_RECORD: + case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT: + tnr_enable = 1; + break; + + default: + tnr_enable = 0; + break; + } + + int32_t tnr_process_type = (int32_t)getTemporalDenoiseProcessPlate(); + settings.update(QCAMERA3_TEMPORAL_DENOISE_ENABLE, &tnr_enable, 1); + settings.update(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE, &tnr_process_type, 1); + + LOGD("TNR:%d with process plate %d for template:%d", + tnr_enable, tnr_process_type, type); + } + + //Update Link tags to default + int32_t sync_type = CAM_TYPE_STANDALONE; + settings.update(QCAMERA3_DUALCAM_LINK_ENABLE, &sync_type, 1); + + int32_t is_main = 0; //this doesn't matter as app should overwrite + settings.update(QCAMERA3_DUALCAM_LINK_IS_MAIN, &is_main, 1); + + settings.update(QCAMERA3_DUALCAM_LINK_RELATED_CAMERA_ID, &is_main, 1); + + /* CDS default */ + char prop[PROPERTY_VALUE_MAX]; + memset(prop, 0, sizeof(prop)); + property_get("persist.camera.CDS", prop, "Auto"); + cam_cds_mode_type_t cds_mode = CAM_CDS_MODE_AUTO; + cds_mode = lookupProp(CDS_MAP, METADATA_MAP_SIZE(CDS_MAP), prop); + if (CAM_CDS_MODE_MAX == cds_mode) { + cds_mode = CAM_CDS_MODE_AUTO; + } + + /* Disabling CDS in templates which have TNR enabled*/ + if (tnr_enable) + cds_mode = CAM_CDS_MODE_OFF; + + int32_t mode = cds_mode; + settings.update(QCAMERA3_CDS_MODE, &mode, 1); + mDefaultMetadata[type] = settings.release(); + + return mDefaultMetadata[type]; +} + +/*=========================================================================== + * FUNCTION : setFrameParameters + * + * DESCRIPTION: set parameters per frame as requested in the metadata from + * framework + * + * PARAMETERS : + * 
@request : request that needs to be serviced + * @streamID : Stream ID of all the requested streams + * @blob_request: Whether this request is a blob request or not + * + * RETURN : success: NO_ERROR + * failure: + *==========================================================================*/ +int QCamera3HardwareInterface::setFrameParameters( + camera3_capture_request_t *request, + cam_stream_ID_t streamID, + int blob_request, + uint32_t snapshotStreamId) +{ + /*translate from camera_metadata_t type to parm_type_t*/ + int rc = 0; + int32_t hal_version = CAM_HAL_V3; + + clear_metadata_buffer(mParameters); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_HAL_VERSION, hal_version)) { + LOGE("Failed to set hal version in the parameters"); + return BAD_VALUE; + } + + /*we need to update the frame number in the parameters*/ + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_FRAME_NUMBER, + request->frame_number)) { + LOGE("Failed to set the frame number in the parameters"); + return BAD_VALUE; + } + + /* Update stream id of all the requested buffers */ + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_STREAM_ID, streamID)) { + LOGE("Failed to set stream type mask in the parameters"); + return BAD_VALUE; + } + + if (mUpdateDebugLevel) { + uint32_t dummyDebugLevel = 0; + /* The value of dummyDebugLevel is irrelavent. 
On + * CAM_INTF_PARM_UPDATE_DEBUG_LEVEL, read debug property */ + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_UPDATE_DEBUG_LEVEL, + dummyDebugLevel)) { + LOGE("Failed to set UPDATE_DEBUG_LEVEL"); + return BAD_VALUE; + } + mUpdateDebugLevel = false; + } + + if(request->settings != NULL){ + rc = translateToHalMetadata(request, mParameters, snapshotStreamId); + if (blob_request) + memcpy(mPrevParameters, mParameters, sizeof(metadata_buffer_t)); + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : setReprocParameters + * + * DESCRIPTION: Translate frameworks metadata to HAL metadata structure, and + * return it. + * + * PARAMETERS : + * @request : request that needs to be serviced + * + * RETURN : success: NO_ERROR + * failure: + *==========================================================================*/ +int32_t QCamera3HardwareInterface::setReprocParameters( + camera3_capture_request_t *request, metadata_buffer_t *reprocParam, + uint32_t snapshotStreamId) +{ + /*translate from camera_metadata_t type to parm_type_t*/ + int rc = 0; + + if (NULL == request->settings){ + LOGE("Reprocess settings cannot be NULL"); + return BAD_VALUE; + } + + if (NULL == reprocParam) { + LOGE("Invalid reprocessing metadata buffer"); + return BAD_VALUE; + } + clear_metadata_buffer(reprocParam); + + /*we need to update the frame number in the parameters*/ + if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_FRAME_NUMBER, + request->frame_number)) { + LOGE("Failed to set the frame number in the parameters"); + return BAD_VALUE; + } + + rc = translateToHalMetadata(request, reprocParam, snapshotStreamId); + if (rc < 0) { + LOGE("Failed to translate reproc request"); + return rc; + } + + CameraMetadata frame_settings; + frame_settings = request->settings; + if (frame_settings.exists(QCAMERA3_CROP_COUNT_REPROCESS) && + frame_settings.exists(QCAMERA3_CROP_REPROCESS)) { + int32_t *crop_count = + 
frame_settings.find(QCAMERA3_CROP_COUNT_REPROCESS).data.i32; + int32_t *crop_data = + frame_settings.find(QCAMERA3_CROP_REPROCESS).data.i32; + int32_t *roi_map = + frame_settings.find(QCAMERA3_CROP_ROI_MAP_REPROCESS).data.i32; + if ((0 < *crop_count) && (*crop_count < MAX_NUM_STREAMS)) { + cam_crop_data_t crop_meta; + memset(&crop_meta, 0, sizeof(cam_crop_data_t)); + crop_meta.num_of_streams = 1; + crop_meta.crop_info[0].crop.left = crop_data[0]; + crop_meta.crop_info[0].crop.top = crop_data[1]; + crop_meta.crop_info[0].crop.width = crop_data[2]; + crop_meta.crop_info[0].crop.height = crop_data[3]; + + crop_meta.crop_info[0].roi_map.left = + roi_map[0]; + crop_meta.crop_info[0].roi_map.top = + roi_map[1]; + crop_meta.crop_info[0].roi_map.width = + roi_map[2]; + crop_meta.crop_info[0].roi_map.height = + roi_map[3]; + + if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_CROP_DATA, crop_meta)) { + rc = BAD_VALUE; + } + LOGD("Found reprocess crop data for stream %p %dx%d, %dx%d", + request->input_buffer->stream, + crop_meta.crop_info[0].crop.left, + crop_meta.crop_info[0].crop.top, + crop_meta.crop_info[0].crop.width, + crop_meta.crop_info[0].crop.height); + LOGD("Found reprocess roi map data for stream %p %dx%d, %dx%d", + request->input_buffer->stream, + crop_meta.crop_info[0].roi_map.left, + crop_meta.crop_info[0].roi_map.top, + crop_meta.crop_info[0].roi_map.width, + crop_meta.crop_info[0].roi_map.height); + } else { + LOGE("Invalid reprocess crop count %d!", *crop_count); + } + } else { + LOGE("No crop data from matching output stream"); + } + + /* These settings are not needed for regular requests so handle them specially for + reprocess requests; information needed for EXIF tags */ + if (frame_settings.exists(ANDROID_FLASH_MODE)) { + int val = lookupHalName(FLASH_MODES_MAP, METADATA_MAP_SIZE(FLASH_MODES_MAP), + (int)frame_settings.find(ANDROID_FLASH_MODE).data.u8[0]); + if (NAME_NOT_FOUND != val) { + uint32_t flashMode = (uint32_t)val; + if 
(ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_FLASH_MODE, flashMode)) { + rc = BAD_VALUE; + } + } else { + LOGE("Could not map fwk flash mode %d to correct hal flash mode", + frame_settings.find(ANDROID_FLASH_MODE).data.u8[0]); + } + } else { + LOGH("No flash mode in reprocess settings"); + } + + if (frame_settings.exists(ANDROID_FLASH_STATE)) { + int32_t flashState = (int32_t)frame_settings.find(ANDROID_FLASH_STATE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_FLASH_STATE, flashState)) { + rc = BAD_VALUE; + } + } else { + LOGH("No flash state in reprocess settings"); + } + + if (frame_settings.exists(QCAMERA3_HAL_PRIVATEDATA_REPROCESS_FLAGS)) { + uint8_t *reprocessFlags = + frame_settings.find(QCAMERA3_HAL_PRIVATEDATA_REPROCESS_FLAGS).data.u8; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_REPROCESS_FLAGS, + *reprocessFlags)) { + rc = BAD_VALUE; + } + } + + // Add metadata which reprocess needs + if (frame_settings.exists(QCAMERA3_HAL_PRIVATEDATA_REPROCESS_DATA_BLOB)) { + cam_reprocess_info_t *repro_info = + (cam_reprocess_info_t *)frame_settings.find + (QCAMERA3_HAL_PRIVATEDATA_REPROCESS_DATA_BLOB).data.u8; + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_SNAP_CROP_INFO_SENSOR, + repro_info->sensor_crop_info); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_SNAP_CROP_INFO_CAMIF, + repro_info->camif_crop_info); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_SNAP_CROP_INFO_ISP, + repro_info->isp_crop_info); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_SNAP_CROP_INFO_CPP, + repro_info->cpp_crop_info); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_AF_FOCAL_LENGTH_RATIO, + repro_info->af_focal_length_ratio); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_PARM_FLIP, + repro_info->pipeline_flip); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_AF_ROI, + repro_info->af_roi); + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_IMG_DYN_FEAT, + 
repro_info->dyn_mask); + /* If there is ANDROID_JPEG_ORIENTATION in frame setting, + CAM_INTF_PARM_ROTATION metadata then has been added in + translateToHalMetadata. HAL need to keep this new rotation + metadata. Otherwise, the old rotation info saved in the vendor tag + would be used */ + IF_META_AVAILABLE(cam_rotation_info_t, rotationInfo, + CAM_INTF_PARM_ROTATION, reprocParam) { + LOGD("CAM_INTF_PARM_ROTATION metadata is added in translateToHalMetadata"); + } else { + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_PARM_ROTATION, + repro_info->rotation_info); + } + } + + /* Add additional JPEG cropping information. App add QCAMERA3_JPEG_ENCODE_CROP_RECT + to ask for cropping and use ROI for downscale/upscale during HW JPEG encoding. + roi.width and roi.height would be the final JPEG size. + For now, HAL only checks this for reprocess request */ + if (frame_settings.exists(QCAMERA3_JPEG_ENCODE_CROP_ENABLE) && + frame_settings.exists(QCAMERA3_JPEG_ENCODE_CROP_RECT)) { + uint8_t *enable = + frame_settings.find(QCAMERA3_JPEG_ENCODE_CROP_ENABLE).data.u8; + if (*enable == TRUE) { + int32_t *crop_data = + frame_settings.find(QCAMERA3_JPEG_ENCODE_CROP_RECT).data.i32; + cam_stream_crop_info_t crop_meta; + memset(&crop_meta, 0, sizeof(cam_stream_crop_info_t)); + crop_meta.stream_id = 0; + crop_meta.crop.left = crop_data[0]; + crop_meta.crop.top = crop_data[1]; + crop_meta.crop.width = crop_data[2]; + crop_meta.crop.height = crop_data[3]; + // The JPEG crop roi should match cpp output size + IF_META_AVAILABLE(cam_stream_crop_info_t, cpp_crop, + CAM_INTF_META_SNAP_CROP_INFO_CPP, reprocParam) { + crop_meta.roi_map.left = 0; + crop_meta.roi_map.top = 0; + crop_meta.roi_map.width = cpp_crop->crop.width; + crop_meta.roi_map.height = cpp_crop->crop.height; + } + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_PARM_JPEG_ENCODE_CROP, + crop_meta); + LOGH("Add JPEG encode crop left %d, top %d, width %d, height %d, mCameraId %d", + crop_meta.crop.left, crop_meta.crop.top, + 
crop_meta.crop.width, crop_meta.crop.height, mCameraId); + LOGH("Add JPEG encode crop ROI left %d, top %d, width %d, height %d, mCameraId %d", + crop_meta.roi_map.left, crop_meta.roi_map.top, + crop_meta.roi_map.width, crop_meta.roi_map.height, mCameraId); + + // Add JPEG scale information + cam_dimension_t scale_dim; + memset(&scale_dim, 0, sizeof(cam_dimension_t)); + if (frame_settings.exists(QCAMERA3_JPEG_ENCODE_CROP_ROI)) { + int32_t *roi = + frame_settings.find(QCAMERA3_JPEG_ENCODE_CROP_ROI).data.i32; + scale_dim.width = roi[2]; + scale_dim.height = roi[3]; + ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_PARM_JPEG_SCALE_DIMENSION, + scale_dim); + LOGH("Add JPEG encode scale width %d, height %d, mCameraId %d", + scale_dim.width, scale_dim.height, mCameraId); + } + } + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : saveRequestSettings + * + * DESCRIPTION: Add any settings that might have changed to the request settings + * and save the settings to be applied on the frame + * + * PARAMETERS : + * @jpegMetadata : the extracted and/or modified jpeg metadata + * @request : request with initial settings + * + * RETURN : + * camera_metadata_t* : pointer to the saved request settings + *==========================================================================*/ +camera_metadata_t* QCamera3HardwareInterface::saveRequestSettings( + const CameraMetadata &jpegMetadata, + camera3_capture_request_t *request) +{ + camera_metadata_t *resultMetadata; + CameraMetadata camMetadata; + camMetadata = request->settings; + + if (jpegMetadata.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) { + int32_t thumbnail_size[2]; + thumbnail_size[0] = jpegMetadata.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0]; + thumbnail_size[1] = jpegMetadata.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1]; + camMetadata.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnail_size, + jpegMetadata.find(ANDROID_JPEG_THUMBNAIL_SIZE).count); + } + + if 
(request->input_buffer != NULL) { + uint8_t reprocessFlags = 1; + camMetadata.update(QCAMERA3_HAL_PRIVATEDATA_REPROCESS_FLAGS, + (uint8_t*)&reprocessFlags, + sizeof(reprocessFlags)); + } + + resultMetadata = camMetadata.release(); + return resultMetadata; +} + +/*=========================================================================== + * FUNCTION : setHalFpsRange + * + * DESCRIPTION: set FPS range parameter + * + * + * PARAMETERS : + * @settings : Metadata from framework + * @hal_metadata: Metadata buffer + * + * + * RETURN : success: NO_ERROR + * failure: + *==========================================================================*/ +int32_t QCamera3HardwareInterface::setHalFpsRange(const CameraMetadata &settings, + metadata_buffer_t *hal_metadata) +{ + int32_t rc = NO_ERROR; + cam_fps_range_t fps_range; + fps_range.min_fps = (float) + settings.find(ANDROID_CONTROL_AE_TARGET_FPS_RANGE).data.i32[0]; + fps_range.max_fps = (float) + settings.find(ANDROID_CONTROL_AE_TARGET_FPS_RANGE).data.i32[1]; + fps_range.video_min_fps = fps_range.min_fps; + fps_range.video_max_fps = fps_range.max_fps; + + LOGD("aeTargetFpsRange fps: [%f %f]", + fps_range.min_fps, fps_range.max_fps); + /* In CONSTRAINED_HFR_MODE, sensor_fps is derived from aeTargetFpsRange as + * follows: + * ---------------------------------------------------------------| + * Video stream is absent in configure_streams | + * (Camcorder preview before the first video record | + * ---------------------------------------------------------------| + * vid_buf_requested | aeTgtFpsRng | snsrFpsMode | sensorFpsRange | + * | | | vid_min/max_fps| + * ---------------------------------------------------------------| + * NO | [ 30, 240] | 240 | [240, 240] | + * |-------------|-------------|----------------| + * | [240, 240] | 240 | [240, 240] | + * ---------------------------------------------------------------| + * Video stream is present in configure_streams | + * 
---------------------------------------------------------------| + * vid_buf_requested | aeTgtFpsRng | snsrFpsMode | sensorFpsRange | + * | | | vid_min/max_fps| + * ---------------------------------------------------------------| + * NO | [ 30, 240] | 240 | [240, 240] | + * (camcorder prev |-------------|-------------|----------------| + * after video rec | [240, 240] | 240 | [240, 240] | + * is stopped) | | | | + * ---------------------------------------------------------------| + * YES | [ 30, 240] | 240 | [240, 240] | + * |-------------|-------------|----------------| + * | [240, 240] | 240 | [240, 240] | + * ---------------------------------------------------------------| + * When Video stream is absent in configure_streams, + * preview fps = sensor_fps / batchsize + * Eg: for 240fps at batchSize 4, preview = 60fps + * for 120fps at batchSize 4, preview = 30fps + * + * When video stream is present in configure_streams, preview fps is as per + * the ratio of preview buffers to video buffers requested in process + * capture request + */ + mBatchSize = 0; + if (CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE == mOpMode) { + fps_range.min_fps = fps_range.video_max_fps; + fps_range.video_min_fps = fps_range.video_max_fps; + int val = lookupHalName(HFR_MODE_MAP, METADATA_MAP_SIZE(HFR_MODE_MAP), + fps_range.max_fps); + if (NAME_NOT_FOUND != val) { + cam_hfr_mode_t hfrMode = (cam_hfr_mode_t)val; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_HFR, hfrMode)) { + return BAD_VALUE; + } + + if (fps_range.max_fps >= MIN_FPS_FOR_BATCH_MODE) { + /* If batchmode is currently in progress and the fps changes, + * set the flag to restart the sensor */ + if((mHFRVideoFps >= MIN_FPS_FOR_BATCH_MODE) && + (mHFRVideoFps != fps_range.max_fps)) { + mNeedSensorRestart = true; + } + mHFRVideoFps = fps_range.max_fps; + mBatchSize = mHFRVideoFps / PREVIEW_FPS_FOR_HFR; + if (mBatchSize > MAX_HFR_BATCH_SIZE) { + mBatchSize = MAX_HFR_BATCH_SIZE; + } + } + LOGD("hfrMode: 
%d batchSize: %d", hfrMode, mBatchSize); + + } + } else { + /* HFR mode is session param in backend/ISP. This should be reset when + * in non-HFR mode */ + cam_hfr_mode_t hfrMode = CAM_HFR_MODE_OFF; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_HFR, hfrMode)) { + return BAD_VALUE; + } + } + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_FPS_RANGE, fps_range)) { + return BAD_VALUE; + } + LOGD("fps: [%f %f] vid_fps: [%f %f]", fps_range.min_fps, + fps_range.max_fps, fps_range.video_min_fps, fps_range.video_max_fps); + return rc; +} + +/*=========================================================================== + * FUNCTION : translateToHalMetadata + * + * DESCRIPTION: read from the camera_metadata_t and change to parm_type_t + * + * + * PARAMETERS : + * @request : request sent from framework + * + * + * RETURN : success: NO_ERROR + * failure: + *==========================================================================*/ +int QCamera3HardwareInterface::translateToHalMetadata + (const camera3_capture_request_t *request, + metadata_buffer_t *hal_metadata, + uint32_t snapshotStreamId) +{ + int rc = 0; + CameraMetadata frame_settings; + frame_settings = request->settings; + + /* Do not change the order of the following list unless you know what you are + * doing. + * The order is laid out in such a way that parameters in the front of the table + * may be used to override the parameters later in the table. Examples are: + * 1. META_MODE should precede AEC/AWB/AF MODE + * 2. AEC MODE should preced EXPOSURE_TIME/SENSITIVITY/FRAME_DURATION + * 3. AWB_MODE should precede COLOR_CORRECTION_MODE + * 4. 
Any mode should precede it's corresponding settings + */ + if (frame_settings.exists(ANDROID_CONTROL_MODE)) { + uint8_t metaMode = frame_settings.find(ANDROID_CONTROL_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_MODE, metaMode)) { + rc = BAD_VALUE; + } + rc = extractSceneMode(frame_settings, metaMode, hal_metadata); + if (rc != NO_ERROR) { + LOGE("extractSceneMode failed"); + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AE_MODE)) { + uint8_t fwk_aeMode = + frame_settings.find(ANDROID_CONTROL_AE_MODE).data.u8[0]; + uint8_t aeMode; + int32_t redeye; + + if (fwk_aeMode == ANDROID_CONTROL_AE_MODE_OFF ) { + aeMode = CAM_AE_MODE_OFF; + } else { + aeMode = CAM_AE_MODE_ON; + } + if (fwk_aeMode == ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) { + redeye = 1; + } else { + redeye = 0; + } + + int val = lookupHalName(AE_FLASH_MODE_MAP, METADATA_MAP_SIZE(AE_FLASH_MODE_MAP), + fwk_aeMode); + if (NAME_NOT_FOUND != val) { + int32_t flashMode = (int32_t)val; + ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_LED_MODE, flashMode); + } + + ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AEC_MODE, aeMode); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_REDEYE_REDUCTION, redeye)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AWB_MODE)) { + uint8_t fwk_whiteLevel = frame_settings.find(ANDROID_CONTROL_AWB_MODE).data.u8[0]; + int val = lookupHalName(WHITE_BALANCE_MODES_MAP, METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP), + fwk_whiteLevel); + if (NAME_NOT_FOUND != val) { + uint8_t whiteLevel = (uint8_t)val; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_WHITE_BALANCE, whiteLevel)) { + rc = BAD_VALUE; + } + } + } + + if (frame_settings.exists(ANDROID_COLOR_CORRECTION_ABERRATION_MODE)) { + uint8_t fwk_cacMode = + frame_settings.find( + ANDROID_COLOR_CORRECTION_ABERRATION_MODE).data.u8[0]; + int val = lookupHalName(COLOR_ABERRATION_MAP, METADATA_MAP_SIZE(COLOR_ABERRATION_MAP), + 
fwk_cacMode); + if (NAME_NOT_FOUND != val) { + cam_aberration_mode_t cacMode = (cam_aberration_mode_t) val; + bool entryAvailable = FALSE; + // Check whether Frameworks set CAC mode is supported in device or not + for (size_t i = 0; i < gCamCapability[mCameraId]->aberration_modes_count; i++) { + if (gCamCapability[mCameraId]->aberration_modes[i] == cacMode) { + entryAvailable = TRUE; + break; + } + } + LOGD("FrameworksCacMode=%d entryAvailable=%d", cacMode, entryAvailable); + // If entry not found then set the device supported mode instead of frameworks mode i.e, + // Only HW ISP CAC + NO SW CAC : Advertise all 3 with High doing same as fast by ISP + // NO HW ISP CAC + Only SW CAC : Advertise all 3 with Fast doing the same as OFF + if (entryAvailable == FALSE) { + if (gCamCapability[mCameraId]->aberration_modes_count == 0) { + cacMode = CAM_COLOR_CORRECTION_ABERRATION_OFF; + } else { + if (cacMode == CAM_COLOR_CORRECTION_ABERRATION_HIGH_QUALITY) { + // High is not supported and so set the FAST as spec say's underlying + // device implementation can be the same for both modes. 
+ cacMode = CAM_COLOR_CORRECTION_ABERRATION_FAST; + } else if (cacMode == CAM_COLOR_CORRECTION_ABERRATION_FAST) { + // Fast is not supported and so we cannot set HIGH or FAST but choose OFF + // in order to avoid the fps drop due to high quality + cacMode = CAM_COLOR_CORRECTION_ABERRATION_OFF; + } else { + cacMode = CAM_COLOR_CORRECTION_ABERRATION_OFF; + } + } + } + LOGD("Final cacMode is %d", cacMode); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_CAC, cacMode)) { + rc = BAD_VALUE; + } + } else { + LOGE("Invalid framework CAC mode: %d", fwk_cacMode); + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AF_MODE)) { + uint8_t fwk_focusMode = frame_settings.find(ANDROID_CONTROL_AF_MODE).data.u8[0]; + int val = lookupHalName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP), + fwk_focusMode); + if (NAME_NOT_FOUND != val) { + uint8_t focusMode = (uint8_t)val; + LOGD("set focus mode %d", focusMode); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_FOCUS_MODE, focusMode)) { + rc = BAD_VALUE; + } + } + } + + if (frame_settings.exists(ANDROID_LENS_FOCUS_DISTANCE)) { + float focalDistance = frame_settings.find(ANDROID_LENS_FOCUS_DISTANCE).data.f[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_FOCUS_DISTANCE, + focalDistance)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AE_ANTIBANDING_MODE)) { + uint8_t fwk_antibandingMode = + frame_settings.find(ANDROID_CONTROL_AE_ANTIBANDING_MODE).data.u8[0]; + int val = lookupHalName(ANTIBANDING_MODES_MAP, + METADATA_MAP_SIZE(ANTIBANDING_MODES_MAP), fwk_antibandingMode); + if (NAME_NOT_FOUND != val) { + uint32_t hal_antibandingMode = (uint32_t)val; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_ANTIBANDING, + hal_antibandingMode)) { + rc = BAD_VALUE; + } + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION)) { + int32_t expCompensation = frame_settings.find( + 
ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION).data.i32[0]; + if (expCompensation < gCamCapability[mCameraId]->exposure_compensation_min) + expCompensation = gCamCapability[mCameraId]->exposure_compensation_min; + if (expCompensation > gCamCapability[mCameraId]->exposure_compensation_max) + expCompensation = gCamCapability[mCameraId]->exposure_compensation_max; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_EXPOSURE_COMPENSATION, + expCompensation)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AE_LOCK)) { + uint8_t aeLock = frame_settings.find(ANDROID_CONTROL_AE_LOCK).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_AEC_LOCK, aeLock)) { + rc = BAD_VALUE; + } + } + if (frame_settings.exists(ANDROID_CONTROL_AE_TARGET_FPS_RANGE)) { + rc = setHalFpsRange(frame_settings, hal_metadata); + if (rc != NO_ERROR) { + LOGE("setHalFpsRange failed"); + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AWB_LOCK)) { + uint8_t awbLock = frame_settings.find(ANDROID_CONTROL_AWB_LOCK).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_AWB_LOCK, awbLock)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_EFFECT_MODE)) { + uint8_t fwk_effectMode = frame_settings.find(ANDROID_CONTROL_EFFECT_MODE).data.u8[0]; + int val = lookupHalName(EFFECT_MODES_MAP, METADATA_MAP_SIZE(EFFECT_MODES_MAP), + fwk_effectMode); + if (NAME_NOT_FOUND != val) { + uint8_t effectMode = (uint8_t)val; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_EFFECT, effectMode)) { + rc = BAD_VALUE; + } + } + } + + if (frame_settings.exists(ANDROID_COLOR_CORRECTION_MODE)) { + uint8_t colorCorrectMode = frame_settings.find(ANDROID_COLOR_CORRECTION_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_COLOR_CORRECT_MODE, + colorCorrectMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_COLOR_CORRECTION_GAINS)) { + cam_color_correct_gains_t 
colorCorrectGains; + for (size_t i = 0; i < CC_GAINS_COUNT; i++) { + colorCorrectGains.gains[i] = + frame_settings.find(ANDROID_COLOR_CORRECTION_GAINS).data.f[i]; + } + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_COLOR_CORRECT_GAINS, + colorCorrectGains)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_COLOR_CORRECTION_TRANSFORM)) { + cam_color_correct_matrix_t colorCorrectTransform; + cam_rational_type_t transform_elem; + size_t num = 0; + for (size_t i = 0; i < CC_MATRIX_ROWS; i++) { + for (size_t j = 0; j < CC_MATRIX_COLS; j++) { + transform_elem.numerator = + frame_settings.find(ANDROID_COLOR_CORRECTION_TRANSFORM).data.r[num].numerator; + transform_elem.denominator = + frame_settings.find(ANDROID_COLOR_CORRECTION_TRANSFORM).data.r[num].denominator; + colorCorrectTransform.transform_matrix[i][j] = transform_elem; + num++; + } + } + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_COLOR_CORRECT_TRANSFORM, + colorCorrectTransform)) { + rc = BAD_VALUE; + } + } + + cam_trigger_t aecTrigger; + aecTrigger.trigger = CAM_AEC_TRIGGER_IDLE; + aecTrigger.trigger_id = -1; + if (frame_settings.exists(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER)&& + frame_settings.exists(ANDROID_CONTROL_AE_PRECAPTURE_ID)) { + aecTrigger.trigger = + frame_settings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER).data.u8[0]; + aecTrigger.trigger_id = + frame_settings.find(ANDROID_CONTROL_AE_PRECAPTURE_ID).data.i32[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AEC_PRECAPTURE_TRIGGER, + aecTrigger)) { + rc = BAD_VALUE; + } + LOGD("precaptureTrigger: %d precaptureTriggerID: %d", + aecTrigger.trigger, aecTrigger.trigger_id); + } + + /*af_trigger must come with a trigger id*/ + if (frame_settings.exists(ANDROID_CONTROL_AF_TRIGGER) && + frame_settings.exists(ANDROID_CONTROL_AF_TRIGGER_ID)) { + cam_trigger_t af_trigger; + af_trigger.trigger = + frame_settings.find(ANDROID_CONTROL_AF_TRIGGER).data.u8[0]; + af_trigger.trigger_id = + 
frame_settings.find(ANDROID_CONTROL_AF_TRIGGER_ID).data.i32[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AF_TRIGGER, af_trigger)) { + rc = BAD_VALUE; + } + LOGD("AfTrigger: %d AfTriggerID: %d", + af_trigger.trigger, af_trigger.trigger_id); + } + + if (frame_settings.exists(ANDROID_DEMOSAIC_MODE)) { + int32_t demosaic = frame_settings.find(ANDROID_DEMOSAIC_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_DEMOSAIC, demosaic)) { + rc = BAD_VALUE; + } + } + if (frame_settings.exists(ANDROID_EDGE_MODE)) { + cam_edge_application_t edge_application; + edge_application.edge_mode = frame_settings.find(ANDROID_EDGE_MODE).data.u8[0]; + if (edge_application.edge_mode == CAM_EDGE_MODE_OFF) { + edge_application.sharpness = 0; + } else { + edge_application.sharpness = gCamCapability[mCameraId]->sharpness_ctrl.def_value; //default + } + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_EDGE_MODE, edge_application)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_FLASH_MODE)) { + int32_t respectFlashMode = 1; + if (frame_settings.exists(ANDROID_CONTROL_AE_MODE)) { + uint8_t fwk_aeMode = + frame_settings.find(ANDROID_CONTROL_AE_MODE).data.u8[0]; + if (fwk_aeMode > ANDROID_CONTROL_AE_MODE_ON) { + respectFlashMode = 0; + LOGH("AE Mode controls flash, ignore android.flash.mode"); + } + } + if (respectFlashMode) { + int val = lookupHalName(FLASH_MODES_MAP, METADATA_MAP_SIZE(FLASH_MODES_MAP), + (int)frame_settings.find(ANDROID_FLASH_MODE).data.u8[0]); + LOGH("flash mode after mapping %d", val); + // To check: CAM_INTF_META_FLASH_MODE usage + if (NAME_NOT_FOUND != val) { + uint8_t flashMode = (uint8_t)val; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_LED_MODE, flashMode)) { + rc = BAD_VALUE; + } + } + } + } + + if (frame_settings.exists(ANDROID_FLASH_FIRING_POWER)) { + uint8_t flashPower = frame_settings.find(ANDROID_FLASH_FIRING_POWER).data.u8[0]; + if 
(ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_FLASH_POWER, flashPower)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_FLASH_FIRING_TIME)) { + int64_t flashFiringTime = frame_settings.find(ANDROID_FLASH_FIRING_TIME).data.i64[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_FLASH_FIRING_TIME, + flashFiringTime)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_HOT_PIXEL_MODE)) { + uint8_t hotPixelMode = frame_settings.find(ANDROID_HOT_PIXEL_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_HOTPIXEL_MODE, + hotPixelMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_LENS_APERTURE)) { + float lensAperture = frame_settings.find( ANDROID_LENS_APERTURE).data.f[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_APERTURE, + lensAperture)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_LENS_FILTER_DENSITY)) { + float filterDensity = frame_settings.find(ANDROID_LENS_FILTER_DENSITY).data.f[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_FILTERDENSITY, + filterDensity)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_LENS_FOCAL_LENGTH)) { + float focalLength = frame_settings.find(ANDROID_LENS_FOCAL_LENGTH).data.f[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_FOCAL_LENGTH, + focalLength)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_LENS_OPTICAL_STABILIZATION_MODE)) { + uint8_t optStabMode = + frame_settings.find(ANDROID_LENS_OPTICAL_STABILIZATION_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_OPT_STAB_MODE, + optStabMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE)) { + uint8_t videoStabMode = + frame_settings.find(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE).data.u8[0]; + LOGD("videoStabMode from APP = %d", videoStabMode); + if 
(ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_VIDEO_STAB_MODE, + videoStabMode)) { + rc = BAD_VALUE; + } + } + + + if (frame_settings.exists(ANDROID_NOISE_REDUCTION_MODE)) { + uint8_t noiseRedMode = frame_settings.find(ANDROID_NOISE_REDUCTION_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_NOISE_REDUCTION_MODE, + noiseRedMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR)) { + float reprocessEffectiveExposureFactor = + frame_settings.find(ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR).data.f[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR, + reprocessEffectiveExposureFactor)) { + rc = BAD_VALUE; + } + } + + cam_crop_region_t scalerCropRegion; + bool scalerCropSet = false; + if (frame_settings.exists(ANDROID_SCALER_CROP_REGION)) { + scalerCropRegion.left = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[0]; + scalerCropRegion.top = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[1]; + scalerCropRegion.width = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[2]; + scalerCropRegion.height = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[3]; + + // Map coordinate system from active array to sensor output. 
+ mCropRegionMapper.toSensor(scalerCropRegion.left, scalerCropRegion.top, + scalerCropRegion.width, scalerCropRegion.height); + + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SCALER_CROP_REGION, + scalerCropRegion)) { + rc = BAD_VALUE; + } + scalerCropSet = true; + } + + if (frame_settings.exists(ANDROID_SENSOR_EXPOSURE_TIME)) { + int64_t sensorExpTime = + frame_settings.find(ANDROID_SENSOR_EXPOSURE_TIME).data.i64[0]; + LOGD("setting sensorExpTime %lld", sensorExpTime); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SENSOR_EXPOSURE_TIME, + sensorExpTime)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_SENSOR_FRAME_DURATION)) { + int64_t sensorFrameDuration = + frame_settings.find(ANDROID_SENSOR_FRAME_DURATION).data.i64[0]; + int64_t minFrameDuration = getMinFrameDuration(request); + sensorFrameDuration = MAX(sensorFrameDuration, minFrameDuration); + if (sensorFrameDuration > gCamCapability[mCameraId]->max_frame_duration) + sensorFrameDuration = gCamCapability[mCameraId]->max_frame_duration; + LOGD("clamp sensorFrameDuration to %lld", sensorFrameDuration); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SENSOR_FRAME_DURATION, + sensorFrameDuration)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_SENSOR_SENSITIVITY)) { + int32_t sensorSensitivity = frame_settings.find(ANDROID_SENSOR_SENSITIVITY).data.i32[0]; + if (sensorSensitivity < gCamCapability[mCameraId]->sensitivity_range.min_sensitivity) + sensorSensitivity = gCamCapability[mCameraId]->sensitivity_range.min_sensitivity; + if (sensorSensitivity > gCamCapability[mCameraId]->sensitivity_range.max_sensitivity) + sensorSensitivity = gCamCapability[mCameraId]->sensitivity_range.max_sensitivity; + LOGD("clamp sensorSensitivity to %d", sensorSensitivity); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SENSOR_SENSITIVITY, + sensorSensitivity)) { + rc = BAD_VALUE; + } + } + + if 
(frame_settings.exists(ANDROID_SHADING_MODE)) { + uint8_t shadingMode = frame_settings.find(ANDROID_SHADING_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SHADING_MODE, shadingMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_STATISTICS_FACE_DETECT_MODE)) { + uint8_t fwk_facedetectMode = + frame_settings.find(ANDROID_STATISTICS_FACE_DETECT_MODE).data.u8[0]; + + int val = lookupHalName(FACEDETECT_MODES_MAP, METADATA_MAP_SIZE(FACEDETECT_MODES_MAP), + fwk_facedetectMode); + + if (NAME_NOT_FOUND != val) { + uint8_t facedetectMode = (uint8_t)val; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_STATS_FACEDETECT_MODE, + facedetectMode)) { + rc = BAD_VALUE; + } + } + } + + if (frame_settings.exists(ANDROID_STATISTICS_HISTOGRAM_MODE)) { + uint8_t histogramMode = + frame_settings.find(ANDROID_STATISTICS_HISTOGRAM_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_STATS_HISTOGRAM_MODE, + histogramMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_STATISTICS_SHARPNESS_MAP_MODE)) { + uint8_t sharpnessMapMode = + frame_settings.find(ANDROID_STATISTICS_SHARPNESS_MAP_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_STATS_SHARPNESS_MAP_MODE, + sharpnessMapMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_TONEMAP_MODE)) { + uint8_t tonemapMode = + frame_settings.find(ANDROID_TONEMAP_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_TONEMAP_MODE, tonemapMode)) { + rc = BAD_VALUE; + } + } + /* Tonemap curve channels ch0 = G, ch 1 = B, ch 2 = R */ + /*All tonemap channels will have the same number of points*/ + if (frame_settings.exists(ANDROID_TONEMAP_CURVE_GREEN) && + frame_settings.exists(ANDROID_TONEMAP_CURVE_BLUE) && + frame_settings.exists(ANDROID_TONEMAP_CURVE_RED)) { + cam_rgb_tonemap_curves tonemapCurves; + tonemapCurves.tonemap_points_cnt = 
frame_settings.find(ANDROID_TONEMAP_CURVE_GREEN).count/2; + if (tonemapCurves.tonemap_points_cnt > CAM_MAX_TONEMAP_CURVE_SIZE) { + LOGE("Fatal: tonemap_points_cnt %d exceeds max value of %d", + tonemapCurves.tonemap_points_cnt, + CAM_MAX_TONEMAP_CURVE_SIZE); + tonemapCurves.tonemap_points_cnt = CAM_MAX_TONEMAP_CURVE_SIZE; + } + + /* ch0 = G*/ + size_t point = 0; + cam_tonemap_curve_t tonemapCurveGreen; + for (size_t i = 0; i < tonemapCurves.tonemap_points_cnt; i++) { + for (size_t j = 0; j < 2; j++) { + tonemapCurveGreen.tonemap_points[i][j] = + frame_settings.find(ANDROID_TONEMAP_CURVE_GREEN).data.f[point]; + point++; + } + } + tonemapCurves.curves[0] = tonemapCurveGreen; + + /* ch 1 = B */ + point = 0; + cam_tonemap_curve_t tonemapCurveBlue; + for (size_t i = 0; i < tonemapCurves.tonemap_points_cnt; i++) { + for (size_t j = 0; j < 2; j++) { + tonemapCurveBlue.tonemap_points[i][j] = + frame_settings.find(ANDROID_TONEMAP_CURVE_BLUE).data.f[point]; + point++; + } + } + tonemapCurves.curves[1] = tonemapCurveBlue; + + /* ch 2 = R */ + point = 0; + cam_tonemap_curve_t tonemapCurveRed; + for (size_t i = 0; i < tonemapCurves.tonemap_points_cnt; i++) { + for (size_t j = 0; j < 2; j++) { + tonemapCurveRed.tonemap_points[i][j] = + frame_settings.find(ANDROID_TONEMAP_CURVE_RED).data.f[point]; + point++; + } + } + tonemapCurves.curves[2] = tonemapCurveRed; + + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_TONEMAP_CURVES, + tonemapCurves)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_CAPTURE_INTENT)) { + uint8_t captureIntent = frame_settings.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_CAPTURE_INTENT, + captureIntent)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_BLACK_LEVEL_LOCK)) { + uint8_t blackLevelLock = frame_settings.find(ANDROID_BLACK_LEVEL_LOCK).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_BLACK_LEVEL_LOCK, 
+ blackLevelLock)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE)) { + uint8_t lensShadingMapMode = + frame_settings.find(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_SHADING_MAP_MODE, + lensShadingMapMode)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AE_REGIONS)) { + cam_area_t roi; + bool reset = true; + convertFromRegions(roi, request->settings, ANDROID_CONTROL_AE_REGIONS); + + // Map coordinate system from active array to sensor output. + mCropRegionMapper.toSensor(roi.rect.left, roi.rect.top, roi.rect.width, + roi.rect.height); + + if (scalerCropSet) { + reset = resetIfNeededROI(&roi, &scalerCropRegion); + } + if (reset && ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AEC_ROI, roi)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_CONTROL_AF_REGIONS)) { + cam_area_t roi; + bool reset = true; + convertFromRegions(roi, request->settings, ANDROID_CONTROL_AF_REGIONS); + + // Map coordinate system from active array to sensor output. 
+ mCropRegionMapper.toSensor(roi.rect.left, roi.rect.top, roi.rect.width, + roi.rect.height); + + if (scalerCropSet) { + reset = resetIfNeededROI(&roi, &scalerCropRegion); + } + if (reset && ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AF_ROI, roi)) { + rc = BAD_VALUE; + } + } + + // CDS for non-HFR non-video mode + if ((mOpMode != CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE) && + !(m_bIsVideo) && frame_settings.exists(QCAMERA3_CDS_MODE)) { + int32_t *fwk_cds = frame_settings.find(QCAMERA3_CDS_MODE).data.i32; + if ((CAM_CDS_MODE_MAX <= *fwk_cds) || (0 > *fwk_cds)) { + LOGE("Invalid CDS mode %d!", *fwk_cds); + } else { + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, + CAM_INTF_PARM_CDS_MODE, *fwk_cds)) { + rc = BAD_VALUE; + } + } + } + + // TNR + if (frame_settings.exists(QCAMERA3_TEMPORAL_DENOISE_ENABLE) && + frame_settings.exists(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE)) { + uint8_t b_TnrRequested = 0; + cam_denoise_param_t tnr; + tnr.denoise_enable = frame_settings.find(QCAMERA3_TEMPORAL_DENOISE_ENABLE).data.u8[0]; + tnr.process_plates = + (cam_denoise_process_type_t)frame_settings.find( + QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE).data.i32[0]; + b_TnrRequested = tnr.denoise_enable; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_TEMPORAL_DENOISE, tnr)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_SENSOR_TEST_PATTERN_MODE)) { + int32_t fwk_testPatternMode = + frame_settings.find(ANDROID_SENSOR_TEST_PATTERN_MODE).data.i32[0]; + int testPatternMode = lookupHalName(TEST_PATTERN_MAP, + METADATA_MAP_SIZE(TEST_PATTERN_MAP), fwk_testPatternMode); + + if (NAME_NOT_FOUND != testPatternMode) { + cam_test_pattern_data_t testPatternData; + memset(&testPatternData, 0, sizeof(testPatternData)); + testPatternData.mode = (cam_test_pattern_mode_t)testPatternMode; + if (testPatternMode == CAM_TEST_PATTERN_SOLID_COLOR && + frame_settings.exists(ANDROID_SENSOR_TEST_PATTERN_DATA)) { + int32_t *fwk_testPatternData = + 
frame_settings.find(ANDROID_SENSOR_TEST_PATTERN_DATA).data.i32; + testPatternData.r = fwk_testPatternData[0]; + testPatternData.b = fwk_testPatternData[3]; + switch (gCamCapability[mCameraId]->color_arrangement) { + case CAM_FILTER_ARRANGEMENT_RGGB: + case CAM_FILTER_ARRANGEMENT_GRBG: + testPatternData.gr = fwk_testPatternData[1]; + testPatternData.gb = fwk_testPatternData[2]; + break; + case CAM_FILTER_ARRANGEMENT_GBRG: + case CAM_FILTER_ARRANGEMENT_BGGR: + testPatternData.gr = fwk_testPatternData[2]; + testPatternData.gb = fwk_testPatternData[1]; + break; + default: + LOGE("color arrangement %d is not supported", + gCamCapability[mCameraId]->color_arrangement); + break; + } + } + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_TEST_PATTERN_DATA, + testPatternData)) { + rc = BAD_VALUE; + } + } else { + LOGE("Invalid framework sensor test pattern mode %d", + fwk_testPatternMode); + } + } + + if (frame_settings.exists(ANDROID_JPEG_GPS_COORDINATES)) { + size_t count = 0; + camera_metadata_entry_t gps_coords = frame_settings.find(ANDROID_JPEG_GPS_COORDINATES); + ADD_SET_PARAM_ARRAY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_GPS_COORDINATES, + gps_coords.data.d, gps_coords.count, count); + if (gps_coords.count != count) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD)) { + char gps_methods[GPS_PROCESSING_METHOD_SIZE]; + size_t count = 0; + const char *gps_methods_src = (const char *) + frame_settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).data.u8; + memset(gps_methods, '\0', sizeof(gps_methods)); + strlcpy(gps_methods, gps_methods_src, sizeof(gps_methods)); + ADD_SET_PARAM_ARRAY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_GPS_PROC_METHODS, + gps_methods, GPS_PROCESSING_METHOD_SIZE, count); + if (GPS_PROCESSING_METHOD_SIZE != count) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_JPEG_GPS_TIMESTAMP)) { + int64_t gps_timestamp = frame_settings.find(ANDROID_JPEG_GPS_TIMESTAMP).data.i64[0]; + if 
(ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_GPS_TIMESTAMP, + gps_timestamp)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_JPEG_ORIENTATION)) { + int32_t orientation = frame_settings.find(ANDROID_JPEG_ORIENTATION).data.i32[0]; + cam_rotation_info_t rotation_info; + if (orientation == 0) { + rotation_info.rotation = ROTATE_0; + } else if (orientation == 90) { + rotation_info.rotation = ROTATE_90; + } else if (orientation == 180) { + rotation_info.rotation = ROTATE_180; + } else if (orientation == 270) { + rotation_info.rotation = ROTATE_270; + } + rotation_info.streamId = snapshotStreamId; + ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_ORIENTATION, orientation); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_ROTATION, rotation_info)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_JPEG_QUALITY)) { + uint32_t quality = (uint32_t) frame_settings.find(ANDROID_JPEG_QUALITY).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_QUALITY, quality)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_QUALITY)) { + uint32_t thumb_quality = (uint32_t) + frame_settings.find(ANDROID_JPEG_THUMBNAIL_QUALITY).data.u8[0]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_THUMB_QUALITY, + thumb_quality)) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) { + cam_dimension_t dim; + dim.width = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0]; + dim.height = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1]; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_THUMB_SIZE, dim)) { + rc = BAD_VALUE; + } + } + + // Internal metadata + if (frame_settings.exists(QCAMERA3_PRIVATEDATA_REPROCESS)) { + size_t count = 0; + camera_metadata_entry_t privatedata = frame_settings.find(QCAMERA3_PRIVATEDATA_REPROCESS); + ADD_SET_PARAM_ARRAY_TO_BATCH(hal_metadata, 
CAM_INTF_META_PRIVATE_DATA, + privatedata.data.i32, privatedata.count, count); + if (privatedata.count != count) { + rc = BAD_VALUE; + } + } + + if (frame_settings.exists(QCAMERA3_USE_AV_TIMER)) { + uint8_t* use_av_timer = + frame_settings.find(QCAMERA3_USE_AV_TIMER).data.u8; + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_USE_AV_TIMER, *use_av_timer)) { + rc = BAD_VALUE; + } + } + + // EV step + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_EV_STEP, + gCamCapability[mCameraId]->exp_compensation_step)) { + rc = BAD_VALUE; + } + + // CDS info + if (frame_settings.exists(QCAMERA3_CDS_INFO)) { + cam_cds_data_t *cdsData = (cam_cds_data_t *) + frame_settings.find(QCAMERA3_CDS_INFO).data.u8; + + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, + CAM_INTF_META_CDS_DATA, *cdsData)) { + rc = BAD_VALUE; + } + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : captureResultCb + * + * DESCRIPTION: Callback handler for all channels (streams, as well as metadata) + * + * PARAMETERS : + * @frame : frame information from mm-camera-interface + * @buffer : actual gralloc buffer to be returned to frameworks. NULL if metadata. 
+ * @userdata: userdata + * + * RETURN : NONE + *==========================================================================*/ +void QCamera3HardwareInterface::captureResultCb(mm_camera_super_buf_t *metadata, + camera3_stream_buffer_t *buffer, + uint32_t frame_number, bool isInputBuffer, void *userdata) +{ + QCamera3HardwareInterface *hw = (QCamera3HardwareInterface *)userdata; + if (hw == NULL) { + LOGE("Invalid hw %p", hw); + return; + } + + hw->captureResultCb(metadata, buffer, frame_number, isInputBuffer); + return; +} + + +/*=========================================================================== + * FUNCTION : initialize + * + * DESCRIPTION: Pass framework callback pointers to HAL + * + * PARAMETERS : + * + * + * RETURN : Success : 0 + * Failure: -ENODEV + *==========================================================================*/ + +int QCamera3HardwareInterface::initialize(const struct camera3_device *device, + const camera3_callback_ops_t *callback_ops) +{ + LOGD("E"); + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>(device->priv); + if (!hw) { + LOGE("NULL camera device"); + return -ENODEV; + } + + int rc = hw->initialize(callback_ops); + LOGD("X"); + return rc; +} + +/*=========================================================================== + * FUNCTION : configure_streams + * + * DESCRIPTION: + * + * PARAMETERS : + * + * + * RETURN : Success: 0 + * Failure: -EINVAL (if stream configuration is invalid) + * -ENODEV (fatal error) + *==========================================================================*/ + +int QCamera3HardwareInterface::configure_streams( + const struct camera3_device *device, + camera3_stream_configuration_t *stream_list) +{ + LOGD("E"); + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>(device->priv); + if (!hw) { + LOGE("NULL camera device"); + return -ENODEV; + } + int rc = hw->configureStreams(stream_list); + LOGD("X"); + return rc; +} + 
+/*=========================================================================== + * FUNCTION : construct_default_request_settings + * + * DESCRIPTION: Configure a settings buffer to meet the required use case + * + * PARAMETERS : + * + * + * RETURN : Success: Return valid metadata + * Failure: Return NULL + *==========================================================================*/ +const camera_metadata_t* QCamera3HardwareInterface:: + construct_default_request_settings(const struct camera3_device *device, + int type) +{ + + LOGD("E"); + camera_metadata_t* fwk_metadata = NULL; + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>(device->priv); + if (!hw) { + LOGE("NULL camera device"); + return NULL; + } + + fwk_metadata = hw->translateCapabilityToMetadata(type); + + LOGD("X"); + return fwk_metadata; +} + +/*=========================================================================== + * FUNCTION : process_capture_request + * + * DESCRIPTION: + * + * PARAMETERS : + * + * + * RETURN : + *==========================================================================*/ +int QCamera3HardwareInterface::process_capture_request( + const struct camera3_device *device, + camera3_capture_request_t *request) +{ + LOGD("E"); + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>(device->priv); + if (!hw) { + LOGE("NULL camera device"); + return -EINVAL; + } + + int rc = hw->processCaptureRequest(request); + LOGD("X"); + return rc; +} + +/*=========================================================================== + * FUNCTION : dump + * + * DESCRIPTION: + * + * PARAMETERS : + * + * + * RETURN : + *==========================================================================*/ + +void QCamera3HardwareInterface::dump( + const struct camera3_device *device, int fd) +{ + /* Log level property is read when "adb shell dumpsys media.camera" is + called so that the log level can be controlled without restarting + the media server */ + 
getLogLevel(); + + LOGD("E"); + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>(device->priv); + if (!hw) { + LOGE("NULL camera device"); + return; + } + + hw->dump(fd); + LOGD("X"); + return; +} + +/*=========================================================================== + * FUNCTION : flush + * + * DESCRIPTION: + * + * PARAMETERS : + * + * + * RETURN : + *==========================================================================*/ + +int QCamera3HardwareInterface::flush( + const struct camera3_device *device) +{ + int rc; + LOGD("E"); + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>(device->priv); + if (!hw) { + LOGE("NULL camera device"); + return -EINVAL; + } + + pthread_mutex_lock(&hw->mMutex); + // Validate current state + switch (hw->mState) { + case STARTED: + /* valid state */ + break; + + case ERROR: + pthread_mutex_unlock(&hw->mMutex); + hw->handleCameraDeviceError(); + return -ENODEV; + + default: + LOGI("Flush returned during state %d", hw->mState); + pthread_mutex_unlock(&hw->mMutex); + return 0; + } + pthread_mutex_unlock(&hw->mMutex); + + rc = hw->flush(true /* restart channels */ ); + LOGD("X"); + return rc; +} + +/*=========================================================================== + * FUNCTION : close_camera_device + * + * DESCRIPTION: + * + * PARAMETERS : + * + * + * RETURN : + *==========================================================================*/ +int QCamera3HardwareInterface::close_camera_device(struct hw_device_t* device) +{ + int ret = NO_ERROR; + QCamera3HardwareInterface *hw = + reinterpret_cast<QCamera3HardwareInterface *>( + reinterpret_cast<camera3_device_t *>(device)->priv); + if (!hw) { + LOGE("NULL camera device"); + return BAD_VALUE; + } + + LOGI("[KPI Perf]: E camera id %d", hw->mCameraId); + delete hw; + LOGI("[KPI Perf]: X"); + return ret; +} + +/*=========================================================================== + * FUNCTION : 
getWaveletDenoiseProcessPlate + * + * DESCRIPTION: query wavelet denoise process plate + * + * PARAMETERS : None + * + * RETURN : WNR prcocess plate value + *==========================================================================*/ +cam_denoise_process_type_t QCamera3HardwareInterface::getWaveletDenoiseProcessPlate() +{ + char prop[PROPERTY_VALUE_MAX]; + memset(prop, 0, sizeof(prop)); + property_get("persist.denoise.process.plates", prop, "0"); + int processPlate = atoi(prop); + switch(processPlate) { + case 0: + return CAM_WAVELET_DENOISE_YCBCR_PLANE; + case 1: + return CAM_WAVELET_DENOISE_CBCR_ONLY; + case 2: + return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR; + case 3: + return CAM_WAVELET_DENOISE_STREAMLINED_CBCR; + default: + return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR; + } +} + + +/*=========================================================================== + * FUNCTION : getTemporalDenoiseProcessPlate + * + * DESCRIPTION: query temporal denoise process plate + * + * PARAMETERS : None + * + * RETURN : TNR prcocess plate value + *==========================================================================*/ +cam_denoise_process_type_t QCamera3HardwareInterface::getTemporalDenoiseProcessPlate() +{ + char prop[PROPERTY_VALUE_MAX]; + memset(prop, 0, sizeof(prop)); + property_get("persist.tnr.process.plates", prop, "0"); + int processPlate = atoi(prop); + switch(processPlate) { + case 0: + return CAM_WAVELET_DENOISE_YCBCR_PLANE; + case 1: + return CAM_WAVELET_DENOISE_CBCR_ONLY; + case 2: + return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR; + case 3: + return CAM_WAVELET_DENOISE_STREAMLINED_CBCR; + default: + return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR; + } +} + + +/*=========================================================================== + * FUNCTION : extractSceneMode + * + * DESCRIPTION: Extract scene mode from frameworks set metadata + * + * PARAMETERS : + * @frame_settings: CameraMetadata reference + * @metaMode: ANDROID_CONTORL_MODE + * @hal_metadata: hal metadata 
structure + * + * RETURN : None + *==========================================================================*/ +int32_t QCamera3HardwareInterface::extractSceneMode( + const CameraMetadata &frame_settings, uint8_t metaMode, + metadata_buffer_t *hal_metadata) +{ + int32_t rc = NO_ERROR; + + if (metaMode == ANDROID_CONTROL_MODE_USE_SCENE_MODE) { + camera_metadata_ro_entry entry = + frame_settings.find(ANDROID_CONTROL_SCENE_MODE); + if (0 == entry.count) + return rc; + + uint8_t fwk_sceneMode = entry.data.u8[0]; + + int val = lookupHalName(SCENE_MODES_MAP, + sizeof(SCENE_MODES_MAP)/sizeof(SCENE_MODES_MAP[0]), + fwk_sceneMode); + if (NAME_NOT_FOUND != val) { + uint8_t sceneMode = (uint8_t)val; + LOGD("sceneMode: %d", sceneMode); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, + CAM_INTF_PARM_BESTSHOT_MODE, sceneMode)) { + rc = BAD_VALUE; + } + } + } else if ((ANDROID_CONTROL_MODE_OFF == metaMode) || + (ANDROID_CONTROL_MODE_AUTO == metaMode)) { + uint8_t sceneMode = CAM_SCENE_MODE_OFF; + LOGD("sceneMode: %d", sceneMode); + if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, + CAM_INTF_PARM_BESTSHOT_MODE, sceneMode)) { + rc = BAD_VALUE; + } + } + return rc; +} + +/*=========================================================================== + * FUNCTION : needRotationReprocess + * + * DESCRIPTION: if rotation needs to be done by reprocess in pp + * + * PARAMETERS : none + * + * RETURN : true: needed + * false: no need + *==========================================================================*/ +bool QCamera3HardwareInterface::needRotationReprocess() +{ + if ((gCamCapability[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION) > 0) { + // current rotation is not zero, and pp has the capability to process rotation + LOGH("need do reprocess for rotation"); + return true; + } + + return false; +} + +/*=========================================================================== + * FUNCTION : needReprocess + * + * DESCRIPTION: if reprocess in needed + * + * 
PARAMETERS : none + * + * RETURN : true: needed + * false: no need + *==========================================================================*/ +bool QCamera3HardwareInterface::needReprocess(cam_feature_mask_t postprocess_mask) +{ + if (gCamCapability[mCameraId]->qcom_supported_feature_mask > 0) { + // TODO: add for ZSL HDR later + // pp module has min requirement for zsl reprocess, or WNR in ZSL mode + if(postprocess_mask == CAM_QCOM_FEATURE_NONE){ + LOGH("need do reprocess for ZSL WNR or min PP reprocess"); + return true; + } else { + LOGH("already post processed frame"); + return false; + } + } + return needRotationReprocess(); +} + +/*=========================================================================== + * FUNCTION : needJpegExifRotation + * + * DESCRIPTION: if rotation from jpeg is needed + * + * PARAMETERS : none + * + * RETURN : true: needed + * false: no need + *==========================================================================*/ +bool QCamera3HardwareInterface::needJpegExifRotation() +{ + /*If the pp does not have the ability to do rotation, enable jpeg rotation*/ + if (!(gCamCapability[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION)) { + LOGD("Need use Jpeg EXIF Rotation"); + return true; + } + return false; +} + +/*=========================================================================== + * FUNCTION : addOfflineReprocChannel + * + * DESCRIPTION: add a reprocess channel that will do reprocess on frames + * coming from input channel + * + * PARAMETERS : + * @config : reprocess configuration + * @inputChHandle : pointer to the input (source) channel + * + * + * RETURN : Ptr to the newly created channel obj. NULL if failed. 
 *==========================================================================*/
QCamera3ReprocessChannel *QCamera3HardwareInterface::addOfflineReprocChannel(
        const reprocess_config_t &config, QCamera3ProcessingChannel *inputChHandle)
{
    int32_t rc = NO_ERROR;
    QCamera3ReprocessChannel *pChannel = NULL;

    pChannel = new QCamera3ReprocessChannel(mCameraHandle->camera_handle,
            mChannelHandle, mCameraHandle->ops, captureResultCb, config.padding,
            CAM_QCOM_FEATURE_NONE, this, inputChHandle);
    if (NULL == pChannel) {
        LOGE("no mem for reprocess channel");
        return NULL;
    }

    rc = pChannel->initialize(IS_TYPE_NONE);
    if (rc != NO_ERROR) {
        LOGE("init reprocess channel failed, ret = %d", rc);
        // Channel is owned here until handed back to the caller; free on
        // every failure path.
        delete pChannel;
        return NULL;
    }

    // pp feature config
    cam_pp_feature_config_t pp_config;
    memset(&pp_config, 0, sizeof(cam_pp_feature_config_t));

    pp_config.feature_mask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
    if (gCamCapability[mCameraId]->qcom_supported_feature_mask
            & CAM_QCOM_FEATURE_DSDN) {
        //Use CPP CDS incase h/w supports it.
        pp_config.feature_mask &= ~CAM_QCOM_FEATURE_CDS;
        pp_config.feature_mask |= CAM_QCOM_FEATURE_DSDN;
    }
    // Strip rotation from the superset mask when the PP block cannot rotate.
    if (!(gCamCapability[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION)) {
        pp_config.feature_mask &= ~CAM_QCOM_FEATURE_ROTATION;
    }

    rc = pChannel->addReprocStreamsFromSource(pp_config,
            config,
            IS_TYPE_NONE,
            mMetadataChannel);

    if (rc != NO_ERROR) {
        delete pChannel;
        return NULL;
    }
    return pChannel;
}

/*===========================================================================
 * FUNCTION   : getMobicatMask
 *
 * DESCRIPTION: returns mobicat mask
 *
 * PARAMETERS : none
 *
 * RETURN     : mobicat mask (cached by the last setMobicat() call)
 *
 *==========================================================================*/
uint8_t QCamera3HardwareInterface::getMobicatMask()
{
    return m_MobicatMask;
}

/*===========================================================================
 * FUNCTION   : setMobicat
 *
 * DESCRIPTION: set Mobicat on/off based on the persist.camera.mobicat
 *              system property and cache the result in m_MobicatMask.
 *
 * PARAMETERS :
 *   @params  : none
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3HardwareInterface::setMobicat()
{
    char value [PROPERTY_VALUE_MAX];
    property_get("persist.camera.mobicat", value, "0");
    int32_t ret = NO_ERROR;
    uint8_t enableMobi = (uint8_t)atoi(value);

    if (enableMobi) {
        // Ask both VFE and PP to reload chromatix so tuning data flows
        // through to the mobicat consumer.
        tune_cmd_t tune_cmd;
        tune_cmd.type = SET_RELOAD_CHROMATIX;
        tune_cmd.module = MODULE_ALL;
        tune_cmd.value = TRUE;
        ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
                CAM_INTF_PARM_SET_VFE_COMMAND,
                tune_cmd);

        ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
                CAM_INTF_PARM_SET_PP_COMMAND,
                tune_cmd);
    }
    m_MobicatMask = enableMobi;

    return ret;
}

/*===========================================================================
* FUNCTION   : getLogLevel
*
* DESCRIPTION: Reads the log level property into a variable
*
* PARAMETERS :
*   None
*
* RETURN     :
*   None
*==========================================================================*/
void QCamera3HardwareInterface::getLogLevel()
{
    char prop[PROPERTY_VALUE_MAX];
    uint32_t globalLogLevel = 0;

    property_get("persist.camera.hal.debug", prop, "0");
    int val = atoi(prop);
    if (0 <= val) {
        gCamHal3LogLevel = (uint32_t)val;
    }

    property_get("persist.camera.kpi.debug", prop, "1");
    gKpiDebugLevel = atoi(prop);

    property_get("persist.camera.global.debug", prop, "0");
    val = atoi(prop);
    if (0 <= val) {
        globalLogLevel = (uint32_t)val;
    }

    /* Highest log level among hal.logs and global.logs is selected */
    if (gCamHal3LogLevel < globalLogLevel)
        gCamHal3LogLevel = globalLogLevel;

    return;
}

/*===========================================================================
 * FUNCTION   : validateStreamRotations
 *
 * DESCRIPTION: Check if the rotations requested are supported
 *
 * PARAMETERS :
 *  @stream_list : streams to be configured
 *
 * RETURN     : NO_ERROR on success
 *              -EINVAL on failure
 *
 *==========================================================================*/
int QCamera3HardwareInterface::validateStreamRotations(
        camera3_stream_configuration_t *streamList)
{
    int rc = NO_ERROR;

    /*
    * Loop through all streams requested in configuration
    * Check if unsupported rotations have been requested on any of them
    */
    for (size_t j = 0; j < streamList->num_streams; j++){
        camera3_stream_t *newStream = streamList->streams[j];

        bool isRotated = (newStream->rotation != CAMERA3_STREAM_ROTATION_0);
        bool isImplDef = (newStream->format ==
                HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
        bool isZsl = (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL &&
                isImplDef);

        // Rotation is only supported on implementation-defined, non-ZSL
        // streams.
        if (isRotated && (!isImplDef || isZsl)) {
            LOGE("Error: Unsupported rotation of %d requested for stream"
                    "type:%d and stream format:%d",
                    newStream->rotation, newStream->stream_type,
                    newStream->format);
            rc = -EINVAL;
            break;
        }
    }

    return rc;
}

/*===========================================================================
* FUNCTION   : getFlashInfo
*
* DESCRIPTION: Retrieve information about whether the device has a flash.
*
* PARAMETERS :
*   @cameraId  : Camera id to query
*   @hasFlash  : Boolean indicating whether there is a flash device
*                associated with given camera
*   @flashNode : If a flash device exists, this will be its device node.
*
* RETURN     :
*   None
*==========================================================================*/
void QCamera3HardwareInterface::getFlashInfo(const int cameraId,
        bool& hasFlash,
        char (&flashNode)[QCAMERA_MAX_FILEPATH_LENGTH])
{
    cam_capability_t* camCapability = gCamCapability[cameraId];
    if (NULL == camCapability) {
        // Capabilities not yet loaded for this camera: report no flash.
        hasFlash = false;
        flashNode[0] = '\0';
    } else {
        hasFlash = camCapability->flash_available;
        strlcpy(flashNode,
                (char*)camCapability->flash_dev_name,
                QCAMERA_MAX_FILEPATH_LENGTH);
    }
}

/*===========================================================================
* FUNCTION   : getEepromVersionInfo
*
* DESCRIPTION: Retrieve version info of the sensor EEPROM data
*
* PARAMETERS : None
*
* RETURN     : string describing EEPROM version
*              "\0" if no such info available
*==========================================================================*/
const char *QCamera3HardwareInterface::getEepromVersionInfo()
{
    return (const char *)&gCamCapability[mCameraId]->eeprom_version_info[0];
}

/*===========================================================================
* FUNCTION   : getLdafCalib
*
* DESCRIPTION: Retrieve Laser AF calibration data
*
* PARAMETERS : None
*
* RETURN     : Two uint32_t describing laser AF calibration data
*              NULL if none is available.
+*==========================================================================*/ +const uint32_t *QCamera3HardwareInterface::getLdafCalib() +{ + if (mLdafCalibExist) { + return &mLdafCalib[0]; + } else { + return NULL; + } +} + +/*=========================================================================== + * FUNCTION : dynamicUpdateMetaStreamInfo + * + * DESCRIPTION: This function: + * (1) stops all the channels + * (2) returns error on pending requests and buffers + * (3) sends metastream_info in setparams + * (4) starts all channels + * This is useful when sensor has to be restarted to apply any + * settings such as frame rate from a different sensor mode + * + * PARAMETERS : None + * + * RETURN : NO_ERROR on success + * Error codes on failure + * + *==========================================================================*/ +int32_t QCamera3HardwareInterface::dynamicUpdateMetaStreamInfo() +{ + ATRACE_CALL(); + int rc = NO_ERROR; + + LOGD("E"); + + rc = stopAllChannels(); + if (rc < 0) { + LOGE("stopAllChannels failed"); + return rc; + } + + rc = notifyErrorForPendingRequests(); + if (rc < 0) { + LOGE("notifyErrorForPendingRequests failed"); + return rc; + } + + for (uint32_t i = 0; i < mStreamConfigInfo.num_streams; i++) { + LOGI("STREAM INFO : type %d, wxh: %d x %d, pp_mask: 0x%x" + "Format:%d", + mStreamConfigInfo.type[i], + mStreamConfigInfo.stream_sizes[i].width, + mStreamConfigInfo.stream_sizes[i].height, + mStreamConfigInfo.postprocess_mask[i], + mStreamConfigInfo.format[i]); + } + + /* Send meta stream info once again so that ISP can start */ + ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, + CAM_INTF_META_STREAM_INFO, mStreamConfigInfo); + rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, + mParameters); + if (rc < 0) { + LOGE("set Metastreaminfo failed. 
Sensor mode does not change"); + } + + rc = startAllChannels(); + if (rc < 0) { + LOGE("startAllChannels failed"); + return rc; + } + + LOGD("X"); + return rc; +} + +/*=========================================================================== + * FUNCTION : stopAllChannels + * + * DESCRIPTION: This function stops (equivalent to stream-off) all channels + * + * PARAMETERS : None + * + * RETURN : NO_ERROR on success + * Error codes on failure + * + *==========================================================================*/ +int32_t QCamera3HardwareInterface::stopAllChannels() +{ + int32_t rc = NO_ERROR; + + LOGD("Stopping all channels"); + // Stop the Streams/Channels + for (List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv; + if (channel) { + channel->stop(); + } + (*it)->status = INVALID; + } + + if (mSupportChannel) { + mSupportChannel->stop(); + } + if (mAnalysisChannel) { + mAnalysisChannel->stop(); + } + if (mRawDumpChannel) { + mRawDumpChannel->stop(); + } + if (mMetadataChannel) { + /* If content of mStreamInfo is not 0, there is metadata stream */ + mMetadataChannel->stop(); + } + + LOGD("All channels stopped"); + return rc; +} + +/*=========================================================================== + * FUNCTION : startAllChannels + * + * DESCRIPTION: This function starts (equivalent to stream-on) all channels + * + * PARAMETERS : None + * + * RETURN : NO_ERROR on success + * Error codes on failure + * + *==========================================================================*/ +int32_t QCamera3HardwareInterface::startAllChannels() +{ + int32_t rc = NO_ERROR; + + LOGD("Start all channels "); + // Start the Streams/Channels + if (mMetadataChannel) { + /* If content of mStreamInfo is not 0, there is metadata stream */ + rc = mMetadataChannel->start(); + if (rc < 0) { + LOGE("META channel start failed"); + return rc; + } + } + for 
(List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv; + if (channel) { + rc = channel->start(); + if (rc < 0) { + LOGE("channel start failed"); + return rc; + } + } + } + if (mAnalysisChannel) { + mAnalysisChannel->start(); + } + if (mSupportChannel) { + rc = mSupportChannel->start(); + if (rc < 0) { + LOGE("Support channel start failed"); + return rc; + } + } + if (mRawDumpChannel) { + rc = mRawDumpChannel->start(); + if (rc < 0) { + LOGE("RAW dump channel start failed"); + return rc; + } + } + + LOGD("All channels started"); + return rc; +} + +/*=========================================================================== + * FUNCTION : notifyErrorForPendingRequests + * + * DESCRIPTION: This function sends error for all the pending requests/buffers + * + * PARAMETERS : None + * + * RETURN : Error codes + * NO_ERROR on success + * + *==========================================================================*/ +int32_t QCamera3HardwareInterface::notifyErrorForPendingRequests() +{ + int32_t rc = NO_ERROR; + unsigned int frameNum = 0; + camera3_capture_result_t result; + camera3_stream_buffer_t *pStream_Buf = NULL; + + memset(&result, 0, sizeof(camera3_capture_result_t)); + + if (mPendingRequestsList.size() > 0) { + pendingRequestIterator i = mPendingRequestsList.begin(); + frameNum = i->frame_number; + } else { + /* There might still be pending buffers even though there are + no pending requests. 
Setting the frameNum to MAX so that + all the buffers with smaller frame numbers are returned */ + frameNum = UINT_MAX; + } + + LOGH("Oldest frame num on mPendingRequestsList = %u", + frameNum); + + for (auto req = mPendingBuffersMap.mPendingBuffersInRequest.begin(); + req != mPendingBuffersMap.mPendingBuffersInRequest.end(); ) { + + if (req->frame_number < frameNum) { + // Send Error notify to frameworks for each buffer for which + // metadata buffer is already sent + LOGH("Sending ERROR BUFFER for frame %d for %d buffer(s)", + req->frame_number, req->mPendingBufferList.size()); + + pStream_Buf = new camera3_stream_buffer_t[req->mPendingBufferList.size()]; + if (NULL == pStream_Buf) { + LOGE("No memory for pending buffers array"); + return NO_MEMORY; + } + memset(pStream_Buf, 0, + sizeof(camera3_stream_buffer_t)*req->mPendingBufferList.size()); + result.result = NULL; + result.frame_number = req->frame_number; + result.num_output_buffers = req->mPendingBufferList.size(); + result.output_buffers = pStream_Buf; + + size_t index = 0; + for (auto info = req->mPendingBufferList.begin(); + info != req->mPendingBufferList.end(); ) { + + camera3_notify_msg_t notify_msg; + memset(¬ify_msg, 0, sizeof(camera3_notify_msg_t)); + notify_msg.type = CAMERA3_MSG_ERROR; + notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER; + notify_msg.message.error.error_stream = info->stream; + notify_msg.message.error.frame_number = req->frame_number; + pStream_Buf[index].acquire_fence = -1; + pStream_Buf[index].release_fence = -1; + pStream_Buf[index].buffer = info->buffer; + pStream_Buf[index].status = CAMERA3_BUFFER_STATUS_ERROR; + pStream_Buf[index].stream = info->stream; + mCallbackOps->notify(mCallbackOps, ¬ify_msg); + index++; + // Remove buffer from list + info = req->mPendingBufferList.erase(info); + } + + // Remove this request from Map + LOGD("Removing request %d. 
Remaining requests in mPendingBuffersMap: %d", + req->frame_number, mPendingBuffersMap.mPendingBuffersInRequest.size()); + req = mPendingBuffersMap.mPendingBuffersInRequest.erase(req); + + mCallbackOps->process_capture_result(mCallbackOps, &result); + + delete [] pStream_Buf; + } else { + + // Go through the pending requests info and send error request to framework + LOGE("Sending ERROR REQUEST for all pending requests"); + pendingRequestIterator i = mPendingRequestsList.begin(); //make sure i is at the beginning + + LOGE("Sending ERROR REQUEST for frame %d", req->frame_number); + + // Send error notify to frameworks + camera3_notify_msg_t notify_msg; + memset(¬ify_msg, 0, sizeof(camera3_notify_msg_t)); + notify_msg.type = CAMERA3_MSG_ERROR; + notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST; + notify_msg.message.error.error_stream = NULL; + notify_msg.message.error.frame_number = req->frame_number; + mCallbackOps->notify(mCallbackOps, ¬ify_msg); + + pStream_Buf = new camera3_stream_buffer_t[req->mPendingBufferList.size()]; + if (NULL == pStream_Buf) { + LOGE("No memory for pending buffers array"); + return NO_MEMORY; + } + memset(pStream_Buf, 0, sizeof(camera3_stream_buffer_t)*req->mPendingBufferList.size()); + + result.result = NULL; + result.frame_number = req->frame_number; + result.input_buffer = i->input_buffer; + result.num_output_buffers = req->mPendingBufferList.size(); + result.output_buffers = pStream_Buf; + + size_t index = 0; + for (auto info = req->mPendingBufferList.begin(); + info != req->mPendingBufferList.end(); ) { + pStream_Buf[index].acquire_fence = -1; + pStream_Buf[index].release_fence = -1; + pStream_Buf[index].buffer = info->buffer; + pStream_Buf[index].status = CAMERA3_BUFFER_STATUS_ERROR; + pStream_Buf[index].stream = info->stream; + index++; + // Remove buffer from list + info = req->mPendingBufferList.erase(info); + } + + // Remove this request from Map + LOGD("Removing request %d. 
Remaining requests in mPendingBuffersMap: %d", + req->frame_number, mPendingBuffersMap.mPendingBuffersInRequest.size()); + req = mPendingBuffersMap.mPendingBuffersInRequest.erase(req); + + mCallbackOps->process_capture_result(mCallbackOps, &result); + delete [] pStream_Buf; + i = erasePendingRequest(i); + } + } + + /* Reset pending frame Drop list and requests list */ + mPendingFrameDropList.clear(); + + for (auto &req : mPendingBuffersMap.mPendingBuffersInRequest) { + req.mPendingBufferList.clear(); + } + mPendingBuffersMap.mPendingBuffersInRequest.clear(); + mPendingReprocessResultList.clear(); + LOGH("Cleared all the pending buffers "); + + return rc; +} + +bool QCamera3HardwareInterface::isOnEncoder( + const cam_dimension_t max_viewfinder_size, + uint32_t width, uint32_t height) +{ + return (width > (uint32_t)max_viewfinder_size.width || + height > (uint32_t)max_viewfinder_size.height); +} + +/*=========================================================================== + * FUNCTION : setBundleInfo + * + * DESCRIPTION: Set bundle info for all streams that are bundle. 
+ * + * PARAMETERS : None + * + * RETURN : NO_ERROR on success + * Error codes on failure + *==========================================================================*/ +int32_t QCamera3HardwareInterface::setBundleInfo() +{ + int32_t rc = NO_ERROR; + + if (mChannelHandle) { + cam_bundle_config_t bundleInfo; + memset(&bundleInfo, 0, sizeof(bundleInfo)); + rc = mCameraHandle->ops->get_bundle_info( + mCameraHandle->camera_handle, mChannelHandle, &bundleInfo); + if (rc != NO_ERROR) { + LOGE("get_bundle_info failed"); + return rc; + } + if (mAnalysisChannel) { + mAnalysisChannel->setBundleInfo(bundleInfo); + } + if (mSupportChannel) { + mSupportChannel->setBundleInfo(bundleInfo); + } + for (List<stream_info_t *>::iterator it = mStreamInfo.begin(); + it != mStreamInfo.end(); it++) { + QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv; + channel->setBundleInfo(bundleInfo); + } + if (mRawDumpChannel) { + mRawDumpChannel->setBundleInfo(bundleInfo); + } + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : get_num_overall_buffers + * + * DESCRIPTION: Estimate number of pending buffers across all requests. + * + * PARAMETERS : None + * + * RETURN : Number of overall pending buffers + * + *==========================================================================*/ +uint32_t PendingBuffersMap::get_num_overall_buffers() +{ + uint32_t sum_buffers = 0; + for (auto &req : mPendingBuffersInRequest) { + sum_buffers += req.mPendingBufferList.size(); + } + return sum_buffers; +} + +/*=========================================================================== + * FUNCTION : removeBuf + * + * DESCRIPTION: Remove a matching buffer from tracker. 
 *
 * PARAMETERS : @buffer: image buffer for the callback
 *
 * RETURN     : None
 *
 *==========================================================================*/
void PendingBuffersMap::removeBuf(buffer_handle_t *buffer)
{
    bool buffer_found = false;
    for (auto req = mPendingBuffersInRequest.begin();
            req != mPendingBuffersInRequest.end(); req++) {
        for (auto k = req->mPendingBufferList.begin();
                k != req->mPendingBufferList.end(); k++ ) {
            if (k->buffer == buffer) {
                LOGD("Frame %d: Found Frame buffer %p, take it out from mPendingBufferList",
                        req->frame_number, buffer);
                k = req->mPendingBufferList.erase(k);
                if (req->mPendingBufferList.empty()) {
                    // Remove this request from Map
                    // Safe despite erase-inside-loop: buffer_found makes both
                    // loops break before req/k are advanced again.
                    req = mPendingBuffersInRequest.erase(req);
                }
                buffer_found = true;
                break;
            }
        }
        if (buffer_found) {
            break;
        }
    }
    LOGD("mPendingBuffersMap.num_overall_buffers = %d",
            get_num_overall_buffers());
}

/*===========================================================================
 * FUNCTION   : setPAAFSupport
 *
 * DESCRIPTION: Set the preview-assisted auto focus support bit in
 *              feature mask according to stream type and filter
 *              arrangement
 *
 * PARAMETERS : @feature_mask: current feature mask, which may be modified
 *              @stream_type: stream type
 *              @filter_arrangement: filter arrangement
 *
 * RETURN     : None
 *==========================================================================*/
void QCamera3HardwareInterface::setPAAFSupport(
        cam_feature_mask_t& feature_mask,
        cam_stream_type_t stream_type,
        cam_color_filter_arrangement_t filter_arrangement)
{
    LOGD("feature_mask=0x%llx; stream_type=%d, filter_arrangement=%d",
            feature_mask, stream_type, filter_arrangement);

    switch (filter_arrangement) {
    // Bayer sensors: PAAF runs on preview/callback/video streams.
    case CAM_FILTER_ARRANGEMENT_RGGB:
    case CAM_FILTER_ARRANGEMENT_GRBG:
    case CAM_FILTER_ARRANGEMENT_GBRG:
    case CAM_FILTER_ARRANGEMENT_BGGR:
        if ((stream_type == CAM_STREAM_TYPE_CALLBACK) ||
                (stream_type == CAM_STREAM_TYPE_PREVIEW) ||
                (stream_type == CAM_STREAM_TYPE_VIDEO)) {
            feature_mask |= CAM_QCOM_FEATURE_PAAF;
        }
        break;
    // Mono (Y-only) sensors: PAAF runs on the analysis stream.
    case CAM_FILTER_ARRANGEMENT_Y:
        if (stream_type == CAM_STREAM_TYPE_ANALYSIS) {
            feature_mask |= CAM_QCOM_FEATURE_PAAF;
        }
        break;
    default:
        break;
    }
}

/*===========================================================================
* FUNCTION   : getSensorMountAngle
*
* DESCRIPTION: Retrieve sensor mount angle
*
* PARAMETERS : None
*
* RETURN     : sensor mount angle in uint32_t
*==========================================================================*/
uint32_t QCamera3HardwareInterface::getSensorMountAngle()
{
    return gCamCapability[mCameraId]->sensor_mount_angle;
}

/*===========================================================================
* FUNCTION   : getRelatedCalibrationData
*
* DESCRIPTION: Retrieve related system calibration data
*
* PARAMETERS : None
*
* RETURN     : Pointer of related system calibration data
*==========================================================================*/
const cam_related_system_calibration_data_t *QCamera3HardwareInterface::getRelatedCalibrationData()
{
    return (const cam_related_system_calibration_data_t *)
            &(gCamCapability[mCameraId]->related_cam_calibration);
}
}; //end namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3HWI.h b/camera/QCamera2/HAL3/QCamera3HWI.h
new file mode 100644
index 0000000..1bcf23a
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3HWI.h
@@ -0,0 +1,536 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*     * Redistributions of source code must retain the above copyright
*       notice, this list of conditions and the following disclaimer.
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*
*/

#ifndef __QCAMERA3HARDWAREINTERFACE_H__
#define __QCAMERA3HARDWAREINTERFACE_H__

// System dependencies
#include <camera/CameraMetadata.h>
#include <pthread.h>
#include <utils/KeyedVector.h>
#include <utils/List.h>

// Camera dependencies
#include "camera3.h"
#include "QCamera3Channel.h"
#include "QCamera3CropRegionMapper.h"
#include "QCamera3HALHeader.h"
#include "QCamera3Mem.h"
#include "QCameraPerf.h"
#include "QCameraCommon.h"

extern "C" {
#include "mm_camera_interface.h"
#include "mm_jpeg_interface.h"
}

using namespace android;

namespace qcamera {

#ifndef TRUE
#define TRUE 1
#endif

#ifndef FALSE
#define FALSE 0
#endif

/* Time related macros */
typedef int64_t nsecs_t;
#define NSEC_PER_SEC 1000000000LLU
#define NSEC_PER_USEC 1000LLU
#define NSEC_PER_33MSEC 33000000LLU

// Target module selector for VFE/PP tuning commands (see setMobicat()).
typedef enum {
    SET_ENABLE,
    SET_CONTROLENABLE,
    SET_RELOAD_CHROMATIX,
    SET_STATUS,
} optype_t;

#define MODULE_ALL 0

extern volatile uint32_t gCamHal3LogLevel;

class QCamera3MetadataChannel;
class QCamera3PicChannel;
class QCamera3HeapMemory;
class QCamera3Exif;

// Per-stream bookkeeping: framework stream, its registered buffers,
// current status and the HAL channel servicing it.
typedef struct {
    camera3_stream_t *stream;
    camera3_stream_buffer_set_t buffer_set;
    stream_status_t status;
    int registered;
    QCamera3ProcessingChannel *channel;
} stream_info_t;

// One framework buffer still owed back to the framework.
typedef struct {
    // Stream handle
    camera3_stream_t *stream;
    // Buffer handle
    buffer_handle_t *buffer;
} PendingBufferInfo;

// All buffers still pending for a single capture request.
typedef struct {
    // Frame number corresponding to request
    uint32_t frame_number;
    // Time when request queued into system
    nsecs_t timestamp;
    List<PendingBufferInfo> mPendingBufferList;
} PendingBuffersInRequest;

// Tracker of every framework buffer the HAL has not yet returned,
// grouped per capture request.
class PendingBuffersMap {
public:
    // Number of outstanding buffers at flush
    uint32_t numPendingBufsAtFlush;
    // List of pending buffers per request
    List<PendingBuffersInRequest> mPendingBuffersInRequest;
    uint32_t get_num_overall_buffers();
    void removeBuf(buffer_handle_t *buffer);
};


class
QCamera3HardwareInterface { +public: + /* static variable and functions accessed by camera service */ + static camera3_device_ops_t mCameraOps; + //Id of each session in bundle/link + static uint32_t sessionId[MM_CAMERA_MAX_NUM_SENSORS]; + static int initialize(const struct camera3_device *, + const camera3_callback_ops_t *callback_ops); + static int configure_streams(const struct camera3_device *, + camera3_stream_configuration_t *stream_list); + static const camera_metadata_t* construct_default_request_settings( + const struct camera3_device *, int type); + static int process_capture_request(const struct camera3_device *, + camera3_capture_request_t *request); + + static void dump(const struct camera3_device *, int fd); + static int flush(const struct camera3_device *); + static int close_camera_device(struct hw_device_t* device); + +public: + QCamera3HardwareInterface(uint32_t cameraId, + const camera_module_callbacks_t *callbacks); + virtual ~QCamera3HardwareInterface(); + static void camEvtHandle(uint32_t camera_handle, mm_camera_event_t *evt, + void *user_data); + int openCamera(struct hw_device_t **hw_device); + camera_metadata_t* translateCapabilityToMetadata(int type); + + static int getCamInfo(uint32_t cameraId, struct camera_info *info); + static int initCapabilities(uint32_t cameraId); + static int initStaticMetadata(uint32_t cameraId); + static void makeTable(cam_dimension_t *dimTable, size_t size, + size_t max_size, int32_t *sizeTable); + static void makeFPSTable(cam_fps_range_t *fpsTable, size_t size, + size_t max_size, int32_t *fpsRangesTable); + static void makeOverridesList(cam_scene_mode_overrides_t *overridesTable, + size_t size, size_t max_size, uint8_t *overridesList, + uint8_t *supported_indexes, uint32_t camera_id); + static size_t filterJpegSizes(int32_t *jpegSizes, int32_t *processedSizes, + size_t processedSizesCnt, size_t maxCount, cam_rect_t active_array_size, + uint8_t downscale_factor); + static void convertToRegions(cam_rect_t rect, 
int32_t* region, int weight); + static void convertFromRegions(cam_area_t &roi, const camera_metadata_t *settings, + uint32_t tag); + static bool resetIfNeededROI(cam_area_t* roi, const cam_crop_region_t* scalerCropRegion); + static void convertLandmarks(cam_face_landmarks_info_t face, int32_t* landmarks); + static int32_t getSensorSensitivity(int32_t iso_mode); + + double computeNoiseModelEntryS(int32_t sensitivity); + double computeNoiseModelEntryO(int32_t sensitivity); + + static void captureResultCb(mm_camera_super_buf_t *metadata, + camera3_stream_buffer_t *buffer, uint32_t frame_number, + bool isInputBuffer, void *userdata); + + int initialize(const camera3_callback_ops_t *callback_ops); + int configureStreams(camera3_stream_configuration_t *stream_list); + int configureStreamsPerfLocked(camera3_stream_configuration_t *stream_list); + int processCaptureRequest(camera3_capture_request_t *request); + void dump(int fd); + int flushPerf(); + + int setFrameParameters(camera3_capture_request_t *request, + cam_stream_ID_t streamID, int blob_request, uint32_t snapshotStreamId); + int32_t setReprocParameters(camera3_capture_request_t *request, + metadata_buffer_t *reprocParam, uint32_t snapshotStreamId); + int translateToHalMetadata(const camera3_capture_request_t *request, + metadata_buffer_t *parm, uint32_t snapshotStreamId); + camera_metadata_t* translateCbUrgentMetadataToResultMetadata ( + metadata_buffer_t *metadata); + camera_metadata_t* translateFromHalMetadata(metadata_buffer_t *metadata, + nsecs_t timestamp, int32_t request_id, + const CameraMetadata& jpegMetadata, uint8_t pipeline_depth, + uint8_t capture_intent, bool pprocDone, uint8_t fwk_cacMode); + camera_metadata_t* saveRequestSettings(const CameraMetadata& jpegMetadata, + camera3_capture_request_t *request); + int initParameters(); + void deinitParameters(); + QCamera3ReprocessChannel *addOfflineReprocChannel(const reprocess_config_t &config, + QCamera3ProcessingChannel *inputChHandle); + bool 
needRotationReprocess(); + bool needJpegExifRotation(); + bool needReprocess(cam_feature_mask_t postprocess_mask); + bool needJpegRotation(); + cam_denoise_process_type_t getWaveletDenoiseProcessPlate(); + cam_denoise_process_type_t getTemporalDenoiseProcessPlate(); + + void captureResultCb(mm_camera_super_buf_t *metadata, + camera3_stream_buffer_t *buffer, uint32_t frame_number, + bool isInputBuffer); + cam_dimension_t calcMaxJpegDim(); + bool needOnlineRotation(); + uint32_t getJpegQuality(); + QCamera3Exif *getExifData(); + mm_jpeg_exif_params_t get3AExifParams(); + uint8_t getMobicatMask(); + static void getFlashInfo(const int cameraId, + bool& hasFlash, + char (&flashNode)[QCAMERA_MAX_FILEPATH_LENGTH]); + const char *getEepromVersionInfo(); + const uint32_t *getLdafCalib(); + void get3AVersion(cam_q3a_version_t &swVersion); + + // Get dual camera related info + bool isDeviceLinked() {return mIsDeviceLinked;} + bool isMainCamera() {return mIsMainCamera;} + uint32_t getSensorMountAngle(); + const cam_related_system_calibration_data_t *getRelatedCalibrationData(); + + template <typename fwkType, typename halType> struct QCameraMap { + fwkType fwk_name; + halType hal_name; + }; + + typedef struct { + const char *const desc; + cam_cds_mode_type_t val; + } QCameraPropMap; + + +private: + + // State transition conditions: + // "\" means not applicable + // "x" means not valid + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | | CLOSED | OPENED | INITIALIZED | CONFIGURED | STARTED | ERROR | DEINIT | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | CLOSED | \ | open | x | x | x | x | x | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | OPENED | close | \ | initialize | x | x | error | x | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // |INITIALIZED | close | x 
| \ | configure | x | error | x | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | CONFIGURED | close | x | x | configure | request | error | x | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | STARTED | close | x | x | configure | \ | error | x | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | ERROR | close | x | x | x | x | \ | any | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + // | DEINIT | close | x | x | x | x | x | \ | + // +------------+----------+----------+-------------+------------+---------+-------+--------+ + + typedef enum { + CLOSED, + OPENED, + INITIALIZED, + CONFIGURED, + STARTED, + ERROR, + DEINIT + } State; + + int openCamera(); + int closeCamera(); + int flush(bool restartChannels); + static size_t calcMaxJpegSize(uint32_t camera_id); + cam_dimension_t getMaxRawSize(uint32_t camera_id); + static void addStreamConfig(Vector<int32_t> &available_stream_configs, + int32_t scalar_format, const cam_dimension_t &dim, + int32_t config_type); + + int validateCaptureRequest(camera3_capture_request_t *request); + int validateStreamDimensions(camera3_stream_configuration_t *streamList); + int validateStreamRotations(camera3_stream_configuration_t *streamList); + void deriveMinFrameDuration(); + void handleBuffersDuringFlushLock(camera3_stream_buffer_t *buffer); + int32_t handlePendingReprocResults(uint32_t frame_number); + int64_t getMinFrameDuration(const camera3_capture_request_t *request); + void handleMetadataWithLock(mm_camera_super_buf_t *metadata_buf, + bool free_and_bufdone_meta_buf); + void handleBatchMetadata(mm_camera_super_buf_t *metadata_buf, + bool free_and_bufdone_meta_buf); + void handleBufferWithLock(camera3_stream_buffer_t *buffer, + uint32_t frame_number); + void handleInputBufferWithLock(uint32_t frame_number); + void 
unblockRequestIfNecessary(); + void dumpMetadataToFile(tuning_params_t &meta, uint32_t &dumpFrameCount, + bool enabled, const char *type, uint32_t frameNumber); + static void getLogLevel(); + + void cleanAndSortStreamInfo(); + void extractJpegMetadata(CameraMetadata& jpegMetadata, + const camera3_capture_request_t *request); + + bool isSupportChannelNeeded(camera3_stream_configuration_t *streamList, + cam_stream_size_info_t stream_config_info); + int32_t setMobicat(); + + int32_t getSensorOutputSize(cam_dimension_t &sensor_dim); + int32_t setHalFpsRange(const CameraMetadata &settings, + metadata_buffer_t *hal_metadata); + int32_t extractSceneMode(const CameraMetadata &frame_settings, uint8_t metaMode, + metadata_buffer_t *hal_metadata); + int32_t numOfSizesOnEncoder(const camera3_stream_configuration_t *streamList, + const cam_dimension_t &maxViewfinderSize); + + void addToPPFeatureMask(int stream_format, uint32_t stream_idx); + void updateFpsInPreviewBuffer(metadata_buffer_t *metadata, uint32_t frame_number); + + void enablePowerHint(); + void disablePowerHint(); + int32_t dynamicUpdateMetaStreamInfo(); + int32_t startAllChannels(); + int32_t stopAllChannels(); + int32_t notifyErrorForPendingRequests(); + void notifyError(uint32_t frameNumber, + camera3_error_msg_code_t errorCode); + int32_t getReprocessibleOutputStreamId(uint32_t &id); + int32_t handleCameraDeviceError(); + + bool isOnEncoder(const cam_dimension_t max_viewfinder_size, + uint32_t width, uint32_t height); + void hdrPlusPerfLock(mm_camera_super_buf_t *metadata_buf); + + static bool supportBurstCapture(uint32_t cameraId); + int32_t setBundleInfo(); + + static void setPAAFSupport(cam_feature_mask_t& feature_mask, + cam_stream_type_t stream_type, + cam_color_filter_arrangement_t filter_arrangement); + + camera3_device_t mCameraDevice; + uint32_t mCameraId; + mm_camera_vtbl_t *mCameraHandle; + bool mCameraInitialized; + camera_metadata_t *mDefaultMetadata[CAMERA3_TEMPLATE_COUNT]; + const 
camera3_callback_ops_t *mCallbackOps; + + QCamera3MetadataChannel *mMetadataChannel; + QCamera3PicChannel *mPictureChannel; + QCamera3RawChannel *mRawChannel; + QCamera3SupportChannel *mSupportChannel; + QCamera3SupportChannel *mAnalysisChannel; + QCamera3RawDumpChannel *mRawDumpChannel; + QCamera3RegularChannel *mDummyBatchChannel; + QCameraPerfLock m_perfLock; + QCameraCommon mCommon; + + uint32_t mChannelHandle; + + void saveExifParams(metadata_buffer_t *metadata); + mm_jpeg_exif_params_t mExifParams; + + //First request yet to be processed after configureStreams + bool mFirstConfiguration; + bool mFlush; + bool mFlushPerf; + bool mEnableRawDump; + QCamera3HeapMemory *mParamHeap; + metadata_buffer_t* mParameters; + metadata_buffer_t* mPrevParameters; + CameraMetadata mCurJpegMeta; + bool m_bIsVideo; + bool m_bIs4KVideo; + bool m_bEisSupportedSize; + bool m_bEisEnable; + typedef struct { + cam_dimension_t dim; + int format; + uint32_t usage; + } InputStreamInfo; + + InputStreamInfo mInputStreamInfo; + uint8_t m_MobicatMask; + uint8_t m_bTnrEnabled; + int8_t mSupportedFaceDetectMode; + uint8_t m_bTnrPreview; + uint8_t m_bTnrVideo; + + /* Data structure to store pending request */ + typedef struct { + camera3_stream_t *stream; + camera3_stream_buffer_t *buffer; + // metadata needs to be consumed by the corresponding stream + // in order to generate the buffer. 
+ bool need_metadata; + } RequestedBufferInfo; + typedef struct { + uint32_t frame_number; + uint32_t num_buffers; + int32_t request_id; + List<RequestedBufferInfo> buffers; + int blob_request; + uint8_t bUrgentReceived; + nsecs_t timestamp; + camera3_stream_buffer_t *input_buffer; + const camera_metadata_t *settings; + CameraMetadata jpegMetadata; + uint8_t pipeline_depth; + uint32_t partial_result_cnt; + uint8_t capture_intent; + uint8_t fwkCacMode; + bool shutter_notified; + } PendingRequestInfo; + typedef struct { + uint32_t frame_number; + uint32_t stream_ID; + } PendingFrameDropInfo; + + typedef struct { + camera3_notify_msg_t notify_msg; + camera3_stream_buffer_t buffer; + uint32_t frame_number; + } PendingReprocessResult; + + typedef KeyedVector<uint32_t, Vector<PendingBufferInfo> > FlushMap; + typedef List<QCamera3HardwareInterface::PendingRequestInfo>::iterator + pendingRequestIterator; + typedef List<QCamera3HardwareInterface::RequestedBufferInfo>::iterator + pendingBufferIterator; + + List<PendingReprocessResult> mPendingReprocessResultList; + List<PendingRequestInfo> mPendingRequestsList; + List<PendingFrameDropInfo> mPendingFrameDropList; + /* Use last frame number of the batch as key and first frame number of the + * batch as value for that key */ + KeyedVector<uint32_t, uint32_t> mPendingBatchMap; + + PendingBuffersMap mPendingBuffersMap; + pthread_cond_t mRequestCond; + uint32_t mPendingLiveRequest; + bool mWokenUpByDaemon; + int32_t mCurrentRequestId; + cam_stream_size_info_t mStreamConfigInfo; + + //mutex for serialized access to camera3_device_ops_t functions + pthread_mutex_t mMutex; + + //condition used to signal flush after buffers have returned + pthread_cond_t mBuffersCond; + + List<stream_info_t*> mStreamInfo; + + int64_t mMinProcessedFrameDuration; + int64_t mMinJpegFrameDuration; + int64_t mMinRawFrameDuration; + + uint32_t mMetaFrameCount; + bool mUpdateDebugLevel; + const camera_module_callbacks_t *mCallbacks; + + uint8_t 
mCaptureIntent; + uint8_t mCacMode; + metadata_buffer_t mReprocMeta; //scratch meta buffer + /* 0: Not batch, non-zero: Number of image buffers in a batch */ + uint8_t mBatchSize; + // Used only in batch mode + uint8_t mToBeQueuedVidBufs; + // Fixed video fps + float mHFRVideoFps; + uint8_t mOpMode; + uint32_t mFirstFrameNumberInBatch; + camera3_stream_t mDummyBatchStream; + bool mNeedSensorRestart; + + /* sensor output size with current stream configuration */ + QCamera3CropRegionMapper mCropRegionMapper; + + /* Ldaf calibration data */ + bool mLdafCalibExist; + uint32_t mLdafCalib[2]; + bool mPowerHintEnabled; + int32_t mLastCustIntentFrmNum; + + static const QCameraMap<camera_metadata_enum_android_control_effect_mode_t, + cam_effect_mode_type> EFFECT_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_control_awb_mode_t, + cam_wb_mode_type> WHITE_BALANCE_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_control_scene_mode_t, + cam_scene_mode_type> SCENE_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_control_af_mode_t, + cam_focus_mode_type> FOCUS_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_color_correction_aberration_mode_t, + cam_aberration_mode_t> COLOR_ABERRATION_MAP[]; + static const QCameraMap<camera_metadata_enum_android_control_ae_antibanding_mode_t, + cam_antibanding_mode_type> ANTIBANDING_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_lens_state_t, + cam_af_lens_state_t> LENS_STATE_MAP[]; + static const QCameraMap<camera_metadata_enum_android_control_ae_mode_t, + cam_flash_mode_t> AE_FLASH_MODE_MAP[]; + static const QCameraMap<camera_metadata_enum_android_flash_mode_t, + cam_flash_mode_t> FLASH_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_statistics_face_detect_mode_t, + cam_face_detect_mode_t> FACEDETECT_MODES_MAP[]; + static const QCameraMap<camera_metadata_enum_android_lens_info_focus_distance_calibration_t, + 
cam_focus_calibration_t> FOCUS_CALIBRATION_MAP[]; + static const QCameraMap<camera_metadata_enum_android_sensor_test_pattern_mode_t, + cam_test_pattern_mode_t> TEST_PATTERN_MAP[]; + static const QCameraMap<camera_metadata_enum_android_sensor_reference_illuminant1_t, + cam_illuminat_t> REFERENCE_ILLUMINANT_MAP[]; + static const QCameraMap<int32_t, + cam_hfr_mode_t> HFR_MODE_MAP[]; + + static const QCameraPropMap CDS_MAP[]; + + pendingRequestIterator erasePendingRequest(pendingRequestIterator i); + //GPU library to read buffer padding details. + void *lib_surface_utils; + int (*LINK_get_surface_pixel_alignment)(); + uint32_t mSurfaceStridePadding; + + State mState; + //Dual camera related params + bool mIsDeviceLinked; + bool mIsMainCamera; + uint8_t mLinkedCameraId; + QCamera3HeapMemory *m_pRelCamSyncHeap; + cam_sync_related_sensors_event_info_t *m_pRelCamSyncBuf; + cam_sync_related_sensors_event_info_t m_relCamSyncInfo; + +}; + +}; // namespace qcamera + +#endif /* __QCAMERA2HARDWAREINTERFACE_H__ */ diff --git a/camera/QCamera2/HAL3/QCamera3Mem.cpp b/camera/QCamera2/HAL3/QCamera3Mem.cpp new file mode 100644 index 0000000..ebcc3ba --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3Mem.cpp @@ -0,0 +1,1199 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#define LOG_TAG "QCameraHWI_Mem" + +// System dependencies +#include <fcntl.h> +#define MMAN_H <SYSTEM_HEADER_PREFIX/mman.h> +#include MMAN_H +#include "gralloc_priv.h" + +// Display dependencies +#include "qdMetaData.h" + +// Camera dependencies +#include "QCamera3HWI.h" +#include "QCamera3Mem.h" +#include "QCameraTrace.h" + +extern "C" { +#include "mm_camera_dbg.h" +#include "mm_camera_interface.h" +} + +using namespace android; + +namespace qcamera { + +// QCaemra2Memory base class + +/*=========================================================================== + * FUNCTION : QCamera3Memory + * + * DESCRIPTION: default constructor of QCamera3Memory + * + * PARAMETERS : none + * + * RETURN : None + *==========================================================================*/ +QCamera3Memory::QCamera3Memory() +{ + mBufferCount = 0; + for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i++) { + mMemInfo[i].fd = -1; + mMemInfo[i].main_ion_fd = -1; + mMemInfo[i].handle = 0; + mMemInfo[i].size = 0; + 
mCurrentFrameNumbers[i] = -1; + } +} + +/*=========================================================================== + * FUNCTION : ~QCamera3Memory + * + * DESCRIPTION: deconstructor of QCamera3Memory + * + * PARAMETERS : none + * + * RETURN : None + *==========================================================================*/ +QCamera3Memory::~QCamera3Memory() +{ +} + +/*=========================================================================== + * FUNCTION : cacheOpsInternal + * + * DESCRIPTION: ion related memory cache operations + * + * PARAMETERS : + * @index : index of the buffer + * @cmd : cache ops command + * @vaddr : ptr to the virtual address + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3Memory::cacheOpsInternal(uint32_t index, unsigned int cmd, void *vaddr) +{ + Mutex::Autolock lock(mLock); + + struct ion_flush_data cache_inv_data; + struct ion_custom_data custom_data; + int ret = OK; + + if (MM_CAMERA_MAX_NUM_FRAMES <= index) { + LOGE("index %d out of bound [0, %d)", + index, MM_CAMERA_MAX_NUM_FRAMES); + return BAD_INDEX; + } + + if (0 == mMemInfo[index].handle) { + LOGE("Buffer at %d not registered", index); + return BAD_INDEX; + } + + memset(&cache_inv_data, 0, sizeof(cache_inv_data)); + memset(&custom_data, 0, sizeof(custom_data)); + cache_inv_data.vaddr = vaddr; + cache_inv_data.fd = mMemInfo[index].fd; + cache_inv_data.handle = mMemInfo[index].handle; + cache_inv_data.length = (unsigned int)mMemInfo[index].size; + custom_data.cmd = cmd; + custom_data.arg = (unsigned long)&cache_inv_data; + + LOGD("addr = %p, fd = %d, handle = %lx length = %d, ION Fd = %d", + cache_inv_data.vaddr, cache_inv_data.fd, + (unsigned long)cache_inv_data.handle, cache_inv_data.length, + mMemInfo[index].main_ion_fd); + ret = ioctl(mMemInfo[index].main_ion_fd, ION_IOC_CUSTOM, &custom_data); + if (ret < 0) + LOGE("Cache Invalidate 
failed: %s\n", strerror(errno)); + + return ret; +} + +/*=========================================================================== + * FUNCTION : getFd + * + * DESCRIPTION: return file descriptor of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : file descriptor + *==========================================================================*/ +int QCamera3Memory::getFd(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (MM_CAMERA_MAX_NUM_FRAMES <= index) { + return BAD_INDEX; + } + + if (0 == mMemInfo[index].handle) { + return BAD_INDEX; + } + + return mMemInfo[index].fd; +} + +/*=========================================================================== + * FUNCTION : getSize + * + * DESCRIPTION: return buffer size of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : buffer size + *==========================================================================*/ +ssize_t QCamera3Memory::getSize(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (MM_CAMERA_MAX_NUM_FRAMES <= index) { + return BAD_INDEX; + } + + if (0 == mMemInfo[index].handle) { + return BAD_INDEX; + } + + return (ssize_t)mMemInfo[index].size; +} + +/*=========================================================================== + * FUNCTION : getCnt + * + * DESCRIPTION: query number of buffers allocated + * + * PARAMETERS : none + * + * RETURN : number of buffers allocated + *==========================================================================*/ +uint32_t QCamera3Memory::getCnt() +{ + Mutex::Autolock lock(mLock); + + return mBufferCount; +} + +/*=========================================================================== + * FUNCTION : getBufDef + * + * DESCRIPTION: query detailed buffer information + * + * PARAMETERS : + * @offset : [input] frame buffer offset + * @bufDef : [output] reference to struct to store buffer definition + * @index : [input] index of the buffer + * + * RETURN : int32_t type of status 
+ * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Memory::getBufDef(const cam_frame_len_offset_t &offset, + mm_camera_buf_def_t &bufDef, uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (!mBufferCount) { + LOGE("Memory not allocated"); + return NO_INIT; + } + + bufDef.fd = mMemInfo[index].fd; + bufDef.frame_len = mMemInfo[index].size; + bufDef.mem_info = (void *)this; + bufDef.buffer = getPtrLocked(index); + bufDef.planes_buf.num_planes = (int8_t)offset.num_planes; + bufDef.buf_idx = (uint8_t)index; + + /* Plane 0 needs to be set separately. Set other planes in a loop */ + bufDef.planes_buf.planes[0].length = offset.mp[0].len; + bufDef.planes_buf.planes[0].m.userptr = (long unsigned int)mMemInfo[index].fd; + bufDef.planes_buf.planes[0].data_offset = offset.mp[0].offset; + bufDef.planes_buf.planes[0].reserved[0] = 0; + for (int i = 1; i < bufDef.planes_buf.num_planes; i++) { + bufDef.planes_buf.planes[i].length = offset.mp[i].len; + bufDef.planes_buf.planes[i].m.userptr = (long unsigned int)mMemInfo[i].fd; + bufDef.planes_buf.planes[i].data_offset = offset.mp[i].offset; + bufDef.planes_buf.planes[i].reserved[0] = + bufDef.planes_buf.planes[i-1].reserved[0] + + bufDef.planes_buf.planes[i-1].length; + } + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : QCamera3HeapMemory + * + * DESCRIPTION: constructor of QCamera3HeapMemory for ion memory used internally in HAL + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +QCamera3HeapMemory::QCamera3HeapMemory(uint32_t maxCnt) + : QCamera3Memory() +{ + mMaxCnt = MIN(maxCnt, MM_CAMERA_MAX_NUM_FRAMES); + for (uint32_t i = 0; i < mMaxCnt; i ++) + mPtr[i] = NULL; +} + +/*=========================================================================== + * FUNCTION : ~QCamera3HeapMemory + 
* + * DESCRIPTION: deconstructor of QCamera3HeapMemory + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +QCamera3HeapMemory::~QCamera3HeapMemory() +{ +} + +/*=========================================================================== + * FUNCTION : allocOneBuffer + * + * DESCRIPTION: impl of allocating one buffers of certain size + * + * PARAMETERS : + * @memInfo : [output] reference to struct to store additional memory allocation info + * @heap : [input] heap id to indicate where the buffers will be allocated from + * @size : [input] lenght of the buffer to be allocated + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HeapMemory::allocOneBuffer(QCamera3MemInfo &memInfo, + unsigned int heap_id, size_t size) +{ + int rc = OK; + struct ion_handle_data handle_data; + struct ion_allocation_data allocData; + struct ion_fd_data ion_info_fd; + int main_ion_fd = -1; + + main_ion_fd = open("/dev/ion", O_RDONLY); + if (main_ion_fd < 0) { + LOGE("Ion dev open failed: %s\n", strerror(errno)); + goto ION_OPEN_FAILED; + } + + memset(&allocData, 0, sizeof(allocData)); + allocData.len = size; + /* to make it page size aligned */ + allocData.len = (allocData.len + 4095U) & (~4095U); + allocData.align = 4096; + allocData.flags = ION_FLAG_CACHED; + allocData.heap_id_mask = heap_id; + rc = ioctl(main_ion_fd, ION_IOC_ALLOC, &allocData); + if (rc < 0) { + LOGE("ION allocation for len %d failed: %s\n", allocData.len, + strerror(errno)); + goto ION_ALLOC_FAILED; + } + + memset(&ion_info_fd, 0, sizeof(ion_info_fd)); + ion_info_fd.handle = allocData.handle; + rc = ioctl(main_ion_fd, ION_IOC_SHARE, &ion_info_fd); + if (rc < 0) { + LOGE("ION map failed %s\n", strerror(errno)); + goto ION_MAP_FAILED; + } + + memInfo.main_ion_fd = main_ion_fd; + memInfo.fd = ion_info_fd.fd; + 
memInfo.handle = ion_info_fd.handle; + memInfo.size = allocData.len; + return OK; + +ION_MAP_FAILED: + memset(&handle_data, 0, sizeof(handle_data)); + handle_data.handle = ion_info_fd.handle; + ioctl(main_ion_fd, ION_IOC_FREE, &handle_data); +ION_ALLOC_FAILED: + close(main_ion_fd); +ION_OPEN_FAILED: + return NO_MEMORY; +} + +/*=========================================================================== + * FUNCTION : deallocOneBuffer + * + * DESCRIPTION: impl of deallocating one buffers + * + * PARAMETERS : + * @memInfo : reference to struct that stores additional memory allocation info + * + * RETURN : none + *==========================================================================*/ +void QCamera3HeapMemory::deallocOneBuffer(QCamera3MemInfo &memInfo) +{ + struct ion_handle_data handle_data; + + if (memInfo.fd >= 0) { + close(memInfo.fd); + memInfo.fd = -1; + } + + if (memInfo.main_ion_fd >= 0) { + memset(&handle_data, 0, sizeof(handle_data)); + handle_data.handle = memInfo.handle; + ioctl(memInfo.main_ion_fd, ION_IOC_FREE, &handle_data); + close(memInfo.main_ion_fd); + memInfo.main_ion_fd = -1; + } + memInfo.handle = 0; + memInfo.size = 0; +} + +/*=========================================================================== + * FUNCTION : getPtrLocked + * + * DESCRIPTION: Return buffer pointer. 
+ * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : buffer ptr + *==========================================================================*/ +void *QCamera3HeapMemory::getPtrLocked(uint32_t index) +{ + if (index >= mBufferCount) { + LOGE("index out of bound"); + return (void *)BAD_INDEX; + } + return mPtr[index]; +} + +/*=========================================================================== + * FUNCTION : markFrameNumber + * + * DESCRIPTION: We use this function from the request call path to mark the + * buffers with the frame number they are intended for this info + * is used later when giving out callback & it is duty of PP to + * ensure that data for that particular frameNumber/Request is + * written to this buffer. + * PARAMETERS : + * @index : index of the buffer + * @frame# : Frame number from the framework + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3HeapMemory::markFrameNumber(uint32_t index, uint32_t frameNumber) +{ + Mutex::Autolock lock(mLock); + + if (index >= mBufferCount) { + LOGE("Index %d out of bounds, current buffer count is %d", + index, mBufferCount); + return BAD_INDEX; + } + + if (0 == mMemInfo[index].handle) { + LOGE("Buffer at %d not allocated", index); + return BAD_INDEX; + } + + mCurrentFrameNumbers[index] = (int32_t)frameNumber; + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : getFrameNumber + * + * DESCRIPTION: We use this to fetch the frameNumber for the request with which + * this buffer was given to HAL + * + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : int32_t frameNumber + * positive/zero -- success + * negative failure + *==========================================================================*/ +int32_t QCamera3HeapMemory::getFrameNumber(uint32_t index) +{ + 
Mutex::Autolock lock(mLock); + + if (index >= mBufferCount) { + LOGE("Index %d out of bounds, current buffer count is %d", + index, mBufferCount); + return -1; + } + + if (0 == mMemInfo[index].handle) { + LOGE("Buffer at %d not registered", index); + return -1; + } + + return mCurrentFrameNumbers[index]; +} + +/*=========================================================================== + * FUNCTION : getBufferIndex + * + * DESCRIPTION: We use this to fetch the buffer index for the request with + * a particular frame number + * + * + * PARAMETERS : + * @frameNumber : frame number of the buffer + * + * RETURN : int32_t buffer index + * negative failure + *==========================================================================*/ +int32_t QCamera3HeapMemory::getBufferIndex(uint32_t frameNumber) +{ + Mutex::Autolock lock(mLock); + + for (uint32_t index = 0; + index < mBufferCount; index++) { + if (mMemInfo[index].handle && + mCurrentFrameNumbers[index] == (int32_t)frameNumber) + return (int32_t)index; + } + return -1; +} + +/*=========================================================================== + * FUNCTION : getPtr + * + * DESCRIPTION: Return buffer pointer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : buffer ptr + *==========================================================================*/ +void *QCamera3HeapMemory::getPtr(uint32_t index) +{ + return getPtrLocked(index); +} + +/*=========================================================================== + * FUNCTION : allocate + * + * DESCRIPTION: allocate requested number of buffers of certain size + * + * PARAMETERS : + * @size : lenght of the buffer to be allocated + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HeapMemory::allocate(size_t size) +{ + unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID; + uint32_t i; + int rc = NO_ERROR; + + 
//Note that now we allow incremental allocation. In other words, we allow + //multiple alloc being called as long as the sum of count does not exceed + //mMaxCnt. + if (mBufferCount > 0) { + LOGE("There is already buffer allocated."); + return BAD_INDEX; + } + + for (i = 0; i < mMaxCnt; i ++) { + rc = allocOneBuffer(mMemInfo[i], heap_id_mask, size); + if (rc < 0) { + LOGE("AllocateIonMemory failed"); + goto ALLOC_FAILED; + } + + void *vaddr = mmap(NULL, + mMemInfo[i].size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + mMemInfo[i].fd, 0); + if (vaddr == MAP_FAILED) { + deallocOneBuffer(mMemInfo[i]); + LOGE("mmap failed for buffer %d", i); + goto ALLOC_FAILED; + } else + mPtr[i] = vaddr; + } + if (rc == 0) + mBufferCount = mMaxCnt; + + return OK; + +ALLOC_FAILED: + for (uint32_t j = 0; j < i; j++) { + munmap(mPtr[j], mMemInfo[j].size); + mPtr[j] = NULL; + deallocOneBuffer(mMemInfo[j]); + } + return NO_MEMORY; +} + +/*=========================================================================== + * FUNCTION : allocateOne + * + * DESCRIPTION: allocate one buffer + * + * PARAMETERS : + * @size : lenght of the buffer to be allocated + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HeapMemory::allocateOne(size_t size) +{ + unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID; + int rc = NO_ERROR; + + //Note that now we allow incremental allocation. In other words, we allow + //multiple alloc being called as long as the sum of count does not exceed + //mMaxCnt. + if (mBufferCount + 1 > mMaxCnt) { + LOGE("Buffer count %d + 1 out of bound. 
Max is %d", + mBufferCount, mMaxCnt); + return BAD_INDEX; + } + + rc = allocOneBuffer(mMemInfo[mBufferCount], heap_id_mask, size); + if (rc < 0) { + LOGE("AllocateIonMemory failed"); + return NO_MEMORY; + } + + void *vaddr = mmap(NULL, + mMemInfo[mBufferCount].size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + mMemInfo[mBufferCount].fd, 0); + if (vaddr == MAP_FAILED) { + deallocOneBuffer(mMemInfo[mBufferCount]); + LOGE("mmap failed for buffer"); + return NO_MEMORY; + } else + mPtr[mBufferCount] = vaddr; + + if (rc == 0) + mBufferCount += 1; + + return mBufferCount-1; +} + +/*=========================================================================== + * FUNCTION : deallocate + * + * DESCRIPTION: deallocate buffers + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +void QCamera3HeapMemory::deallocate() +{ + for (uint32_t i = 0; i < mBufferCount; i++) { + munmap(mPtr[i], mMemInfo[i].size); + mPtr[i] = NULL; + deallocOneBuffer(mMemInfo[i]); + mCurrentFrameNumbers[i] = -1; + } + mBufferCount = 0; +} + +/*=========================================================================== + * FUNCTION : cacheOps + * + * DESCRIPTION: ion related memory cache operations + * + * PARAMETERS : + * @index : index of the buffer + * @cmd : cache ops command + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3HeapMemory::cacheOps(uint32_t index, unsigned int cmd) +{ + if (index >= mBufferCount) + return BAD_INDEX; + return cacheOpsInternal(index, cmd, mPtr[index]); +} + +/*=========================================================================== + * FUNCTION : getMatchBufIndex + * + * DESCRIPTION: query buffer index by object ptr + * + * PARAMETERS : + * @object : object ptr + * + * RETURN : buffer index if match found, + * -1 if failed + 
*==========================================================================*/ +int QCamera3HeapMemory::getMatchBufIndex(void * /*object*/) +{ + +/* + TODO for HEAP memory type, would there be an equivalent requirement? + + int index = -1; + buffer_handle_t *key = (buffer_handle_t*) object; + if (!key) { + return BAD_VALUE; + } + for (int i = 0; i < mBufferCount; i++) { + if (mBufferHandle[i] == key) { + index = i; + break; + } + } + return index; +*/ + LOGE("FATAL: Not supposed to come here"); + return -1; +} + +/*=========================================================================== + * FUNCTION : QCamera3GrallocMemory + * + * DESCRIPTION: constructor of QCamera3GrallocMemory + * preview stream buffers are allocated from gralloc native_windoe + * + * PARAMETERS : + * @startIdx : start index of array after which we can register buffers in. + * + * RETURN : none + *==========================================================================*/ +QCamera3GrallocMemory::QCamera3GrallocMemory(uint32_t startIdx) + : QCamera3Memory(), mStartIdx(startIdx) +{ + for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i ++) { + mBufferHandle[i] = NULL; + mPrivateHandle[i] = NULL; + } +} + +/*=========================================================================== + * FUNCTION : ~QCamera3GrallocMemory + * + * DESCRIPTION: deconstructor of QCamera3GrallocMemory + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +QCamera3GrallocMemory::~QCamera3GrallocMemory() +{ +} + +/*=========================================================================== + * FUNCTION : registerBuffer + * + * DESCRIPTION: registers frameworks-allocated gralloc buffer_handle_t + * + * PARAMETERS : + * @buffers : buffer_handle_t pointer + * @type : cam_stream_type_t + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ 
+int QCamera3GrallocMemory::registerBuffer(buffer_handle_t *buffer, + __unused cam_stream_type_t type) +{ + status_t ret = NO_ERROR; + struct ion_fd_data ion_info_fd; + void *vaddr = NULL; + int32_t colorSpace = ITU_R_601_FR; + int32_t idx = -1; + + LOGD("E"); + + memset(&ion_info_fd, 0, sizeof(ion_info_fd)); + + if (0 <= getMatchBufIndex((void *) buffer)) { + LOGL("Buffer already registered"); + return ALREADY_EXISTS; + } + + Mutex::Autolock lock(mLock); + if (mBufferCount >= (MM_CAMERA_MAX_NUM_FRAMES - 1 - mStartIdx)) { + LOGE("Number of buffers %d greater than what's supported %d", + mBufferCount, MM_CAMERA_MAX_NUM_FRAMES - mStartIdx); + return BAD_INDEX; + } + + idx = getFreeIndexLocked(); + if (0 > idx) { + LOGE("No available memory slots"); + return BAD_INDEX; + } + + mBufferHandle[idx] = buffer; + mPrivateHandle[idx] = (struct private_handle_t *)(*mBufferHandle[idx]); + + setMetaData(mPrivateHandle[idx], UPDATE_COLOR_SPACE, &colorSpace); + + mMemInfo[idx].main_ion_fd = open("/dev/ion", O_RDONLY); + if (mMemInfo[idx].main_ion_fd < 0) { + LOGE("failed: could not open ion device"); + ret = NO_MEMORY; + goto end; + } else { + ion_info_fd.fd = mPrivateHandle[idx]->fd; + if (ioctl(mMemInfo[idx].main_ion_fd, + ION_IOC_IMPORT, &ion_info_fd) < 0) { + LOGE("ION import failed\n"); + close(mMemInfo[idx].main_ion_fd); + ret = NO_MEMORY; + goto end; + } + } + LOGD("idx = %d, fd = %d, size = %d, offset = %d", + idx, mPrivateHandle[idx]->fd, + mPrivateHandle[idx]->size, + mPrivateHandle[idx]->offset); + mMemInfo[idx].fd = mPrivateHandle[idx]->fd; + mMemInfo[idx].size = + ( /* FIXME: Should update ION interface */ size_t) + mPrivateHandle[idx]->size; + mMemInfo[idx].handle = ion_info_fd.handle; + + vaddr = mmap(NULL, + mMemInfo[idx].size, + PROT_READ | PROT_WRITE, + MAP_SHARED, + mMemInfo[idx].fd, 0); + if (vaddr == MAP_FAILED) { + mMemInfo[idx].handle = 0; + ret = NO_MEMORY; + } else { + mPtr[idx] = vaddr; + mBufferCount++; + } + +end: + LOGD("X "); + return ret; +} 
/*===========================================================================
 * FUNCTION   : unregisterBufferLocked
 *
 * DESCRIPTION: Unregister buffer. Please note that this method has to be
 *              called with 'mLock' acquired.
 *
 * PARAMETERS :
 *   @idx     : unregister buffer at index 'idx'
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3GrallocMemory::unregisterBufferLocked(size_t idx)
{
    // Tear down in reverse order of registerBuffer(): unmap, free the
    // imported ION handle, then close the per-buffer ion device fd.
    munmap(mPtr[idx], mMemInfo[idx].size);
    mPtr[idx] = NULL;

    struct ion_handle_data ion_handle;
    memset(&ion_handle, 0, sizeof(ion_handle));
    ion_handle.handle = mMemInfo[idx].handle;
    if (ioctl(mMemInfo[idx].main_ion_fd, ION_IOC_FREE, &ion_handle) < 0) {
        LOGE("ion free failed");
    }
    close(mMemInfo[idx].main_ion_fd);
    memset(&mMemInfo[idx], 0, sizeof(struct QCamera3MemInfo));
    mMemInfo[idx].main_ion_fd = -1;
    mBufferHandle[idx] = NULL;
    mPrivateHandle[idx] = NULL;
    mCurrentFrameNumbers[idx] = -1;
    mBufferCount--;

    return NO_ERROR;
}

/*===========================================================================
 * FUNCTION   : unregisterBuffer
 *
 * DESCRIPTION: unregister buffer
 *
 * PARAMETERS :
 *   @idx     : unregister buffer at index 'idx'
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3GrallocMemory::unregisterBuffer(size_t idx)
{
    int32_t rc = NO_ERROR;
    Mutex::Autolock lock(mLock);

    // Fix: was LOGD("E ", __FUNCTION__) -- an argument with no conversion
    // specifier in the format string. Same for the "X" traces below.
    LOGD("E");

    // Fix: idx is size_t; use %zu instead of %d to avoid a varargs size
    // mismatch on 64-bit builds (mStartIdx is uint32_t -> %u).
    if (MM_CAMERA_MAX_NUM_FRAMES <= idx) {
        LOGE("Buffer index %zu greater than what is supported %d",
                idx, MM_CAMERA_MAX_NUM_FRAMES);
        return BAD_VALUE;
    }
    if (idx < mStartIdx) {
        LOGE("buffer index %zu less than starting index %u",
                idx, mStartIdx);
        return BAD_INDEX;
    }

    if (0 == mMemInfo[idx].handle) {
        LOGE("Trying to unregister buffer at %zu which still not registered",
                idx);
        return BAD_VALUE;
    }

    rc = unregisterBufferLocked(idx);

    LOGD("X");

    return rc;
}

/*===========================================================================
 * FUNCTION   : unregisterBuffers
 *
 * DESCRIPTION: unregister buffers
 *
 * PARAMETERS : none
 *
 * RETURN     : none
 *==========================================================================*/
void QCamera3GrallocMemory::unregisterBuffers()
{
    int err = NO_ERROR;
    Mutex::Autolock lock(mLock);

    LOGD("E");

    // Only slots with a live ION handle are registered; skip the rest.
    for (uint32_t cnt = mStartIdx; cnt < MM_CAMERA_MAX_NUM_FRAMES; cnt++) {
        if (0 == mMemInfo[cnt].handle) {
            continue;
        }
        err = unregisterBufferLocked(cnt);
        if (NO_ERROR != err) {
            LOGE("Error unregistering buffer %d error %d",
                    cnt, err);
        }
    }
    mBufferCount = 0;
    LOGD("X");
}

/*===========================================================================
 * FUNCTION   : markFrameNumber
 *
 * DESCRIPTION: We use this function from the request call path to mark the
 *              buffers with the frame number they are intended for this info
 *              is used later when giving out callback & it is duty of PP to
 *              ensure that data for that particular frameNumber/Request is
 *              written to this buffer.
+ * PARAMETERS : + * @index : index of the buffer + * @frame# : Frame number from the framework + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3GrallocMemory::markFrameNumber(uint32_t index, uint32_t frameNumber) +{ + Mutex::Autolock lock(mLock); + + if (index >= MM_CAMERA_MAX_NUM_FRAMES) { + LOGE("Index out of bounds"); + return BAD_INDEX; + } + if (index < mStartIdx) { + LOGE("buffer index %d less than starting index %d", + index, mStartIdx); + return BAD_INDEX; + } + + if (0 == mMemInfo[index].handle) { + LOGE("Buffer at %d not registered", index); + return BAD_INDEX; + } + + mCurrentFrameNumbers[index] = (int32_t)frameNumber; + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : getFrameNumber + * + * DESCRIPTION: We use this to fetch the frameNumber for the request with which + * this buffer was given to HAL + * + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : int32_t frameNumber + * positive/zero -- success + * negative failure + *==========================================================================*/ +int32_t QCamera3GrallocMemory::getFrameNumber(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index >= MM_CAMERA_MAX_NUM_FRAMES) { + LOGE("Index out of bounds"); + return -1; + } + if (index < mStartIdx) { + LOGE("buffer index %d less than starting index %d", + index, mStartIdx); + return BAD_INDEX; + } + + if (0 == mMemInfo[index].handle) { + LOGE("Buffer at %d not registered", index); + return -1; + } + + return mCurrentFrameNumbers[index]; +} + +/*=========================================================================== + * FUNCTION : getBufferIndex + * + * DESCRIPTION: We use this to fetch the buffer index for the request with + * a particular frame number + * + * + * PARAMETERS : + * @frameNumber : frame number of 
the buffer + * + * RETURN : int32_t buffer index + * negative failure + *==========================================================================*/ +int32_t QCamera3GrallocMemory::getBufferIndex(uint32_t frameNumber) +{ + for (uint32_t index = mStartIdx; + index < MM_CAMERA_MAX_NUM_FRAMES; index++) { + if (mMemInfo[index].handle && + mCurrentFrameNumbers[index] == (int32_t)frameNumber) + return (int32_t)index; + } + return -1; +} + +/*=========================================================================== + * FUNCTION : cacheOps + * + * DESCRIPTION: ion related memory cache operations + * + * PARAMETERS : + * @index : index of the buffer + * @cmd : cache ops command + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3GrallocMemory::cacheOps(uint32_t index, unsigned int cmd) +{ + if (index >= MM_CAMERA_MAX_NUM_FRAMES) { + LOGE("Index out of bounds"); + return -1; + } + if (index < mStartIdx) { + LOGE("buffer index %d less than starting index %d", + index, mStartIdx); + return BAD_INDEX; + } + + return cacheOpsInternal(index, cmd, mPtr[index]); +} + +/*=========================================================================== + * FUNCTION : getMatchBufIndex + * + * DESCRIPTION: query buffer index by object ptr + * + * PARAMETERS : + * @opaque : opaque ptr + * + * RETURN : buffer index if match found, + * -1 if failed + *==========================================================================*/ +int QCamera3GrallocMemory::getMatchBufIndex(void *object) +{ + Mutex::Autolock lock(mLock); + + int index = -1; + buffer_handle_t *key = (buffer_handle_t*) object; + if (!key) { + return BAD_VALUE; + } + for (uint32_t i = mStartIdx; i < MM_CAMERA_MAX_NUM_FRAMES; i++) { + if (mBufferHandle[i] == key) { + index = (int)i; + break; + } + } + + return index; +} + +/*=========================================================================== + 
 * FUNCTION   : getFreeIndexLocked
 *
 * DESCRIPTION: Find free index slot. Note 'mLock' needs to be acquired
 *              before calling this method.
 *
 * PARAMETERS : None
 *
 * RETURN     : free buffer index if found,
 *              -1 if failed
 *==========================================================================*/
int QCamera3GrallocMemory::getFreeIndexLocked()
{
    int index = -1;

    // NOTE(review): this bound (MAX - 1) differs from registerBuffer's
    // (MAX - 1 - mStartIdx) and from the value printed in the message
    // (MAX) -- looks inconsistent; confirm the intended capacity.
    if (mBufferCount >= (MM_CAMERA_MAX_NUM_FRAMES - 1)) {
        LOGE("Number of buffers %d greater than what's supported %d",
                mBufferCount, MM_CAMERA_MAX_NUM_FRAMES);
        return index;
    }

    // A slot is free when no ION handle has been imported into it.
    for (size_t i = mStartIdx; i < MM_CAMERA_MAX_NUM_FRAMES; i++) {
        if (0 == mMemInfo[i].handle) {
            index = i;
            break;
        }
    }

    return index;
}

/*===========================================================================
 * FUNCTION   : getPtrLocked
 *
 * DESCRIPTION: Return buffer pointer. Please note 'mLock' must be acquired
 *              before calling this method.
 *
 * PARAMETERS :
 *   @index   : index of the buffer
 *
 * RETURN     : buffer ptr
 *==========================================================================*/
void *QCamera3GrallocMemory::getPtrLocked(uint32_t index)
{
    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
        LOGE("index %d out of bound [0, %d)",
                index, MM_CAMERA_MAX_NUM_FRAMES);
        return NULL;
    }
    if (index < mStartIdx) {
        LOGE("buffer index %d less than starting index %d",
                index, mStartIdx);
        return NULL;
    }


    if (0 == mMemInfo[index].handle) {
        LOGE("Buffer at %d not registered", index);
        return NULL;
    }

    // mPtr[index] is the mmap()ed view created in registerBuffer().
    return mPtr[index];
}

/*===========================================================================
 * FUNCTION   : getPtr
 *
 * DESCRIPTION: Return buffer pointer.
 *
 * PARAMETERS :
 *   @index   : index of the buffer
 *
 * RETURN     : buffer ptr
 *==========================================================================*/
void *QCamera3GrallocMemory::getPtr(uint32_t index)
{
    // Thread-safe wrapper: takes mLock, then defers to getPtrLocked().
    Mutex::Autolock lock(mLock);
    return getPtrLocked(index);
}

/*===========================================================================
 * FUNCTION   : getBufferHandle
 *
 * DESCRIPTION: return framework pointer
 *
 * PARAMETERS :
 *   @index   : index of the buffer
 *
 * RETURN     : buffer ptr if match found
                NULL if failed
 *==========================================================================*/
void *QCamera3GrallocMemory::getBufferHandle(uint32_t index)
{
    Mutex::Autolock lock(mLock);

    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
        LOGE("index %d out of bound [0, %d)",
                index, MM_CAMERA_MAX_NUM_FRAMES);
        return NULL;
    }
    if (index < mStartIdx) {
        LOGE("buffer index %d less than starting index %d",
                index, mStartIdx);
        return NULL;
    }

    if (0 == mMemInfo[index].handle) {
        LOGE("Buffer at %d not registered", index);
        return NULL;
    }

    // Returns the framework-owned buffer_handle_t*, not the mapped memory.
    return mBufferHandle[index];
}
}; //namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3Mem.h b/camera/QCamera2/HAL3/QCamera3Mem.h
new file mode 100644
index 0000000..c99079d
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Mem.h
@@ -0,0 +1,158 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 *       copyright notice, this list of conditions and the following
 *       disclaimer in the documentation and/or other materials provided
 *       with the distribution.
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __QCAMERA3HWI_MEM_H__ +#define __QCAMERA3HWI_MEM_H__ + +// System dependencies +#include <linux/msm_ion.h> +#include <utils/Mutex.h> + +// Camera dependencies +#include "camera3.h" + +extern "C" { +#include "mm_camera_interface.h" +} + +using namespace android; + +namespace qcamera { + +// Base class for all memory types. Abstract. 
class QCamera3Memory {

public:
    // Convenience wrappers over the pure-virtual cacheOps() for the three
    // ION cache maintenance commands.
    int cleanCache(uint32_t index)
    {
        return cacheOps(index, ION_IOC_CLEAN_CACHES);
    }
    int invalidateCache(uint32_t index)
    {
        return cacheOps(index, ION_IOC_INV_CACHES);
    }
    int cleanInvalidateCache(uint32_t index)
    {
        return cacheOps(index, ION_IOC_CLEAN_INV_CACHES);
    }
    int getFd(uint32_t index);
    ssize_t getSize(uint32_t index);
    uint32_t getCnt();

    // Subclass-specific operations (heap vs gralloc backing).
    virtual int cacheOps(uint32_t index, unsigned int cmd) = 0;
    virtual int getMatchBufIndex(void *object) = 0;
    virtual void *getPtr(uint32_t index) = 0;

    // Frame-number bookkeeping used to associate a buffer with the capture
    // request it was handed out for.
    virtual int32_t markFrameNumber(uint32_t index, uint32_t frameNumber) = 0;
    virtual int32_t getFrameNumber(uint32_t index) = 0;
    virtual int32_t getBufferIndex(uint32_t frameNumber) = 0;

    QCamera3Memory();
    virtual ~QCamera3Memory();

    int32_t getBufDef(const cam_frame_len_offset_t &offset,
            mm_camera_buf_def_t &bufDef, uint32_t index);

protected:
    // Per-buffer ION bookkeeping: buffer fd, ion device fd, imported handle
    // and mapped size.
    struct QCamera3MemInfo {
        int fd;
        int main_ion_fd;
        ion_user_handle_t handle;
        size_t size;
    };

    int cacheOpsInternal(uint32_t index, unsigned int cmd, void *vaddr);
    virtual void *getPtrLocked(uint32_t index) = 0;

    uint32_t mBufferCount;                                    // number of live buffers
    struct QCamera3MemInfo mMemInfo[MM_CAMERA_MAX_NUM_FRAMES];
    void *mPtr[MM_CAMERA_MAX_NUM_FRAMES];                     // mmap()ed views
    int32_t mCurrentFrameNumbers[MM_CAMERA_MAX_NUM_FRAMES];   // -1 when unassigned
    Mutex mLock;                                              // guards all tables above
};

// Internal heap memory is used for memories used internally
// They are allocated from /dev/ion. Examples are: capabilities,
// parameters, metadata, and internal YUV data for jpeg encoding.
class QCamera3HeapMemory : public QCamera3Memory {
public:
    QCamera3HeapMemory(uint32_t maxCnt);
    virtual ~QCamera3HeapMemory();

    // Allocate all (or one) internally-owned ION heap buffer(s) of 'size'.
    int allocate(size_t size);
    int allocateOne(size_t size);
    void deallocate();

    virtual int cacheOps(uint32_t index, unsigned int cmd);
    virtual int getMatchBufIndex(void *object);
    virtual void *getPtr(uint32_t index);

    virtual int32_t markFrameNumber(uint32_t index, uint32_t frameNumber);
    virtual int32_t getFrameNumber(uint32_t index);
    virtual int32_t getBufferIndex(uint32_t frameNumber);

protected:
    virtual void *getPtrLocked(uint32_t index);
private:
    int allocOneBuffer(struct QCamera3MemInfo &memInfo,
            unsigned int heap_id, size_t size);
    void deallocOneBuffer(struct QCamera3MemInfo &memInfo);
    uint32_t mMaxCnt;   // upper bound on buffers this instance may allocate
};

// Gralloc Memory shared with frameworks
class QCamera3GrallocMemory : public QCamera3Memory {
public:
    QCamera3GrallocMemory(uint32_t startIdx);
    virtual ~QCamera3GrallocMemory();

    // Import/release framework-allocated gralloc buffers into the ION tables.
    int registerBuffer(buffer_handle_t *buffer, cam_stream_type_t type);
    int32_t unregisterBuffer(size_t idx);
    void unregisterBuffers();
    virtual int cacheOps(uint32_t index, unsigned int cmd);
    virtual int getMatchBufIndex(void *object);
    virtual void *getPtr(uint32_t index);

    virtual int32_t markFrameNumber(uint32_t index, uint32_t frameNumber);
    virtual int32_t getFrameNumber(uint32_t index);
    virtual int32_t getBufferIndex(uint32_t frameNumber);

    void *getBufferHandle(uint32_t index);
protected:
    virtual void *getPtrLocked(uint32_t index);
private:
    int32_t unregisterBufferLocked(size_t idx);
    int32_t getFreeIndexLocked();
    buffer_handle_t *mBufferHandle[MM_CAMERA_MAX_NUM_FRAMES];
    struct private_handle_t *mPrivateHandle[MM_CAMERA_MAX_NUM_FRAMES];

    uint32_t mStartIdx;   // slots below this index are reserved by the owner
};
};
#endif
diff --git a/camera/QCamera2/HAL3/QCamera3PostProc.cpp b/camera/QCamera2/HAL3/QCamera3PostProc.cpp
new file mode 100644
index 0000000..be53d4a
--- /dev/null
+++
b/camera/QCamera2/HAL3/QCamera3PostProc.cpp @@ -0,0 +1,3142 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*
*/

#define LOG_TAG "QCamera3PostProc"

// To remove
#include <cutils/properties.h>

// System dependencies
#include <stdio.h>

// Camera dependencies
#include "QCamera3Channel.h"
#include "QCamera3HWI.h"
#include "QCamera3PostProc.h"
#include "QCamera3Stream.h"
#include "QCameraTrace.h"

extern "C" {
#include "mm_camera_dbg.h"
}

#define ENABLE_MODEL_INFO_EXIF

namespace qcamera {

static const char ExifAsciiPrefix[] =
    { 0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0 };          // "ASCII\0\0\0"

__unused
static const char ExifUndefinedPrefix[] =
    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };       // "\0\0\0\0\0\0\0\0"

#define EXIF_ASCII_PREFIX_SIZE           8   //(sizeof(ExifAsciiPrefix))
#define FOCAL_LENGTH_DECIMAL_PRECISION   1000

/*===========================================================================
 * FUNCTION   : QCamera3PostProcessor
 *
 * DESCRIPTION: constructor of QCamera3PostProcessor.
 *
 * PARAMETERS :
 *   @cam_ctrl : ptr to HWI object
 *
 * RETURN     : None
 *==========================================================================*/
QCamera3PostProcessor::QCamera3PostProcessor(QCamera3ProcessingChannel* ch_ctrl)
    : m_parent(ch_ctrl),
      mJpegCB(NULL),
      mJpegUserData(NULL),
      mJpegClientHandle(0),
      mJpegSessionId(0),
      m_bThumbnailNeeded(TRUE),
      m_pReprocChannel(NULL),
      // Each queue is constructed with the release callback used to free its
      // entries when the queue is flushed (NULL where no cleanup is needed).
      m_inputPPQ(releasePPInputData, this),
      m_inputFWKPPQ(NULL, this),
      m_ongoingPPQ(releaseOngoingPPData, this),
      m_inputJpegQ(releaseJpegData, this),
      m_ongoingJpegQ(releaseJpegData, this),
      m_inputMetaQ(releaseMetadata, this),
      m_jpegSettingsQ(NULL, this)
{
    memset(&mJpegHandle, 0, sizeof(mJpegHandle));
    memset(&mJpegMetadata, 0, sizeof(mJpegMetadata));
    pthread_mutex_init(&mReprocJobLock, NULL);
}

/*===========================================================================
 * FUNCTION   : ~QCamera3PostProcessor
 *
 * DESCRIPTION: destructor of QCamera3PostProcessor.
+ * + * PARAMETERS : None + * + * RETURN : None + *==========================================================================*/ +QCamera3PostProcessor::~QCamera3PostProcessor() +{ + pthread_mutex_destroy(&mReprocJobLock); +} + +/*=========================================================================== + * FUNCTION : init + * + * DESCRIPTION: initialization of postprocessor + * + * PARAMETERS : + * @memory : output buffer memory + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3PostProcessor::init(QCamera3StreamMem *memory) +{ + ATRACE_CALL(); + mOutputMem = memory; + m_dataProcTh.launch(dataProcessRoutine, this); + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : deinit + * + * DESCRIPTION: de-initialization of postprocessor + * + * PARAMETERS : None + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3PostProcessor::deinit() +{ + int rc = NO_ERROR; + m_dataProcTh.exit(); + + if (m_pReprocChannel != NULL) { + m_pReprocChannel->stop(); + delete m_pReprocChannel; + m_pReprocChannel = NULL; + } + + if(mJpegClientHandle > 0) { + rc = mJpegHandle.close(mJpegClientHandle); + LOGH("Jpeg closed, rc = %d, mJpegClientHandle = %x", + rc, mJpegClientHandle); + mJpegClientHandle = 0; + memset(&mJpegHandle, 0, sizeof(mJpegHandle)); + } + + mOutputMem = NULL; + return rc; +} + +/*=========================================================================== + * FUNCTION : initJpeg + * + * DESCRIPTION: initialization of jpeg through postprocessor + * + * PARAMETERS : + * @jpeg_cb : callback to handle jpeg event from mm-camera-interface + * @max_pic_dim : max picture dimensions + * @user_data : user data ptr for jpeg callback + * + * RETURN : 
int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3PostProcessor::initJpeg(jpeg_encode_callback_t jpeg_cb,
        cam_dimension_t* max_pic_dim,
        void *user_data)
{
    ATRACE_CALL();
    mJpegCB = jpeg_cb;
    mJpegUserData = user_data;
    mm_dimension max_size;

    // Reject obviously invalid (negative) dimensions before opening a session.
    if ((0 > max_pic_dim->width) || (0 > max_pic_dim->height)) {
        LOGE("Negative dimension %dx%d",
                max_pic_dim->width, max_pic_dim->height);
        return BAD_VALUE;
    }

    // set max pic size
    memset(&max_size, 0, sizeof(mm_dimension));
    max_size.w = max_pic_dim->width;
    max_size.h = max_pic_dim->height;

    // Pass OTP calibration data to JPEG
    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
    mJpegMetadata.default_sensor_flip = FLIP_NONE;
    mJpegMetadata.sensor_mount_angle = hal_obj->getSensorMountAngle();
    memcpy(&mJpegMetadata.otp_calibration_data,
            hal_obj->getRelatedCalibrationData(),
            sizeof(cam_related_system_calibration_data_t));
    // jpeg_open returns a non-zero client handle on success -- presumably;
    // confirm against the mm-jpeg interface.
    mJpegClientHandle = jpeg_open(&mJpegHandle, NULL, max_size, &mJpegMetadata);

    if (!mJpegClientHandle) {
        LOGE("jpeg_open did not work");
        return UNKNOWN_ERROR;
    }
    return NO_ERROR;
}

/*===========================================================================
 * FUNCTION   : start
 *
 * DESCRIPTION: start postprocessor. Data process thread and data notify thread
 *              will be launched.
 *
 * PARAMETERS :
 *   @config        : reprocess configuration
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *
 * NOTE       : if any reprocess is needed, a reprocess channel/stream
 *              will be started.
 *==========================================================================*/
int32_t QCamera3PostProcessor::start(const reprocess_config_t &config)
{
    int32_t rc = NO_ERROR;
    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;

    if (config.reprocess_type != REPROCESS_TYPE_NONE) {
        // Replace any stale reprocess channel from a previous session.
        if (m_pReprocChannel != NULL) {
            m_pReprocChannel->stop();
            delete m_pReprocChannel;
            m_pReprocChannel = NULL;
        }

        // if reprocess is needed, start reprocess channel
        LOGD("Setting input channel as pInputChannel");
        m_pReprocChannel = hal_obj->addOfflineReprocChannel(config, m_parent);
        if (m_pReprocChannel == NULL) {
            LOGE("cannot add reprocess channel");
            return UNKNOWN_ERROR;
        }
        /*start the reprocess channel only if buffers are already allocated, thus
        only start it in an intermediate reprocess type, defer it for others*/
        if (config.reprocess_type == REPROCESS_TYPE_JPEG) {
            rc = m_pReprocChannel->start();
            if (rc != 0) {
                LOGE("cannot start reprocess channel");
                delete m_pReprocChannel;
                m_pReprocChannel = NULL;
                return rc;
            }
        }
    }
    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_START_DATA_PROC, TRUE, FALSE);

    return rc;
}

/*===========================================================================
 * FUNCTION   : flush
 *
 * DESCRIPTION: stop ongoing postprocess and jpeg jobs
 *
 * PARAMETERS : None
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *
 *==========================================================================*/
int32_t QCamera3PostProcessor::flush()
{
    int32_t rc = NO_ERROR;
    // Abort every in-flight jpeg job and release its resources. Note: only
    // the last abort_job() result survives in 'rc' and it is overwritten by
    // releaseOfflineBuffers() below.
    qcamera_hal3_jpeg_data_t *jpeg_job =
            (qcamera_hal3_jpeg_data_t *)m_ongoingJpegQ.dequeue();
    while (jpeg_job != NULL) {
        rc = mJpegHandle.abort_job(jpeg_job->jobId);
        releaseJpegJobData(jpeg_job);
        free(jpeg_job);

        jpeg_job = (qcamera_hal3_jpeg_data_t *)m_ongoingJpegQ.dequeue();
    }
    rc = releaseOfflineBuffers(true);
    return rc;
}

+/*=========================================================================== + * FUNCTION : stop + * + * DESCRIPTION: stop postprocessor. Data process and notify thread will be stopped. + * + * PARAMETERS : None + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + * + * NOTE : reprocess channel will be stopped and deleted if there is any + *==========================================================================*/ +int32_t QCamera3PostProcessor::stop() +{ + m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_STOP_DATA_PROC, TRUE, TRUE); + + if (m_pReprocChannel != NULL) { + m_pReprocChannel->stop(); + delete m_pReprocChannel; + m_pReprocChannel = NULL; + } + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : getFWKJpegEncodeConfig + * + * DESCRIPTION: function to prepare encoding job information + * + * PARAMETERS : + * @encode_parm : param to be filled with encoding configuration + * @frame : framework input buffer + * @jpeg_settings : jpeg settings to be applied for encoding + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3PostProcessor::getFWKJpegEncodeConfig( + mm_jpeg_encode_params_t& encode_parm, + qcamera_fwk_input_pp_data_t *frame, + jpeg_settings_t *jpeg_settings) +{ + LOGD("E"); + int32_t ret = NO_ERROR; + + if ((NULL == frame) || (NULL == jpeg_settings)) { + return BAD_VALUE; + } + + ssize_t bufSize = mOutputMem->getSize(jpeg_settings->out_buf_index); + if (BAD_INDEX == bufSize) { + LOGE("cannot retrieve buffer size for buffer %u", + jpeg_settings->out_buf_index); + return BAD_VALUE; + } + + encode_parm.jpeg_cb = mJpegCB; + encode_parm.userdata = mJpegUserData; + + if (jpeg_settings->thumbnail_size.width > 0 && + jpeg_settings->thumbnail_size.height > 0) + m_bThumbnailNeeded = TRUE; + else + m_bThumbnailNeeded = FALSE; + 
encode_parm.encode_thumbnail = m_bThumbnailNeeded; + + // get color format + cam_format_t img_fmt = frame->reproc_config.stream_format; + encode_parm.color_format = getColorfmtFromImgFmt(img_fmt); + + // get jpeg quality + encode_parm.quality = jpeg_settings->jpeg_quality; + if (encode_parm.quality <= 0) { + encode_parm.quality = 85; + } + + // get jpeg thumbnail quality + encode_parm.thumb_quality = jpeg_settings->jpeg_thumb_quality; + + cam_frame_len_offset_t main_offset = + frame->reproc_config.input_stream_plane_info.plane_info; + + encode_parm.num_src_bufs = 1; + encode_parm.src_main_buf[0].index = 0; + encode_parm.src_main_buf[0].buf_size = frame->input_buffer.frame_len; + encode_parm.src_main_buf[0].buf_vaddr = (uint8_t *) frame->input_buffer.buffer; + encode_parm.src_main_buf[0].fd = frame->input_buffer.fd; + encode_parm.src_main_buf[0].format = MM_JPEG_FMT_YUV; + encode_parm.src_main_buf[0].offset = main_offset; + + //Pass input thumbnail buffer info to encoder. + //Note: Use main buffer to encode thumbnail + if (m_bThumbnailNeeded == TRUE) { + encode_parm.num_tmb_bufs = 1; + encode_parm.src_thumb_buf[0] = encode_parm.src_main_buf[0]; + } + + //Pass output jpeg buffer info to encoder. + //mOutputMem is allocated by framework. 
+ encode_parm.num_dst_bufs = 1; + encode_parm.dest_buf[0].index = 0; + encode_parm.dest_buf[0].buf_size = (size_t)bufSize; + encode_parm.dest_buf[0].buf_vaddr = (uint8_t *)mOutputMem->getPtr( + jpeg_settings->out_buf_index); + encode_parm.dest_buf[0].fd = mOutputMem->getFd( + jpeg_settings->out_buf_index); + encode_parm.dest_buf[0].format = MM_JPEG_FMT_YUV; + encode_parm.dest_buf[0].offset = main_offset; + + LOGD("X"); + return NO_ERROR; + + LOGD("X with error %d", ret); + return ret; +} + +/*=========================================================================== + * FUNCTION : getJpegEncodeConfig + * + * DESCRIPTION: function to prepare encoding job information + * + * PARAMETERS : + * @encode_parm : param to be filled with encoding configuration + * #main_stream : stream object where the input buffer comes from + * @jpeg_settings : jpeg settings to be applied for encoding + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3PostProcessor::getJpegEncodeConfig( + mm_jpeg_encode_params_t& encode_parm, + QCamera3Stream *main_stream, + jpeg_settings_t *jpeg_settings) +{ + LOGD("E"); + int32_t ret = NO_ERROR; + ssize_t bufSize = 0; + + encode_parm.jpeg_cb = mJpegCB; + encode_parm.userdata = mJpegUserData; + + if (jpeg_settings->thumbnail_size.width > 0 && + jpeg_settings->thumbnail_size.height > 0) + m_bThumbnailNeeded = TRUE; + else + m_bThumbnailNeeded = FALSE; + encode_parm.encode_thumbnail = m_bThumbnailNeeded; + + // get color format + cam_format_t img_fmt = CAM_FORMAT_YUV_420_NV12; //default value + main_stream->getFormat(img_fmt); + encode_parm.color_format = getColorfmtFromImgFmt(img_fmt); + + // get jpeg quality + encode_parm.quality = jpeg_settings->jpeg_quality; + if (encode_parm.quality <= 0) { + encode_parm.quality = 85; + } + + // get jpeg thumbnail quality + encode_parm.thumb_quality = 
jpeg_settings->jpeg_thumb_quality; + + cam_frame_len_offset_t main_offset; + memset(&main_offset, 0, sizeof(cam_frame_len_offset_t)); + main_stream->getFrameOffset(main_offset); + + // src buf config + //Pass input main image buffer info to encoder. + QCamera3StreamMem *pStreamMem = main_stream->getStreamBufs(); + if (pStreamMem == NULL) { + LOGE("cannot get stream bufs from main stream"); + ret = BAD_VALUE; + goto on_error; + } + encode_parm.num_src_bufs = MIN(pStreamMem->getCnt(), MM_JPEG_MAX_BUF); + for (uint32_t i = 0; i < encode_parm.num_src_bufs; i++) { + if (pStreamMem != NULL) { + encode_parm.src_main_buf[i].index = i; + bufSize = pStreamMem->getSize(i); + if (BAD_INDEX == bufSize) { + LOGE("cannot retrieve buffer size for buffer %u", i); + ret = BAD_VALUE; + goto on_error; + } + encode_parm.src_main_buf[i].buf_size = (size_t)bufSize; + encode_parm.src_main_buf[i].buf_vaddr = (uint8_t *)pStreamMem->getPtr(i); + encode_parm.src_main_buf[i].fd = pStreamMem->getFd(i); + encode_parm.src_main_buf[i].format = MM_JPEG_FMT_YUV; + encode_parm.src_main_buf[i].offset = main_offset; + } + } + + //Pass input thumbnail buffer info to encoder. 
+ //Note: Use main buffer to encode thumbnail + if (m_bThumbnailNeeded == TRUE) { + pStreamMem = main_stream->getStreamBufs(); + if (pStreamMem == NULL) { + LOGE("cannot get stream bufs from thumb stream"); + ret = BAD_VALUE; + goto on_error; + } + cam_frame_len_offset_t thumb_offset; + memset(&thumb_offset, 0, sizeof(cam_frame_len_offset_t)); + main_stream->getFrameOffset(thumb_offset); + encode_parm.num_tmb_bufs = MIN(pStreamMem->getCnt(), MM_JPEG_MAX_BUF); + for (uint32_t i = 0; i < encode_parm.num_tmb_bufs; i++) { + if (pStreamMem != NULL) { + encode_parm.src_thumb_buf[i].index = i; + bufSize = pStreamMem->getSize(i); + if (BAD_INDEX == bufSize) { + LOGE("cannot retrieve buffer size for buffer %u", i); + ret = BAD_VALUE; + goto on_error; + } + encode_parm.src_thumb_buf[i].buf_size = (uint32_t)bufSize; + encode_parm.src_thumb_buf[i].buf_vaddr = (uint8_t *)pStreamMem->getPtr(i); + encode_parm.src_thumb_buf[i].fd = pStreamMem->getFd(i); + encode_parm.src_thumb_buf[i].format = MM_JPEG_FMT_YUV; + encode_parm.src_thumb_buf[i].offset = thumb_offset; + } + } + } + + //Pass output jpeg buffer info to encoder. + //mJpegMem is allocated by framework. 
+ bufSize = mOutputMem->getSize(jpeg_settings->out_buf_index); + if (BAD_INDEX == bufSize) { + LOGE("cannot retrieve buffer size for buffer %u", + jpeg_settings->out_buf_index); + ret = BAD_VALUE; + goto on_error; + } + encode_parm.num_dst_bufs = 1; + encode_parm.dest_buf[0].index = 0; + encode_parm.dest_buf[0].buf_size = (size_t)bufSize; + encode_parm.dest_buf[0].buf_vaddr = (uint8_t *)mOutputMem->getPtr( + jpeg_settings->out_buf_index); + encode_parm.dest_buf[0].fd = mOutputMem->getFd( + jpeg_settings->out_buf_index); + encode_parm.dest_buf[0].format = MM_JPEG_FMT_YUV; + encode_parm.dest_buf[0].offset = main_offset; + + LOGD("X"); + return NO_ERROR; + +on_error: + LOGD("X with error %d", ret); + return ret; +} + +int32_t QCamera3PostProcessor::processData(mm_camera_super_buf_t *input) { + return processData(input, NULL, 0); +} + +/*=========================================================================== + * FUNCTION : processData + * + * DESCRIPTION: enqueue data into dataProc thread + * + * PARAMETERS : + * @frame : process input frame + * @output : process output frame + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + * + * NOTE : depends on if offline reprocess is needed, received frame will + * be sent to either input queue of postprocess or jpeg encoding + *==========================================================================*/ +int32_t QCamera3PostProcessor::processData(mm_camera_super_buf_t *input, + buffer_handle_t *output, uint32_t frameNumber) +{ + LOGD("E"); + pthread_mutex_lock(&mReprocJobLock); + + // enqueue to post proc input queue + qcamera_hal3_pp_buffer_t *pp_buffer = (qcamera_hal3_pp_buffer_t *)malloc( + sizeof(qcamera_hal3_pp_buffer_t)); + if (NULL == pp_buffer) { + LOGE("out of memory"); + return NO_MEMORY; + } + memset(pp_buffer, 0, sizeof(*pp_buffer)); + pp_buffer->input = input; + pp_buffer->output = output; + pp_buffer->frameNumber = frameNumber; + m_inputPPQ.enqueue((void *)pp_buffer); + 
if (!(m_inputMetaQ.isEmpty())) { + LOGD("meta queue is not empty, do next job"); + m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE); + } else + LOGD("metadata queue is empty"); + pthread_mutex_unlock(&mReprocJobLock); + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : needsReprocess + * + * DESCRIPTION: Determine if reprocess is needed. + * + * PARAMETERS : + * @frame : process frame + * + * RETURN : + * TRUE if frame needs to be reprocessed + * FALSE is frame does not need to be reprocessed + * + *==========================================================================*/ +bool QCamera3PostProcessor::needsReprocess(qcamera_fwk_input_pp_data_t *frame) +{ + metadata_buffer_t* meta = (metadata_buffer_t *) frame->metadata_buffer.buffer; + bool edgeModeOn = FALSE; + bool noiseRedModeOn = FALSE; + bool reproNotDone = TRUE; + + if (frame->reproc_config.reprocess_type == REPROCESS_TYPE_NONE) { + return FALSE; + } + + // edge detection + IF_META_AVAILABLE(cam_edge_application_t, edgeMode, + CAM_INTF_META_EDGE_MODE, meta) { + edgeModeOn = (CAM_EDGE_MODE_OFF != edgeMode->edge_mode); + } + + // noise reduction + IF_META_AVAILABLE(uint32_t, noiseRedMode, + CAM_INTF_META_NOISE_REDUCTION_MODE, meta) { + noiseRedModeOn = (CAM_NOISE_REDUCTION_MODE_OFF != *noiseRedMode); + } + + IF_META_AVAILABLE(uint8_t, reprocess_flags, + CAM_INTF_META_REPROCESS_FLAGS, meta) { + reproNotDone = FALSE; + } + + return (edgeModeOn || noiseRedModeOn || reproNotDone); +} + +/*=========================================================================== + * FUNCTION : processData + * + * DESCRIPTION: enqueue data into dataProc thread + * + * PARAMETERS : + * @frame : process frame + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + * + * NOTE : depends on if offline reprocess is needed, received frame will + * be sent to either input queue of postprocess or jpeg encoding + 
*==========================================================================*/ +int32_t QCamera3PostProcessor::processData(qcamera_fwk_input_pp_data_t *frame) +{ + if (needsReprocess(frame)) { + ATRACE_INT("Camera:Reprocess", 1); + LOGH("scheduling framework reprocess"); + pthread_mutex_lock(&mReprocJobLock); + // enqueu to post proc input queue + m_inputFWKPPQ.enqueue((void *)frame); + m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE); + pthread_mutex_unlock(&mReprocJobLock); + } else { + jpeg_settings_t *jpeg_settings = (jpeg_settings_t *)m_jpegSettingsQ.dequeue(); + + if (jpeg_settings == NULL) { + LOGE("Cannot find jpeg settings"); + return BAD_VALUE; + } + + LOGH("no need offline reprocess, sending to jpeg encoding"); + qcamera_hal3_jpeg_data_t *jpeg_job = + (qcamera_hal3_jpeg_data_t *)malloc(sizeof(qcamera_hal3_jpeg_data_t)); + if (jpeg_job == NULL) { + LOGE("No memory for jpeg job"); + return NO_MEMORY; + } + + memset(jpeg_job, 0, sizeof(qcamera_hal3_jpeg_data_t)); + jpeg_job->fwk_frame = frame; + jpeg_job->jpeg_settings = jpeg_settings; + jpeg_job->metadata = + (metadata_buffer_t *) frame->metadata_buffer.buffer; + + // enqueu to jpeg input queue + m_inputJpegQ.enqueue((void *)jpeg_job); + m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE); + } + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : processPPMetadata + * + * DESCRIPTION: enqueue data into dataProc thread + * + * PARAMETERS : + * @frame : process metadata frame received from pic channel + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + * + *==========================================================================*/ +int32_t QCamera3PostProcessor::processPPMetadata(mm_camera_super_buf_t *reproc_meta) +{ + LOGD("E"); + pthread_mutex_lock(&mReprocJobLock); + // enqueue to metadata input queue + m_inputMetaQ.enqueue((void *)reproc_meta); + if 
(!(m_inputPPQ.isEmpty())) { + LOGD("pp queue is not empty, do next job"); + m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE); + } else { + LOGD("pp queue is empty, not calling do next job"); + } + pthread_mutex_unlock(&mReprocJobLock); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : processJpegSettingData + * + * DESCRIPTION: enqueue jpegSetting into dataProc thread + * + * PARAMETERS : + * @jpeg_settings : jpeg settings data received from pic channel + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + * + *==========================================================================*/ +int32_t QCamera3PostProcessor::processJpegSettingData( + jpeg_settings_t *jpeg_settings) +{ + if (!jpeg_settings) { + LOGE("invalid jpeg settings pointer"); + return -EINVAL; + } + return m_jpegSettingsQ.enqueue((void *)jpeg_settings); +} + +/*=========================================================================== + * FUNCTION : processPPData + * + * DESCRIPTION: process received frame after reprocess. + * + * PARAMETERS : + * @frame : received frame from reprocess channel. + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + * + * NOTE : The frame after reprocess need to send to jpeg encoding. 
+ *==========================================================================*/ +int32_t QCamera3PostProcessor::processPPData(mm_camera_super_buf_t *frame) +{ + qcamera_hal3_pp_data_t *job = (qcamera_hal3_pp_data_t *)m_ongoingPPQ.dequeue(); + ATRACE_INT("Camera:Reprocess", 0); + if (job == NULL || ((NULL == job->src_frame) && (NULL == job->fwk_src_frame))) { + LOGE("Cannot find reprocess job"); + return BAD_VALUE; + } + if (job->jpeg_settings == NULL) { + LOGE("Cannot find jpeg settings"); + return BAD_VALUE; + } + + qcamera_hal3_jpeg_data_t *jpeg_job = + (qcamera_hal3_jpeg_data_t *)malloc(sizeof(qcamera_hal3_jpeg_data_t)); + if (jpeg_job == NULL) { + LOGE("No memory for jpeg job"); + return NO_MEMORY; + } + + memset(jpeg_job, 0, sizeof(qcamera_hal3_jpeg_data_t)); + jpeg_job->src_frame = frame; + if(frame != job->src_frame) + jpeg_job->src_reproc_frame = job->src_frame; + if (NULL == job->fwk_src_frame) { + jpeg_job->metadata = job->metadata; + } else { + jpeg_job->metadata = + (metadata_buffer_t *) job->fwk_src_frame->metadata_buffer.buffer; + jpeg_job->fwk_src_buffer = job->fwk_src_frame; + } + jpeg_job->src_metadata = job->src_metadata; + jpeg_job->jpeg_settings = job->jpeg_settings; + + // free pp job buf + free(job); + + // enqueu reprocessed frame to jpeg input queue + m_inputJpegQ.enqueue((void *)jpeg_job); + + // wait up data proc thread + m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE); + + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : dequeuePPJob + * + * DESCRIPTION: find a postprocessing job from ongoing pp queue by frame number + * + * PARAMETERS : + * @frameNumber : frame number for the pp job + * + * RETURN : ptr to a pp job struct. NULL if not found. 
+ *==========================================================================*/ +qcamera_hal3_pp_data_t *QCamera3PostProcessor::dequeuePPJob(uint32_t frameNumber) { + qcamera_hal3_pp_data_t *pp_job = NULL; + pp_job = (qcamera_hal3_pp_data_t *)m_ongoingPPQ.dequeue(); + + if (pp_job == NULL) { + LOGE("Fatal: ongoing PP queue is empty"); + return NULL; + } + if (pp_job->fwk_src_frame && + (pp_job->fwk_src_frame->frameNumber != frameNumber)) { + LOGE("head of pp queue doesn't match requested frame number"); + } + return pp_job; +} + +/*=========================================================================== + * FUNCTION : findJpegJobByJobId + * + * DESCRIPTION: find a jpeg job from ongoing Jpeg queue by its job ID + * + * PARAMETERS : + * @jobId : job Id of the job + * + * RETURN : ptr to a jpeg job struct. NULL if not found. + * + * NOTE : Currently only one job is sending to mm-jpeg-interface for jpeg + * encoding. Therefore simply dequeue from the ongoing Jpeg Queue + * will serve the purpose to find the jpeg job. 
+ *==========================================================================*/ +qcamera_hal3_jpeg_data_t *QCamera3PostProcessor::findJpegJobByJobId(uint32_t jobId) +{ + qcamera_hal3_jpeg_data_t * job = NULL; + if (jobId == 0) { + LOGE("not a valid jpeg jobId"); + return NULL; + } + + // currely only one jpeg job ongoing, so simply dequeue the head + job = (qcamera_hal3_jpeg_data_t *)m_ongoingJpegQ.dequeue(); + return job; +} + +/*=========================================================================== + * FUNCTION : releasePPInputData + * + * DESCRIPTION: callback function to release post process input data node + * + * PARAMETERS : + * @data : ptr to post process input data + * @user_data : user data ptr (QCamera3Reprocessor) + * + * RETURN : None + *==========================================================================*/ +void QCamera3PostProcessor::releasePPInputData(void *data, void *user_data) +{ + QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data; + if (NULL != pme) { + qcamera_hal3_pp_buffer_t *buf = (qcamera_hal3_pp_buffer_t *)data; + if (NULL != buf) { + if (buf->input) { + pme->releaseSuperBuf(buf->input); + free(buf->input); + buf->input = NULL; + } + } + } +} + +/*=========================================================================== + * FUNCTION : releaseMetaData + * + * DESCRIPTION: callback function to release metadata camera buffer + * + * PARAMETERS : + * @data : ptr to post process input data + * @user_data : user data ptr (QCamera3Reprocessor) + * + * RETURN : None + *==========================================================================*/ +void QCamera3PostProcessor::releaseMetadata(void *data, void *user_data) +{ + QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data; + if (NULL != pme) { + pme->m_parent->metadataBufDone((mm_camera_super_buf_t *)data); + } +} + +/*=========================================================================== + * FUNCTION : releaseJpegData + * + * DESCRIPTION: callback 
function to release jpeg job node + * + * PARAMETERS : + * @data : ptr to ongoing jpeg job data + * @user_data : user data ptr (QCamera3Reprocessor) + * + * RETURN : None + *==========================================================================*/ +void QCamera3PostProcessor::releaseJpegData(void *data, void *user_data) +{ + QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data; + if (NULL != pme) { + pme->releaseJpegJobData((qcamera_hal3_jpeg_data_t *)data); + } +} + +/*=========================================================================== + * FUNCTION : releaseOngoingPPData + * + * DESCRIPTION: callback function to release ongoing postprocess job node + * + * PARAMETERS : + * @data : ptr to onging postprocess job + * @user_data : user data ptr (QCamera3Reprocessor) + * + * RETURN : None + *==========================================================================*/ +void QCamera3PostProcessor::releaseOngoingPPData(void *data, void *user_data) +{ + QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data; + if (NULL != pme) { + qcamera_hal3_pp_data_t *pp_data = (qcamera_hal3_pp_data_t *)data; + + if (pp_data && pp_data->src_frame) + pme->releaseSuperBuf(pp_data->src_frame); + + pme->releasePPJobData(pp_data); + + } +} + +/*=========================================================================== + * FUNCTION : releaseSuperBuf + * + * DESCRIPTION: function to release a superbuf frame by returning back to kernel + * + * PARAMETERS : + * @super_buf : ptr to the superbuf frame + * + * RETURN : None + *==========================================================================*/ +void QCamera3PostProcessor::releaseSuperBuf(mm_camera_super_buf_t *super_buf) +{ + if (NULL != super_buf) { + if (m_parent != NULL) { + m_parent->bufDone(super_buf); + } + } +} + +/*=========================================================================== + * FUNCTION : releaseOfflineBuffers + * + * DESCRIPTION: function to release/unmap offline buffers if any + * + 
 * PARAMETERS :
 *   @allBuffers : flag that asks to release all buffers or only one
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3PostProcessor::releaseOfflineBuffers(bool allBuffers)
{
    int32_t rc = NO_ERROR;

    // Nothing to unmap when no reprocess channel was ever created.
    if(NULL != m_pReprocChannel) {
        rc = m_pReprocChannel->unmapOfflineBuffers(allBuffers);
    }

    return rc;
}

/*===========================================================================
 * FUNCTION   : releaseJpegJobData
 *
 * DESCRIPTION: function to release internal resources in jpeg job struct
 *
 * PARAMETERS :
 *   @job     : ptr to jpeg job struct
 *
 * RETURN     : None
 *
 * NOTE       : original source frame need to be queued back to kernel for
 *              future use. Output buf of jpeg job need to be released since
 *              it's allocated for each job. Exif object need to be deleted.
 *==========================================================================*/
void QCamera3PostProcessor::releaseJpegJobData(qcamera_hal3_jpeg_data_t *job)
{
    ATRACE_CALL();
    int32_t rc = NO_ERROR;
    LOGD("E");
    if (NULL != job) {
        // Original (pre-reprocess) source frame: struct only; the underlying
        // buffers were already returned when the superbuf was released.
        if (NULL != job->src_reproc_frame) {
            free(job->src_reproc_frame);
            job->src_reproc_frame = NULL;
        }

        // Reprocessed frame: return its buffers to the reprocess channel
        // before freeing the struct.
        if (NULL != job->src_frame) {
            if (NULL != m_pReprocChannel) {
                rc = m_pReprocChannel->bufDone(job->src_frame);
                if (NO_ERROR != rc)
                    LOGE("bufDone error: %d", rc);
            }
            free(job->src_frame);
            job->src_frame = NULL;
        }

        // Either a framework source buffer or camera source metadata is
        // present, never both (mutually exclusive ownership).
        if (NULL != job->fwk_src_buffer) {
            free(job->fwk_src_buffer);
            job->fwk_src_buffer = NULL;
        } else if (NULL != job->src_metadata) {
            // Metadata buffer must go back to the parent channel first.
            m_parent->metadataBufDone(job->src_metadata);
            free(job->src_metadata);
            job->src_metadata = NULL;
        }

        if (NULL != job->fwk_frame) {
            free(job->fwk_frame);
            job->fwk_frame = NULL;
        }

        // Exif object is heap-allocated per job (see encode path).
        if (NULL != job->pJpegExifObj) {
            delete job->pJpegExifObj;
            job->pJpegExifObj = NULL;
        }

        if (NULL != job->jpeg_settings) {
            free(job->jpeg_settings);
            job->jpeg_settings = NULL;
        }
    }
    /* Additional trigger to process any pending jobs in the input queue */
    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
    LOGD("X");
}

/*===========================================================================
 * FUNCTION   : releasePPJobData
 *
 * DESCRIPTION: function to release internal resources in p pjob struct
 *
 * PARAMETERS :
 *   @job     : ptr to pp job struct
 *
 * RETURN     : None
 *
 * NOTE       : Original source metadata buffer needs to be released and
 *              queued back to kernel for future use. src_frame, src_metadata,
 *              and fwk_src_frame structures need to be freed.
 *==========================================================================*/
void QCamera3PostProcessor::releasePPJobData(qcamera_hal3_pp_data_t *pp_job)
{
    ATRACE_CALL();
    LOGD("E");
    if (NULL != pp_job) {
        if (NULL != pp_job->src_frame) {
            free(pp_job->src_frame);
            // src_metadata is only owned (and thus released) together with
            // a camera source frame.
            if (NULL != pp_job->src_metadata) {
                m_parent->metadataBufDone(pp_job->src_metadata);
                free(pp_job->src_metadata);
            }
            pp_job->src_frame = NULL;
            pp_job->metadata = NULL;
        }

        if (NULL != pp_job->fwk_src_frame) {
            free(pp_job->fwk_src_frame);
            pp_job->fwk_src_frame = NULL;
        }
    }

    /* Additional trigger to process any pending jobs in the input queue */
    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
    LOGD("X");
}

/*===========================================================================
 * FUNCTION   : getColorfmtFromImgFmt
 *
 * DESCRIPTION: function to return jpeg color format based on its image format
 *
 * PARAMETERS :
 *   @img_fmt : image format
 *
 * RETURN     : jpeg color format that can be understandable by omx lib
 *==========================================================================*/
mm_jpeg_color_format QCamera3PostProcessor::getColorfmtFromImgFmt(cam_format_t img_fmt)
{
    // Map camera YUV layouts onto the closest OMX jpeg color format;
    // unknown formats fall back to NV21-style YCrCb 4:2:0.
    switch (img_fmt) {
    case CAM_FORMAT_YUV_420_NV21:
    case CAM_FORMAT_YUV_420_NV21_VENUS:
        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
    case CAM_FORMAT_YUV_420_NV21_ADRENO:
        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
    case CAM_FORMAT_YUV_420_NV12:
    case CAM_FORMAT_YUV_420_NV12_VENUS:
        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2;
    case CAM_FORMAT_YUV_420_YV12:
        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2;
    case CAM_FORMAT_YUV_422_NV61:
        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1;
    case CAM_FORMAT_YUV_422_NV16:
        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1;
    default:
        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
    }
}

/*===========================================================================
 * FUNCTION   : getJpegImgTypeFromImgFmt
 *
 * DESCRIPTION: function to return jpeg encode image type based on its image format
 *
 * PARAMETERS :
 *   @img_fmt : image format
 *
 * RETURN     : return jpeg source image format (YUV or Bitstream)
 *==========================================================================*/
mm_jpeg_format_t QCamera3PostProcessor::getJpegImgTypeFromImgFmt(cam_format_t img_fmt)
{
    // All supported camera formats are planar/semi-planar YUV, so every
    // branch (including default) currently resolves to MM_JPEG_FMT_YUV.
    switch (img_fmt) {
    case CAM_FORMAT_YUV_420_NV21:
    case CAM_FORMAT_YUV_420_NV21_ADRENO:
    case CAM_FORMAT_YUV_420_NV12:
    case CAM_FORMAT_YUV_420_NV12_VENUS:
    case CAM_FORMAT_YUV_420_NV21_VENUS:
    case CAM_FORMAT_YUV_420_YV12:
    case CAM_FORMAT_YUV_422_NV61:
    case CAM_FORMAT_YUV_422_NV16:
        return MM_JPEG_FMT_YUV;
    default:
        return MM_JPEG_FMT_YUV;
    }
}

/*===========================================================================
 * FUNCTION   : encodeFWKData
 *
 * DESCRIPTION: function to prepare encoding job information and send to
 *              mm-jpeg-interface to do the encoding job
 *
 * PARAMETERS :
 *   @jpeg_job_data : ptr to a struct saving job related information
 *   @needNewSess   : flag to indicate if a new jpeg encoding session need
 *                    to be created.
After creation, this flag will be toggled + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3PostProcessor::encodeFWKData(qcamera_hal3_jpeg_data_t *jpeg_job_data, + uint8_t &needNewSess) +{ + LOGD("E"); + int32_t ret = NO_ERROR; + mm_jpeg_job_t jpg_job; + uint32_t jobId = 0; + qcamera_fwk_input_pp_data_t *recvd_frame = NULL; + metadata_buffer_t *metadata = NULL; + jpeg_settings_t *jpeg_settings = NULL; + QCamera3HardwareInterface* hal_obj = NULL; + mm_jpeg_debug_exif_params_t *exif_debug_params = NULL; + bool needJpegExifRotation = false; + + if (NULL == jpeg_job_data) { + LOGE("Invalid jpeg job"); + return BAD_VALUE; + } + + recvd_frame = jpeg_job_data->fwk_frame; + if (NULL == recvd_frame) { + LOGE("Invalid input buffer"); + return BAD_VALUE; + } + + metadata = jpeg_job_data->metadata; + if (NULL == metadata) { + LOGE("Invalid metadata buffer"); + return BAD_VALUE; + } + + jpeg_settings = jpeg_job_data->jpeg_settings; + if (NULL == jpeg_settings) { + LOGE("Invalid jpeg settings buffer"); + return BAD_VALUE; + } + + if ((NULL != jpeg_job_data->src_frame) && (NULL != jpeg_job_data->src_frame)) { + LOGE("Unsupported case both framework and camera source buffers are invalid!"); + return BAD_VALUE; + } + + hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData; + + if (mJpegClientHandle <= 0) { + LOGE("Error: bug here, mJpegClientHandle is 0"); + return UNKNOWN_ERROR; + } + + cam_dimension_t src_dim; + memset(&src_dim, 0, sizeof(cam_dimension_t)); + src_dim.width = recvd_frame->reproc_config.input_stream_dim.width; + src_dim.height = recvd_frame->reproc_config.input_stream_dim.height; + + cam_dimension_t dst_dim; + memset(&dst_dim, 0, sizeof(cam_dimension_t)); + dst_dim.width = recvd_frame->reproc_config.output_stream_dim.width; + dst_dim.height = recvd_frame->reproc_config.output_stream_dim.height; + + cam_rect_t crop; + 
memset(&crop, 0, sizeof(cam_rect_t)); + //TBD_later - Zoom event removed in stream + //main_stream->getCropInfo(crop); + + // Set JPEG encode crop in reprocess frame metadata + // If this JPEG crop info exist, encoder should do cropping + IF_META_AVAILABLE(cam_stream_crop_info_t, jpeg_crop, + CAM_INTF_PARM_JPEG_ENCODE_CROP, metadata) { + memcpy(&crop, &(jpeg_crop->crop), sizeof(cam_rect_t)); + } + + // Set JPEG encode crop in reprocess frame metadata + // If this JPEG scale info exist, encoder should do scaling + IF_META_AVAILABLE(cam_dimension_t, scale_dim, + CAM_INTF_PARM_JPEG_SCALE_DIMENSION, metadata) { + if (scale_dim->width != 0 && scale_dim->height != 0) { + dst_dim.width = scale_dim->width; + dst_dim.height = scale_dim->height; + } + } + + needJpegExifRotation = (hal_obj->needJpegExifRotation() || !needsReprocess(recvd_frame)); + + // If EXIF rotation metadata is added and used to match the JPEG orientation, + // it means CPP rotation is not involved, whether it is because CPP does not + // support rotation, or the reprocessed frame is not sent to CPP. + // Override CAM_INTF_PARM_ROTATION to 0 to avoid wrong CPP rotation info + // to be filled in to JPEG metadata. 
+ if (needJpegExifRotation) { + cam_rotation_info_t rotation_info; + memset(&rotation_info, 0, sizeof(rotation_info)); + rotation_info.rotation = ROTATE_0; + rotation_info.streamId = 0; + ADD_SET_PARAM_ENTRY_TO_BATCH(metadata, CAM_INTF_PARM_ROTATION, rotation_info); + } + + LOGH("Need new session?:%d", needNewSess); + if (needNewSess) { + //creating a new session, so we must destroy the old one + if ( 0 < mJpegSessionId ) { + ret = mJpegHandle.destroy_session(mJpegSessionId); + if (ret != NO_ERROR) { + LOGE("Error destroying an old jpeg encoding session, id = %d", + mJpegSessionId); + return ret; + } + mJpegSessionId = 0; + } + // create jpeg encoding session + mm_jpeg_encode_params_t encodeParam; + memset(&encodeParam, 0, sizeof(mm_jpeg_encode_params_t)); + getFWKJpegEncodeConfig(encodeParam, recvd_frame, jpeg_settings); + LOGH("#src bufs:%d # tmb bufs:%d #dst_bufs:%d", + encodeParam.num_src_bufs,encodeParam.num_tmb_bufs,encodeParam.num_dst_bufs); + if (!needJpegExifRotation && + (jpeg_settings->jpeg_orientation == 90 || + jpeg_settings->jpeg_orientation == 270)) { + // swap src width and height, stride and scanline due to rotation + encodeParam.main_dim.src_dim.width = src_dim.height; + encodeParam.main_dim.src_dim.height = src_dim.width; + encodeParam.thumb_dim.src_dim.width = src_dim.height; + encodeParam.thumb_dim.src_dim.height = src_dim.width; + + int32_t temp = encodeParam.src_main_buf[0].offset.mp[0].stride; + encodeParam.src_main_buf[0].offset.mp[0].stride = + encodeParam.src_main_buf[0].offset.mp[0].scanline; + encodeParam.src_main_buf[0].offset.mp[0].scanline = temp; + + temp = encodeParam.src_thumb_buf[0].offset.mp[0].stride; + encodeParam.src_thumb_buf[0].offset.mp[0].stride = + encodeParam.src_thumb_buf[0].offset.mp[0].scanline; + encodeParam.src_thumb_buf[0].offset.mp[0].scanline = temp; + } else { + encodeParam.main_dim.src_dim = src_dim; + encodeParam.thumb_dim.src_dim = src_dim; + } + encodeParam.main_dim.dst_dim = dst_dim; + 
encodeParam.thumb_dim.dst_dim = jpeg_settings->thumbnail_size; + + if (needJpegExifRotation) { + encodeParam.thumb_rotation = (uint32_t)jpeg_settings->jpeg_orientation; + } + + LOGI("Src Buffer cnt = %d, res = %dX%d len = %d rot = %d " + "src_dim = %dX%d dst_dim = %dX%d", + encodeParam.num_src_bufs, + encodeParam.src_main_buf[0].offset.mp[0].stride, + encodeParam.src_main_buf[0].offset.mp[0].scanline, + encodeParam.src_main_buf[0].offset.frame_len, + encodeParam.rotation, + src_dim.width, src_dim.height, + dst_dim.width, dst_dim.height); + LOGI("Src THUMB buf_cnt = %d, res = %dX%d len = %d rot = %d " + "src_dim = %dX%d, dst_dim = %dX%d", + encodeParam.num_tmb_bufs, + encodeParam.src_thumb_buf[0].offset.mp[0].stride, + encodeParam.src_thumb_buf[0].offset.mp[0].scanline, + encodeParam.src_thumb_buf[0].offset.frame_len, + encodeParam.thumb_rotation, + encodeParam.thumb_dim.src_dim.width, + encodeParam.thumb_dim.src_dim.height, + encodeParam.thumb_dim.dst_dim.width, + encodeParam.thumb_dim.dst_dim.height); + + LOGH("#src bufs:%d # tmb bufs:%d #dst_bufs:%d", + encodeParam.num_src_bufs,encodeParam.num_tmb_bufs,encodeParam.num_dst_bufs); + + ret = mJpegHandle.create_session(mJpegClientHandle, &encodeParam, &mJpegSessionId); + if (ret != NO_ERROR) { + LOGE("Error creating a new jpeg encoding session, ret = %d", ret); + return ret; + } + needNewSess = FALSE; + } + + // Fill in new job + memset(&jpg_job, 0, sizeof(mm_jpeg_job_t)); + jpg_job.job_type = JPEG_JOB_TYPE_ENCODE; + jpg_job.encode_job.session_id = mJpegSessionId; + jpg_job.encode_job.src_index = 0; + jpg_job.encode_job.dst_index = 0; + + // Set main dim job parameters and handle rotation + if (!needJpegExifRotation && (jpeg_settings->jpeg_orientation == 90 || + jpeg_settings->jpeg_orientation == 270)) { + + jpg_job.encode_job.main_dim.src_dim.width = src_dim.height; + jpg_job.encode_job.main_dim.src_dim.height = src_dim.width; + + jpg_job.encode_job.main_dim.dst_dim.width = dst_dim.height; + 
jpg_job.encode_job.main_dim.dst_dim.height = dst_dim.width; + + jpg_job.encode_job.main_dim.crop.width = crop.height; + jpg_job.encode_job.main_dim.crop.height = crop.width; + jpg_job.encode_job.main_dim.crop.left = crop.top; + jpg_job.encode_job.main_dim.crop.top = crop.left; + } else { + jpg_job.encode_job.main_dim.src_dim = src_dim; + jpg_job.encode_job.main_dim.dst_dim = dst_dim; + jpg_job.encode_job.main_dim.crop = crop; + } + + // get 3a sw version info + cam_q3a_version_t sw_version; + memset(&sw_version, 0, sizeof(sw_version)); + if (hal_obj) + hal_obj->get3AVersion(sw_version); + + // get exif data + QCamera3Exif *pJpegExifObj = getExifData(metadata, jpeg_settings, needJpegExifRotation); + jpeg_job_data->pJpegExifObj = pJpegExifObj; + if (pJpegExifObj != NULL) { + jpg_job.encode_job.exif_info.exif_data = pJpegExifObj->getEntries(); + jpg_job.encode_job.exif_info.numOfEntries = + pJpegExifObj->getNumOfEntries(); + jpg_job.encode_job.exif_info.debug_data.sw_3a_version[0] = + sw_version.major_version; + jpg_job.encode_job.exif_info.debug_data.sw_3a_version[1] = + sw_version.minor_version; + jpg_job.encode_job.exif_info.debug_data.sw_3a_version[2] = + sw_version.patch_version; + jpg_job.encode_job.exif_info.debug_data.sw_3a_version[3] = + sw_version.new_feature_des; + } + + // thumbnail dim + LOGH("Thumbnail needed:%d", m_bThumbnailNeeded); + if (m_bThumbnailNeeded == TRUE) { + jpg_job.encode_job.thumb_dim.dst_dim = + jpeg_settings->thumbnail_size; + + if (!needJpegExifRotation && (jpeg_settings->jpeg_orientation == 90 || + jpeg_settings->jpeg_orientation == 270)) { + //swap the thumbnail destination width and height if it has + //already been rotated + int temp = jpg_job.encode_job.thumb_dim.dst_dim.width; + jpg_job.encode_job.thumb_dim.dst_dim.width = + jpg_job.encode_job.thumb_dim.dst_dim.height; + jpg_job.encode_job.thumb_dim.dst_dim.height = temp; + + jpg_job.encode_job.thumb_dim.src_dim.width = src_dim.height; + 
jpg_job.encode_job.thumb_dim.src_dim.height = src_dim.width; + + jpg_job.encode_job.thumb_dim.crop.width = crop.height; + jpg_job.encode_job.thumb_dim.crop.height = crop.width; + jpg_job.encode_job.thumb_dim.crop.left = crop.top; + jpg_job.encode_job.thumb_dim.crop.top = crop.left; + } else { + jpg_job.encode_job.thumb_dim.src_dim = src_dim; + jpg_job.encode_job.thumb_dim.crop = crop; + } + jpg_job.encode_job.thumb_index = 0; + } + + jpg_job.encode_job.cam_exif_params = hal_obj->get3AExifParams(); + exif_debug_params = jpg_job.encode_job.cam_exif_params.debug_params; + // Fill in exif debug data + // Allocate for a local copy of debug parameters + jpg_job.encode_job.cam_exif_params.debug_params = + (mm_jpeg_debug_exif_params_t *) malloc (sizeof(mm_jpeg_debug_exif_params_t)); + if (!jpg_job.encode_job.cam_exif_params.debug_params) { + LOGE("Out of Memory. Allocation failed for 3A debug exif params"); + return NO_MEMORY; + } + + jpg_job.encode_job.mobicat_mask = hal_obj->getMobicatMask(); + + if (metadata != NULL) { + // Fill in the metadata passed as parameter + jpg_job.encode_job.p_metadata = metadata; + + jpg_job.encode_job.p_metadata->is_mobicat_aec_params_valid = + jpg_job.encode_job.cam_exif_params.cam_3a_params_valid; + + if (jpg_job.encode_job.cam_exif_params.cam_3a_params_valid) { + jpg_job.encode_job.p_metadata->mobicat_aec_params = + jpg_job.encode_job.cam_exif_params.cam_3a_params; + } + + if (exif_debug_params) { + // Copy debug parameters locally. 
+ memcpy(jpg_job.encode_job.cam_exif_params.debug_params, + exif_debug_params, (sizeof(mm_jpeg_debug_exif_params_t))); + /* Save a copy of 3A debug params */ + jpg_job.encode_job.p_metadata->is_statsdebug_ae_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->ae_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_awb_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->awb_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_af_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->af_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_asd_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->asd_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_stats_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->stats_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_bestats_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->bestats_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_bhist_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->bhist_debug_params_valid; + jpg_job.encode_job.p_metadata->is_statsdebug_3a_tuning_params_valid = + jpg_job.encode_job.cam_exif_params.debug_params->q3a_tuning_debug_params_valid; + + if (jpg_job.encode_job.cam_exif_params.debug_params->ae_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_ae_data = + jpg_job.encode_job.cam_exif_params.debug_params->ae_debug_params; + } + if (jpg_job.encode_job.cam_exif_params.debug_params->awb_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_awb_data = + jpg_job.encode_job.cam_exif_params.debug_params->awb_debug_params; + } + if (jpg_job.encode_job.cam_exif_params.debug_params->af_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_af_data = + jpg_job.encode_job.cam_exif_params.debug_params->af_debug_params; + } + if 
(jpg_job.encode_job.cam_exif_params.debug_params->asd_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_asd_data = + jpg_job.encode_job.cam_exif_params.debug_params->asd_debug_params; + } + if (jpg_job.encode_job.cam_exif_params.debug_params->stats_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_stats_buffer_data = + jpg_job.encode_job.cam_exif_params.debug_params->stats_debug_params; + } + if (jpg_job.encode_job.cam_exif_params.debug_params->bestats_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_bestats_buffer_data = + jpg_job.encode_job.cam_exif_params.debug_params->bestats_debug_params; + } + if (jpg_job.encode_job.cam_exif_params.debug_params->bhist_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_bhist_data = + jpg_job.encode_job.cam_exif_params.debug_params->bhist_debug_params; + } + if (jpg_job.encode_job.cam_exif_params.debug_params->q3a_tuning_debug_params_valid) { + jpg_job.encode_job.p_metadata->statsdebug_3a_tuning_data = + jpg_job.encode_job.cam_exif_params.debug_params->q3a_tuning_debug_params; + } + } + } else { + LOGW("Metadata is null"); + } + + // Multi image info + if (hal_obj->isDeviceLinked() == TRUE) { + jpg_job.encode_job.multi_image_info.type = MM_JPEG_TYPE_JPEG; + jpg_job.encode_job.multi_image_info.num_of_images = 1; + jpg_job.encode_job.multi_image_info.enable_metadata = 1; + if (hal_obj->isMainCamera() == TRUE) { + jpg_job.encode_job.multi_image_info.is_primary = 1; + } else { + jpg_job.encode_job.multi_image_info.is_primary = 0; + } + } + + jpg_job.encode_job.hal_version = CAM_HAL_V3; + + //Start jpeg encoding + ret = mJpegHandle.start_job(&jpg_job, &jobId); + if (jpg_job.encode_job.cam_exif_params.debug_params) { + free(jpg_job.encode_job.cam_exif_params.debug_params); + } + if (ret == NO_ERROR) { + // remember job info + jpeg_job_data->jobId = jobId; + } + + LOGD("X"); + return ret; +} + +/*=========================================================================== 
 * FUNCTION   : encodeData
 *
 * DESCRIPTION: function to prepare encoding job information and send to
 *              mm-jpeg-interface to do the encoding job
 *
 * PARAMETERS :
 *   @jpeg_job_data : ptr to a struct saving job related information
 *   @needNewSess   : flag to indicate if a new jpeg encoding session need
 *                    to be created. After creation, this flag will be toggled
 *
 * RETURN     : int32_t type of status
 *              NO_ERROR  -- success
 *              none-zero failure code
 *==========================================================================*/
int32_t QCamera3PostProcessor::encodeData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
        uint8_t &needNewSess)
{
    ATRACE_CALL();
    LOGD("E");
    int32_t ret = NO_ERROR;
    mm_jpeg_job_t jpg_job;
    uint32_t jobId = 0;
    QCamera3Stream *main_stream = NULL;
    mm_camera_buf_def_t *main_frame = NULL;
    QCamera3Channel *srcChannel = NULL;
    mm_camera_super_buf_t *recvd_frame = NULL;
    metadata_buffer_t *metadata = NULL;
    jpeg_settings_t *jpeg_settings = NULL;
    QCamera3HardwareInterface* hal_obj = NULL;
    mm_jpeg_debug_exif_params_t *exif_debug_params = NULL;
    if (m_parent != NULL) {
        hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
    } else {
        LOGE("m_parent is NULL, Error");
        return BAD_VALUE;
    }
    bool needJpegExifRotation = false;

    recvd_frame = jpeg_job_data->src_frame;
    metadata = jpeg_job_data->metadata;
    jpeg_settings = jpeg_job_data->jpeg_settings;

    LOGD("encoding bufIndex: %u",
        jpeg_job_data->src_frame->bufs[0]->buf_idx);

    // Resolve which channel the received super buffer belongs to.
    QCamera3Channel *pChannel = NULL;
    // first check picture channel
    if (m_parent->getMyHandle() == recvd_frame->ch_id) {
        pChannel = m_parent;
    }
    // check reprocess channel if not found
    if (pChannel == NULL) {
        if (m_pReprocChannel != NULL &&
            m_pReprocChannel->getMyHandle() == recvd_frame->ch_id) {
            pChannel = m_pReprocChannel;
        }
    }

    srcChannel = pChannel;

    if (srcChannel == NULL) {
        LOGE("No corresponding channel (ch_id = %d) exist, return here",
               recvd_frame->ch_id);
        return BAD_VALUE;
    }

    // find snapshot frame and thumbnail frame
    // Note: In this version we will receive only snapshot frame.
    for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
        QCamera3Stream *srcStream =
            srcChannel->getStreamByHandle(recvd_frame->bufs[i]->stream_id);
        if (srcStream != NULL) {
            switch (srcStream->getMyType()) {
            case CAM_STREAM_TYPE_SNAPSHOT:
            case CAM_STREAM_TYPE_OFFLINE_PROC:
                main_stream = srcStream;
                main_frame = recvd_frame->bufs[i];
                break;
            default:
                break;
            }
        }
    }

    if(NULL == main_frame){
        LOGE("Main frame is NULL");
        return BAD_VALUE;
    }

    QCamera3StreamMem *memObj = (QCamera3StreamMem *)main_frame->mem_info;
    if (NULL == memObj) {
        LOGE("Memeory Obj of main frame is NULL");
        return NO_MEMORY;
    }

    // clean and invalidate cache ops through mem obj of the frame
    // (makes CPU-written pixel data visible to the JPEG hardware)
    memObj->cleanInvalidateCache(main_frame->buf_idx);

    if (mJpegClientHandle <= 0) {
        LOGE("Error: bug here, mJpegClientHandle is 0");
        return UNKNOWN_ERROR;
    }
    cam_dimension_t src_dim;
    memset(&src_dim, 0, sizeof(cam_dimension_t));
    main_stream->getFrameDimension(src_dim);

    cam_dimension_t dst_dim;
    memset(&dst_dim, 0, sizeof(cam_dimension_t));
    if (NO_ERROR != m_parent->getStreamSize(dst_dim)) {
        LOGE("Failed to get size of the JPEG stream");
        return UNKNOWN_ERROR;
    }

    // If the backend did not rotate the frame (ROTATE_0) but the request
    // asked for an orientation, signal it through the EXIF orientation tag.
    needJpegExifRotation = hal_obj->needJpegExifRotation();
    IF_META_AVAILABLE(cam_rotation_info_t, rotation_info, CAM_INTF_PARM_ROTATION, metadata) {
        if (jpeg_settings->jpeg_orientation != 0 && rotation_info->rotation == ROTATE_0) {
            needJpegExifRotation = TRUE;
            LOGH("Need EXIF JPEG ROTATION");
        }
    }

    // Although in HAL3, legacy flip mode is not advertised
    // default value of CAM_INTF_PARM_FLIP is still added here
    // for jpge metadata
    int32_t flipMode = 0; // no flip
    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata, CAM_INTF_PARM_FLIP, flipMode);

    LOGH("Need new session?:%d", needNewSess);
    if (needNewSess) {
        //creating a new session, so we must destroy the old one
        if ( 0 < mJpegSessionId ) {
            ret = mJpegHandle.destroy_session(mJpegSessionId);
            if (ret != NO_ERROR) {
                LOGE("Error destroying an old jpeg encoding session, id = %d",
                      mJpegSessionId);
                return ret;
            }
            mJpegSessionId = 0;
        }
        // create jpeg encoding session
        mm_jpeg_encode_params_t encodeParam;
        memset(&encodeParam, 0, sizeof(mm_jpeg_encode_params_t));
        getJpegEncodeConfig(encodeParam, main_stream, jpeg_settings);
        LOGH("#src bufs:%d # tmb bufs:%d #dst_bufs:%d",
                 encodeParam.num_src_bufs,encodeParam.num_tmb_bufs,encodeParam.num_dst_bufs);
        if (!needJpegExifRotation &&
            (jpeg_settings->jpeg_orientation == 90 ||
            jpeg_settings->jpeg_orientation == 270)) {
            //swap src width and height, stride and scanline due to rotation
            encodeParam.main_dim.src_dim.width = src_dim.height;
            encodeParam.main_dim.src_dim.height = src_dim.width;
            encodeParam.thumb_dim.src_dim.width = src_dim.height;
            encodeParam.thumb_dim.src_dim.height = src_dim.width;

            int32_t temp = encodeParam.src_main_buf[0].offset.mp[0].stride;
            encodeParam.src_main_buf[0].offset.mp[0].stride =
                encodeParam.src_main_buf[0].offset.mp[0].scanline;
            encodeParam.src_main_buf[0].offset.mp[0].scanline = temp;

            temp = encodeParam.src_thumb_buf[0].offset.mp[0].stride;
            encodeParam.src_thumb_buf[0].offset.mp[0].stride =
                encodeParam.src_thumb_buf[0].offset.mp[0].scanline;
            encodeParam.src_thumb_buf[0].offset.mp[0].scanline = temp;
        } else {
            encodeParam.main_dim.src_dim = src_dim;
            encodeParam.thumb_dim.src_dim = src_dim;
        }
        encodeParam.main_dim.dst_dim = dst_dim;
        encodeParam.thumb_dim.dst_dim = jpeg_settings->thumbnail_size;

        if (needJpegExifRotation) {
            encodeParam.thumb_rotation = (uint32_t)jpeg_settings->jpeg_orientation;
        }

        LOGI("Src Buffer cnt = %d, res = %dX%d len = %d rot = %d "
            "src_dim = %dX%d dst_dim = %dX%d",
            encodeParam.num_src_bufs,
            encodeParam.src_main_buf[0].offset.mp[0].stride,
            encodeParam.src_main_buf[0].offset.mp[0].scanline,
            encodeParam.src_main_buf[0].offset.frame_len,
            encodeParam.rotation,
            src_dim.width, src_dim.height,
            dst_dim.width, dst_dim.height);
        LOGI("Src THUMB buf_cnt = %d, res = %dX%d len = %d rot = %d "
            "src_dim = %dX%d, dst_dim = %dX%d",
            encodeParam.num_tmb_bufs,
            encodeParam.src_thumb_buf[0].offset.mp[0].stride,
            encodeParam.src_thumb_buf[0].offset.mp[0].scanline,
            encodeParam.src_thumb_buf[0].offset.frame_len,
            encodeParam.thumb_rotation,
            encodeParam.thumb_dim.src_dim.width,
            encodeParam.thumb_dim.src_dim.height,
            encodeParam.thumb_dim.dst_dim.width,
            encodeParam.thumb_dim.dst_dim.height);
        ret = mJpegHandle.create_session(mJpegClientHandle, &encodeParam, &mJpegSessionId);
        if (ret != NO_ERROR) {
            LOGE("Error creating a new jpeg encoding session, ret = %d", ret);
            return ret;
        }
        needNewSess = FALSE;
    }

    // Fill in new job
    memset(&jpg_job, 0, sizeof(mm_jpeg_job_t));
    jpg_job.job_type = JPEG_JOB_TYPE_ENCODE;
    jpg_job.encode_job.session_id = mJpegSessionId;
    jpg_job.encode_job.src_index = (int32_t)main_frame->buf_idx;
    jpg_job.encode_job.dst_index = 0;

    cam_rect_t crop;
    memset(&crop, 0, sizeof(cam_rect_t));
    //TBD_later - Zoom event removed in stream
    //main_stream->getCropInfo(crop);

    // Set main dim job parameters and handle rotation
    if (!needJpegExifRotation && (jpeg_settings->jpeg_orientation == 90 ||
            jpeg_settings->jpeg_orientation == 270)) {

        jpg_job.encode_job.main_dim.src_dim.width = src_dim.height;
        jpg_job.encode_job.main_dim.src_dim.height = src_dim.width;

        jpg_job.encode_job.main_dim.dst_dim.width = dst_dim.height;
        jpg_job.encode_job.main_dim.dst_dim.height = dst_dim.width;

        jpg_job.encode_job.main_dim.crop.width = crop.height;
        jpg_job.encode_job.main_dim.crop.height = crop.width;
        jpg_job.encode_job.main_dim.crop.left = crop.top;
        jpg_job.encode_job.main_dim.crop.top = crop.left;
    } else {
        jpg_job.encode_job.main_dim.src_dim = src_dim;
        jpg_job.encode_job.main_dim.dst_dim = dst_dim;
        jpg_job.encode_job.main_dim.crop = crop;
    }

    // get 3a sw version info
    cam_q3a_version_t sw_version;
    memset(&sw_version, 0, sizeof(sw_version));

    if (hal_obj)
        hal_obj->get3AVersion(sw_version);

    // get exif data; ownership of the QCamera3Exif object is transferred
    // to jpeg_job_data and released with the job
    QCamera3Exif *pJpegExifObj = getExifData(metadata, jpeg_settings, needJpegExifRotation);
    jpeg_job_data->pJpegExifObj = pJpegExifObj;
    if (pJpegExifObj != NULL) {
        jpg_job.encode_job.exif_info.exif_data = pJpegExifObj->getEntries();
        jpg_job.encode_job.exif_info.numOfEntries =
            pJpegExifObj->getNumOfEntries();
        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[0] =
            sw_version.major_version;
        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[1] =
            sw_version.minor_version;
        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[2] =
            sw_version.patch_version;
        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[3] =
            sw_version.new_feature_des;
    }

    // thumbnail dim
    LOGH("Thumbnail needed:%d", m_bThumbnailNeeded);
    if (m_bThumbnailNeeded == TRUE) {
        jpg_job.encode_job.thumb_dim.dst_dim =
                jpeg_settings->thumbnail_size;

        if (!needJpegExifRotation &&
            (jpeg_settings->jpeg_orientation == 90 ||
            jpeg_settings->jpeg_orientation == 270)) {
            //swap the thumbnail destination width and height if it has
            //already been rotated
            int temp = jpg_job.encode_job.thumb_dim.dst_dim.width;
            jpg_job.encode_job.thumb_dim.dst_dim.width =
                    jpg_job.encode_job.thumb_dim.dst_dim.height;
            jpg_job.encode_job.thumb_dim.dst_dim.height = temp;

            jpg_job.encode_job.thumb_dim.src_dim.width = src_dim.height;
            jpg_job.encode_job.thumb_dim.src_dim.height = src_dim.width;

            jpg_job.encode_job.thumb_dim.crop.width = crop.height;
            jpg_job.encode_job.thumb_dim.crop.height = crop.width;
            jpg_job.encode_job.thumb_dim.crop.left = crop.top;
            jpg_job.encode_job.thumb_dim.crop.top = crop.left;
        } else {
            jpg_job.encode_job.thumb_dim.src_dim = src_dim;
            jpg_job.encode_job.thumb_dim.crop = crop;
        }
        // thumbnail is generated from the same buffer as the main image
        jpg_job.encode_job.thumb_index = main_frame->buf_idx;
        LOGI("Thumbnail idx = %d src w/h (%dx%d), dst w/h (%dx%d)",
                jpg_job.encode_job.thumb_index,
                jpg_job.encode_job.thumb_dim.src_dim.width,
                jpg_job.encode_job.thumb_dim.src_dim.height,
                jpg_job.encode_job.thumb_dim.dst_dim.width,
                jpg_job.encode_job.thumb_dim.dst_dim.height);
    }
    LOGI("Main image idx = %d src w/h (%dx%d), dst w/h (%dx%d)",
            jpg_job.encode_job.src_index,
            jpg_job.encode_job.main_dim.src_dim.width,
            jpg_job.encode_job.main_dim.src_dim.height,
            jpg_job.encode_job.main_dim.dst_dim.width,
            jpg_job.encode_job.main_dim.dst_dim.height);

    jpg_job.encode_job.cam_exif_params = hal_obj->get3AExifParams();
    exif_debug_params = jpg_job.encode_job.cam_exif_params.debug_params;

    // Allocate for a local copy of debug parameters; freed unconditionally
    // after start_job() below (encoder keeps data via p_metadata copies)
    jpg_job.encode_job.cam_exif_params.debug_params =
            (mm_jpeg_debug_exif_params_t *) malloc (sizeof(mm_jpeg_debug_exif_params_t));
    if (!jpg_job.encode_job.cam_exif_params.debug_params) {
        LOGE("Out of Memory. Allocation failed for 3A debug exif params");
        return NO_MEMORY;
    }

    jpg_job.encode_job.mobicat_mask = hal_obj->getMobicatMask();

    if (metadata != NULL) {
        //Fill in the metadata passed as parameter
        jpg_job.encode_job.p_metadata = metadata;

        jpg_job.encode_job.p_metadata->is_mobicat_aec_params_valid =
                jpg_job.encode_job.cam_exif_params.cam_3a_params_valid;

        if (jpg_job.encode_job.cam_exif_params.cam_3a_params_valid) {
            jpg_job.encode_job.p_metadata->mobicat_aec_params =
                    jpg_job.encode_job.cam_exif_params.cam_3a_params;
        }

        if (exif_debug_params) {
            // Copy debug parameters locally.
            memcpy(jpg_job.encode_job.cam_exif_params.debug_params,
                    exif_debug_params, (sizeof(mm_jpeg_debug_exif_params_t)));
            /* Save a copy of 3A debug params */
            jpg_job.encode_job.p_metadata->is_statsdebug_ae_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->ae_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_awb_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->awb_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_af_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->af_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_asd_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->asd_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_stats_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->stats_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_bestats_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->bestats_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_bhist_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->bhist_debug_params_valid;
            jpg_job.encode_job.p_metadata->is_statsdebug_3a_tuning_params_valid =
                    jpg_job.encode_job.cam_exif_params.debug_params->q3a_tuning_debug_params_valid;

            if (jpg_job.encode_job.cam_exif_params.debug_params->ae_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_ae_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->ae_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->awb_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_awb_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->awb_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->af_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_af_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->af_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->asd_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_asd_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->asd_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->stats_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_stats_buffer_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->stats_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->bestats_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_bestats_buffer_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->bestats_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->bhist_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_bhist_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->bhist_debug_params;
            }
            if (jpg_job.encode_job.cam_exif_params.debug_params->q3a_tuning_debug_params_valid) {
                jpg_job.encode_job.p_metadata->statsdebug_3a_tuning_data =
                        jpg_job.encode_job.cam_exif_params.debug_params->q3a_tuning_debug_params;
            }
        }
    } else {
        LOGW("Metadata is null");
    }

    // Multi image info
    if (hal_obj->isDeviceLinked() == TRUE) {
        jpg_job.encode_job.multi_image_info.type = MM_JPEG_TYPE_JPEG;
        jpg_job.encode_job.multi_image_info.num_of_images = 1;
        jpg_job.encode_job.multi_image_info.enable_metadata = 1;
        if (hal_obj->isMainCamera() == TRUE) {
            jpg_job.encode_job.multi_image_info.is_primary = 1;
        } else {
            jpg_job.encode_job.multi_image_info.is_primary = 0;
        }
    }

    jpg_job.encode_job.hal_version = CAM_HAL_V3;

    //Start jpeg encoding
    ret = mJpegHandle.start_job(&jpg_job, &jobId);
    if (jpg_job.encode_job.cam_exif_params.debug_params) {
        free(jpg_job.encode_job.cam_exif_params.debug_params);
    }
    if (ret == NO_ERROR) {
        // remember job info
        jpeg_job_data->jobId = jobId;
    }

    LOGD("X");
    return ret;
}

/*===========================================================================
 * FUNCTION   : dataProcessRoutine
 *
 * DESCRIPTION: data process routine that handles input data either from input
 *              Jpeg Queue to do jpeg encoding, or from input PP Queue to do
 *              reprocess.
 *
 * PARAMETERS :
 *   @data    : user data ptr (QCamera3PostProcessor)
 *
 * RETURN     : None
 *==========================================================================*/
void *QCamera3PostProcessor::dataProcessRoutine(void *data)
{
    int running = 1;
    int ret;
    uint8_t is_active = FALSE;
    uint8_t needNewSess = TRUE;
    mm_camera_super_buf_t *meta_buffer = NULL;
    LOGD("E");
    QCamera3PostProcessor *pme = (QCamera3PostProcessor *)data;
    QCameraCmdThread *cmdThread = &pme->m_dataProcTh;
    cmdThread->setName("cam_data_proc");

    do {
        do {
            // block until a command is posted; retry on EINVAL-free
            // spurious wakeups (e.g. EINTR)
            ret = cam_sem_wait(&cmdThread->cmd_sem);
            if (ret != 0 && errno != EINVAL) {
                LOGE("cam_sem_wait error (%s)",
                        strerror(errno));
                return NULL;
            }
        } while (ret != 0);

        // we got notified about new cmd avail in cmd queue
        camera_cmd_type_t cmd = cmdThread->getCmd();
        switch (cmd) {
        case CAMERA_CMD_TYPE_START_DATA_PROC:
            LOGH("start data proc");
            is_active = TRUE;
            needNewSess = TRUE;

            // (re)arm all input/ongoing queues for a new session
            pme->m_ongoingPPQ.init();
            pme->m_inputJpegQ.init();
            pme->m_inputPPQ.init();
            pme->m_inputFWKPPQ.init();
            pme->m_inputMetaQ.init();
            // signal the caller that start has completed
            cam_sem_post(&cmdThread->sync_sem);

            break;
        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
            {
                LOGH("stop data proc");
                is_active = FALSE;

                // cancel all ongoing jpeg jobs
                qcamera_hal3_jpeg_data_t *jpeg_job =
                    (qcamera_hal3_jpeg_data_t *)pme->m_ongoingJpegQ.dequeue();
                while (jpeg_job != NULL) {
                    pme->mJpegHandle.abort_job(jpeg_job->jobId);

                    pme->releaseJpegJobData(jpeg_job);
                    free(jpeg_job);

                    jpeg_job = (qcamera_hal3_jpeg_data_t *)pme->m_ongoingJpegQ.dequeue();
                }

                // destroy jpeg encoding session
                if ( 0 < pme->mJpegSessionId ) {
                    pme->mJpegHandle.destroy_session(pme->mJpegSessionId);
                    pme->mJpegSessionId = 0;
                }

                needNewSess = TRUE;

                // flush ongoing postproc Queue
                pme->m_ongoingPPQ.flush();

                // flush input jpeg Queue
                pme->m_inputJpegQ.flush();

                // flush input Postproc Queue
                pme->m_inputPPQ.flush();

                // flush framework input Postproc Queue
                pme->m_inputFWKPPQ.flush();

                pme->m_inputMetaQ.flush();

                // signal cmd is completed
                cam_sem_post(&cmdThread->sync_sem);
            }
            break;
        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
            {
                LOGH("Do next job, active is %d", is_active);
                /* needNewSess is set to TRUE as postproc is not re-STARTed
                 * anymore for every captureRequest */
                needNewSess = TRUE;
                if (is_active == TRUE) {
                    // check if there is any ongoing jpeg jobs
                    // (only one JPEG encode is in flight at a time)
                    if (pme->m_ongoingJpegQ.isEmpty()) {
                        LOGD("ongoing jpeg queue is empty so doing the jpeg job");
                        // no ongoing jpeg job, we are fine to send jpeg encoding job
                        qcamera_hal3_jpeg_data_t *jpeg_job =
                            (qcamera_hal3_jpeg_data_t *)pme->m_inputJpegQ.dequeue();

                        if (NULL != jpeg_job) {
                            // add into ongoing jpeg job Q
                            pme->m_ongoingJpegQ.enqueue((void *)jpeg_job);

                            if (jpeg_job->fwk_frame) {
                                ret = pme->encodeFWKData(jpeg_job, needNewSess);
                            } else {
                                ret = pme->encodeData(jpeg_job, needNewSess);
                            }
                            if (NO_ERROR != ret) {
                                // dequeue the last one
                                pme->m_ongoingJpegQ.dequeue(false);

                                pme->releaseJpegJobData(jpeg_job);
                                free(jpeg_job);
                            }
                        }
                    }

                    // check if there are any framework pp jobs
                    if (!pme->m_inputFWKPPQ.isEmpty()) {
                        qcamera_fwk_input_pp_data_t *fwk_frame =
                                (qcamera_fwk_input_pp_data_t *) pme->m_inputFWKPPQ.dequeue();
                        if (NULL != fwk_frame) {
                            qcamera_hal3_pp_data_t *pp_job =
                                    (qcamera_hal3_pp_data_t *)malloc(sizeof(qcamera_hal3_pp_data_t));
                            jpeg_settings_t *jpeg_settings =
                                    (jpeg_settings_t *)pme->m_jpegSettingsQ.dequeue();
                            // NOTE(review): jpeg_settings appears to be leaked
                            // if pp_job allocation fails below — verify
                            if (pp_job != NULL) {
                                memset(pp_job, 0, sizeof(qcamera_hal3_pp_data_t));
                                pp_job->jpeg_settings = jpeg_settings;
                                if (pme->m_pReprocChannel != NULL) {
                                    if (NO_ERROR != pme->m_pReprocChannel->overrideFwkMetadata(fwk_frame)) {
                                        LOGE("Failed to extract output crop");
                                    }
                                    // add into ongoing PP job Q
                                    pp_job->fwk_src_frame = fwk_frame;
                                    pme->m_ongoingPPQ.enqueue((void *)pp_job);
                                    ret = pme->m_pReprocChannel->doReprocessOffline(fwk_frame);
                                    if (NO_ERROR != ret) {
                                        // remove from ongoing PP job Q
                                        pme->m_ongoingPPQ.dequeue(false);
                                    }
                                } else {
                                    LOGE("Reprocess channel is NULL");
                                    ret = -1;
                                }
                            } else {
                                LOGE("no mem for qcamera_hal3_pp_data_t");
                                ret = -1;
                            }

                            if (0 != ret) {
                                // free pp_job
                                if (pp_job != NULL) {
                                    free(pp_job);
                                }
                                // free frame
                                if (fwk_frame != NULL) {
                                    free(fwk_frame);
                                }
                            }
                        }
                    }

                    LOGH("dequeuing pp frame");
                    // lock pairs the PP-input and metadata queues so a
                    // frame is always matched with its own metadata
                    pthread_mutex_lock(&pme->mReprocJobLock);
                    if(!pme->m_inputPPQ.isEmpty() && !pme->m_inputMetaQ.isEmpty()) {
                        qcamera_hal3_pp_buffer_t *pp_buffer =
                            (qcamera_hal3_pp_buffer_t *)pme->m_inputPPQ.dequeue();
                        meta_buffer =
                            (mm_camera_super_buf_t *)pme->m_inputMetaQ.dequeue();
                        jpeg_settings_t *jpeg_settings =
                           (jpeg_settings_t *)pme->m_jpegSettingsQ.dequeue();
                        pthread_mutex_unlock(&pme->mReprocJobLock);
                        qcamera_hal3_pp_data_t *pp_job =
                            (qcamera_hal3_pp_data_t *)malloc(sizeof(qcamera_hal3_pp_data_t));
                        if (pp_job == NULL) {
                            LOGE("no mem for qcamera_hal3_pp_data_t");
                            ret = -1;
                        } else if (meta_buffer == NULL) {
                            LOGE("failed to dequeue from m_inputMetaQ");
                            ret = -1;
                        } else if (pp_buffer == NULL) {
                            LOGE("failed to dequeue from m_inputPPQ");
                            ret = -1;
                        } else if (pp_buffer != NULL){
                            memset(pp_job, 0, sizeof(qcamera_hal3_pp_data_t));
                            pp_job->src_frame = pp_buffer->input;
                            pp_job->src_metadata = meta_buffer;
                            if (meta_buffer->bufs[0] != NULL) {
                                pp_job->metadata = (metadata_buffer_t *)
                                        meta_buffer->bufs[0]->buffer;
                            }
                            pp_job->jpeg_settings = jpeg_settings;
                            pme->m_ongoingPPQ.enqueue((void *)pp_job);
                            if (pme->m_pReprocChannel != NULL) {
                                mm_camera_buf_def_t *meta_buffer_arg = NULL;
                                meta_buffer_arg = meta_buffer->bufs[0];
                                qcamera_fwk_input_pp_data_t fwk_frame;
                                memset(&fwk_frame, 0, sizeof(qcamera_fwk_input_pp_data_t));
                                fwk_frame.frameNumber = pp_buffer->frameNumber;
                                ret = pme->m_pReprocChannel->overrideMetadata(
                                        pp_buffer, meta_buffer_arg,
                                        pp_job->jpeg_settings,
                                        fwk_frame);
                                if (NO_ERROR == ret) {
                                    // add into ongoing PP job Q
                                    ret = pme->m_pReprocChannel->doReprocessOffline(
                                            &fwk_frame, true);
                                    if (NO_ERROR != ret) {
                                        // remove from ongoing PP job Q
                                        pme->m_ongoingPPQ.dequeue(false);
                                    }
                                }
                            } else {
                                LOGE("No reprocess. Calling processPPData directly");
                                ret = pme->processPPData(pp_buffer->input);
                            }
                        }

                        if (0 != ret) {
                            // free pp_job
                            if (pp_job != NULL) {
                                free(pp_job);
                            }
                            // free frame
                            if (pp_buffer != NULL) {
                                if (pp_buffer->input) {
                                    pme->releaseSuperBuf(pp_buffer->input);
                                    free(pp_buffer->input);
                                }
                                free(pp_buffer);
                            }
                            //free metadata
                            if (NULL != meta_buffer) {
                                pme->m_parent->metadataBufDone(meta_buffer);
                                free(meta_buffer);
                            }
                        } else {
                            // success: pp_job keeps ownership of input and
                            // metadata; only the wrapper is released here
                            if (pp_buffer != NULL) {
                                free(pp_buffer);
                            }
                        }
                    } else {
                        pthread_mutex_unlock(&pme->mReprocJobLock);
                    }
                } else {
                    // not active, simply return buf and do no op
                    qcamera_hal3_jpeg_data_t *jpeg_job =
                        (qcamera_hal3_jpeg_data_t *)pme->m_inputJpegQ.dequeue();
                    if (NULL != jpeg_job) {
                        free(jpeg_job);
                    }

                    qcamera_hal3_pp_buffer_t* pp_buf =
                            (qcamera_hal3_pp_buffer_t *)pme->m_inputPPQ.dequeue();
                    if (NULL != pp_buf) {
                        if (pp_buf->input) {
                            pme->releaseSuperBuf(pp_buf->input);
                            free(pp_buf->input);
                            pp_buf->input = NULL;
                        }
                        free(pp_buf);
                    }
                    mm_camera_super_buf_t *metadata = (mm_camera_super_buf_t *)pme->m_inputMetaQ.dequeue();
                    if (metadata != NULL) {
                        pme->m_parent->metadataBufDone(metadata);
                        free(metadata);
                    }
                    qcamera_fwk_input_pp_data_t *fwk_frame =
                            (qcamera_fwk_input_pp_data_t *) pme->m_inputFWKPPQ.dequeue();
                    if (NULL != fwk_frame) {
                        free(fwk_frame);
                    }
                }
            }
            break;
        case CAMERA_CMD_TYPE_EXIT:
            running = 0;
            break;
        default:
            break;
        }
    } while (running);
    LOGD("X");
    return NULL;
}

/* EXIF related helper methods */
+/*=========================================================================== + * FUNCTION : getRational + * + * DESCRIPTION: compose rational struct + * + * PARAMETERS : + * @rat : ptr to struct to store rational info + * @num :num of the rational + * @denom : denom of the rational + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getRational(rat_t *rat, int num, int denom) +{ + if ((0 > num) || (0 >= denom)) { + LOGE("Negative values"); + return BAD_VALUE; + } + if (NULL == rat) { + LOGE("NULL rat input"); + return BAD_VALUE; + } + rat->num = (uint32_t)num; + rat->denom = (uint32_t)denom; + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : parseGPSCoordinate + * + * DESCRIPTION: parse GPS coordinate string + * + * PARAMETERS : + * @coord_str : [input] coordinate string + * @coord : [output] ptr to struct to store coordinate + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int parseGPSCoordinate(const char *coord_str, rat_t* coord) +{ + if(coord == NULL) { + LOGE("error, invalid argument coord == NULL"); + return BAD_VALUE; + } + double degF = atof(coord_str); + if (degF < 0) { + degF = -degF; + } + double minF = (degF - (int) degF) * 60; + double secF = (minF - (int) minF) * 60; + + getRational(&coord[0], (int)degF, 1); + getRational(&coord[1], (int)minF, 1); + getRational(&coord[2], (int)(secF * 10000), 10000); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : getExifDateTime + * + * DESCRIPTION: query exif date time + * + * PARAMETERS : + * @dateTime : string to store exif date time + * @subsecTime : string to store exif subsec time + * + * RETURN : int32_t type of status + * 
NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifDateTime(String8 &dateTime, String8 &subsecTime) +{ + int32_t ret = NO_ERROR; + + //get time and date from system + struct timeval tv; + struct tm timeinfo_data; + + int res = gettimeofday(&tv, NULL); + if (0 == res) { + struct tm *timeinfo = localtime_r(&tv.tv_sec, &timeinfo_data); + if (NULL != timeinfo) { + //Write datetime according to EXIF Spec + //"YYYY:MM:DD HH:MM:SS" (20 chars including \0) + dateTime = String8::format("%04d:%02d:%02d %02d:%02d:%02d", + timeinfo->tm_year + 1900, timeinfo->tm_mon + 1, + timeinfo->tm_mday, timeinfo->tm_hour, + timeinfo->tm_min, timeinfo->tm_sec); + //Write subsec according to EXIF Sepc + subsecTime = String8::format("%06ld", tv.tv_usec); + } else { + LOGE("localtime_r() error"); + ret = UNKNOWN_ERROR; + } + } else if (-1 == res) { + LOGE("gettimeofday() error: %s", strerror(errno)); + ret = UNKNOWN_ERROR; + } else { + LOGE("gettimeofday() unexpected return code: %d", res); + ret = UNKNOWN_ERROR; + } + + return ret; +} + +/*=========================================================================== + * FUNCTION : getExifFocalLength + * + * DESCRIPTION: get exif focal length + * + * PARAMETERS : + * @focalLength : ptr to rational struct to store focal length + * @value : focal length value + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifFocalLength(rat_t *focalLength, float value) +{ + int focalLengthValue = + (int)(value * FOCAL_LENGTH_DECIMAL_PRECISION); + return getRational(focalLength, focalLengthValue, FOCAL_LENGTH_DECIMAL_PRECISION); +} + +/*=========================================================================== + * FUNCTION : getExifExpTimeInfo + * + * DESCRIPTION: get exif exposure time information + * + * PARAMETERS : + * @expoTimeInfo : 
rational exposure time value + * @value : exposure time value + * RETURN : nt32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifExpTimeInfo(rat_t *expoTimeInfo, int64_t value) +{ + + int64_t cal_exposureTime; + if (value != 0) + cal_exposureTime = value; + else + cal_exposureTime = 60; + + return getRational(expoTimeInfo, 1, (int)cal_exposureTime); +} + +/*=========================================================================== + * FUNCTION : getExifGpsProcessingMethod + * + * DESCRIPTION: get GPS processing method + * + * PARAMETERS : + * @gpsProcessingMethod : string to store GPS process method + * @count : length of the string + * @value : the value of the processing method + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifGpsProcessingMethod(char *gpsProcessingMethod, + uint32_t &count, char* value) +{ + if(value != NULL) { + memcpy(gpsProcessingMethod, ExifAsciiPrefix, EXIF_ASCII_PREFIX_SIZE); + count = EXIF_ASCII_PREFIX_SIZE; + strlcpy(gpsProcessingMethod + EXIF_ASCII_PREFIX_SIZE, + value, + GPS_PROCESSING_METHOD_SIZE); + count += (uint32_t)strlen(value); + gpsProcessingMethod[count++] = '\0'; // increase 1 for the last NULL char + return NO_ERROR; + } else { + return BAD_VALUE; + } +} + +/*=========================================================================== + * FUNCTION : getExifLatitude + * + * DESCRIPTION: get exif latitude + * + * PARAMETERS : + * @latitude : ptr to rational struct to store latitude info + * @latRef : character to indicate latitude reference + * @value : value of the latitude + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifLatitude(rat_t 
*latitude, char *latRef, double value) +{ + char str[30]; + snprintf(str, sizeof(str), "%f", value); + if(str[0] != '\0') { + parseGPSCoordinate(str, latitude); + + //set Latitude Ref + float latitudeValue = strtof(str, 0); + if(latitudeValue < 0.0f) { + latRef[0] = 'S'; + } else { + latRef[0] = 'N'; + } + latRef[1] = '\0'; + return NO_ERROR; + }else{ + return BAD_VALUE; + } +} + +/*=========================================================================== + * FUNCTION : getExifLongitude + * + * DESCRIPTION: get exif longitude + * + * PARAMETERS : + * @longitude : ptr to rational struct to store longitude info + * @lonRef : character to indicate longitude reference + * @value : value of the longitude + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifLongitude(rat_t *longitude, char *lonRef, double value) +{ + char str[30]; + snprintf(str, sizeof(str), "%f", value); + if(str[0] != '\0') { + parseGPSCoordinate(str, longitude); + + //set Longitude Ref + float longitudeValue = strtof(str, 0); + if(longitudeValue < 0.0f) { + lonRef[0] = 'W'; + } else { + lonRef[0] = 'E'; + } + lonRef[1] = '\0'; + return NO_ERROR; + }else{ + return BAD_VALUE; + } +} + +/*=========================================================================== + * FUNCTION : getExifAltitude + * + * DESCRIPTION: get exif altitude + * + * PARAMETERS : + * @altitude : ptr to rational struct to store altitude info + * @altRef : character to indicate altitude reference + * @argValue : altitude value + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifAltitude(rat_t *altitude, char *altRef, double argValue) +{ + char str[30]; + snprintf(str, sizeof(str), "%f", argValue); + if (str[0] != '\0') { + double value = atof(str); + *altRef = 0; + 
if(value < 0){ + *altRef = 1; + value = -value; + } + return getRational(altitude, (int)(value * 1000), 1000); + } else { + return BAD_VALUE; + } +} + +/*=========================================================================== + * FUNCTION : getExifGpsDateTimeStamp + * + * DESCRIPTION: get exif GPS date time stamp + * + * PARAMETERS : + * @gpsDateStamp : GPS date time stamp string + * @bufLen : length of the string + * @gpsTimeStamp : ptr to rational struct to store time stamp info + * @value : timestamp value + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifGpsDateTimeStamp(char *gpsDateStamp, uint32_t bufLen, + rat_t *gpsTimeStamp, int64_t value) +{ + char str[30]; + snprintf(str, sizeof(str), "%lld", (long long int)value); + if(str[0] != '\0') { + time_t unixTime = (time_t)atol(str); + struct tm *UTCTimestamp = gmtime(&unixTime); + if (UTCTimestamp != NULL && gpsDateStamp != NULL + && gpsTimeStamp != NULL) { + strftime(gpsDateStamp, bufLen, "%Y:%m:%d", UTCTimestamp); + + getRational(&gpsTimeStamp[0], UTCTimestamp->tm_hour, 1); + getRational(&gpsTimeStamp[1], UTCTimestamp->tm_min, 1); + getRational(&gpsTimeStamp[2], UTCTimestamp->tm_sec, 1); + return NO_ERROR; + } else { + LOGE("Could not get the timestamp"); + return BAD_VALUE; + } + } else { + return BAD_VALUE; + } +} + +/*=========================================================================== + * FUNCTION : getExifExposureValue + * + * DESCRIPTION: get exif GPS date time stamp + * + * PARAMETERS : + * @exposure_val : rational exposure value + * @exposure_comp : exposure compensation + * @step : exposure step + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t getExifExposureValue(srat_t* exposure_val, int32_t exposure_comp, + 
cam_rational_type_t step) +{ + exposure_val->num = exposure_comp * step.numerator; + exposure_val->denom = step.denominator; + return 0; +} + +/*=========================================================================== + * FUNCTION : getExifData + * + * DESCRIPTION: get exif data to be passed into jpeg encoding + * + * PARAMETERS : + * @metadata : metadata of the encoding request + * @jpeg_settings : jpeg_settings for encoding + * @needJpegExifRotation: check if rotation need to added in EXIF + * + * RETURN : exif data from user setting and GPS + *==========================================================================*/ +QCamera3Exif *QCamera3PostProcessor::getExifData(metadata_buffer_t *metadata, + jpeg_settings_t *jpeg_settings, bool needJpegExifRotation) +{ + QCamera3Exif *exif = new QCamera3Exif(); + if (exif == NULL) { + LOGE("No memory for QCamera3Exif"); + return NULL; + } + QCamera3HardwareInterface* hal_obj = NULL; + if (m_parent != NULL) { + hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData; + } else { + LOGE("m_parent is NULL, Error"); + return NULL; + } + + int32_t rc = NO_ERROR; + uint32_t count = 0; + + // add exif entries + String8 dateTime; + String8 subsecTime; + rc = getExifDateTime(dateTime, subsecTime); + if (rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_DATE_TIME, EXIF_ASCII, + (uint32_t)(dateTime.length() + 1), (void *)dateTime.string()); + exif->addEntry(EXIFTAGID_EXIF_DATE_TIME_ORIGINAL, EXIF_ASCII, + (uint32_t)(dateTime.length() + 1), (void *)dateTime.string()); + exif->addEntry(EXIFTAGID_EXIF_DATE_TIME_DIGITIZED, EXIF_ASCII, + (uint32_t)(dateTime.length() + 1), (void *)dateTime.string()); + exif->addEntry(EXIFTAGID_SUBSEC_TIME, EXIF_ASCII, + (uint32_t)(subsecTime.length() + 1), (void *)subsecTime.string()); + exif->addEntry(EXIFTAGID_SUBSEC_TIME_ORIGINAL, EXIF_ASCII, + (uint32_t)(subsecTime.length() + 1), (void *)subsecTime.string()); + exif->addEntry(EXIFTAGID_SUBSEC_TIME_DIGITIZED, EXIF_ASCII, + (uint32_t)(subsecTime.length() 
+ 1), (void *)subsecTime.string()); + } else { + LOGW("getExifDateTime failed"); + } + + + if (metadata != NULL) { + IF_META_AVAILABLE(float, focal_length, CAM_INTF_META_LENS_FOCAL_LENGTH, metadata) { + rat_t focalLength; + rc = getExifFocalLength(&focalLength, *focal_length); + if (rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_FOCAL_LENGTH, + EXIF_RATIONAL, + 1, + (void *)&(focalLength)); + } else { + LOGW("getExifFocalLength failed"); + } + } + + IF_META_AVAILABLE(int32_t, isoSpeed, CAM_INTF_META_SENSOR_SENSITIVITY, metadata) { + int16_t fwk_isoSpeed = (int16_t) *isoSpeed; + exif->addEntry(EXIFTAGID_ISO_SPEED_RATING, EXIF_SHORT, 1, (void *) &(fwk_isoSpeed)); + } + + + IF_META_AVAILABLE(int64_t, sensor_exposure_time, + CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata) { + rat_t sensorExpTime; + rc = getExifExpTimeInfo(&sensorExpTime, *sensor_exposure_time); + if (rc == NO_ERROR){ + exif->addEntry(EXIFTAGID_EXPOSURE_TIME, + EXIF_RATIONAL, + 1, + (void *)&(sensorExpTime)); + } else { + LOGW("getExifExpTimeInfo failed"); + } + } + + char* jpeg_gps_processing_method = jpeg_settings->gps_processing_method; + if (strlen(jpeg_gps_processing_method) > 0) { + char gpsProcessingMethod[EXIF_ASCII_PREFIX_SIZE + + GPS_PROCESSING_METHOD_SIZE]; + count = 0; + rc = getExifGpsProcessingMethod(gpsProcessingMethod, + count, + jpeg_gps_processing_method); + if(rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_GPS_PROCESSINGMETHOD, + EXIF_ASCII, + count, + (void *)gpsProcessingMethod); + } else { + LOGW("getExifGpsProcessingMethod failed"); + } + } + + if (jpeg_settings->gps_coordinates_valid) { + + //latitude + rat_t latitude[3]; + char latRef[2]; + rc = getExifLatitude(latitude, latRef, + jpeg_settings->gps_coordinates[0]); + if(rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_GPS_LATITUDE, + EXIF_RATIONAL, + 3, + (void *)latitude); + exif->addEntry(EXIFTAGID_GPS_LATITUDE_REF, + EXIF_ASCII, + 2, + (void *)latRef); + } else { + LOGW("getExifLatitude failed"); + } + + //longitude + rat_t 
longitude[3]; + char lonRef[2]; + rc = getExifLongitude(longitude, lonRef, + jpeg_settings->gps_coordinates[1]); + if(rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_GPS_LONGITUDE, + EXIF_RATIONAL, + 3, + (void *)longitude); + + exif->addEntry(EXIFTAGID_GPS_LONGITUDE_REF, + EXIF_ASCII, + 2, + (void *)lonRef); + } else { + LOGW("getExifLongitude failed"); + } + + //altitude + rat_t altitude; + char altRef; + rc = getExifAltitude(&altitude, &altRef, + jpeg_settings->gps_coordinates[2]); + if(rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_GPS_ALTITUDE, + EXIF_RATIONAL, + 1, + (void *)&(altitude)); + + exif->addEntry(EXIFTAGID_GPS_ALTITUDE_REF, + EXIF_BYTE, + 1, + (void *)&altRef); + } else { + LOGW("getExifAltitude failed"); + } + } + + if (jpeg_settings->gps_timestamp_valid) { + + char gpsDateStamp[20]; + rat_t gpsTimeStamp[3]; + rc = getExifGpsDateTimeStamp(gpsDateStamp, 20, gpsTimeStamp, + jpeg_settings->gps_timestamp); + if(rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_GPS_DATESTAMP, EXIF_ASCII, + (uint32_t)(strlen(gpsDateStamp) + 1), + (void *)gpsDateStamp); + + exif->addEntry(EXIFTAGID_GPS_TIMESTAMP, + EXIF_RATIONAL, + 3, + (void *)gpsTimeStamp); + } else { + LOGW("getExifGpsDataTimeStamp failed"); + } + } + + IF_META_AVAILABLE(int32_t, exposure_comp, CAM_INTF_PARM_EXPOSURE_COMPENSATION, metadata) { + IF_META_AVAILABLE(cam_rational_type_t, comp_step, CAM_INTF_PARM_EV_STEP, metadata) { + srat_t exposure_val; + rc = getExifExposureValue(&exposure_val, *exposure_comp, *comp_step); + if(rc == NO_ERROR) { + exif->addEntry(EXIFTAGID_EXPOSURE_BIAS_VALUE, + EXIF_SRATIONAL, + 1, + (void *)(&exposure_val)); + } else { + LOGW("getExifExposureValue failed "); + } + } + } + } else { + LOGW("no metadata provided "); + } + +#ifdef ENABLE_MODEL_INFO_EXIF + + char value[PROPERTY_VALUE_MAX]; + if (property_get("ro.product.manufacturer", value, "QCOM-AA") > 0) { + exif->addEntry(EXIFTAGID_MAKE, EXIF_ASCII, + (uint32_t)(strlen(value) + 1), (void *)value); + } else { + LOGW("getExifMaker 
failed"); + } + + if (property_get("ro.product.model", value, "QCAM-AA") > 0) { + exif->addEntry(EXIFTAGID_MODEL, EXIF_ASCII, + (uint32_t)(strlen(value) + 1), (void *)value); + } else { + LOGW("getExifModel failed"); + } + + if (property_get("ro.build.description", value, "QCAM-AA") > 0) { + exif->addEntry(EXIFTAGID_SOFTWARE, EXIF_ASCII, + (uint32_t)(strlen(value) + 1), (void *)value); + } else { + LOGW("getExifSoftware failed"); + } + +#endif + + if (jpeg_settings->image_desc_valid) { + if (exif->addEntry(EXIFTAGID_IMAGE_DESCRIPTION, EXIF_ASCII, + strlen(jpeg_settings->image_desc)+1, + (void *)jpeg_settings->image_desc)) { + LOGW("Adding IMAGE_DESCRIPTION tag failed"); + } + } + + if (needJpegExifRotation) { + int16_t orientation; + switch (jpeg_settings->jpeg_orientation) { + case 0: + orientation = 1; + break; + case 90: + orientation = 6; + break; + case 180: + orientation = 3; + break; + case 270: + orientation = 8; + break; + default: + orientation = 1; + break; + } + exif->addEntry(EXIFTAGID_ORIENTATION, + EXIF_SHORT, + 1, + (void *)&orientation); + exif->addEntry(EXIFTAGID_TN_ORIENTATION, + EXIF_SHORT, + 1, + (void *)&orientation); + + } + + return exif; +} + +/*=========================================================================== + * FUNCTION : QCamera3Exif + * + * DESCRIPTION: constructor of QCamera3Exif + * + * PARAMETERS : None + * + * RETURN : None + *==========================================================================*/ +QCamera3Exif::QCamera3Exif() + : m_nNumEntries(0) +{ + memset(m_Entries, 0, sizeof(m_Entries)); +} + +/*=========================================================================== + * FUNCTION : ~QCamera3Exif + * + * DESCRIPTION: deconstructor of QCamera3Exif. Will release internal memory ptr. 
+ * + * PARAMETERS : None + * + * RETURN : None + *==========================================================================*/ +QCamera3Exif::~QCamera3Exif() +{ + for (uint32_t i = 0; i < m_nNumEntries; i++) { + switch (m_Entries[i].tag_entry.type) { + case EXIF_BYTE: + { + if (m_Entries[i].tag_entry.count > 1 && + m_Entries[i].tag_entry.data._bytes != NULL) { + free(m_Entries[i].tag_entry.data._bytes); + m_Entries[i].tag_entry.data._bytes = NULL; + } + } + break; + case EXIF_ASCII: + { + if (m_Entries[i].tag_entry.data._ascii != NULL) { + free(m_Entries[i].tag_entry.data._ascii); + m_Entries[i].tag_entry.data._ascii = NULL; + } + } + break; + case EXIF_SHORT: + { + if (m_Entries[i].tag_entry.count > 1 && + m_Entries[i].tag_entry.data._shorts != NULL) { + free(m_Entries[i].tag_entry.data._shorts); + m_Entries[i].tag_entry.data._shorts = NULL; + } + } + break; + case EXIF_LONG: + { + if (m_Entries[i].tag_entry.count > 1 && + m_Entries[i].tag_entry.data._longs != NULL) { + free(m_Entries[i].tag_entry.data._longs); + m_Entries[i].tag_entry.data._longs = NULL; + } + } + break; + case EXIF_RATIONAL: + { + if (m_Entries[i].tag_entry.count > 1 && + m_Entries[i].tag_entry.data._rats != NULL) { + free(m_Entries[i].tag_entry.data._rats); + m_Entries[i].tag_entry.data._rats = NULL; + } + } + break; + case EXIF_UNDEFINED: + { + if (m_Entries[i].tag_entry.data._undefined != NULL) { + free(m_Entries[i].tag_entry.data._undefined); + m_Entries[i].tag_entry.data._undefined = NULL; + } + } + break; + case EXIF_SLONG: + { + if (m_Entries[i].tag_entry.count > 1 && + m_Entries[i].tag_entry.data._slongs != NULL) { + free(m_Entries[i].tag_entry.data._slongs); + m_Entries[i].tag_entry.data._slongs = NULL; + } + } + break; + case EXIF_SRATIONAL: + { + if (m_Entries[i].tag_entry.count > 1 && + m_Entries[i].tag_entry.data._srats != NULL) { + free(m_Entries[i].tag_entry.data._srats); + m_Entries[i].tag_entry.data._srats = NULL; + } + } + break; + default: + LOGW("Error, Unknown type"); + 
break; + } + } +} + +/*=========================================================================== + * FUNCTION : addEntry + * + * DESCRIPTION: function to add an entry to exif data + * + * PARAMETERS : + * @tagid : exif tag ID + * @type : data type + * @count : number of data in uint of its type + * @data : input data ptr + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Exif::addEntry(exif_tag_id_t tagid, + exif_tag_type_t type, + uint32_t count, + void *data) +{ + int32_t rc = NO_ERROR; + if(m_nNumEntries >= MAX_HAL3_EXIF_TABLE_ENTRIES) { + LOGE("Number of entries exceeded limit"); + return NO_MEMORY; + } + + m_Entries[m_nNumEntries].tag_id = tagid; + m_Entries[m_nNumEntries].tag_entry.type = type; + m_Entries[m_nNumEntries].tag_entry.count = count; + m_Entries[m_nNumEntries].tag_entry.copy = 1; + switch (type) { + case EXIF_BYTE: + { + if (count > 1) { + uint8_t *values = (uint8_t *)malloc(count); + if (values == NULL) { + LOGE("No memory for byte array"); + rc = NO_MEMORY; + } else { + memcpy(values, data, count); + m_Entries[m_nNumEntries].tag_entry.data._bytes = values; + } + } else { + m_Entries[m_nNumEntries].tag_entry.data._byte = + *(uint8_t *)data; + } + } + break; + case EXIF_ASCII: + { + char *str = NULL; + str = (char *)malloc(count + 1); + if (str == NULL) { + LOGE("No memory for ascii string"); + rc = NO_MEMORY; + } else { + memset(str, 0, count + 1); + memcpy(str, data, count); + m_Entries[m_nNumEntries].tag_entry.data._ascii = str; + } + } + break; + case EXIF_SHORT: + { + uint16_t *exif_data = (uint16_t *)data; + if (count > 1) { + uint16_t *values = + (uint16_t *)malloc(count * sizeof(uint16_t)); + if (values == NULL) { + LOGE("No memory for short array"); + rc = NO_MEMORY; + } else { + memcpy(values, exif_data, count * sizeof(uint16_t)); + m_Entries[m_nNumEntries].tag_entry.data._shorts = values; + } + } 
else { + m_Entries[m_nNumEntries].tag_entry.data._short = + *(uint16_t *)data; + } + } + break; + case EXIF_LONG: + { + uint32_t *exif_data = (uint32_t *)data; + if (count > 1) { + uint32_t *values = + (uint32_t *)malloc(count * sizeof(uint32_t)); + if (values == NULL) { + LOGE("No memory for long array"); + rc = NO_MEMORY; + } else { + memcpy(values, exif_data, count * sizeof(uint32_t)); + m_Entries[m_nNumEntries].tag_entry.data._longs = values; + } + } else { + m_Entries[m_nNumEntries].tag_entry.data._long = + *(uint32_t *)data; + } + } + break; + case EXIF_RATIONAL: + { + rat_t *exif_data = (rat_t *)data; + if (count > 1) { + rat_t *values = (rat_t *)malloc(count * sizeof(rat_t)); + if (values == NULL) { + LOGE("No memory for rational array"); + rc = NO_MEMORY; + } else { + memcpy(values, exif_data, count * sizeof(rat_t)); + m_Entries[m_nNumEntries].tag_entry.data._rats = values; + } + } else { + m_Entries[m_nNumEntries].tag_entry.data._rat = + *(rat_t *)data; + } + } + break; + case EXIF_UNDEFINED: + { + uint8_t *values = (uint8_t *)malloc(count); + if (values == NULL) { + LOGE("No memory for undefined array"); + rc = NO_MEMORY; + } else { + memcpy(values, data, count); + m_Entries[m_nNumEntries].tag_entry.data._undefined = values; + } + } + break; + case EXIF_SLONG: + { + int32_t *exif_data = (int32_t *)data; + if (count > 1) { + int32_t *values = + (int32_t *)malloc(count * sizeof(int32_t)); + if (values == NULL) { + LOGE("No memory for signed long array"); + rc = NO_MEMORY; + } else { + memcpy(values, exif_data, count * sizeof(int32_t)); + m_Entries[m_nNumEntries].tag_entry.data._slongs =values; + } + } else { + m_Entries[m_nNumEntries].tag_entry.data._slong = + *(int32_t *)data; + } + } + break; + case EXIF_SRATIONAL: + { + srat_t *exif_data = (srat_t *)data; + if (count > 1) { + srat_t *values = (srat_t *)malloc(count * sizeof(srat_t)); + if (values == NULL) { + LOGE("No memory for sign rational array"); + rc = NO_MEMORY; + } else { + memcpy(values, 
exif_data, count * sizeof(srat_t)); + m_Entries[m_nNumEntries].tag_entry.data._srats = values; + } + } else { + m_Entries[m_nNumEntries].tag_entry.data._srat = + *(srat_t *)data; + } + } + break; + default: + LOGE("Error, Unknown type"); + break; + } + + // Increase number of entries + m_nNumEntries++; + return rc; +} + +}; // namespace qcamera diff --git a/camera/QCamera2/HAL3/QCamera3PostProc.h b/camera/QCamera2/HAL3/QCamera3PostProc.h new file mode 100644 index 0000000..e2ba073 --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3PostProc.h @@ -0,0 +1,192 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __QCamera3_POSTPROC_H__
#define __QCamera3_POSTPROC_H__

// Camera dependencies
#include "camera3.h"
#include "QCamera3HALHeader.h"
#include "QCameraCmdThread.h"
#include "QCameraQueue.h"

extern "C" {
#include "mm_camera_interface.h"
#include "mm_jpeg_interface.h"
}

namespace qcamera {

class QCamera3Exif;
class QCamera3ProcessingChannel;
class QCamera3ReprocessChannel;
class QCamera3Stream;
class QCamera3StreamMem;

// Input descriptor for a framework (HAL3) buffer that needs post-processing
typedef struct {
    camera3_stream_buffer_t src_frame;// source frame
    mm_camera_buf_def_t metadata_buffer; // metadata buffer accompanying the frame
    mm_camera_buf_def_t input_buffer; // input image buffer definition
    reprocess_config_t reproc_config; // reprocess stream configuration
    buffer_handle_t *output_buffer; // destination framework buffer
    uint32_t frameNumber; // framework request frame number
} qcamera_fwk_input_pp_data_t;

// Book-keeping for one in-flight JPEG encode job
typedef struct {
    uint32_t jobId; // job ID
    uint32_t client_hdl; // handle of jpeg client (obtained when open jpeg)
    mm_camera_super_buf_t *src_frame;// source frame (need to be returned back to kernel after done)
    mm_camera_super_buf_t *src_reproc_frame; // original source frame for reproc if not NULL
    qcamera_fwk_input_pp_data_t *fwk_frame; // source framework buffer
    qcamera_fwk_input_pp_data_t *fwk_src_buffer; // original framework source frame for reproc
    QCamera3Exif *pJpegExifObj; // EXIF object for this encode job
    metadata_buffer_t *metadata; // metadata used for EXIF/encode parameters
    mm_camera_super_buf_t *src_metadata; // metadata super buffer to release after done
    jpeg_settings_t *jpeg_settings; // per-request jpeg settings
} qcamera_hal3_jpeg_data_t;

// Book-keeping for one in-flight reprocess (post-proc) job
typedef struct {
    uint32_t jobId; // job ID
    mm_camera_super_buf_t *src_frame;// source frame (need to be returned back to kernel after done)
    qcamera_fwk_input_pp_data_t *fwk_src_frame;// source frame
    metadata_buffer_t *metadata; // metadata for the reprocess job
    jpeg_settings_t *jpeg_settings; // per-request jpeg settings
    mm_camera_super_buf_t *src_metadata; // metadata super buffer to release after done
} qcamera_hal3_pp_data_t;

// Pairing of a post-proc input super buffer with its output handle
typedef struct {
    mm_camera_super_buf_t *input; // input super buffer
    buffer_handle_t *output; // output framework buffer
    uint32_t frameNumber; // framework request frame number
} qcamera_hal3_pp_buffer_t;

#define MAX_HAL3_EXIF_TABLE_ENTRIES 23
// Container for the EXIF tag table handed to the JPEG encoder
class QCamera3Exif
{
public:
    QCamera3Exif();
    virtual ~QCamera3Exif();

    int32_t addEntry(exif_tag_id_t tagid,
            exif_tag_type_t type,
            uint32_t count,
            void *data);
    uint32_t getNumOfEntries() {return m_nNumEntries;};
    QEXIF_INFO_DATA *getEntries() {return m_Entries;};

private:
    QEXIF_INFO_DATA m_Entries[MAX_HAL3_EXIF_TABLE_ENTRIES]; // exif tags for JPEG encoder
    uint32_t m_nNumEntries; // number of valid entries
};

// Post processor: drives reprocess and JPEG encoding for a processing channel
class QCamera3PostProcessor
{
public:
    QCamera3PostProcessor(QCamera3ProcessingChannel *ch_ctrl);
    virtual ~QCamera3PostProcessor();

    int32_t init(QCamera3StreamMem *mMemory);
    int32_t initJpeg(jpeg_encode_callback_t jpeg_cb,
            cam_dimension_t *m_max_pic_dim,
            void *user_data);
    int32_t deinit();
    int32_t start(const reprocess_config_t &config);
    int32_t stop();
    int32_t flush();
    int32_t processData(qcamera_fwk_input_pp_data_t *frame);
    int32_t processData(mm_camera_super_buf_t *input,
            buffer_handle_t *output, uint32_t frameNumber);
    int32_t processData(mm_camera_super_buf_t *input);
    int32_t processPPData(mm_camera_super_buf_t *frame);
    int32_t processPPMetadata(mm_camera_super_buf_t *reproc_meta);
    int32_t processJpegSettingData(jpeg_settings_t *jpeg_settings);
    qcamera_hal3_pp_data_t *dequeuePPJob(uint32_t frameNumber);
    qcamera_hal3_jpeg_data_t *findJpegJobByJobId(uint32_t jobId);
    void releaseJpegJobData(qcamera_hal3_jpeg_data_t *job);
    int32_t releaseOfflineBuffers(bool all);
    void releasePPJobData(qcamera_hal3_pp_data_t *job);

private:
    int32_t sendEvtNotify(int32_t msg_type, int32_t ext1, int32_t ext2);
    mm_jpeg_color_format getColorfmtFromImgFmt(cam_format_t img_fmt);
    mm_jpeg_format_t getJpegImgTypeFromImgFmt(cam_format_t img_fmt);
    int32_t getJpegEncodeConfig(mm_jpeg_encode_params_t& encode_parm,
            QCamera3Stream *main_stream,
            jpeg_settings_t *jpeg_settings);
    int32_t getFWKJpegEncodeConfig(mm_jpeg_encode_params_t& encode_parm,
            qcamera_fwk_input_pp_data_t *frame,
            jpeg_settings_t *jpeg_settings);
    QCamera3Exif * getExifData(metadata_buffer_t *metadata,
            jpeg_settings_t *jpeg_settings, bool needJpegExifRotation);
    int32_t encodeData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
            uint8_t &needNewSess);
    int32_t encodeFWKData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
            uint8_t &needNewSess);
    void releaseSuperBuf(mm_camera_super_buf_t *super_buf);
    static void releaseNotifyData(void *user_data, void *cookie);
    int32_t processRawImageImpl(mm_camera_super_buf_t *recvd_frame);

    static void releaseJpegData(void *data, void *user_data);
    static void releasePPInputData(void *data, void *user_data);
    static void releaseMetadata(void *data, void *user_data);
    static void releaseOngoingPPData(void *data, void *user_data);

    static void *dataProcessRoutine(void *data);

    bool needsReprocess(qcamera_fwk_input_pp_data_t *frame);

private:
    QCamera3ProcessingChannel *m_parent;
    jpeg_encode_callback_t mJpegCB;
    void * mJpegUserData;
    mm_jpeg_ops_t mJpegHandle;
    uint32_t mJpegClientHandle;
    uint32_t mJpegSessionId;
    cam_jpeg_metadata_t mJpegMetadata;

    uint32_t m_bThumbnailNeeded;
    QCamera3StreamMem *mOutputMem;
    QCamera3ReprocessChannel * m_pReprocChannel;

    QCameraQueue m_inputPPQ; // input queue for postproc
    QCameraQueue m_inputFWKPPQ; // framework input queue for postproc
    QCameraQueue m_ongoingPPQ; // ongoing postproc queue
    QCameraQueue m_inputJpegQ; // input jpeg job queue
    QCameraQueue m_ongoingJpegQ; // ongoing jpeg job queue
    QCameraQueue m_inputRawQ; // input raw job queue
    QCameraQueue m_inputMetaQ; // input meta queue
    QCameraQueue m_jpegSettingsQ; // input jpeg setting queue
    QCameraCmdThread m_dataProcTh; // thread for data processing

    pthread_mutex_t mReprocJobLock;
};

}; // namespace qcamera

#endif /* __QCamera3_POSTPROC_H__ */
diff --git a/camera/QCamera2/HAL3/QCamera3Stream.cpp b/camera/QCamera2/HAL3/QCamera3Stream.cpp
new file mode 100644
index 0000000..71935e8
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Stream.cpp
@@ -0,0 +1,1527 @@
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +*/ + +#define LOG_TAG "QCamera3Stream" + +// Camera dependencies +#include "QCamera3HWI.h" +#include "QCamera3Stream.h" + +extern "C" { +#include "mm_camera_dbg.h" +} + +using namespace android; + +namespace qcamera { +#define MAX_BATCH_SIZE 32 + +const char* QCamera3Stream::mStreamNames[] = { + "CAM_DEFAULT", + "CAM_PREVIEW", + "CAM_POSTVIEW", + "CAM_SNAPSHOT", + "CAM_VIDEO", + "CAM_CALLBACK", + "CAM_IMPL_DEFINED", + "CAM_METADATA", + "CAM_RAW", + "CAM_OFFLINE_PROC", + "CAM_PARM", + "CAM_ANALYSIS" + "CAM_MAX" }; + +/*=========================================================================== + * FUNCTION : get_bufs + * + * DESCRIPTION: static function entry to allocate stream buffers + * + * PARAMETERS : + * @offset : offset info of stream buffers + * @num_bufs : number of buffers allocated + * @initial_reg_flag: flag to indicate if buffer needs to be registered + * at kernel initially + * @bufs : output of allocated buffers + * @ops_tbl : ptr to buf mapping/unmapping ops + * @user_data : user data ptr of ops_tbl + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::get_bufs( + cam_frame_len_offset_t *offset, + uint8_t *num_bufs, + uint8_t **initial_reg_flag, + mm_camera_buf_def_t **bufs, + mm_camera_map_unmap_ops_tbl_t *ops_tbl, + void *user_data) +{ + int32_t rc = NO_ERROR; + QCamera3Stream 
*stream = reinterpret_cast<QCamera3Stream *>(user_data); + if (!stream) { + LOGE("getBufs invalid stream pointer"); + return NO_MEMORY; + } + rc = stream->getBufs(offset, num_bufs, initial_reg_flag, bufs, ops_tbl); + if (NO_ERROR != rc) { + LOGE("stream->getBufs failed"); + return NO_MEMORY; + } + if (stream->mBatchSize) { + //Allocate batch buffers if mBatchSize is non-zero. All the output + //arguments correspond to batch containers and not image buffers + rc = stream->getBatchBufs(num_bufs, initial_reg_flag, + bufs, ops_tbl); + } + return rc; +} + +/*=========================================================================== + * FUNCTION : put_bufs + * + * DESCRIPTION: static function entry to deallocate stream buffers + * + * PARAMETERS : + * @ops_tbl : ptr to buf mapping/unmapping ops + * @user_data : user data ptr of ops_tbl + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::put_bufs( + mm_camera_map_unmap_ops_tbl_t *ops_tbl, + void *user_data) +{ + int32_t rc = NO_ERROR; + QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data); + if (!stream) { + LOGE("putBufs invalid stream pointer"); + return NO_MEMORY; + } + + if (stream->mBatchSize) { + rc = stream->putBatchBufs(ops_tbl); + if (NO_ERROR != rc) { + LOGE("stream->putBatchBufs failed"); + } + } + rc = stream->putBufs(ops_tbl); + return rc; +} + +/*=========================================================================== + * FUNCTION : invalidate_buf + * + * DESCRIPTION: static function entry to invalidate a specific stream buffer + * + * PARAMETERS : + * @index : index of the stream buffer to invalidate + * @user_data : user data ptr of ops_tbl + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t 
QCamera3Stream::invalidate_buf(uint32_t index, void *user_data) +{ + int32_t rc = NO_ERROR; + + QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data); + if (!stream) { + LOGE("invalid stream pointer"); + return NO_MEMORY; + } + if (stream->mBatchSize) { + int32_t retVal = NO_ERROR; + for (size_t i = 0; + i < stream->mBatchBufDefs[index].user_buf.bufs_used; i++) { + uint32_t buf_idx = stream->mBatchBufDefs[index].user_buf.buf_idx[i]; + retVal = stream->invalidateBuf(buf_idx); + if (NO_ERROR != retVal) { + LOGE("invalidateBuf failed for buf_idx: %d err: %d", + buf_idx, retVal); + } + rc |= retVal; + } + } else { + rc = stream->invalidateBuf(index); + } + return rc; +} + +/*=========================================================================== + * FUNCTION : clean_invalidate_buf + * + * DESCRIPTION: static function entry to clean and invalidate a specific stream buffer + * + * PARAMETERS : + * @index : index of the stream buffer to invalidate + * @user_data : user data ptr of ops_tbl + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::clean_invalidate_buf(uint32_t index, void *user_data) +{ + int32_t rc = NO_ERROR; + + QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data); + if (!stream) { + LOGE("invalid stream pointer"); + return NO_MEMORY; + } + if (stream->mBatchSize) { + int32_t retVal = NO_ERROR; + for (size_t i = 0; + i < stream->mBatchBufDefs[index].user_buf.bufs_used; i++) { + uint32_t buf_idx = stream->mBatchBufDefs[index].user_buf.buf_idx[i]; + retVal = stream->cleanInvalidateBuf(buf_idx); + if (NO_ERROR != retVal) { + LOGE("invalidateBuf failed for buf_idx: %d err: %d", + buf_idx, retVal); + } + rc |= retVal; + } + } else { + rc = stream->cleanInvalidateBuf(index); + } + return rc; +} + +/*=========================================================================== + * FUNCTION 
: QCamera3Stream
 *
 * DESCRIPTION: constructor of QCamera3Stream
 *
 * PARAMETERS :
 * @camHandle : camera handle
 * @chId : channel handle
 * @camOps : ptr to camera ops table
 * @paddingInfo: ptr to padding info
 * @channel : back pointer to the owning channel
 *
 * RETURN : None
 *==========================================================================*/
QCamera3Stream::QCamera3Stream(uint32_t camHandle,
        uint32_t chId,
        mm_camera_ops_t *camOps,
        cam_padding_info_t *paddingInfo,
        QCamera3Channel *channel) :
        mCamHandle(camHandle),
        mChannelHandle(chId),
        mHandle(0),
        mCamOps(camOps),
        mStreamInfo(NULL),
        mMemOps(NULL),
        mNumBufs(0),
        mDataCB(NULL),
        mUserData(NULL),
        mDataQ(releaseFrameData, this),
        mStreamInfoBuf(NULL),
        mStreamBufs(NULL),
        mBufDefs(NULL),
        mChannel(channel),
        mBatchSize(0),
        mNumBatchBufs(0),
        mStreamBatchBufs(NULL),
        mBatchBufDefs(NULL),
        mCurrentBatchBufDef(NULL),
        mBufsStaged(0),
        mFreeBatchBufQ(NULL, this)
{
    // Wire up the buffer-ops vtable so mm-camera callbacks land on the
    // static entry points of this object.
    mMemVtbl.user_data = this;
    mMemVtbl.get_bufs = get_bufs;
    mMemVtbl.put_bufs = put_bufs;
    mMemVtbl.invalidate_buf = invalidate_buf;
    mMemVtbl.clean_invalidate_buf = clean_invalidate_buf;
    mMemVtbl.set_config_ops = NULL;
    memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
    // Keep a private copy of the padding info; caller's pointer may not
    // outlive this object.
    memcpy(&mPaddingInfo, paddingInfo, sizeof(cam_padding_info_t));
}

/*===========================================================================
 * FUNCTION : ~QCamera3Stream
 *
 * DESCRIPTION: deconstructor of QCamera3Stream
 *
 * PARAMETERS : None
 *
 * RETURN : None
 *==========================================================================*/
QCamera3Stream::~QCamera3Stream()
{
    if (mStreamInfoBuf != NULL) {
        // Unmap the stream-info buffer from the backend before freeing it
        int rc = mCamOps->unmap_stream_buf(mCamHandle,
                mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, 0, -1);
        if (rc < 0) {
            LOGE("Failed to un-map stream info buffer");
        }
        mStreamInfoBuf->deallocate();
        delete mStreamInfoBuf;
        mStreamInfoBuf = NULL;
    }
    // delete stream
    if (mHandle >
0) { + mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle); + mHandle = 0; + } +} + +/*=========================================================================== + * FUNCTION : init + * + * DESCRIPTION: initialize stream obj + * + * PARAMETERS : + * @streamType : stream type + * @streamFormat : stream format + * @streamDim : stream dimension + * @reprocess_config: reprocess stream input configuration + * @minNumBuffers : minimal buffer count for particular stream type + * @postprocess_mask: PP mask + * @is_type : Image stabilization type, cam_is_type_t + * @batchSize : Number of image buffers in a batch. + * 0: No batch. N: container with N image buffers + * @stream_cb : callback handle + * @userdata : user data + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::init(cam_stream_type_t streamType, + cam_format_t streamFormat, + cam_dimension_t streamDim, + cam_rotation_t streamRotation, + cam_stream_reproc_config_t* reprocess_config, + uint8_t minNumBuffers, + cam_feature_mask_t postprocess_mask, + cam_is_type_t is_type, + uint32_t batchSize, + hal3_stream_cb_routine stream_cb, + void *userdata) +{ + int32_t rc = OK; + ssize_t bufSize = BAD_INDEX; + mm_camera_stream_config_t stream_config; + LOGD("batch size is %d", batchSize); + + mHandle = mCamOps->add_stream(mCamHandle, mChannelHandle); + if (!mHandle) { + LOGE("add_stream failed"); + rc = UNKNOWN_ERROR; + goto done; + } + + // allocate and map stream info memory + mStreamInfoBuf = new QCamera3HeapMemory(1); + if (mStreamInfoBuf == NULL) { + LOGE("no memory for stream info buf obj"); + rc = -ENOMEM; + goto err1; + } + rc = mStreamInfoBuf->allocate(sizeof(cam_stream_info_t)); + if (rc < 0) { + LOGE("no memory for stream info"); + rc = -ENOMEM; + goto err2; + } + + mStreamInfo = + reinterpret_cast<cam_stream_info_t *>(mStreamInfoBuf->getPtr(0)); + 
memset(mStreamInfo, 0, sizeof(cam_stream_info_t)); + mStreamInfo->stream_type = streamType; + mStreamInfo->fmt = streamFormat; + mStreamInfo->dim = streamDim; + mStreamInfo->num_bufs = minNumBuffers; + mStreamInfo->pp_config.feature_mask = postprocess_mask; + mStreamInfo->is_type = is_type; + mStreamInfo->pp_config.rotation = streamRotation; + LOGD("stream_type is %d, feature_mask is %Ld", + mStreamInfo->stream_type, mStreamInfo->pp_config.feature_mask); + + bufSize = mStreamInfoBuf->getSize(0); + if (BAD_INDEX != bufSize) { + rc = mCamOps->map_stream_buf(mCamHandle, + mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, + 0, -1, mStreamInfoBuf->getFd(0), (size_t)bufSize); + if (rc < 0) { + LOGE("Failed to map stream info buffer"); + goto err3; + } + } else { + LOGE("Failed to retrieve buffer size (bad index)"); + goto err3; + } + + mNumBufs = minNumBuffers; + if (reprocess_config != NULL) { + mStreamInfo->reprocess_config = *reprocess_config; + mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BURST; + //mStreamInfo->num_of_burst = reprocess_config->offline.num_of_bufs; + mStreamInfo->num_of_burst = 1; + } else if (batchSize) { + if (batchSize > MAX_BATCH_SIZE) { + LOGE("batchSize:%d is very large", batchSize); + rc = BAD_VALUE; + goto err4; + } + else { + mNumBatchBufs = MAX_INFLIGHT_HFR_REQUESTS / batchSize; + mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BATCH; + mStreamInfo->user_buf_info.frame_buf_cnt = batchSize; + mStreamInfo->user_buf_info.size = + (uint32_t)(sizeof(msm_camera_user_buf_cont_t)); + mStreamInfo->num_bufs = mNumBatchBufs; + //Frame interval is irrelavent since time stamp calculation is not + //required from the mCamOps + mStreamInfo->user_buf_info.frameInterval = 0; + LOGD("batch size is %d", batchSize); + } + } else { + mStreamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS; + } + + // Configure the stream + stream_config.stream_info = mStreamInfo; + stream_config.mem_vtbl = mMemVtbl; + stream_config.padding_info = mPaddingInfo; 
+ stream_config.userdata = this; + stream_config.stream_cb = dataNotifyCB; + stream_config.stream_cb_sync = NULL; + + rc = mCamOps->config_stream(mCamHandle, + mChannelHandle, mHandle, &stream_config); + if (rc < 0) { + LOGE("Failed to config stream, rc = %d", rc); + goto err4; + } + + mDataCB = stream_cb; + mUserData = userdata; + mBatchSize = batchSize; + return 0; + +err4: + mCamOps->unmap_stream_buf(mCamHandle, + mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, 0, -1); +err3: + mStreamInfoBuf->deallocate(); +err2: + delete mStreamInfoBuf; + mStreamInfoBuf = NULL; + mStreamInfo = NULL; +err1: + mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle); + mHandle = 0; + mNumBufs = 0; +done: + return rc; +} + +/*=========================================================================== + * FUNCTION : start + * + * DESCRIPTION: start stream. Will start main stream thread to handle stream + * related ops. + * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::start() +{ + int32_t rc = 0; + + mDataQ.init(); + if (mBatchSize) + mFreeBatchBufQ.init(); + rc = mProcTh.launch(dataProcRoutine, this); + return rc; +} + +/*=========================================================================== + * FUNCTION : stop + * + * DESCRIPTION: stop stream. 
Will stop main stream thread + * + * PARAMETERS : none + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::stop() +{ + int32_t rc = 0; + rc = mProcTh.exit(); + return rc; +} + +/*=========================================================================== + * FUNCTION : processDataNotify + * + * DESCRIPTION: process stream data notify + * + * PARAMETERS : + * @frame : stream frame received + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::processDataNotify(mm_camera_super_buf_t *frame) +{ + LOGD("E\n"); + int32_t rc; + if (mDataQ.enqueue((void *)frame)) { + rc = mProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE); + } else { + LOGD("Stream thread is not active, no ops here"); + bufDone(frame->bufs[0]->buf_idx); + free(frame); + rc = NO_ERROR; + } + LOGD("X\n"); + return rc; +} + +/*=========================================================================== + * FUNCTION : dataNotifyCB + * + * DESCRIPTION: callback for data notify. 
This function is registered with + * mm-camera-interface to handle data notify + * + * PARAMETERS : + * @recvd_frame : stream frame received + * userdata : user data ptr + * + * RETURN : none + *==========================================================================*/ +void QCamera3Stream::dataNotifyCB(mm_camera_super_buf_t *recvd_frame, + void *userdata) +{ + LOGD("E\n"); + QCamera3Stream* stream = (QCamera3Stream *)userdata; + if (stream == NULL || + recvd_frame == NULL || + recvd_frame->bufs[0] == NULL || + recvd_frame->bufs[0]->stream_id != stream->getMyHandle()) { + LOGE("Not a valid stream to handle buf"); + return; + } + + mm_camera_super_buf_t *frame = + (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t)); + if (frame == NULL) { + LOGE("No mem for mm_camera_buf_def_t"); + stream->bufDone(recvd_frame->bufs[0]->buf_idx); + return; + } + *frame = *recvd_frame; + stream->processDataNotify(frame); + return; +} + +/*=========================================================================== + * FUNCTION : dataProcRoutine + * + * DESCRIPTION: function to process data in the main stream thread + * + * PARAMETERS : + * @data : user data ptr + * + * RETURN : none + *==========================================================================*/ +void *QCamera3Stream::dataProcRoutine(void *data) +{ + int running = 1; + int ret; + QCamera3Stream *pme = (QCamera3Stream *)data; + QCameraCmdThread *cmdThread = &pme->mProcTh; + + cmdThread->setName(mStreamNames[pme->mStreamInfo->stream_type]); + + LOGD("E"); + do { + do { + ret = cam_sem_wait(&cmdThread->cmd_sem); + if (ret != 0 && errno != EINVAL) { + LOGE("cam_sem_wait error (%s)", + strerror(errno)); + return NULL; + } + } while (ret != 0); + + // we got notified about new cmd avail in cmd queue + camera_cmd_type_t cmd = cmdThread->getCmd(); + switch (cmd) { + case CAMERA_CMD_TYPE_DO_NEXT_JOB: + { + LOGD("Do next job"); + mm_camera_super_buf_t *frame = + (mm_camera_super_buf_t *)pme->mDataQ.dequeue(); + if 
(NULL != frame) { + if (UNLIKELY(frame->bufs[0]->buf_type == + CAM_STREAM_BUF_TYPE_USERPTR)) { + pme->handleBatchBuffer(frame); + } else if (pme->mDataCB != NULL) { + pme->mDataCB(frame, pme, pme->mUserData); + } else { + // no data cb routine, return buf here + pme->bufDone(frame->bufs[0]->buf_idx); + } + } + } + break; + case CAMERA_CMD_TYPE_EXIT: + LOGH("Exit"); + /* flush data buf queue */ + pme->mDataQ.flush(); + pme->flushFreeBatchBufQ(); + running = 0; + break; + default: + break; + } + } while (running); + LOGD("X"); + return NULL; +} + +/*=========================================================================== + * FUNCTION : bufDone + * + * DESCRIPTION: return stream buffer to kernel + * + * PARAMETERS : + * @index : index of buffer to be returned + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::bufDone(uint32_t index) +{ + int32_t rc = NO_ERROR; + Mutex::Autolock lock(mLock); + + if ((index >= mNumBufs) || (mBufDefs == NULL)) { + LOGE("index; %d, mNumBufs: %d", index, mNumBufs); + return BAD_INDEX; + } + if (mStreamBufs == NULL) + { + LOGE("putBufs already called"); + return INVALID_OPERATION; + } + + if( NULL == mBufDefs[index].mem_info) { + if (NULL == mMemOps) { + LOGE("Camera operations not initialized"); + return NO_INIT; + } + + ssize_t bufSize = mStreamBufs->getSize(index); + + if (BAD_INDEX != bufSize) { + LOGD("Map streamBufIdx: %d", index); + rc = mMemOps->map_ops(index, -1, mStreamBufs->getFd(index), + (size_t)bufSize, CAM_MAPPING_BUF_TYPE_STREAM_BUF, mMemOps->userdata); + if (rc < 0) { + LOGE("Failed to map camera buffer %d", index); + return rc; + } + + rc = mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[index], index); + if (NO_ERROR != rc) { + LOGE("Couldn't find camera buffer definition"); + mMemOps->unmap_ops(index, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, mMemOps->userdata); + return rc; + } 
+        } else {
+            LOGE("Failed to retrieve buffer size (bad index)");
+            return INVALID_OPERATION;
+        }
+    }
+
+    if (UNLIKELY(mBatchSize)) {
+        // Batch mode: stage the image buffer into the current batch
+        // container instead of queueing it directly.
+        rc = aggregateBufToBatch(mBufDefs[index]);
+    } else {
+        rc = mCamOps->qbuf(mCamHandle, mChannelHandle, &mBufDefs[index]);
+        if (rc < 0) {
+            return FAILED_TRANSACTION;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : bufRelease
+ *
+ * DESCRIPTION: release all resources associated with this buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of buffer to be released
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::bufRelease(int32_t index)
+{
+    int32_t rc = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    // index is a signed value; reject negative values explicitly. mNumBufs
+    // is a small unsigned member, so after integer promotion the >= check
+    // alone can let a negative index through and read mBufDefs out of
+    // bounds.
+    if ((index < 0) || (index >= mNumBufs) || (mBufDefs == NULL)) {
+        return BAD_INDEX;
+    }
+
+    if (NULL != mBufDefs[index].mem_info) {
+        if (NULL == mMemOps) {
+            LOGE("Camera operations not initialized");
+            return NO_INIT;
+        }
+
+        // Un-map the buffer from the backend before dropping our reference
+        rc = mMemOps->unmap_ops(index, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                mMemOps->userdata);
+        if (rc < 0) {
+            LOGE("Failed to un-map camera buffer %d", index);
+            return rc;
+        }
+
+        mBufDefs[index].mem_info = NULL;
+    } else {
+        // Supply the missing argument for the %d conversion; the original
+        // format string had no matching argument (undefined behavior).
+        LOGE("Buffer at index %d not registered", index);
+        return BAD_INDEX;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufs
+ *
+ * DESCRIPTION: allocate stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                 at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getBufs(cam_frame_len_offset_t *offset, + uint8_t *num_bufs, + uint8_t **initial_reg_flag, + mm_camera_buf_def_t **bufs, + mm_camera_map_unmap_ops_tbl_t *ops_tbl) +{ + int rc = NO_ERROR; + uint8_t *regFlags; + Mutex::Autolock lock(mLock); + + if (!ops_tbl) { + LOGE("ops_tbl is NULL"); + return INVALID_OPERATION; + } + + mFrameLenOffset = *offset; + mMemOps = ops_tbl; + + if (mStreamBufs != NULL) { + LOGE("Failed getBufs being called twice in a row without a putBufs call"); + return INVALID_OPERATION; + } + mStreamBufs = mChannel->getStreamBufs(mFrameLenOffset.frame_len); + if (!mStreamBufs) { + LOGE("Failed to allocate stream buffers"); + return NO_MEMORY; + } + + for (uint32_t i = 0; i < mNumBufs; i++) { + if (mStreamBufs->valid(i)) { + ssize_t bufSize = mStreamBufs->getSize(i); + if (BAD_INDEX != bufSize) { + rc = ops_tbl->map_ops(i, -1, mStreamBufs->getFd(i), + (size_t)bufSize, CAM_MAPPING_BUF_TYPE_STREAM_BUF, + ops_tbl->userdata); + if (rc < 0) { + LOGE("map_stream_buf failed: %d", rc); + for (uint32_t j = 0; j < i; j++) { + if (mStreamBufs->valid(j)) { + ops_tbl->unmap_ops(j, -1, + CAM_MAPPING_BUF_TYPE_STREAM_BUF, + ops_tbl->userdata); + } + } + return INVALID_OPERATION; + } + } else { + LOGE("Failed to retrieve buffer size (bad index)"); + return INVALID_OPERATION; + } + } + } + + //regFlags array is allocated by us, but consumed and freed by mm-camera-interface + regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBufs); + if (!regFlags) { + LOGE("Out of memory"); + for (uint32_t i = 0; i < mNumBufs; i++) { + if (mStreamBufs->valid(i)) { + ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, + ops_tbl->userdata); + } + } + return NO_MEMORY; + } + memset(regFlags, 0, sizeof(uint8_t) * mNumBufs); + + mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t)); + if (mBufDefs == NULL) { + LOGE("Failed to allocate mm_camera_buf_def_t %d", rc); + for (uint32_t i = 0; i < mNumBufs; i++) { + if 
(mStreamBufs->valid(i)) { + ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, + ops_tbl->userdata); + } + } + free(regFlags); + regFlags = NULL; + return INVALID_OPERATION; + } + memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t)); + for (uint32_t i = 0; i < mNumBufs; i++) { + if (mStreamBufs->valid(i)) { + mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i); + } + } + + rc = mStreamBufs->getRegFlags(regFlags); + if (rc < 0) { + LOGE("getRegFlags failed %d", rc); + for (uint32_t i = 0; i < mNumBufs; i++) { + if (mStreamBufs->valid(i)) { + ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, + ops_tbl->userdata); + } + } + free(mBufDefs); + mBufDefs = NULL; + free(regFlags); + regFlags = NULL; + return INVALID_OPERATION; + } + + *num_bufs = mNumBufs; + *initial_reg_flag = regFlags; + *bufs = mBufDefs; + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : putBufs + * + * DESCRIPTION: deallocate stream buffers + * + * PARAMETERS : + * @ops_tbl : ptr to buf mapping/unmapping ops + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::putBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl) +{ + int rc = NO_ERROR; + Mutex::Autolock lock(mLock); + + for (uint32_t i = 0; i < mNumBufs; i++) { + if (mStreamBufs->valid(i) && NULL != mBufDefs[i].mem_info) { + rc = ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata); + if (rc < 0) { + LOGE("un-map stream buf failed: %d", rc); + } + } + } + mBufDefs = NULL; // mBufDefs just keep a ptr to the buffer + // mm-camera-interface own the buffer, so no need to free + memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset)); + + if (mStreamBufs == NULL) { + LOGE("getBuf failed previously, or calling putBufs twice"); + } + + mChannel->putStreamBufs(); + + //need to set mStreamBufs to null because 
putStreamBufs deletes that memory + mStreamBufs = NULL; + + return rc; +} + +/*=========================================================================== + * FUNCTION : invalidateBuf + * + * DESCRIPTION: invalidate a specific stream buffer + * + * PARAMETERS : + * @index : index of the buffer to invalidate + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::invalidateBuf(uint32_t index) +{ + if (mStreamBufs == NULL) { + LOGE("putBufs already called"); + return INVALID_OPERATION; + } else + return mStreamBufs->invalidateCache(index); +} + +/*=========================================================================== + * FUNCTION : cleanInvalidateBuf + * + * DESCRIPTION: clean and invalidate a specific stream buffer + * + * PARAMETERS : + * @index : index of the buffer to invalidate + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::cleanInvalidateBuf(uint32_t index) +{ + if (mStreamBufs == NULL) { + LOGE("putBufs already called"); + return INVALID_OPERATION; + } else + return mStreamBufs->cleanInvalidateCache(index); +} + +/*=========================================================================== + * FUNCTION : getFrameOffset + * + * DESCRIPTION: query stream buffer frame offset info + * + * PARAMETERS : + * @offset : reference to struct to store the queried frame offset info + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::getFrameOffset(cam_frame_len_offset_t &offset) +{ + offset = mFrameLenOffset; + return 0; +} + +/*=========================================================================== + * FUNCTION : getFrameDimension + * + * 
DESCRIPTION: query stream frame dimension info + * + * PARAMETERS : + * @dim : reference to struct to store the queried frame dimension + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::getFrameDimension(cam_dimension_t &dim) +{ + if (mStreamInfo != NULL) { + dim = mStreamInfo->dim; + return 0; + } + return -1; +} + +/*=========================================================================== + * FUNCTION : getFormat + * + * DESCRIPTION: query stream format + * + * PARAMETERS : + * @fmt : reference to stream format + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::getFormat(cam_format_t &fmt) +{ + if (mStreamInfo != NULL) { + fmt = mStreamInfo->fmt; + return 0; + } + return -1; +} + +/*=========================================================================== + * FUNCTION : getMyServerID + * + * DESCRIPTION: query server stream ID + * + * PARAMETERS : None + * + * RETURN : stream ID from server + *==========================================================================*/ +uint32_t QCamera3Stream::getMyServerID() { + if (mStreamInfo != NULL) { + return mStreamInfo->stream_svr_id; + } else { + return 0; + } +} + +/*=========================================================================== + * FUNCTION : getMyType + * + * DESCRIPTION: query stream type + * + * PARAMETERS : None + * + * RETURN : type of stream + *==========================================================================*/ +cam_stream_type_t QCamera3Stream::getMyType() const +{ + if (mStreamInfo != NULL) { + return mStreamInfo->stream_type; + } else { + return CAM_STREAM_TYPE_MAX; + } +} + +/*=========================================================================== + * FUNCTION : mapBuf + * + * DESCRIPTION: 
map stream related buffer to backend server
+ *
+ * PARAMETERS :
+ *   @buf_type : mapping type of buffer
+ *   @buf_idx  : index of buffer
+ *   @plane_idx: plane index
+ *   @fd       : fd of the buffer
+ *   @size     : length of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::mapBuf(uint8_t buf_type, uint32_t buf_idx,
+        int32_t plane_idx, int fd, size_t size)
+{
+    // Thin pass-through to the mm-camera-interface mapping op using this
+    // stream's camera/channel/stream handles; no local state is touched.
+    return mCamOps->map_stream_buf(mCamHandle, mChannelHandle,
+                                   mHandle, buf_type,
+                                   buf_idx, plane_idx,
+                                   fd, size);
+
+}
+
+/*===========================================================================
+ * FUNCTION   : unmapBuf
+ *
+ * DESCRIPTION: unmap stream related buffer to backend server
+ *
+ * PARAMETERS :
+ *   @buf_type : mapping type of buffer
+ *   @buf_idx  : index of buffer
+ *   @plane_idx: plane index
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::unmapBuf(uint8_t buf_type, uint32_t buf_idx, int32_t plane_idx)
+{
+    // Counterpart of mapBuf(); releases the backend mapping for one buffer.
+    return mCamOps->unmap_stream_buf(mCamHandle, mChannelHandle,
+                                     mHandle, buf_type,
+                                     buf_idx, plane_idx);
+}
+
+/*===========================================================================
+ * FUNCTION   : setParameter
+ *
+ * DESCRIPTION: set stream based parameters
+ *
+ * PARAMETERS :
+ *   @param   : ptr to parameters to be set; on success it is updated with
+ *              the value written back by the backend
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::setParameter(cam_stream_parm_buffer_t &param)
+{
+    int32_t rc = NO_ERROR;
+    // NOTE(review): mStreamInfo is dereferenced without a NULL check here;
+    // presumably callers only invoke this after a successful init() --
+    // confirm against the channel code.
+    mStreamInfo->parm_buf = param;
+    rc = mCamOps->set_stream_parms(mCamHandle,
+            mChannelHandle,
+            mHandle,
+            &mStreamInfo->parm_buf);
+    if (rc == NO_ERROR) {
+        // The backend may update the shared parameter buffer in place;
+        // copy the result back out to the caller on success.
+        param = mStreamInfo->parm_buf;
+    }
+    return rc;
+}
+
+/*=========================================================================== + * FUNCTION : releaseFrameData + * + * DESCRIPTION: callback function to release frame data node + * + * PARAMETERS : + * @data : ptr to post process input data + * @user_data : user data ptr (QCameraReprocessor) + * + * RETURN : None + *==========================================================================*/ +void QCamera3Stream::releaseFrameData(void *data, void *user_data) +{ + QCamera3Stream *pme = (QCamera3Stream *)user_data; + mm_camera_super_buf_t *frame = (mm_camera_super_buf_t *)data; + if (NULL != pme) { + if (UNLIKELY(pme->mBatchSize)) { + /* For batch mode, the batch buffer is added to empty list */ + if(!pme->mFreeBatchBufQ.enqueue((void*) frame->bufs[0])) { + LOGE("batchBuf.buf_idx: %d enqueue failed", + frame->bufs[0]->buf_idx); + } + } else { + pme->bufDone(frame->bufs[0]->buf_idx); + } + } +} + +/*=========================================================================== + * FUNCTION : getBatchBufs + * + * DESCRIPTION: allocate batch containers for the stream + * + * PARAMETERS : + * @num_bufs : number of buffers allocated + * @initial_reg_flag: flag to indicate if buffer needs to be registered + * at kernel initially + * @bufs : output of allocated buffers + * @ops_tbl : ptr to buf mapping/unmapping ops + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::getBatchBufs( + uint8_t *num_bufs, uint8_t **initial_reg_flag, + mm_camera_buf_def_t **bufs, + mm_camera_map_unmap_ops_tbl_t *ops_tbl) +{ + int rc = NO_ERROR; + uint8_t *regFlags; + + if (!ops_tbl || !num_bufs || !initial_reg_flag || !bufs) { + LOGE("input args NULL"); + return INVALID_OPERATION; + } + LOGH("Batch container allocation stream type = %d", + getMyType()); + + Mutex::Autolock lock(mLock); + + mMemOps = ops_tbl; + + //Allocate batch containers + 
mStreamBatchBufs = new QCamera3HeapMemory(1); + if (!mStreamBatchBufs) { + LOGE("unable to create batch container memory"); + return NO_MEMORY; + } + // Allocating single buffer file-descriptor for all batch containers, + // mStreamBatchBufs considers all the container bufs as a single buffer. But + // QCamera3Stream manages that single buffer as multiple batch buffers + LOGD("Allocating batch container memory. numBatch: %d size: %d", + mNumBatchBufs, mStreamInfo->user_buf_info.size); + rc = mStreamBatchBufs->allocate( + mNumBatchBufs * mStreamInfo->user_buf_info.size); + if (rc < 0) { + LOGE("unable to allocate batch container memory"); + rc = NO_MEMORY; + goto err1; + } + + /* map batch buffers. getCnt here returns 1 because of single FD across + * batch bufs */ + for (uint32_t i = 0; i < mStreamBatchBufs->getCnt(); i++) { + if (mNumBatchBufs) { + //For USER_BUF, size = number_of_container bufs instead of the total + //buf size + rc = ops_tbl->map_ops(i, -1, mStreamBatchBufs->getFd(i), + (size_t)mNumBatchBufs, CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, + ops_tbl->userdata); + if (rc < 0) { + LOGE("Failed to map stream container buffer: %d", + rc); + //Unmap all the buffers that were successfully mapped before + //this buffer mapping failed + for (size_t j = 0; j < i; j++) { + ops_tbl->unmap_ops(j, -1, + CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, + ops_tbl->userdata); + } + goto err2; + } + } else { + LOGE("Failed to retrieve buffer size (bad index)"); + return INVALID_OPERATION; + } + } + + LOGD("batch bufs successfully mmapped = %d", + mNumBatchBufs); + + /* regFlags array is allocated here, but consumed and freed by + * mm-camera-interface */ + regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBatchBufs); + if (!regFlags) { + LOGE("Out of memory"); + rc = NO_MEMORY; + goto err3; + } + memset(regFlags, 0, sizeof(uint8_t) * mNumBatchBufs); + /* Do not queue the container buffers as the image buffers are not yet + * queued. 
mStreamBatchBufs->getRegFlags is not called as mStreamBatchBufs + * considers single buffer is allocated */ + for (uint32_t i = 0; i < mNumBatchBufs; i++) { + regFlags[i] = 0; + } + + mBatchBufDefs = (mm_camera_buf_def_t *) + malloc(mNumBatchBufs * sizeof(mm_camera_buf_def_t)); + if (mBatchBufDefs == NULL) { + LOGE("mBatchBufDefs memory allocation failed"); + rc = INVALID_OPERATION; + goto err4; + } + memset(mBatchBufDefs, 0, mNumBatchBufs * sizeof(mm_camera_buf_def_t)); + + //Populate bufDef and queue to free batchBufQ + for (uint32_t i = 0; i < mNumBatchBufs; i++) { + getBatchBufDef(mBatchBufDefs[i], i); + if(mFreeBatchBufQ.enqueue((void*) &mBatchBufDefs[i])) { + LOGD("mBatchBufDefs[%d]: 0x%p", i, &mBatchBufDefs[i]); + } else { + LOGE("enqueue mBatchBufDefs[%d] failed", i); + } + } + + *num_bufs = mNumBatchBufs; + *initial_reg_flag = regFlags; + *bufs = mBatchBufDefs; + LOGH("stream type: %d, numBufs(batch): %d", + mStreamInfo->stream_type, mNumBatchBufs); + + return NO_ERROR; +err4: + free(regFlags); +err3: + for (size_t i = 0; i < mStreamBatchBufs->getCnt(); i++) { + ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, + ops_tbl->userdata); + } +err2: + mStreamBatchBufs->deallocate(); +err1: + delete mStreamBatchBufs; + mStreamBatchBufs = NULL; + return rc; +} + +/*=========================================================================== + * FUNCTION : putBatchBufs + * + * DESCRIPTION: deallocate stream batch buffers + * + * PARAMETERS : + * @ops_tbl : ptr to buf mapping/unmapping ops + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::putBatchBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl) +{ + int rc = NO_ERROR; + Mutex::Autolock lock(mLock); + + if (mStreamBatchBufs) { + for (uint32_t i = 0; i < mStreamBatchBufs->getCnt(); i++) { + rc = ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, + 
ops_tbl->userdata); + if (rc < 0) { + LOGE("un-map batch buf failed: %d", rc); + } + } + mStreamBatchBufs->deallocate(); + delete mStreamBatchBufs; + mStreamBatchBufs = NULL; + } + // mm-camera-interface frees bufDefs even though bufDefs are allocated by + // QCamera3Stream. Don't free here + mBatchBufDefs = NULL; + + return rc; +} + +/*=========================================================================== + * FUNCTION : getBatchBufDef + * + * DESCRIPTION: query detailed buffer information of batch buffer + * + * PARAMETERS : + * @bufDef : [output] reference to struct to store buffer definition + * @@index : [input] index of the buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::getBatchBufDef(mm_camera_buf_def_t& batchBufDef, + int32_t index) +{ + int rc = NO_ERROR; + memset(&batchBufDef, 0, sizeof(mm_camera_buf_def_t)); + if (mStreamBatchBufs) { + //Single file descriptor for all batch buffers + batchBufDef.fd = mStreamBatchBufs->getFd(0); + batchBufDef.buf_type = CAM_STREAM_BUF_TYPE_USERPTR; + batchBufDef.frame_len = mStreamInfo->user_buf_info.size; + batchBufDef.mem_info = mStreamBatchBufs; + batchBufDef.buffer = (uint8_t *)mStreamBatchBufs->getPtr(0) + + (index * mStreamInfo->user_buf_info.size); + batchBufDef.buf_idx = index; + batchBufDef.user_buf.num_buffers = mBatchSize; + batchBufDef.user_buf.bufs_used = 0; + batchBufDef.user_buf.plane_buf = mBufDefs; + } + + return rc; +} + +/*=========================================================================== + * FUNCTION : aggregateBufToBatch + * + * DESCRIPTION: queue batch container to downstream. 
+ * + * PARAMETERS : + * @bufDef : image buffer to be aggregated into batch + * + * RETURN : int32_t type of status + * NO_ERROR -- success always + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::aggregateBufToBatch(mm_camera_buf_def_t& bufDef) +{ + int32_t rc = NO_ERROR; + + if (UNLIKELY(!mBatchSize)) { + LOGE("Batch mod is not enabled"); + return INVALID_OPERATION; + } + if (!mCurrentBatchBufDef) { + mCurrentBatchBufDef = (mm_camera_buf_def_t *)mFreeBatchBufQ.dequeue(); + if (!mCurrentBatchBufDef) { + LOGE("No empty batch buffers is available"); + return NO_MEMORY; + } + LOGD("batch buffer: %d dequeued from empty buffer list", + mCurrentBatchBufDef->buf_idx); + } + if (mBufsStaged == mCurrentBatchBufDef->user_buf.num_buffers) { + LOGE("batch buffer is already full"); + return NO_MEMORY; + } + + mCurrentBatchBufDef->user_buf.buf_idx[mBufsStaged] = bufDef.buf_idx; + mBufsStaged++; + LOGD("buffer id: %d aggregated into batch buffer id: %d", + bufDef.buf_idx, mCurrentBatchBufDef->buf_idx); + return rc; +} + +/*=========================================================================== + * FUNCTION : queueBatchBuf + * + * DESCRIPTION: queue batch container to downstream. 
+ * + * PARAMETERS : None + * + * RETURN : int32_t type of status + * NO_ERROR -- success always + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::queueBatchBuf() +{ + int32_t rc = NO_ERROR; + + if (!mCurrentBatchBufDef) { + LOGE("No buffers were queued into batch"); + return INVALID_OPERATION; + } + //bufs_used: number of valid buffers in the batch buffers + mCurrentBatchBufDef->user_buf.bufs_used = mBufsStaged; + + //if mBufsStaged < num_buffers, initialize the buf_idx to -1 for rest of the + //buffers + for (size_t i = mBufsStaged; i < mCurrentBatchBufDef->user_buf.num_buffers; + i++) { + mCurrentBatchBufDef->user_buf.buf_idx[i] = -1; + } + + rc = mCamOps->qbuf(mCamHandle, mChannelHandle, mCurrentBatchBufDef); + if (rc < 0) { + LOGE("queueing of batch buffer: %d failed with err: %d", + mCurrentBatchBufDef->buf_idx, rc); + return FAILED_TRANSACTION; + } + LOGD("Batch buf id: %d queued. bufs_used: %d", + mCurrentBatchBufDef->buf_idx, + mCurrentBatchBufDef->user_buf.bufs_used); + + mCurrentBatchBufDef = NULL; + mBufsStaged = 0; + + return rc; +} + +/*=========================================================================== + * FUNCTION : handleBatchBuffer + * + * DESCRIPTION: separate individual buffers from the batch and issue callback + * + * PARAMETERS : + * @superBuf : Received superbuf containing batch buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success always + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3Stream::handleBatchBuffer(mm_camera_super_buf_t *superBuf) +{ + int32_t rc = NO_ERROR; + mm_camera_super_buf_t *frame; + mm_camera_buf_def_t batchBuf; + + if (LIKELY(!mBatchSize)) { + LOGE("Stream: %d not in batch mode, but batch buffer received", + getMyType()); + return INVALID_OPERATION; + } + if (!mDataCB) { + LOGE("Data callback not set for batch mode"); + return BAD_VALUE; + 
} + if (!superBuf->bufs[0]) { + LOGE("superBuf->bufs[0] is NULL!!"); + return BAD_VALUE; + } + + /* Copy the batch buffer to local and queue the batch buffer to empty queue + * to handle the new requests received while callbacks are in progress */ + batchBuf = *superBuf->bufs[0]; + if (!mFreeBatchBufQ.enqueue((void*) superBuf->bufs[0])) { + LOGE("batchBuf.buf_idx: %d enqueue failed", + batchBuf.buf_idx); + free(superBuf); + return NO_MEMORY; + } + LOGD("Received batch buffer: %d bufs_used: %d", + batchBuf.buf_idx, batchBuf.user_buf.bufs_used); + //dummy local bufDef to issue multiple callbacks + mm_camera_buf_def_t buf; + memset(&buf, 0, sizeof(mm_camera_buf_def_t)); + + for (size_t i = 0; i < batchBuf.user_buf.bufs_used; i++) { + int32_t buf_idx = batchBuf.user_buf.buf_idx[i]; + buf = mBufDefs[buf_idx]; + + /* this memory is freed inside dataCB. Should not be freed here */ + frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t)); + if (!frame) { + LOGE("malloc failed. Buffers will be dropped"); + break; + } else { + memcpy(frame, superBuf, sizeof(mm_camera_super_buf_t)); + frame->bufs[0] = &buf; + + mDataCB(frame, this, mUserData); + } + } + LOGD("batch buffer: %d callbacks done", + batchBuf.buf_idx); + + free(superBuf); + return rc; +} + +/*=========================================================================== + * FUNCTION : flushFreeBatchBufQ + * + * DESCRIPTION: dequeue all the entries of mFreeBatchBufQ and call flush. 
+ * QCameraQueue::flush calls 'free(node->data)' which should be + * avoided for mFreeBatchBufQ as the entries are not allocated + * during each enqueue + * + * PARAMETERS : None + * + * RETURN : None + *==========================================================================*/ +void QCamera3Stream::flushFreeBatchBufQ() +{ + while (!mFreeBatchBufQ.isEmpty()) { + mFreeBatchBufQ.dequeue(); + } + mFreeBatchBufQ.flush(); +} + +}; // namespace qcamera diff --git a/camera/QCamera2/HAL3/QCamera3Stream.h b/camera/QCamera2/HAL3/QCamera3Stream.h new file mode 100644 index 0000000..5825be0 --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3Stream.h @@ -0,0 +1,170 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __QCAMERA3_STREAM_H__ +#define __QCAMERA3_STREAM_H__ + +// System dependencies +#include <utils/Mutex.h> + +// Camera dependencies +#include "QCamera3Mem.h" +#include "QCamera3StreamMem.h" +#include "QCameraCmdThread.h" +#include "QCameraQueue.h" + +extern "C" { +#include "mm_camera_interface.h" +} + +namespace qcamera { + +class QCamera3Channel; +class QCamera3Stream; + +typedef void (*hal3_stream_cb_routine)(mm_camera_super_buf_t *frame, + QCamera3Stream *stream, + void *userdata); + +class QCamera3Stream +{ +public: + QCamera3Stream(uint32_t camHandle, + uint32_t chId, + mm_camera_ops_t *camOps, + cam_padding_info_t *paddingInfo, + QCamera3Channel *channel); + virtual ~QCamera3Stream(); + virtual int32_t init(cam_stream_type_t streamType, + cam_format_t streamFormat, + cam_dimension_t streamDim, + cam_rotation_t streamRotation, + cam_stream_reproc_config_t* reprocess_config, + uint8_t minStreamBufNum, + cam_feature_mask_t postprocess_mask, + cam_is_type_t is_type, + uint32_t batchSize, + hal3_stream_cb_routine stream_cb, + void *userdata); + virtual int32_t bufDone(uint32_t index); + virtual int32_t bufRelease(int32_t index); + virtual int32_t processDataNotify(mm_camera_super_buf_t *bufs); + virtual int32_t start(); + virtual int32_t stop(); + virtual int32_t queueBatchBuf(); + + static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame, void *userdata); + static void *dataProcRoutine(void *data); + uint32_t 
getMyHandle() const {return mHandle;} + cam_stream_type_t getMyType() const; + int32_t getFrameOffset(cam_frame_len_offset_t &offset); + int32_t getFrameDimension(cam_dimension_t &dim); + int32_t getFormat(cam_format_t &fmt); + QCamera3StreamMem *getStreamBufs() {return mStreamBufs;}; + uint32_t getMyServerID(); + + int32_t mapBuf(uint8_t buf_type, uint32_t buf_idx, + int32_t plane_idx, int fd, size_t size); + int32_t unmapBuf(uint8_t buf_type, uint32_t buf_idx, int32_t plane_idx); + int32_t setParameter(cam_stream_parm_buffer_t ¶m); + cam_stream_info_t* getStreamInfo() const {return mStreamInfo; }; + + static void releaseFrameData(void *data, void *user_data); + +private: + uint32_t mCamHandle; + uint32_t mChannelHandle; + uint32_t mHandle; // stream handle from mm-camera-interface + mm_camera_ops_t *mCamOps; + cam_stream_info_t *mStreamInfo; // ptr to stream info buf + mm_camera_stream_mem_vtbl_t mMemVtbl; + mm_camera_map_unmap_ops_tbl_t *mMemOps; + uint8_t mNumBufs; + hal3_stream_cb_routine mDataCB; + void *mUserData; + + QCameraQueue mDataQ; + QCameraCmdThread mProcTh; // thread for dataCB + + QCamera3HeapMemory *mStreamInfoBuf; + QCamera3StreamMem *mStreamBufs; + mm_camera_buf_def_t *mBufDefs; + cam_frame_len_offset_t mFrameLenOffset; + cam_padding_info_t mPaddingInfo; + QCamera3Channel *mChannel; + Mutex mLock; //Lock controlling access to 'mBufDefs' + + uint32_t mBatchSize; // 0: No batch, non-0: Number of imaage bufs in a batch + uint8_t mNumBatchBufs; //Number of batch buffers which can hold image bufs + QCamera3HeapMemory *mStreamBatchBufs; //Pointer to batch buffers memory + mm_camera_buf_def_t *mBatchBufDefs; //Pointer to array of batch bufDefs + mm_camera_buf_def_t *mCurrentBatchBufDef; //batch buffer in progress during + //aggregation + uint32_t mBufsStaged; //Number of image buffers aggregated into + //currentBatchBufDef + QCameraQueue mFreeBatchBufQ; //Buffer queue containing empty batch buffers + + static int32_t get_bufs( + cam_frame_len_offset_t 
*offset, + uint8_t *num_bufs, + uint8_t **initial_reg_flag, + mm_camera_buf_def_t **bufs, + mm_camera_map_unmap_ops_tbl_t *ops_tbl, + void *user_data); + static int32_t put_bufs( + mm_camera_map_unmap_ops_tbl_t *ops_tbl, + void *user_data); + static int32_t invalidate_buf(uint32_t index, void *user_data); + static int32_t clean_invalidate_buf(uint32_t index, void *user_data); + + int32_t getBufs(cam_frame_len_offset_t *offset, + uint8_t *num_bufs, + uint8_t **initial_reg_flag, + mm_camera_buf_def_t **bufs, + mm_camera_map_unmap_ops_tbl_t *ops_tbl); + int32_t putBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl); + int32_t invalidateBuf(uint32_t index); + int32_t cleanInvalidateBuf(uint32_t index); + int32_t getBatchBufs( + uint8_t *num_bufs, uint8_t **initial_reg_flag, + mm_camera_buf_def_t **bufs, + mm_camera_map_unmap_ops_tbl_t *ops_tbl); + int32_t putBatchBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl); + int32_t getBatchBufDef(mm_camera_buf_def_t& batchBufDef, + int32_t index); + int32_t aggregateBufToBatch(mm_camera_buf_def_t& bufDef); + int32_t handleBatchBuffer(mm_camera_super_buf_t *superBuf); + + static const char* mStreamNames[CAM_STREAM_TYPE_MAX]; + void flushFreeBatchBufQ(); +}; + +}; // namespace qcamera + +#endif /* __QCAMERA3_STREAM_H__ */ diff --git a/camera/QCamera2/HAL3/QCamera3StreamMem.cpp b/camera/QCamera2/HAL3/QCamera3StreamMem.cpp new file mode 100644 index 0000000..3843afb --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3StreamMem.cpp @@ -0,0 +1,477 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + */ + +#define LOG_TAG "QCamera3StreamMem" + +// System dependencies +#include "gralloc_priv.h" + +// Camera dependencies +#include "QCamera3StreamMem.h" + +using namespace android; + +namespace qcamera { + +/*=========================================================================== + * FUNCTION : QCamera3StreamMem + * + * DESCRIPTION: default constructor of QCamera3StreamMem + * + * PARAMETERS : none + * + * RETURN : None + *==========================================================================*/ +QCamera3StreamMem::QCamera3StreamMem(uint32_t maxHeapBuffer, bool queueHeapBuffers) : + mHeapMem(maxHeapBuffer), + mGrallocMem(maxHeapBuffer), + mMaxHeapBuffers(maxHeapBuffer), + mQueueHeapBuffers(queueHeapBuffers) +{ +} + +/*=========================================================================== + * FUNCTION : QCamera3StreamMem + * + * DESCRIPTION: destructor of QCamera3StreamMem + * + * PARAMETERS : none + * + * RETURN : None + *==========================================================================*/ +QCamera3StreamMem::~QCamera3StreamMem() +{ + clear(); +} + +/*=========================================================================== + * FUNCTION : getCnt + * + * DESCRIPTION: query number of buffers allocated/registered + * + * PARAMETERS : none + * + * RETURN : number of buffers allocated + *==========================================================================*/ +uint32_t QCamera3StreamMem::getCnt() +{ + Mutex::Autolock lock(mLock); + + return (mHeapMem.getCnt() + mGrallocMem.getCnt()); +} + +/*=========================================================================== + * FUNCTION : getRegFlags + * + * DESCRIPTION: query initial reg flags + * + * PARAMETERS : + * @regFlags: initial reg flags of the allocated/registered buffers + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3StreamMem::getRegFlags(uint8_t * 
regFlags) +{ + // Assume that all buffers allocated can be queued. + for (uint32_t i = 0; i < mHeapMem.getCnt(); i ++) + regFlags[i] = (mQueueHeapBuffers ? 1 : 0); + return NO_ERROR; +} + +/*=========================================================================== + * FUNCTION : getFd + * + * DESCRIPTION: return file descriptor of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : file descriptor + *==========================================================================*/ +int QCamera3StreamMem::getFd(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index < mMaxHeapBuffers) + return mHeapMem.getFd(index); + else + return mGrallocMem.getFd(index); +} + +/*=========================================================================== + * FUNCTION : getSize + * + * DESCRIPTION: return buffer size of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : buffer size + *==========================================================================*/ +ssize_t QCamera3StreamMem::getSize(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index < mMaxHeapBuffers) + return mHeapMem.getSize(index); + else + return mGrallocMem.getSize(index); +} + +/*=========================================================================== + * FUNCTION : invalidateCache + * + * DESCRIPTION: invalidate the cache of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3StreamMem::invalidateCache(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index < mMaxHeapBuffers) + return mHeapMem.invalidateCache(index); + else + return mGrallocMem.invalidateCache(index); +} + +/*=========================================================================== + * FUNCTION : cleanInvalidateCache + * + * DESCRIPTION: 
clean and invalidate the cache of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3StreamMem::cleanInvalidateCache(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index < mMaxHeapBuffers) + return mHeapMem.cleanInvalidateCache(index); + else + return mGrallocMem.cleanInvalidateCache(index); +} + +/*=========================================================================== + * FUNCTION : getBufDef + * + * DESCRIPTION: query detailed buffer information + * + * PARAMETERS : + * @offset : [input] frame buffer offset + * @bufDef : [output] reference to struct to store buffer definition + * @index : [input] index of the buffer + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3StreamMem::getBufDef(const cam_frame_len_offset_t &offset, + mm_camera_buf_def_t &bufDef, uint32_t index) +{ + int32_t ret = NO_ERROR; + + if (index < mMaxHeapBuffers) + ret = mHeapMem.getBufDef(offset, bufDef, index); + else + ret = mGrallocMem.getBufDef(offset, bufDef, index); + + bufDef.mem_info = (void *)this; + + return ret; +} + +/*=========================================================================== + * FUNCTION : getPtr + * + * DESCRIPTION: return virtual address of the indexed buffer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : virtual address + *==========================================================================*/ +void* QCamera3StreamMem::getPtr(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index < mMaxHeapBuffers) + return mHeapMem.getPtr(index); + else + return mGrallocMem.getPtr(index); +} + +/*=========================================================================== + * FUNCTION : 
valid + * + * DESCRIPTION: return whether there is a valid buffer at the current index + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : true if there is a buffer, false otherwise + *==========================================================================*/ +bool QCamera3StreamMem::valid(uint32_t index) +{ + Mutex::Autolock lock(mLock); + + if (index < mMaxHeapBuffers) + return (mHeapMem.getSize(index) > 0); + else + return (mGrallocMem.getSize(index) > 0); +} + +/*=========================================================================== + * FUNCTION : registerBuffer + * + * DESCRIPTION: registers frameworks-allocated gralloc buffer_handle_t + * + * PARAMETERS : + * @buffers : buffer_handle_t pointer + * @type : cam_stream_type_t + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3StreamMem::registerBuffer(buffer_handle_t *buffer, + cam_stream_type_t type) +{ + Mutex::Autolock lock(mLock); + return mGrallocMem.registerBuffer(buffer, type); +} + + +/*=========================================================================== + * FUNCTION : unregisterBuffer + * + * DESCRIPTION: unregister buffer + * + * PARAMETERS : + * @idx : unregister buffer at index 'idx' + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3StreamMem::unregisterBuffer(size_t idx) +{ + Mutex::Autolock lock(mLock); + return mGrallocMem.unregisterBuffer(idx); +} + +/*=========================================================================== + * FUNCTION : getMatchBufIndex + * + * DESCRIPTION: query buffer index by object ptr + * + * PARAMETERS : + * @opaque : opaque ptr + * + * RETURN : buffer index if match found, + * -1 if failed + *==========================================================================*/ 
+int QCamera3StreamMem::getMatchBufIndex(void *object) +{ + Mutex::Autolock lock(mLock); + return mGrallocMem.getMatchBufIndex(object); +} + +/*=========================================================================== + * FUNCTION : getBufferHandle + * + * DESCRIPTION: return framework pointer + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : buffer ptr if match found + NULL if failed + *==========================================================================*/ +void *QCamera3StreamMem::getBufferHandle(uint32_t index) +{ + Mutex::Autolock lock(mLock); + return mGrallocMem.getBufferHandle(index); +} + +/*=========================================================================== + * FUNCTION : unregisterBuffers + * + * DESCRIPTION: unregister buffers + * + * PARAMETERS : none + * + * RETURN : none + *==========================================================================*/ +void QCamera3StreamMem::unregisterBuffers() +{ + Mutex::Autolock lock(mLock); + mGrallocMem.unregisterBuffers(); +} + + +/*=========================================================================== + * FUNCTION : allocate + * + * DESCRIPTION: allocate requested number of buffers of certain size + * + * PARAMETERS : + * @count : number of buffers to be allocated + * @size : lenght of the buffer to be allocated + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int QCamera3StreamMem::allocateAll(size_t size) +{ + Mutex::Autolock lock(mLock); + return mHeapMem.allocate(size); +} + +int QCamera3StreamMem::allocateOne(size_t size) +{ + Mutex::Autolock lock(mLock); + return mHeapMem.allocateOne(size); +} + +/*=========================================================================== + * FUNCTION : deallocate + * + * DESCRIPTION: deallocate heap buffers + * + * PARAMETERS : none + * + * RETURN : none + 
*==========================================================================*/ +void QCamera3StreamMem::deallocate() +{ + Mutex::Autolock lock(mLock); + mHeapMem.deallocate(); +} + +/*=========================================================================== + * FUNCTION : markFrameNumber + * + * DESCRIPTION: We use this function from the request call path to mark the + * buffers with the frame number they are intended for this info + * is used later when giving out callback & it is duty of PP to + * ensure that data for that particular frameNumber/Request is + * written to this buffer. + * PARAMETERS : + * @index : index of the buffer + * @frame# : Frame number from the framework + * + * RETURN : int32_t type of status + * NO_ERROR -- success + * none-zero failure code + *==========================================================================*/ +int32_t QCamera3StreamMem::markFrameNumber(uint32_t index, uint32_t frameNumber) +{ + Mutex::Autolock lock(mLock); + if (index < mMaxHeapBuffers) + return mHeapMem.markFrameNumber(index, frameNumber); + else + return mGrallocMem.markFrameNumber(index, frameNumber); +} + +/*=========================================================================== + * FUNCTION : getFrameNumber + * + * DESCRIPTION: We use this to fetch the frameNumber for the request with which + * this buffer was given to HAL + * + * + * PARAMETERS : + * @index : index of the buffer + * + * RETURN : int32_t frameNumber + * positive/zero -- success + * negative failure + *==========================================================================*/ +int32_t QCamera3StreamMem::getFrameNumber(uint32_t index) +{ + Mutex::Autolock lock(mLock); + if (index < mMaxHeapBuffers) + return mHeapMem.getFrameNumber(index); + else + return mGrallocMem.getFrameNumber(index); +} + +/*=========================================================================== + * FUNCTION : getGrallocBufferIndex + * + * DESCRIPTION: We use this to fetch the gralloc buffer index based on 
frameNumber + * + * PARAMETERS : + * @frameNumber : frame Number + * + * RETURN : int32_t buffer index + * positive/zero -- success + * negative failure + *==========================================================================*/ +int32_t QCamera3StreamMem::getGrallocBufferIndex(uint32_t frameNumber) +{ + Mutex::Autolock lock(mLock); + int32_t index = mGrallocMem.getBufferIndex(frameNumber); + return index; +} + +/*=========================================================================== + * FUNCTION : getHeapBufferIndex + * + * DESCRIPTION: We use this to fetch the heap buffer index based on frameNumber + * + * PARAMETERS : + * @frameNumber : frame Number + * + * RETURN : int32_t buffer index + * positive/zero -- success + * negative failure + *==========================================================================*/ +int32_t QCamera3StreamMem::getHeapBufferIndex(uint32_t frameNumber) +{ + Mutex::Autolock lock(mLock); + int32_t index = mHeapMem.getBufferIndex(frameNumber); + return index; +} + +}; //namespace qcamera diff --git a/camera/QCamera2/HAL3/QCamera3StreamMem.h b/camera/QCamera2/HAL3/QCamera3StreamMem.h new file mode 100644 index 0000000..74bab06 --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3StreamMem.h @@ -0,0 +1,97 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + */ + +#ifndef __QCAMERA3_STREAMMEM_H__ +#define __QCAMERA3_STREAMMEM_H__ + +// System dependencies +#include <utils/Mutex.h> + +// Camera dependencies +#include "QCamera3Mem.h" + +extern "C" { +#include "mm_camera_interface.h" +} + +using namespace android; + +namespace qcamera { + +class QCamera3StreamMem { +public: + QCamera3StreamMem(uint32_t maxHeapBuffer, bool queueAll = true); + virtual ~QCamera3StreamMem(); + + uint32_t getCnt(); + int getRegFlags(uint8_t *regFlags); + + // Helper function to access individual QCamera3Buffer object + int getFd(uint32_t index); + ssize_t getSize(uint32_t index); + int invalidateCache(uint32_t index); + int cleanInvalidateCache(uint32_t index); + int32_t getBufDef(const cam_frame_len_offset_t &offset, + mm_camera_buf_def_t &bufDef, uint32_t index); + void *getPtr(uint32_t index); + + bool valid(uint32_t index); + + // Gralloc buffer related functions + int registerBuffer(buffer_handle_t *buffer, cam_stream_type_t type); + int unregisterBuffer(uint32_t index); 
+ int getMatchBufIndex(void *object); + void *getBufferHandle(uint32_t index); + void unregisterBuffers(); //TODO: relace with unififed clear() function? + + // Heap buffer related functions + int allocateAll(size_t size); + int allocateOne(size_t size); + void deallocate(); //TODO: replace with unified clear() function? + + // Clear function: unregister for gralloc buffer, and deallocate for heap buffer + void clear() {unregisterBuffers(); deallocate(); } + + // Frame number getter and setter + int32_t markFrameNumber(uint32_t index, uint32_t frameNumber); + int32_t getFrameNumber(uint32_t index); + int32_t getGrallocBufferIndex(uint32_t frameNumber); + int32_t getHeapBufferIndex(uint32_t frameNumber); + +private: + //variables + QCamera3HeapMemory mHeapMem; + QCamera3GrallocMemory mGrallocMem; + uint32_t mMaxHeapBuffers; + Mutex mLock; + bool mQueueHeapBuffers; +}; + +}; +#endif // __QCAMERA3_STREAMMEM_H__ diff --git a/camera/QCamera2/HAL3/QCamera3VendorTags.cpp b/camera/QCamera2/HAL3/QCamera3VendorTags.cpp new file mode 100644 index 0000000..faf35ab --- /dev/null +++ b/camera/QCamera2/HAL3/QCamera3VendorTags.cpp @@ -0,0 +1,429 @@ +/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +* +*/ + +#define LOG_TAG "QCamera3VendorTags" + +// Camera dependencies +#include "QCamera3HWI.h" +#include "QCamera3VendorTags.h" + +extern "C" { +#include "mm_camera_dbg.h" +} + +using namespace android; + +namespace qcamera { + +enum qcamera3_ext_tags qcamera3_ext3_section_bounds[QCAMERA3_SECTIONS_END - + VENDOR_SECTION] = { + QCAMERA3_PRIVATEDATA_END, + QCAMERA3_CDS_END, + QCAMERA3_OPAQUE_RAW_END, + QCAMERA3_CROP_END, + QCAMERA3_TUNING_META_DATA_END, + QCAMERA3_TEMPORAL_DENOISE_END, + QCAMERA3_AV_TIMER_END, + QCAMERA3_SENSOR_META_DATA_END, + NEXUS_EXPERIMENTAL_2015_END, + QCAMERA3_DUALCAM_LINK_META_DATA_END, + QCAMERA3_DUALCAM_CALIB_META_DATA_END, + QCAMERA3_HAL_PRIVATEDATA_END, + QCAMERA3_JPEG_ENCODE_CROP_END +} ; + +typedef struct vendor_tag_info { + const char *tag_name; + uint8_t tag_type; +} vendor_tag_info_t; + +const char *qcamera3_ext_section_names[QCAMERA3_SECTIONS_END - + VENDOR_SECTION] = { + "org.codeaurora.qcamera3.privatedata", + "org.codeaurora.qcamera3.CDS", + "org.codeaurora.qcamera3.opaque_raw", + "org.codeaurora.qcamera3.crop", + "org.codeaurora.qcamera3.tuning_meta_data", + "org.codeaurora.qcamera3.temporal_denoise", + "org.codeaurora.qcamera3.av_timer", + 
"org.codeaurora.qcamera3.sensor_meta_data", + "com.google.nexus.experimental2015", + "org.codeaurora.qcamera3.dualcam_link_meta_data", + "org.codeaurora.qcamera3.dualcam_calib_meta_data", + "org.codeaurora.qcamera3.hal_private_data", + "org.codeaurora.qcamera3.jpeg_encode_crop" +}; + +vendor_tag_info_t qcamera3_privatedata[QCAMERA3_PRIVATEDATA_END - QCAMERA3_PRIVATEDATA_START] = { + { "privatedata_reprocess", TYPE_INT32 } +}; + +vendor_tag_info_t qcamera3_cds[QCAMERA3_CDS_END - QCAMERA3_CDS_START] = { + { "cds_mode", TYPE_INT32 }, + { "cds_info", TYPE_BYTE } +}; + +vendor_tag_info_t qcamera3_opaque_raw[QCAMERA3_OPAQUE_RAW_END - + QCAMERA3_OPAQUE_RAW_START] = { + { "opaque_raw_strides", TYPE_INT32 }, + { "opaque_raw_format", TYPE_BYTE } +}; + +vendor_tag_info_t qcamera3_crop[QCAMERA3_CROP_END- QCAMERA3_CROP_START] = { + { "count", TYPE_INT32 }, + { "data", TYPE_INT32}, + { "roimap", TYPE_INT32 } +}; + +vendor_tag_info_t qcamera3_tuning_meta_data[QCAMERA3_TUNING_META_DATA_END - + QCAMERA3_TUNING_META_DATA_START] = { + { "tuning_meta_data_blob", TYPE_INT32 } +}; + +vendor_tag_info_t qcamera3_temporal_denoise[QCAMERA3_TEMPORAL_DENOISE_END - + QCAMERA3_TEMPORAL_DENOISE_START] = { + { "enable", TYPE_BYTE }, + { "process_type", TYPE_INT32 } +}; + +vendor_tag_info qcamera3_av_timer[QCAMERA3_AV_TIMER_END - + QCAMERA3_AV_TIMER_START] = { + {"use_av_timer", TYPE_BYTE } +}; + +vendor_tag_info qcamera3_sensor_meta_data[QCAMERA3_SENSOR_META_DATA_END - + QCAMERA3_SENSOR_META_DATA_START] = { + {"dynamic_black_level_pattern", TYPE_FLOAT }, + {"is_mono_only", TYPE_BYTE } +}; + +vendor_tag_info_t nexus_experimental_2015[NEXUS_EXPERIMENTAL_2015_END - + NEXUS_EXPERIMENTAL_2015_START] = { + {"sensor.dynamicBlackLevel", TYPE_FLOAT }, + {"sensor.info.opticallyShieldedRegions", TYPE_INT32 } +}; + +vendor_tag_info_t + qcamera3_dualcam_link_meta_data[QCAMERA3_DUALCAM_LINK_META_DATA_END - + QCAMERA3_DUALCAM_LINK_META_DATA_START] = { + { "enable", TYPE_BYTE }, + { "is_main", TYPE_BYTE }, + { 
"related_camera_id", TYPE_INT32 } +}; + +vendor_tag_info_t + qcamera3_dualcam_calib_meta_data[QCAMERA3_DUALCAM_CALIB_META_DATA_END - + QCAMERA3_DUALCAM_CALIB_META_DATA_START] = { + { "dualcam_calib_meta_data_blob", TYPE_BYTE } +}; + +vendor_tag_info_t + qcamera3_hal_privatedata[QCAMERA3_HAL_PRIVATEDATA_END - + QCAMERA3_HAL_PRIVATEDATA_START] = { + { "reprocess_flags", TYPE_BYTE }, + { "reprocess_data_blob", TYPE_BYTE } +}; + +vendor_tag_info_t + qcamera3_jpep_encode_crop[QCAMERA3_JPEG_ENCODE_CROP_END - + QCAMERA3_JPEG_ENCODE_CROP_START] = { + { "enable", TYPE_BYTE }, + { "rect", TYPE_INT32 }, + { "roi", TYPE_INT32} +}; + +vendor_tag_info_t *qcamera3_tag_info[QCAMERA3_SECTIONS_END - + VENDOR_SECTION] = { + qcamera3_privatedata, + qcamera3_cds, + qcamera3_opaque_raw, + qcamera3_crop, + qcamera3_tuning_meta_data, + qcamera3_temporal_denoise, + qcamera3_av_timer, + qcamera3_sensor_meta_data, + nexus_experimental_2015, + qcamera3_dualcam_link_meta_data, + qcamera3_dualcam_calib_meta_data, + qcamera3_hal_privatedata, + qcamera3_jpep_encode_crop +}; + +uint32_t qcamera3_all_tags[] = { + // QCAMERA3_PRIVATEDATA + (uint32_t)QCAMERA3_PRIVATEDATA_REPROCESS, + + // QCAMERA3_CDS + (uint32_t)QCAMERA3_CDS_MODE, + (uint32_t)QCAMERA3_CDS_INFO, + + // QCAMERA3_OPAQUE_RAW + (uint32_t)QCAMERA3_OPAQUE_RAW_STRIDES, + (uint32_t)QCAMERA3_OPAQUE_RAW_FORMAT, + + // QCAMERA3_CROP + (uint32_t)QCAMERA3_CROP_COUNT_REPROCESS, + (uint32_t)QCAMERA3_CROP_REPROCESS, + (uint32_t)QCAMERA3_CROP_ROI_MAP_REPROCESS, + + // QCAMERA3_TUNING_META_DATA + (uint32_t)QCAMERA3_TUNING_META_DATA_BLOB, + + // QCAMERA3_TEMPORAL_DENOISE + (uint32_t)QCAMERA3_TEMPORAL_DENOISE_ENABLE, + (uint32_t)QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE, + //QCAMERA3_AVTIMER + (uint32_t)QCAMERA3_USE_AV_TIMER, + + //QCAMERA3_SENSOR_META_DATA + (uint32_t)QCAMERA3_SENSOR_DYNAMIC_BLACK_LEVEL_PATTERN, + (uint32_t)QCAMERA3_SENSOR_IS_MONO_ONLY, + + //NEXUS_EXPERIMENTAL_2015 + (uint32_t)NEXUS_EXPERIMENTAL_2015_SENSOR_DYNAMIC_BLACK_LEVEL, + 
    (uint32_t)NEXUS_EXPERIMENTAL_2015_SENSOR_INFO_OPTICALLY_SHIELDED_REGIONS,

    // QCAMERA3_DUALCAM_LINK_META_DATA
    (uint32_t)QCAMERA3_DUALCAM_LINK_ENABLE,
    (uint32_t)QCAMERA3_DUALCAM_LINK_IS_MAIN,
    (uint32_t)QCAMERA3_DUALCAM_LINK_RELATED_CAMERA_ID,

    // QCAMERA3_DUALCAM_CALIB_META_DATA
    (uint32_t)QCAMERA3_DUALCAM_CALIB_META_DATA_BLOB,

    // QCAMERA3_HAL_PRIVATEDATA
    (uint32_t)QCAMERA3_HAL_PRIVATEDATA_REPROCESS_FLAGS,
    (uint32_t)QCAMERA3_HAL_PRIVATEDATA_REPROCESS_DATA_BLOB,

    // QCAMERA3_JPEG_ENCODE_CROP
    (uint32_t)QCAMERA3_JPEG_ENCODE_CROP_ENABLE,
    (uint32_t)QCAMERA3_JPEG_ENCODE_CROP_RECT,
    (uint32_t)QCAMERA3_JPEG_ENCODE_CROP_ROI

};

// The ops table handed to us by the framework in get_vendor_tag_ops().
// Every entry point below compares its 'ops' argument against this pointer
// and rejects calls that did not come through our registration.
const vendor_tag_ops_t* QCamera3VendorTags::Ops = NULL;

/*===========================================================================
 * FUNCTION   : get_vendor_tag_ops
 *
 * DESCRIPTION: Get the metadata vendor tag function pointers
 *
 * PARAMETERS :
 *   @ops   : function pointer table to be filled by HAL
 *
 *
 * RETURN     : NONE
 *==========================================================================*/
void QCamera3VendorTags::get_vendor_tag_ops(
        vendor_tag_ops_t* ops)
{
    LOGL("E");

    // Remember the framework's table so later calls can be validated.
    Ops = ops;

    ops->get_tag_count = get_tag_count;
    ops->get_all_tags = get_all_tags;
    ops->get_section_name = get_section_name;
    ops->get_tag_name = get_tag_name;
    ops->get_tag_type = get_tag_type;
    ops->reserved[0] = NULL;

    LOGL("X");
    return;
}

/*===========================================================================
 * FUNCTION   : get_tag_count
 *
 * DESCRIPTION: Get number of vendor tags supported
 *
 * PARAMETERS :
 *   @ops   :  Vendor tag ops data structure
 *
 *
 * RETURN     : Number of vendor tags supported
 *==========================================================================*/

int QCamera3VendorTags::get_tag_count(
        const vendor_tag_ops_t * ops)
{
    // Returns 0 (not an error) when called with an unrecognized ops table.
    size_t count = 0;
    if (ops == Ops)
        count = sizeof(qcamera3_all_tags)/sizeof(qcamera3_all_tags[0]);

    LOGL("count is %d",
count); + return (int)count; +} + +/*=========================================================================== + * FUNCTION : get_all_tags + * + * DESCRIPTION: Fill array with all supported vendor tags + * + * PARAMETERS : + * @ops : Vendor tag ops data structure + * @tag_array: array of metadata tags + * + * RETURN : Success: the section name of the specific tag + * Failure: NULL + *==========================================================================*/ +void QCamera3VendorTags::get_all_tags( + const vendor_tag_ops_t * ops, + uint32_t *g_array) +{ + if (ops != Ops) + return; + + for (size_t i = 0; + i < sizeof(qcamera3_all_tags)/sizeof(qcamera3_all_tags[0]); + i++) { + g_array[i] = qcamera3_all_tags[i]; + LOGD("g_array[%d] is %d", i, g_array[i]); + } +} + +/*=========================================================================== + * FUNCTION : get_section_name + * + * DESCRIPTION: Get section name for vendor tag + * + * PARAMETERS : + * @ops : Vendor tag ops structure + * @tag : Vendor specific tag + * + * + * RETURN : Success: the section name of the specific tag + * Failure: NULL + *==========================================================================*/ + +const char* QCamera3VendorTags::get_section_name( + const vendor_tag_ops_t * ops, + uint32_t tag) +{ + LOGL("E"); + if (ops != Ops) + return NULL; + + const char *ret; + uint32_t section = tag >> 16; + + if (section < VENDOR_SECTION || section >= QCAMERA3_SECTIONS_END) + ret = NULL; + else + ret = qcamera3_ext_section_names[section - VENDOR_SECTION]; + + if (ret) + LOGL("section_name[%d] is %s", tag, ret); + LOGL("X"); + return ret; +} + +/*=========================================================================== + * FUNCTION : get_tag_name + * + * DESCRIPTION: Get name of a vendor specific tag + * + * PARAMETERS : + * @tag : Vendor specific tag + * + * + * RETURN : Success: the name of the specific tag + * Failure: NULL + 
 *==========================================================================*/
const char* QCamera3VendorTags::get_tag_name(
        const vendor_tag_ops_t * ops,
        uint32_t tag)
{
    LOGL("E");
    const char *ret;
    uint32_t section = tag >> 16;
    // Computed before validation; only read after 'section' passes the range
    // check below, so a potential underflow here is never dereferenced.
    uint32_t section_index = section - VENDOR_SECTION;
    uint32_t tag_index = tag & 0xFFFF;

    if (ops != Ops) {
        ret = NULL;
        goto done;
    }

    if (section < VENDOR_SECTION || section >= QCAMERA3_SECTIONS_END)
        ret = NULL;
    // Reject tag ids at or beyond the section's declared *_END bound.
    else if (tag >= (uint32_t)qcamera3_ext3_section_bounds[section_index])
        ret = NULL;
    else
        ret = qcamera3_tag_info[section_index][tag_index].tag_name;

    if (ret)
        // NOTE(review): %d on a uint32_t vendor tag prints a negative value
        // (bit 31 set); log-only issue, left as-is in this pass.
        LOGL("tag name for tag %d is %s", tag, ret);
    LOGL("X");

done:
    return ret;
}

/*===========================================================================
 * FUNCTION   : get_tag_type
 *
 * DESCRIPTION: Get type of a vendor specific tag
 *
 * PARAMETERS :
 *   @tag   :  Vendor specific tag
 *
 *
 * RETURN     : Success: the type of the specific tag
 *              Failure: -1
 *==========================================================================*/
int QCamera3VendorTags::get_tag_type(
        const vendor_tag_ops_t *ops,
        uint32_t tag)
{
    LOGL("E");
    int ret;
    uint32_t section = tag >> 16;
    // Same pre-validation index computation as get_tag_name(); only used
    // once 'section' is known to be in range.
    uint32_t section_index = section - VENDOR_SECTION;
    uint32_t tag_index = tag & 0xFFFF;

    if (ops != Ops) {
        ret = -1;
        goto done;
    }
    if (section < VENDOR_SECTION || section >= QCAMERA3_SECTIONS_END)
        ret = -1;
    else if (tag >= (uint32_t )qcamera3_ext3_section_bounds[section_index])
        ret = -1;
    else
        ret = qcamera3_tag_info[section_index][tag_index].tag_type;

    LOGL("tag type for tag %d is %d", tag, ret);
    LOGL("X");
done:
    return ret;
}

}; //end namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3VendorTags.h b/camera/QCamera2/HAL3/QCamera3VendorTags.h
new file mode 100644
index 0000000..ffd1904
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3VendorTags.h
@@ -0,0 +1,223 @@
+/* Copyright (c) 2014-2016, The Linux
Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*
*/

#ifndef __QCAMERA3VENDORTAGS_H__
#define __QCAMERA3VENDORTAGS_H__

// Camera dependencies
#include "camera_metadata.h"

namespace qcamera {

// Vendor tag sections; numbering starts at the framework's VENDOR_SECTION.
// Order must match the qcamera3_tag_info table in QCamera3VendorTags.cpp.
enum qcamera3_ext_section {
    QCAMERA3_PRIVATEDATA = VENDOR_SECTION,
    QCAMERA3_CDS,
    QCAMERA3_OPAQUE_RAW,
    QCAMERA3_CROP,
    QCAMERA3_TUNING_META_DATA,
    QCAMERA3_TEMPORAL_DENOISE,
    QCAMERA3_AV_TIMER,
    QCAMERA3_SENSOR_META_DATA,
    NEXUS_EXPERIMENTAL_2015,
    QCAMERA3_DUALCAM_LINK_META_DATA,
    QCAMERA3_DUALCAM_CALIB_META_DATA,
    QCAMERA3_HAL_PRIVATEDATA,
    QCAMERA3_JPEG_ENCODE_CROP,
    QCAMERA3_SECTIONS_END
};

// Section base ids: a full tag id is (section << 16) | tag_index.
// NOTE(review): VENDOR_SECTION is 0x8000, so 'section << 16' sets bit 31 of
// a signed-int enumerator — implementation-defined; confirm toolchain
// behavior if these enums are ever given a fixed underlying type.
enum qcamera3_ext_section_ranges {
    QCAMERA3_PRIVATEDATA_START = QCAMERA3_PRIVATEDATA << 16,
    QCAMERA3_CDS_START = QCAMERA3_CDS << 16,
    QCAMERA3_OPAQUE_RAW_START = QCAMERA3_OPAQUE_RAW << 16,
    QCAMERA3_CROP_START = QCAMERA3_CROP << 16,
    QCAMERA3_TUNING_META_DATA_START = QCAMERA3_TUNING_META_DATA << 16,
    QCAMERA3_TEMPORAL_DENOISE_START = QCAMERA3_TEMPORAL_DENOISE << 16,
    QCAMERA3_AV_TIMER_START = QCAMERA3_AV_TIMER << 16,
    QCAMERA3_SENSOR_META_DATA_START = QCAMERA3_SENSOR_META_DATA << 16,
    NEXUS_EXPERIMENTAL_2015_START = NEXUS_EXPERIMENTAL_2015 << 16,
    QCAMERA3_DUALCAM_LINK_META_DATA_START = QCAMERA3_DUALCAM_LINK_META_DATA << 16,
    QCAMERA3_DUALCAM_CALIB_META_DATA_START = QCAMERA3_DUALCAM_CALIB_META_DATA << 16,
    QCAMERA3_HAL_PRIVATEDATA_START = QCAMERA3_HAL_PRIVATEDATA << 16,
    QCAMERA3_JPEG_ENCODE_CROP_START = QCAMERA3_JPEG_ENCODE_CROP << 16
};

// Individual tag ids. Each section's *_END enumerator doubles as its bound
// (see qcamera3_ext3_section_bounds usage in the .cpp).
enum qcamera3_ext_tags {
    QCAMERA3_PRIVATEDATA_REPROCESS = QCAMERA3_PRIVATEDATA_START,
    QCAMERA3_PRIVATEDATA_END,
    QCAMERA3_CDS_MODE = QCAMERA3_CDS_START,
    QCAMERA3_CDS_INFO,
    QCAMERA3_CDS_END,

    //Property Name: org.codeaurora.qcamera3.opaque_raw.opaque_raw_strides
    //
    //Type: int32 * n * 3 [public]
    //
    //Description: Distance in bytes from the beginning of one row of opaque
    //raw image data to the beginning of next row.
    //
    //Details: The strides are listed as (raw_width, raw_height, stride)
    //triplets. For each supported raw size, there will be a stride associated
    //with it.
    QCAMERA3_OPAQUE_RAW_STRIDES = QCAMERA3_OPAQUE_RAW_START,

    //Property Name: org.codeaurora.qcamera3.opaque_raw.opaque_raw_format
    //
    //Type: byte(enum) [public]
    //  * LEGACY - The legacy raw format where 8, 10, or 12-bit
    //    raw data is packed into a 64-bit word.
    //  * MIPI - raw format matching the data packing described
    //    in MIPI CSI-2 specification. In memory, the data
    //    is constructed by packing sequentially received pixels
    //    into least significant parts of the words first.
    //    Within each pixel, the least significant bits are also
    //    placed towards the least significant part of the word.
    //
    //Details: Lay out of opaque raw data in memory is decided by two factors:
    //         opaque_raw_format and bit depth (implied by whiteLevel). Below
    //         list illustrates their relationship:
    //  LEGACY8:  P7(7:0) P6(7:0) P5(7:0) P4(7:0) P3(7:0) P2(7:0) P1(7:0) P0(7:0)
    //            8 pixels occupy 8 bytes, no padding needed
    //            min_stride = CEILING8(raw_width)
    //  LEGACY10: 0000 P5(9:0) P4(9:0) P3(9:0) P2(9:0) P1(9:0) P0(9:0)
    //            6 pixels occupy 8 bytes, 4 bits padding at MSB
    //            min_stride = (raw_width+5)/6 * 8
    //  LEGACY12: 0000 P4(11:0) P3(11:0) P2(11:0) P1(11:0) P0(11:0)
    //            5 pixels occupy 8 bytes, 4 bits padding at MSB
    //            min_stride = (raw_width+4)/5 * 8
    //  MIPI8:    P0(7:0)
    //            1 pixel occupy 1 byte
    //            min_stride = raw_width
    //  MIPI10:   P3(1:0) P2(1:0) P1(1:0) P0(1:0) P3(9:2) P2(9:2) P1(9:2) P0(9:2)
    //            4 pixels occupy 5 bytes
    //            min_stride = (raw_width+3)/4 * 5
    //  MIPI12:   P1(3:0) P0(3:0) P1(11:4) P0(11:4)
    //            2 pixels occupy 3 bytes
    //            min_stride = (raw_width+1)/2 * 3
    //Note that opaque_raw_stride needs to be at least the required minimum
    //stride from the table above. ISP hardware may need more generous stride
    //setting. For example, for LEGACY8, the actual stride may be
    //CEILING16(raw_width) due to bus burst length requirement.
    QCAMERA3_OPAQUE_RAW_FORMAT,
    QCAMERA3_OPAQUE_RAW_END,

    QCAMERA3_CROP_COUNT_REPROCESS = QCAMERA3_CROP_START,
    QCAMERA3_CROP_REPROCESS,
    QCAMERA3_CROP_ROI_MAP_REPROCESS,
    QCAMERA3_CROP_END,

    QCAMERA3_TUNING_META_DATA_BLOB = QCAMERA3_TUNING_META_DATA_START,
    QCAMERA3_TUNING_META_DATA_END,

    QCAMERA3_TEMPORAL_DENOISE_ENABLE = QCAMERA3_TEMPORAL_DENOISE_START,
    QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE,
    QCAMERA3_TEMPORAL_DENOISE_END,

    QCAMERA3_USE_AV_TIMER = QCAMERA3_AV_TIMER_START,
    QCAMERA3_AV_TIMER_END,

    QCAMERA3_SENSOR_DYNAMIC_BLACK_LEVEL_PATTERN = QCAMERA3_SENSOR_META_DATA_START,
    QCAMERA3_SENSOR_IS_MONO_ONLY,
    QCAMERA3_SENSOR_META_DATA_END,

    NEXUS_EXPERIMENTAL_2015_SENSOR_DYNAMIC_BLACK_LEVEL = NEXUS_EXPERIMENTAL_2015_START,
    NEXUS_EXPERIMENTAL_2015_SENSOR_INFO_OPTICALLY_SHIELDED_REGIONS,
    NEXUS_EXPERIMENTAL_2015_END,

    QCAMERA3_DUALCAM_LINK_ENABLE = QCAMERA3_DUALCAM_LINK_META_DATA_START,
    QCAMERA3_DUALCAM_LINK_IS_MAIN,
    QCAMERA3_DUALCAM_LINK_RELATED_CAMERA_ID,
    QCAMERA3_DUALCAM_LINK_META_DATA_END,

    QCAMERA3_DUALCAM_CALIB_META_DATA_BLOB = QCAMERA3_DUALCAM_CALIB_META_DATA_START,
    QCAMERA3_DUALCAM_CALIB_META_DATA_END,

    QCAMERA3_HAL_PRIVATEDATA_REPROCESS_FLAGS = QCAMERA3_HAL_PRIVATEDATA_START,
    QCAMERA3_HAL_PRIVATEDATA_REPROCESS_DATA_BLOB,
    QCAMERA3_HAL_PRIVATEDATA_END,

    /* Property Name:  org.codeaurora.qcamera3.jpeg_encode_crop.enable
       Type: byte
       Description: If JPEG crop is enable
    */
    QCAMERA3_JPEG_ENCODE_CROP_ENABLE = QCAMERA3_JPEG_ENCODE_CROP_START,
    /* Property Name:  org.codeaurora.qcamera3.jpeg_encode_crop.rect
       Type: int32[4]
       Description: Crop image into size width x height
            from [left, top] coordinate
            rect[0] = left
            rect[1] = top
            rect[2] = width
            rect[3] = height
    */
    QCAMERA3_JPEG_ENCODE_CROP_RECT,
    /* Property Name:  org.codeaurora.qcamera3.jpeg_encode_crop.roi
       Type: int32[4]
       Description: Scale the crop image into size width x height
            from [left, top] coordinate.
            roi[0] = left
            roi[1] = top
            roi[2] = width
            roi[3] = height
    */
    QCAMERA3_JPEG_ENCODE_CROP_ROI,
    QCAMERA3_JPEG_ENCODE_CROP_END
};

// QCAMERA3_OPAQUE_RAW_FORMAT
typedef enum qcamera3_ext_opaque_raw_format {
    QCAMERA3_OPAQUE_RAW_FORMAT_LEGACY,
    QCAMERA3_OPAQUE_RAW_FORMAT_MIPI
} qcamera3_ext_opaque_raw_format_t;

// Static implementation of the camera framework's vendor_tag_ops_t callbacks;
// get_vendor_tag_ops() fills the framework table and records it in 'Ops'.
class QCamera3VendorTags {

public:
    static void get_vendor_tag_ops(vendor_tag_ops_t* ops);
    static int get_tag_count(
            const vendor_tag_ops_t *ops);
    static void get_all_tags(
            const vendor_tag_ops_t *ops,
            uint32_t *tag_array);
    static const char* get_section_name(
            const vendor_tag_ops_t *ops,
            uint32_t tag);
    static const char* get_tag_name(
            const vendor_tag_ops_t *ops,
            uint32_t tag);
    static int get_tag_type(
            const vendor_tag_ops_t *ops,
            uint32_t tag);

    // Ops table registered by the framework; NULL until
    // get_vendor_tag_ops() has been called.
    static const vendor_tag_ops_t *Ops;
};

}; // namespace qcamera

#endif /* __QCAMERA3VENDORTAGS_H__ */
diff --git a/camera/QCamera2/HAL3/android/QCamera3External.h b/camera/QCamera2/HAL3/android/QCamera3External.h
new file mode 100644
index 0000000..2553eae
--- /dev/null
+++ b/camera/QCamera2/HAL3/android/QCamera3External.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*   * Redistributions of source code must retain the above copyright
+*     notice, this list of conditions and the following disclaimer.
+*   * Redistributions in binary form must reproduce the above
+*     copyright notice, this list of conditions and the following
+*     disclaimer in the documentation and/or other materials provided
+*     with the distribution.
+*   * Neither the name of The Linux Foundation nor the names of its
+*     contributors may be used to endorse or promote products derived
+*     from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#ifndef __QCAMERA3EXTERNAL_H__
#define __QCAMERA3EXTERNAL_H__

// System dependencies
#include <utils/Errors.h>

// Display dependencies
#include "QServiceUtils.h"

namespace qcamera {

// Thin namespaced wrapper over the global ::setCameraLaunchStatus() from
// QServiceUtils, letting HAL3 code report camera launch state without
// referring to the global symbol directly.
inline android::status_t setCameraLaunchStatus(uint32_t on) {
    return ::setCameraLaunchStatus(on);
}

}; // namespace qcamera

#endif /* __QCAMERA3EXTERNAL_H__ */