Diffstat (limited to 'drivers/misc')
49 files changed, 31235 insertions, 3 deletions
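The QSEECOM Kconfig entry added below notes that the driver exposes APIs for both userspace and kernel clients; hdcp.c in this same patch is one such kernel client. As a rough sketch of that in-kernel usage, assuming the qseecom_start_app()/qseecom_shutdown_app() helpers and the QSEECOM_ALIGN() macro declared in qseecom_kernel.h, and a purely hypothetical trusted app called "sampleapp" that takes and returns two 32-bit words:

#include <linux/types.h>
#include <linux/errno.h>
#include "qseecom_kernel.h"

/* hypothetical command/response layout; real TAs define their own */
struct sample_req {
	uint32_t commandid;
	uint32_t data;
} __attribute__ ((__packed__));

struct sample_rsp {
	uint32_t status;
	uint32_t commandid;
} __attribute__ ((__packed__));

static int sample_send_cmd(void)
{
	struct qseecom_handle *qhandle = NULL;
	struct sample_req *req;
	struct sample_rsp *rsp;
	int rc;

	/* load the TA and map a 4K shared buffer (hdcp.c also uses 0x1000) */
	rc = qseecom_start_app(&qhandle, "sampleapp", 0x1000);
	if (rc)
		return rc;

	/* request at the start of sbuf, response at the aligned offset */
	req = (struct sample_req *)qhandle->sbuf;
	req->commandid = 1;
	req->data = 0;
	rsp = (struct sample_rsp *)(qhandle->sbuf +
			QSEECOM_ALIGN(sizeof(*req)));

	rc = qseecom_send_command(qhandle, req, QSEECOM_ALIGN(sizeof(*req)),
				  rsp, QSEECOM_ALIGN(sizeof(*rsp)));
	if (!rc && rsp->status)
		rc = -EINVAL;

	qseecom_shutdown_app(&qhandle);
	return rc;
}

The qseecom_send_command() call shape matches its use in hdcp.c later in this patch; the start/shutdown helpers and buffer size are assumptions for illustration only.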
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 88056d1e8feb..52f75b1faec0 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -516,6 +516,30 @@ config SRAM the genalloc API. It is supposed to be used for small on-chip SRAM areas found on many SoCs. +config QSEECOM + tristate "QTI Secure Execution Communicator driver" + help + Provides a communication interface between userspace and + QTI Secure Execution Environment (QSEE) using Secure Channel + Manager (SCM) interface. It exposes APIs for both userspace and + kernel clients. + +config HDCP_QSEECOM + tristate "QTI High-Bandwidth Digital Content Protection Module" + help + This module implements HDCP 2.2 features over HDMI. It exposes APIs + for HDMI driver to communicate with QTI Secure Execution + Environment (QSEE) via the QSEECOM Driver and also calls the APIs + exposed by the HDMI driver to communicate with the Receiver. + +config PROFILER + tristate "Qualcomm Technologies, Inc. trustzone Communicator driver" + help + Provides a communication interface between userspace and + trustzone using Secure Channel Manager (SCM) interface. + It exposes APIs for userspace to get system profiling + information. + config VEXPRESS_SYSCFG bool "Versatile Express System Configuration driver" depends on VEXPRESS_CONFIG @@ -533,6 +557,15 @@ config UID_SYS_STATS Per UID based io statistics exported to /proc/uid_io Per UID based procstat control in /proc/uid_procstat +config QPNP_MISC + tristate "QPNP Misc Peripheral" + depends on SPMI || MSM_SPMI + help + Say 'y' here to include support for the QTI QPNP MISC + peripheral. The MISC peripheral holds the USB ID interrupt + and the driver provides an API to check if this interrupt + is available on the current PMIC chip. + config UID_SYS_STATS_DEBUG bool "Per-TASK statistics" depends on UID_SYS_STATS @@ -554,6 +587,7 @@ source "drivers/misc/lis3lv02d/Kconfig" source "drivers/misc/altera-stapl/Kconfig" source "drivers/misc/mei/Kconfig" source "drivers/misc/vmw_vmci/Kconfig" +source "drivers/misc/qcom/Kconfig" source "drivers/misc/mic/Kconfig" source "drivers/misc/genwqe/Kconfig" source "drivers/misc/echo/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 9a3b402921b2..b0718228d2d9 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -53,8 +53,16 @@ obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o obj-$(CONFIG_SRAM) += sram.o obj-y += mic/ obj-$(CONFIG_GENWQE) += genwqe/ +obj-$(CONFIG_QSEECOM) += qseecom.o +obj-$(CONFIG_PROFILER) += profiler.o +obj-$(CONFIG_HDCP_QSEECOM) += hdcp.o +ifdef CONFIG_COMPAT +obj-$(CONFIG_QSEECOM) += compat_qseecom.o +endif obj-$(CONFIG_ECHO) += echo/ obj-$(CONFIG_VEXPRESS_SYSCFG) += vexpress-syscfg.o obj-$(CONFIG_CXL_BASE) += cxl/ obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o +obj-y += qcom/ +obj-$(CONFIG_QPNP_MISC) += qpnp-misc.o obj-$(CONFIG_MEMORY_STATE_TIME) += memory_state_time.o diff --git a/drivers/misc/compat_qseecom.c b/drivers/misc/compat_qseecom.c new file mode 100644 index 000000000000..2e9ffc71e452 --- /dev/null +++ b/drivers/misc/compat_qseecom.c @@ -0,0 +1,923 @@ +/* Copyright (c) 2014-2015,2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#define pr_fmt(fmt) "COMPAT-QSEECOM: %s: " fmt, __func__ + +#include <linux/debugfs.h> +#include <linux/uaccess.h> +#include <linux/qseecom.h> +#include <linux/compat.h> +#include "compat_qseecom.h" + +static int compat_get_qseecom_register_listener_req( + struct compat_qseecom_register_listener_req __user *data32, + struct qseecom_register_listener_req __user *data) +{ + int err; + compat_ulong_t listener_id; + compat_long_t ifd_data_fd; + compat_uptr_t virt_sb_base; + compat_ulong_t sb_size; + + err = get_user(listener_id, &data32->listener_id); + err |= put_user(listener_id, &data->listener_id); + err |= get_user(ifd_data_fd, &data32->ifd_data_fd); + err |= put_user(ifd_data_fd, &data->ifd_data_fd); + + err |= get_user(virt_sb_base, &data32->virt_sb_base); + /* upper bits won't get set, zero them */ + err |= put_user(NULL, &data->virt_sb_base); + err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base); + + err |= get_user(sb_size, &data32->sb_size); + err |= put_user(sb_size, &data->sb_size); + return err; +} + +static int compat_get_qseecom_load_img_req( + struct compat_qseecom_load_img_req __user *data32, + struct qseecom_load_img_req __user *data) +{ + int err; + compat_ulong_t mdt_len; + compat_ulong_t img_len; + compat_long_t ifd_data_fd; + compat_ulong_t app_arch; + compat_uint_t app_id; + + err = get_user(mdt_len, &data32->mdt_len); + err |= put_user(mdt_len, &data->mdt_len); + err |= get_user(img_len, &data32->img_len); + err |= put_user(img_len, &data->img_len); + err |= get_user(ifd_data_fd, &data32->ifd_data_fd); + err |= put_user(ifd_data_fd, &data->ifd_data_fd); + err |= copy_in_user(data->img_name, data32->img_name, + MAX_APP_NAME_SIZE); + err |= get_user(app_arch, &data32->app_arch); + err |= put_user(app_arch, &data->app_arch); + err |= get_user(app_id, &data32->app_id); + err |= put_user(app_id, &data->app_id); + return err; +} + +static int compat_get_qseecom_send_cmd_req( + struct compat_qseecom_send_cmd_req __user *data32, + struct qseecom_send_cmd_req __user *data) +{ + int err; + compat_uptr_t cmd_req_buf; + compat_uint_t cmd_req_len; + compat_uptr_t resp_buf; + compat_uint_t resp_len; + + err = get_user(cmd_req_buf, &data32->cmd_req_buf); + err |= put_user(NULL, &data->cmd_req_buf); + err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf); + err |= get_user(cmd_req_len, &data32->cmd_req_len); + err |= put_user(cmd_req_len, &data->cmd_req_len); + + err |= get_user(resp_buf, &data32->resp_buf); + err |= put_user(NULL, &data->resp_buf); + err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf); + err |= get_user(resp_len, &data32->resp_len); + err |= put_user(resp_len, &data->resp_len); + return err; +} + +static int compat_get_qseecom_send_modfd_cmd_req( + struct compat_qseecom_send_modfd_cmd_req __user *data32, + struct qseecom_send_modfd_cmd_req __user *data) +{ + int err; + unsigned int i; + compat_uptr_t cmd_req_buf; + compat_uint_t cmd_req_len; + compat_uptr_t resp_buf; + compat_uint_t resp_len; + compat_long_t fd; + compat_ulong_t cmd_buf_offset; + + err = get_user(cmd_req_buf, &data32->cmd_req_buf); + err |= put_user(NULL, &data->cmd_req_buf); + err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf); + err |= get_user(cmd_req_len, &data32->cmd_req_len); + err |= 
put_user(cmd_req_len, &data->cmd_req_len); + err |= get_user(resp_buf, &data32->resp_buf); + err |= put_user(NULL, &data->resp_buf); + err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf); + err |= get_user(resp_len, &data32->resp_len); + err |= put_user(resp_len, &data->resp_len); + for (i = 0; i < MAX_ION_FD; i++) { + err |= get_user(fd, &data32->ifd_data[i].fd); + err |= put_user(fd, &data->ifd_data[i].fd); + err |= get_user(cmd_buf_offset, + &data32->ifd_data[i].cmd_buf_offset); + err |= put_user(cmd_buf_offset, + &data->ifd_data[i].cmd_buf_offset); + } + return err; +} + +static int compat_get_qseecom_set_sb_mem_param_req( + struct compat_qseecom_set_sb_mem_param_req __user *data32, + struct qseecom_set_sb_mem_param_req __user *data) +{ + int err; + compat_long_t ifd_data_fd; + compat_uptr_t virt_sb_base; + compat_ulong_t sb_len; + + err = get_user(ifd_data_fd, &data32->ifd_data_fd); + err |= put_user(ifd_data_fd, &data->ifd_data_fd); + err |= get_user(virt_sb_base, &data32->virt_sb_base); + err |= put_user(NULL, &data->virt_sb_base); + err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base); + err |= get_user(sb_len, &data32->sb_len); + err |= put_user(sb_len, &data->sb_len); + return err; +} + +static int compat_get_qseecom_qseos_version_req( + struct compat_qseecom_qseos_version_req __user *data32, + struct qseecom_qseos_version_req __user *data) +{ + int err; + compat_uint_t qseos_version; + + err = get_user(qseos_version, &data32->qseos_version); + err |= put_user(qseos_version, &data->qseos_version); + return err; +} + +static int compat_get_qseecom_qseos_app_load_query( + struct compat_qseecom_qseos_app_load_query __user *data32, + struct qseecom_qseos_app_load_query __user *data) +{ + int err = 0; + unsigned int i; + compat_uint_t app_id; + char app_name; + compat_ulong_t app_arch; + + for (i = 0; i < MAX_APP_NAME_SIZE; i++) { + err |= get_user(app_name, &(data32->app_name[i])); + err |= put_user(app_name, &(data->app_name[i])); + } + err |= get_user(app_id, &data32->app_id); + err |= put_user(app_id, &data->app_id); + err |= get_user(app_arch, &data32->app_arch); + err |= put_user(app_arch, &data->app_arch); + return err; +} + +static int compat_get_qseecom_send_svc_cmd_req( + struct compat_qseecom_send_svc_cmd_req __user *data32, + struct qseecom_send_svc_cmd_req __user *data) +{ + int err; + compat_ulong_t cmd_id; + compat_uptr_t cmd_req_buf; + compat_uint_t cmd_req_len; + compat_uptr_t resp_buf; + compat_uint_t resp_len; + + err = get_user(cmd_id, &data32->cmd_id); + err |= put_user(cmd_id, &data->cmd_id); + err |= get_user(cmd_req_buf, &data32->cmd_req_buf); + err |= put_user(NULL, &data->cmd_req_buf); + err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf); + err |= get_user(cmd_req_len, &data32->cmd_req_len); + err |= put_user(cmd_req_len, &data->cmd_req_len); + err |= get_user(resp_buf, &data32->resp_buf); + err |= put_user(NULL, &data->resp_buf); + err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf); + err |= get_user(resp_len, &data32->resp_len); + err |= put_user(resp_len, &data->resp_len); + return err; +} + +static int compat_get_qseecom_create_key_req( + struct compat_qseecom_create_key_req __user *data32, + struct qseecom_create_key_req __user *data) +{ + int err; + compat_uint_t usage; + + err = copy_in_user(data->hash32, data32->hash32, QSEECOM_HASH_SIZE); + err |= get_user(usage, &data32->usage); + err |= put_user(usage, &data->usage); + + return err; +} + +static int compat_get_qseecom_wipe_key_req( + struct 
compat_qseecom_wipe_key_req __user *data32, + struct qseecom_wipe_key_req __user *data) +{ + int err; + compat_uint_t usage; + compat_int_t wipe_key_flag; + + err = get_user(usage, &data32->usage); + err |= put_user(usage, &data->usage); + err |= get_user(wipe_key_flag, &data32->wipe_key_flag); + err |= put_user(wipe_key_flag, &data->wipe_key_flag); + + return err; +} + +static int compat_get_qseecom_update_key_userinfo_req( + struct compat_qseecom_update_key_userinfo_req __user *data32, + struct qseecom_update_key_userinfo_req __user *data) +{ + int err = 0; + compat_uint_t usage; + + err = copy_in_user(data->current_hash32, data32->current_hash32, + QSEECOM_HASH_SIZE); + err |= copy_in_user(data->new_hash32, data32->new_hash32, + QSEECOM_HASH_SIZE); + err |= get_user(usage, &data32->usage); + err |= put_user(usage, &data->usage); + + return err; +} + +static int compat_get_qseecom_save_partition_hash_req( + struct compat_qseecom_save_partition_hash_req __user *data32, + struct qseecom_save_partition_hash_req __user *data) +{ + int err; + compat_int_t partition_id; + + err = get_user(partition_id, &data32->partition_id); + err |= put_user(partition_id, &data->partition_id); + err |= copy_in_user(data->digest, data32->digest, + SHA256_DIGEST_LENGTH); + return err; +} + +static int compat_get_qseecom_is_es_activated_req( + struct compat_qseecom_is_es_activated_req __user *data32, + struct qseecom_is_es_activated_req __user *data) +{ + compat_int_t is_activated; + int err; + + err = get_user(is_activated, &data32->is_activated); + err |= put_user(is_activated, &data->is_activated); + return err; +} + +static int compat_get_qseecom_mdtp_cipher_dip_req( + struct compat_qseecom_mdtp_cipher_dip_req __user *data32, + struct qseecom_mdtp_cipher_dip_req __user *data) +{ + int err; + compat_int_t in_buf_size; + compat_uptr_t in_buf; + compat_int_t out_buf_size; + compat_uptr_t out_buf; + compat_int_t direction; + + err = get_user(in_buf_size, &data32->in_buf_size); + err |= put_user(in_buf_size, &data->in_buf_size); + err |= get_user(out_buf_size, &data32->out_buf_size); + err |= put_user(out_buf_size, &data->out_buf_size); + err |= get_user(direction, &data32->direction); + err |= put_user(direction, &data->direction); + err |= get_user(in_buf, &data32->in_buf); + err |= put_user(NULL, &data->in_buf); + err |= put_user(in_buf, (compat_uptr_t *)&data->in_buf); + err |= get_user(out_buf, &data32->out_buf); + err |= put_user(NULL, &data->out_buf); + err |= put_user(out_buf, (compat_uptr_t *)&data->out_buf); + + return err; +} + +static int compat_get_qseecom_send_modfd_listener_resp( + struct compat_qseecom_send_modfd_listener_resp __user *data32, + struct qseecom_send_modfd_listener_resp __user *data) +{ + int err; + unsigned int i; + compat_uptr_t resp_buf_ptr; + compat_uint_t resp_len; + compat_long_t fd; + compat_ulong_t cmd_buf_offset; + + err = get_user(resp_buf_ptr, &data32->resp_buf_ptr); + err |= put_user(NULL, &data->resp_buf_ptr); + err |= put_user(resp_buf_ptr, (compat_uptr_t *)&data->resp_buf_ptr); + err |= get_user(resp_len, &data32->resp_len); + err |= put_user(resp_len, &data->resp_len); + + for (i = 0; i < MAX_ION_FD; i++) { + err |= get_user(fd, &data32->ifd_data[i].fd); + err |= put_user(fd, &data->ifd_data[i].fd); + err |= get_user(cmd_buf_offset, + &data32->ifd_data[i].cmd_buf_offset); + err |= put_user(cmd_buf_offset, + &data->ifd_data[i].cmd_buf_offset); + } + return err; +} + + +static int compat_get_qseecom_qteec_req( + struct compat_qseecom_qteec_req __user *data32, + struct 
qseecom_qteec_req __user *data) +{ + compat_uptr_t req_ptr; + compat_ulong_t req_len; + compat_uptr_t resp_ptr; + compat_ulong_t resp_len; + int err; + + err = get_user(req_ptr, &data32->req_ptr); + err |= put_user(NULL, &data->req_ptr); + err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr); + err |= get_user(req_len, &data32->req_len); + err |= put_user(req_len, &data->req_len); + + err |= get_user(resp_ptr, &data32->resp_ptr); + err |= put_user(NULL, &data->resp_ptr); + err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr); + err |= get_user(resp_len, &data32->resp_len); + err |= put_user(resp_len, &data->resp_len); + return err; +} + +static int compat_get_qseecom_qteec_modfd_req( + struct compat_qseecom_qteec_modfd_req __user *data32, + struct qseecom_qteec_modfd_req __user *data) +{ + compat_uptr_t req_ptr; + compat_ulong_t req_len; + compat_uptr_t resp_ptr; + compat_ulong_t resp_len; + compat_long_t fd; + compat_ulong_t cmd_buf_offset; + int err, i; + + err = get_user(req_ptr, &data32->req_ptr); + err |= put_user(NULL, &data->req_ptr); + err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr); + err |= get_user(req_len, &data32->req_len); + err |= put_user(req_len, &data->req_len); + + err |= get_user(resp_ptr, &data32->resp_ptr); + err |= put_user(NULL, &data->resp_ptr); + err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr); + err |= get_user(resp_len, &data32->resp_len); + err |= put_user(resp_len, &data->resp_len); + + for (i = 0; i < MAX_ION_FD; i++) { + err |= get_user(fd, &data32->ifd_data[i].fd); + err |= put_user(fd, &data->ifd_data[i].fd); + err |= get_user(cmd_buf_offset, + &data32->ifd_data[i].cmd_buf_offset); + err |= put_user(cmd_buf_offset, + &data->ifd_data[i].cmd_buf_offset); + } + return err; +} + +static int compat_get_int(compat_int_t __user *data32, + int __user *data) +{ + compat_int_t x; + int err; + + err = get_user(x, data32); + err |= put_user(x, data); + return err; +} + +static int compat_put_qseecom_load_img_req( + struct compat_qseecom_load_img_req __user *data32, + struct qseecom_load_img_req __user *data) +{ + int err; + compat_ulong_t mdt_len; + compat_ulong_t img_len; + compat_long_t ifd_data_fd; + compat_ulong_t app_arch; + compat_int_t app_id; + + err = get_user(mdt_len, &data->mdt_len); + err |= put_user(mdt_len, &data32->mdt_len); + err |= get_user(img_len, &data->img_len); + err |= put_user(img_len, &data32->img_len); + err |= get_user(ifd_data_fd, &data->ifd_data_fd); + err |= put_user(ifd_data_fd, &data32->ifd_data_fd); + err |= copy_in_user(data32->img_name, data->img_name, + MAX_APP_NAME_SIZE); + err |= get_user(app_arch, &data->app_arch); + err |= put_user(app_arch, &data32->app_arch); + err |= get_user(app_id, &data->app_id); + err |= put_user(app_id, &data32->app_id); + return err; +} + +static int compat_put_qseecom_qseos_version_req( + struct compat_qseecom_qseos_version_req __user *data32, + struct qseecom_qseos_version_req __user *data) +{ + compat_uint_t qseos_version; + int err; + + err = get_user(qseos_version, &data->qseos_version); + err |= put_user(qseos_version, &data32->qseos_version); + return err; +} + +static int compat_put_qseecom_qseos_app_load_query( + struct compat_qseecom_qseos_app_load_query __user *data32, + struct qseecom_qseos_app_load_query __user *data) +{ + int err = 0; + unsigned int i; + compat_int_t app_id; + compat_ulong_t app_arch; + char app_name; + + for (i = 0; i < MAX_APP_NAME_SIZE; i++) { + err |= get_user(app_name, &(data->app_name[i])); + err |= put_user(app_name, 
&(data32->app_name[i])); + } + err |= get_user(app_id, &data->app_id); + err |= put_user(app_id, &data32->app_id); + err |= get_user(app_arch, &data->app_arch); + err |= put_user(app_arch, &data32->app_arch); + + return err; +} + +static int compat_put_qseecom_is_es_activated_req( + struct compat_qseecom_is_es_activated_req __user *data32, + struct qseecom_is_es_activated_req __user *data) +{ + compat_int_t is_activated; + int err; + + err = get_user(is_activated, &data->is_activated); + err |= put_user(is_activated, &data32->is_activated); + return err; +} + +static unsigned int convert_cmd(unsigned int cmd) +{ + switch (cmd) { + case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: + return QSEECOM_IOCTL_REGISTER_LISTENER_REQ; + case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: + return QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ; + case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: + return QSEECOM_IOCTL_LOAD_APP_REQ; + case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ: + return QSEECOM_IOCTL_RECEIVE_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ: + return QSEECOM_IOCTL_SEND_RESP_REQ; + case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ: + return QSEECOM_IOCTL_UNLOAD_APP_REQ; + case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ: + return QSEECOM_IOCTL_PERF_ENABLE_REQ; + case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ: + return QSEECOM_IOCTL_PERF_DISABLE_REQ; + case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: + return QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ; + case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: + return QSEECOM_IOCTL_SET_BUS_SCALING_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: + return QSEECOM_IOCTL_SEND_CMD_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: + return QSEECOM_IOCTL_SEND_MODFD_CMD_REQ; + case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: + return QSEECOM_IOCTL_SET_MEM_PARAM_REQ; + case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: + return QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ; + case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: + return QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ; + case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: + return QSEECOM_IOCTL_APP_LOADED_QUERY_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: + return QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ; + case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: + return QSEECOM_IOCTL_CREATE_KEY_REQ; + case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: + return QSEECOM_IOCTL_WIPE_KEY_REQ; + case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: + return QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ; + case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: + return QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ; + case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: + return QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP: + return QSEECOM_IOCTL_SEND_MODFD_RESP; + case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: + return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ; + case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: + return QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ; + case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: + return QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ; + case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: + return QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ; + case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: + return QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: + return QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ; + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: + return QSEECOM_IOCTL_SEND_MODFD_RESP_64; + + default: + return cmd; + } +} + +long compat_qseecom_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + long ret; + 
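/*
+ * Dispatch: each 32-bit request layout is repacked into its native
+ * 64-bit counterpart in a compat_alloc_user_space() area (pointer
+ * fields widened from compat_uptr_t), then passed on to the native
+ * qseecom_ioctl() under the command code returned by convert_cmd().
+ */
+ 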
switch (cmd) { + + case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: + case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ: + case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ: + case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ: + case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ: + case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ: + case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: { + return qseecom_ioctl(file, convert_cmd(cmd), 0); + } + break; + case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: { + struct compat_qseecom_register_listener_req __user *data32; + struct qseecom_register_listener_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_register_listener_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: { + struct compat_qseecom_load_img_req __user *data32; + struct qseecom_load_img_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_load_img_req(data32, data); + if (err) + return err; + + ret = qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + err = compat_put_qseecom_load_img_req(data32, data); + return ret ? ret : err; + } + break; + case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: { + struct compat_qseecom_send_cmd_req __user *data32; + struct qseecom_send_cmd_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_send_cmd_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: { + struct compat_qseecom_send_modfd_cmd_req __user *data32; + struct qseecom_send_modfd_cmd_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_send_modfd_cmd_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: { + struct compat_qseecom_set_sb_mem_param_req __user *data32; + struct qseecom_set_sb_mem_param_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_set_sb_mem_param_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: { + struct compat_qseecom_qseos_version_req __user *data32; + struct qseecom_qseos_version_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_qseos_version_req(data32, data); + if (err) + return err; + + ret = qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + err = compat_put_qseecom_qseos_version_req(data32, data); + + return ret ? 
ret : err; + } + break; + case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: { + compat_int_t __user *data32; + int __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + err = compat_get_int(data32, data); + if (err) + return err; + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: { + struct compat_qseecom_load_img_req __user *data32; + struct qseecom_load_img_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_load_img_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: { + struct compat_qseecom_qseos_app_load_query __user *data32; + struct qseecom_qseos_app_load_query __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_qseos_app_load_query(data32, data); + if (err) + return err; + + ret = qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + err = compat_put_qseecom_qseos_app_load_query(data32, data); + return ret ? ret : err; + } + break; + case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: { + struct compat_qseecom_send_svc_cmd_req __user *data32; + struct qseecom_send_svc_cmd_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_send_svc_cmd_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: { + struct compat_qseecom_create_key_req __user *data32; + struct qseecom_create_key_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_create_key_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: { + struct compat_qseecom_wipe_key_req __user *data32; + struct qseecom_wipe_key_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_wipe_key_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: { + struct compat_qseecom_update_key_userinfo_req __user *data32; + struct qseecom_update_key_userinfo_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_update_key_userinfo_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: { + struct compat_qseecom_save_partition_hash_req __user *data32; + struct qseecom_save_partition_hash_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = 
compat_get_qseecom_save_partition_hash_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: { + struct compat_qseecom_is_es_activated_req __user *data32; + struct qseecom_is_es_activated_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_is_es_activated_req(data32, data); + if (err) + return err; + + ret = qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + err = compat_put_qseecom_is_es_activated_req(data32, data); + return ret ? ret : err; + } + break; + case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: { + struct compat_qseecom_mdtp_cipher_dip_req __user *data32; + struct qseecom_mdtp_cipher_dip_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_mdtp_cipher_dip_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP: + case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: { + struct compat_qseecom_send_modfd_listener_resp __user *data32; + struct qseecom_send_modfd_listener_resp __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_send_modfd_listener_resp(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: { + struct compat_qseecom_qteec_req __user *data32; + struct qseecom_qteec_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_qteec_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: + case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: + case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: { + struct compat_qseecom_qteec_modfd_req __user *data32; + struct qseecom_qteec_modfd_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + + err = compat_get_qseecom_qteec_modfd_req(data32, data); + if (err) + return err; + + return qseecom_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + } + break; + default: + return -ENOIOCTLCMD; + break; + } + return 0; +} + diff --git a/drivers/misc/compat_qseecom.h b/drivers/misc/compat_qseecom.h new file mode 100644 index 000000000000..c934ef87e20a --- /dev/null +++ b/drivers/misc/compat_qseecom.h @@ -0,0 +1,334 @@ +#ifndef _UAPI_COMPAT_QSEECOM_H_ +#define _UAPI_COMPAT_QSEECOM_H_ + +#include <linux/types.h> +#include <linux/ioctl.h> + +#if IS_ENABLED(CONFIG_COMPAT) +#include <linux/compat.h> + +/* + * struct compat_qseecom_register_listener_req - + * for register listener ioctl request + * @listener_id - service id (shared between userspace and QSE) + * @ifd_data_fd - ion handle + * @virt_sb_base - shared buffer base in user space + * @sb_size - shared buffer size + */ +struct compat_qseecom_register_listener_req { + compat_ulong_t listener_id; /* in */ + compat_long_t 
ifd_data_fd; /* in */ + compat_uptr_t virt_sb_base; /* in */ + compat_ulong_t sb_size; /* in */ +}; + +/* + * struct compat_qseecom_send_cmd_req - for send command ioctl request + * @cmd_req_len - command buffer length + * @cmd_req_buf - command buffer + * @resp_len - response buffer length + * @resp_buf - response buffer + */ +struct compat_qseecom_send_cmd_req { + compat_uptr_t cmd_req_buf; /* in */ + compat_uint_t cmd_req_len; /* in */ + compat_uptr_t resp_buf; /* in/out */ + compat_uint_t resp_len; /* in/out */ +}; + +/* + * struct qseecom_ion_fd_info - ion fd handle data information + * @fd - ion handle to some memory allocated in user space + * @cmd_buf_offset - command buffer offset + */ +struct compat_qseecom_ion_fd_info { + compat_long_t fd; + compat_ulong_t cmd_buf_offset; +}; +/* + * struct qseecom_send_modfd_cmd_req - for send command ioctl request + * @cmd_req_len - command buffer length + * @cmd_req_buf - command buffer + * @resp_len - response buffer length + * @resp_buf - response buffer + * @ifd_data_fd - ion handle to memory allocated in user space + * @cmd_buf_offset - command buffer offset + */ +struct compat_qseecom_send_modfd_cmd_req { + compat_uptr_t cmd_req_buf; /* in */ + compat_uint_t cmd_req_len; /* in */ + compat_uptr_t resp_buf; /* in/out */ + compat_uint_t resp_len; /* in/out */ + struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; +}; + +/* + * struct compat_qseecom_listener_send_resp_req + * signal to continue the send_cmd req. + * Used as a trigger from HLOS service to notify QSEECOM that it's done with its + * operation and provide the response for QSEECOM can continue the incomplete + * command execution + * @resp_len - Length of the response + * @resp_buf - Response buffer where the response of the cmd should go. + */ +struct compat_qseecom_send_resp_req { + compat_uptr_t resp_buf; /* in */ + compat_uint_t resp_len; /* in */ +}; + +/* + * struct compat_qseecom_load_img_data + * for sending image length information and + * ion file descriptor to the qseecom driver. ion file descriptor is used + * for retrieving the ion file handle and in turn the physical address of + * the image location. + * @mdt_len - Length of the .mdt file in bytes. + * @img_len - Length of the .mdt + .b00 +..+.bxx images files in bytes + * @ion_fd - Ion file descriptor used when allocating memory. + * @img_name - Name of the image. +*/ +struct compat_qseecom_load_img_req { + compat_ulong_t mdt_len; /* in */ + compat_ulong_t img_len; /* in */ + compat_long_t ifd_data_fd; /* in */ + char img_name[MAX_APP_NAME_SIZE]; /* in */ + compat_ulong_t app_arch; /* in */ + compat_uint_t app_id; /* out*/ +}; + +struct compat_qseecom_set_sb_mem_param_req { + compat_long_t ifd_data_fd; /* in */ + compat_uptr_t virt_sb_base; /* in */ + compat_ulong_t sb_len; /* in */ +}; + +/* + * struct compat_qseecom_qseos_version_req - get qseos version + * @qseos_version - version number + */ +struct compat_qseecom_qseos_version_req { + compat_uint_t qseos_version; /* in */ +}; + +/* + * struct compat_qseecom_qseos_app_load_query - verify if app is loaded in qsee + * @app_name[MAX_APP_NAME_SIZE]- name of the app. + * @app_id - app id. 
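+ * @app_arch - app architecture.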
+ */ +struct compat_qseecom_qseos_app_load_query { + char app_name[MAX_APP_NAME_SIZE]; /* in */ + compat_uint_t app_id; /* out */ + compat_ulong_t app_arch; +}; + +struct compat_qseecom_send_svc_cmd_req { + compat_ulong_t cmd_id; + compat_uptr_t cmd_req_buf; /* in */ + compat_uint_t cmd_req_len; /* in */ + compat_uptr_t resp_buf; /* in/out */ + compat_uint_t resp_len; /* in/out */ +}; + +struct compat_qseecom_create_key_req { + unsigned char hash32[QSEECOM_HASH_SIZE]; + enum qseecom_key_management_usage_type usage; +}; + +struct compat_qseecom_wipe_key_req { + enum qseecom_key_management_usage_type usage; + compat_int_t wipe_key_flag; +}; + +struct compat_qseecom_update_key_userinfo_req { + unsigned char current_hash32[QSEECOM_HASH_SIZE]; + unsigned char new_hash32[QSEECOM_HASH_SIZE]; + enum qseecom_key_management_usage_type usage; +}; + +/* + * struct compat_qseecom_save_partition_hash_req + * @partition_id - partition id. + * @hash[SHA256_DIGEST_LENGTH] - sha256 digest. + */ +struct compat_qseecom_save_partition_hash_req { + compat_int_t partition_id; /* in */ + char digest[SHA256_DIGEST_LENGTH]; /* in */ +}; + +/* + * struct compat_qseecom_is_es_activated_req + * @is_activated - 1=true , 0=false + */ +struct compat_qseecom_is_es_activated_req { + compat_int_t is_activated; /* out */ +}; + +/* + * struct compat_qseecom_mdtp_cipher_dip_req + * @in_buf - input buffer + * @in_buf_size - input buffer size + * @out_buf - output buffer + * @out_buf_size - output buffer size + * @direction - 0=encrypt, 1=decrypt + */ +struct compat_qseecom_mdtp_cipher_dip_req { + compat_uptr_t in_buf; + compat_uint_t in_buf_size; + compat_uptr_t out_buf; + compat_uint_t out_buf_size; + compat_uint_t direction; +}; + +/* + * struct qseecom_send_modfd_resp - for send command ioctl request + * @req_len - command buffer length + * @req_buf - command buffer + * @ifd_data_fd - ion handle to memory allocated in user space + * @cmd_buf_offset - command buffer offset + */ +struct compat_qseecom_send_modfd_listener_resp { + compat_uptr_t resp_buf_ptr; /* in */ + compat_uint_t resp_len; /* in */ + struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */ +}; + +struct compat_qseecom_qteec_req { + compat_uptr_t req_ptr; + compat_ulong_t req_len; + compat_uptr_t resp_ptr; + compat_ulong_t resp_len; +}; + +struct compat_qseecom_qteec_modfd_req { + compat_uptr_t req_ptr; + compat_ulong_t req_len; + compat_uptr_t resp_ptr; + compat_ulong_t resp_len; + struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; +}; + +struct compat_qseecom_ce_pipe_entry { + compat_int_t valid; + compat_uint_t ce_num; + compat_uint_t ce_pipe_pair; +}; + +struct compat_qseecom_ce_info_req { + unsigned char handle[MAX_CE_INFO_HANDLE_SIZE]; + compat_uint_t usage; + compat_uint_t unit_num; + compat_uint_t num_ce_pipe_entries; + struct compat_qseecom_ce_pipe_entry + ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT]; +}; + +struct file; +extern long compat_qseecom_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); + +#define COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 1, struct compat_qseecom_register_listener_req) + +#define COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \ + _IO(QSEECOM_IOC_MAGIC, 2) + +#define COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 3, struct compat_qseecom_send_cmd_req) + +#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 4, struct compat_qseecom_send_modfd_cmd_req) + +#define COMPAT_QSEECOM_IOCTL_RECEIVE_REQ \ + _IO(QSEECOM_IOC_MAGIC, 5) + +#define 
COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ \ + _IO(QSEECOM_IOC_MAGIC, 6) + +#define COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 7, struct compat_qseecom_load_img_req) + +#define COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 8, struct compat_qseecom_set_sb_mem_param_req) + +#define COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ \ + _IO(QSEECOM_IOC_MAGIC, 9) + +#define COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 10, struct compat_qseecom_qseos_version_req) + +#define COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ \ + _IO(QSEECOM_IOC_MAGIC, 11) + +#define COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ \ + _IO(QSEECOM_IOC_MAGIC, 12) + +#define COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 13, struct compat_qseecom_load_img_req) + +#define COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \ + _IO(QSEECOM_IOC_MAGIC, 14) + +#define COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 15, struct compat_qseecom_qseos_app_load_query) + +#define COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 16, struct compat_qseecom_send_svc_cmd_req) + +#define COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 17, struct compat_qseecom_create_key_req) + +#define COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 18, struct compat_qseecom_wipe_key_req) + +#define COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 19, \ + struct compat_qseecom_save_partition_hash_req) + +#define COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 20, struct compat_qseecom_is_es_activated_req) + +#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP \ + _IOWR(QSEECOM_IOC_MAGIC, 21, \ + struct compat_qseecom_send_modfd_listener_resp) + +#define COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 23, int) + +#define COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 24, \ + struct compat_qseecom_update_key_userinfo_req) + +#define COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 30, struct compat_qseecom_qteec_modfd_req) + +#define COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 31, struct compat_qseecom_qteec_req) + +#define COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 32, struct compat_qseecom_qteec_modfd_req) + +#define COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 33, struct compat_qseecom_qteec_modfd_req) + +#define COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 34, struct qseecom_mdtp_cipher_dip_req) + +#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \ + _IOWR(QSEECOM_IOC_MAGIC, 35, struct compat_qseecom_send_modfd_cmd_req) + +#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64 \ + _IOWR(QSEECOM_IOC_MAGIC, 36, \ + struct compat_qseecom_send_modfd_listener_resp) +#define COMPAT_QSEECOM_IOCTL_GET_CE_PIPE_INFO \ + _IOWR(QSEECOM_IOC_MAGIC, 40, \ + struct compat_qseecom_ce_info_req) +#define COMPAT_QSEECOM_IOCTL_FREE_CE_PIPE_INFO \ + _IOWR(QSEECOM_IOC_MAGIC, 41, \ + struct compat_qseecom_ce_info_req) +#define COMPAT_QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \ + _IOWR(QSEECOM_IOC_MAGIC, 42, \ + struct compat_qseecom_ce_info_req) + +#endif +#endif /* _UAPI_COMPAT_QSEECOM_H_ */ + diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c new file mode 100644 index 000000000000..cc11ede6a456 --- /dev/null +++ b/drivers/misc/hdcp.c @@ -0,0 +1,3061 @@ +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/platform_device.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/uaccess.h> +#include <linux/cdev.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/io.h> +#include <linux/ion.h> +#include <linux/types.h> +#include <linux/device.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/completion.h> +#include <linux/errno.h> +#include <linux/hdcp_qseecom.h> +#include <linux/kthread.h> +#include <linux/of.h> +#include <video/msm_hdmi_hdcp_mgr.h> + +#include "qseecom_kernel.h" + +#define SRMAPP_NAME "hdcpsrm" +#define TZAPP_NAME "hdcp2p2" +#define HDCP1_APP_NAME "hdcp1" +#define QSEECOM_SBUFF_SIZE 0x1000 + +#define MAX_TX_MESSAGE_SIZE 129 +#define MAX_RX_MESSAGE_SIZE 534 +#define MAX_TOPOLOGY_ELEMS 32 +#define HDCP1_AKSV_SIZE 8 + +/* parameters related to LC_Init message */ +#define MESSAGE_ID_SIZE 1 +#define LC_INIT_MESSAGE_SIZE (MESSAGE_ID_SIZE+BITS_64_IN_BYTES) + +/* parameters related to SKE_Send_EKS message */ +#define SKE_SEND_EKS_MESSAGE_SIZE \ + (MESSAGE_ID_SIZE+BITS_128_IN_BYTES+BITS_64_IN_BYTES) + +#define HDCP2_0_REPEATER_DOWNSTREAM BIT(1) +#define HDCP1_DEVICE_DOWNSTREAM BIT(0) + +/* all message IDs */ +#define INVALID_MESSAGE_ID 0 +#define AKE_INIT_MESSAGE_ID 2 +#define AKE_SEND_CERT_MESSAGE_ID 3 +#define AKE_NO_STORED_KM_MESSAGE_ID 4 +#define AKE_STORED_KM_MESSAGE_ID 5 +#define AKE_SEND_H_PRIME_MESSAGE_ID 7 +#define AKE_SEND_PAIRING_INFO_MESSAGE_ID 8 +#define LC_INIT_MESSAGE_ID 9 +#define LC_SEND_L_PRIME_MESSAGE_ID 10 +#define SKE_SEND_EKS_MESSAGE_ID 11 +#define REP_SEND_RECV_ID_LIST_ID 12 +#define REP_SEND_ACK_ID 15 +#define REP_STREAM_MANAGE_ID 16 +#define REP_STREAM_READY_ID 17 +#define SKE_SEND_TYPE_ID 18 +#define HDCP2P2_MAX_MESSAGES 19 + +#define HDCP1_SET_KEY_MESSAGE_ID 202 +#define HDCP1_SET_ENC_MESSAGE_ID 205 + +#define BITS_8_IN_BYTES 1 +#define BITS_16_IN_BYTES 2 +#define BITS_24_IN_BYTES 3 +#define BITS_32_IN_BYTES 4 +#define BITS_40_IN_BYTES 5 +#define BITS_64_IN_BYTES 8 +#define BITS_128_IN_BYTES 16 +#define BITS_160_IN_BYTES 20 +#define BITS_256_IN_BYTES 32 +#define BITS_1024_IN_BYTES 128 +#define BITS_3072_IN_BYTES 384 +#define TXCAPS_SIZE 3 +#define RXCAPS_SIZE 3 +#define RXINFO_SIZE 2 +#define SEQ_NUM_V_SIZE 3 + +#define HDCP_SRM_CMD_CHECK_DEVICE_ID 2 + +#define RCVR_ID_SIZE BITS_40_IN_BYTES +#define MAX_RCVR_IDS_ALLOWED_IN_LIST 31 +#define MAX_RCVR_ID_LIST_SIZE \ + (RCVR_ID_SIZE*MAX_RCVR_IDS_ALLOWED_IN_LIST) +/* + * minimum wait as per standard is 200 ms. keep it 300 ms + * to be on safe side. 
+ */ +#define SLEEP_SET_HW_KEY_MS 220 + +/* hdcp command status */ +#define HDCP_SUCCESS 0 + +/* flags set by tz in response message */ +#define HDCP_TXMTR_SUBSTATE_INIT 0 +#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST 1 +#define HDCP_TXMTR_SUBSTATE_PROCESSED_RECIEVERID_LIST 2 +#define HDCP_TXMTR_SUBSTATE_WAITING_FOR_STREAM_READY_MESSAGE 3 +#define HDCP_TXMTR_SUBSTATE_REPEATER_AUTH_COMPLETE 4 + +#define HDCP_DEVICE_ID 0x0008000 +#define HDCP_CREATE_DEVICE_ID(x) (HDCP_DEVICE_ID | (x)) + +#define HDCP_TXMTR_HDMI HDCP_CREATE_DEVICE_ID(1) +#define HDCP_TXMTR_DP HDCP_CREATE_DEVICE_ID(2) +#define HDCP_TXMTR_SERVICE_ID 0x0001000 +#define SERVICE_CREATE_CMD(x) (HDCP_TXMTR_SERVICE_ID | x) + +#define HDCP_TXMTR_INIT SERVICE_CREATE_CMD(1) +#define HDCP_TXMTR_DEINIT SERVICE_CREATE_CMD(2) +#define HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE SERVICE_CREATE_CMD(3) +#define HDCP_TXMTR_SEND_MESSAGE_TIMEOUT SERVICE_CREATE_CMD(4) +#define HDCP_TXMTR_SET_HW_KEY SERVICE_CREATE_CMD(5) +#define HDCP_TXMTR_QUERY_STREAM_TYPE SERVICE_CREATE_CMD(6) +#define HDCP_TXMTR_GET_KSXORLC128_AND_RIV SERVICE_CREATE_CMD(7) +#define HDCP_TXMTR_PROVISION_KEY SERVICE_CREATE_CMD(8) +#define HDCP_TXMTR_GET_TOPOLOGY_INFO SERVICE_CREATE_CMD(9) +#define HDCP_TXMTR_UPDATE_SRM SERVICE_CREATE_CMD(10) +#define HDCP_LIB_INIT SERVICE_CREATE_CMD(11) +#define HDCP_LIB_DEINIT SERVICE_CREATE_CMD(12) +#define HDCP_TXMTR_DELETE_PAIRING_INFO SERVICE_CREATE_CMD(13) +#define HDCP_TXMTR_GET_VERSION SERVICE_CREATE_CMD(14) +#define HDCP_TXMTR_VERIFY_KEY SERVICE_CREATE_CMD(15) +#define HDCP_SESSION_INIT SERVICE_CREATE_CMD(16) +#define HDCP_SESSION_DEINIT SERVICE_CREATE_CMD(17) +#define HDCP_TXMTR_START_AUTHENTICATE SERVICE_CREATE_CMD(18) +#define HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST SERVICE_CREATE_CMD(19) + +#define HCDP_TXMTR_GET_MAJOR_VERSION(v) (((v) >> 16) & 0xFF) +#define HCDP_TXMTR_GET_MINOR_VERSION(v) (((v) >> 8) & 0xFF) +#define HCDP_TXMTR_GET_PATCH_VERSION(v) ((v) & 0xFF) + +#define HDCP_CLIENT_MAJOR_VERSION 2 +#define HDCP_CLIENT_MINOR_VERSION 1 +#define HDCP_CLIENT_PATCH_VERSION 0 +#define HDCP_CLIENT_MAKE_VERSION(maj, min, patch) \ + ((((maj) & 0xFF) << 16) | (((min) & 0xFF) << 8) | ((patch) & 0xFF)) + +#define REAUTH_REQ BIT(3) +#define LINK_INTEGRITY_FAILURE BIT(4) + +#define HDCP_LIB_EXECUTE(x) {\ + if (handle->tethered)\ + hdcp_lib_##x(handle);\ + else\ + queue_kthread_work(&handle->worker, &handle->wk_##x);\ +} + +static const struct hdcp_msg_data hdcp_msg_lookup[HDCP2P2_MAX_MESSAGES] = { + [AKE_INIT_MESSAGE_ID] = { 2, + { {"rtx", 0x69000, 8}, {"TxCaps", 0x69008, 3} }, + 0 }, + [AKE_SEND_CERT_MESSAGE_ID] = { 3, + { {"cert-rx", 0x6900B, 522}, {"rrx", 0x69215, 8}, + {"RxCaps", 0x6921D, 3} }, + 0 }, + [AKE_NO_STORED_KM_MESSAGE_ID] = { 1, + { {"Ekpub_km", 0x69220, 128} }, + 0 }, + [AKE_STORED_KM_MESSAGE_ID] = { 2, + { {"Ekh_km", 0x692A0, 16}, {"m", 0x692B0, 16} }, + 0 }, + [AKE_SEND_H_PRIME_MESSAGE_ID] = { 1, + { {"H'", 0x692C0, 32} }, + (1 << 1) }, + [AKE_SEND_PAIRING_INFO_MESSAGE_ID] = { 1, + { {"Ekh_km", 0x692E0, 16} }, + (1 << 2) }, + [LC_INIT_MESSAGE_ID] = { 1, + { {"rn", 0x692F0, 8} }, + 0 }, + [LC_SEND_L_PRIME_MESSAGE_ID] = { 1, + { {"L'", 0x692F8, 32} }, + 0 }, + [SKE_SEND_EKS_MESSAGE_ID] = { 2, + { {"Edkey_ks", 0x69318, 16}, {"riv", 0x69328, 8} }, + 0 }, + [SKE_SEND_TYPE_ID] = { 1, + { {"type", 0x69494, 1} }, + 0 }, + [REP_SEND_RECV_ID_LIST_ID] = { 4, + { {"RxInfo", 0x69330, 2}, {"seq_num_V", 0x69332, 3}, + {"V'", 0x69335, 16}, {"ridlist", 0x69345, 155} }, + (1 << 0) }, + [REP_SEND_ACK_ID] = { 1, + { {"V", 0x693E0, 16} }, + 0 }, 
+ [REP_STREAM_MANAGE_ID] = { 3, + { {"seq_num_M", 0x693F0, 3}, {"k", 0x693F3, 2}, + {"streamID_Type", 0x693F5, 126} }, + 0 }, + [REP_STREAM_READY_ID] = { 1, + { {"M'", 0x69473, 32} }, + 0 } +}; + +enum hdcp_state { + HDCP_STATE_INIT = 0x00, + HDCP_STATE_APP_LOADED = 0x01, + HDCP_STATE_SESSION_INIT = 0x02, + HDCP_STATE_TXMTR_INIT = 0x04, + HDCP_STATE_AUTHENTICATED = 0x08, + HDCP_STATE_ERROR = 0x10 +}; + +enum hdcp_element { + HDCP_TYPE_UNKNOWN, + HDCP_TYPE_RECEIVER, + HDCP_TYPE_REPEATER, +}; + +enum hdcp_version { + HDCP_VERSION_UNKNOWN, + HDCP_VERSION_2_2, + HDCP_VERSION_1_4 +}; + +struct receiver_info { + unsigned char rcvrInfo[RCVR_ID_SIZE]; + enum hdcp_element elem_type; + enum hdcp_version hdcp_version; +}; + +struct topology_info { + unsigned int nNumRcvrs; + struct receiver_info rcvinfo[MAX_TOPOLOGY_ELEMS]; +}; + +struct __attribute__ ((__packed__)) hdcp1_key_set_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp1_key_set_rsp { + uint32_t commandid; + uint32_t ret; + uint8_t ksv[HDCP1_AKSV_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_version_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_version_rsp { + uint32_t commandid; + uint32_t commandId; + uint32_t appversion; +}; + +struct __attribute__ ((__packed__)) hdcp_verify_key_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_verify_key_rsp { + uint32_t status; + uint32_t commandId; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_init_req_v1 { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_init_rsp_v1 { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_init_req { + uint32_t commandid; + uint32_t clientversion; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_init_rsp { + uint32_t status; + uint32_t commandid; + uint32_t appversion; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_deinit_req { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_deinit_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_session_init_req { + uint32_t commandid; + uint32_t deviceid; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_session_init_rsp { + uint32_t status; + uint32_t commandid; + uint32_t sessionid; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_session_deinit_req { + uint32_t commandid; + uint32_t sessionid; +}; + +struct __attribute__ ((__packed__)) hdcp_lib_session_deinit_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_req_v1 { + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_rsp_v1 { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_req { + uint32_t commandid; + uint32_t sessionid; +}; + +struct __attribute__ ((__packed__)) hdcp_tx_init_rsp { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_deinit_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_deinit_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_rcvd_msg_req { + uint32_t commandid; + uint32_t ctxhandle; + uint32_t msglen; + uint8_t msg[MAX_RX_MESSAGE_SIZE]; +}; + 
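+/*
+ * These packed *_req/*_rsp pairs travel through the qseecom shared
+ * buffer: callers place the request (commandid first) at the start of
+ * sbuf and read the response back from sbuf + QSEECOM_ALIGN(sizeof(req))
+ * after qseecom_send_command().
+ */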
+struct __attribute__ ((__packed__)) hdcp_rcvd_msg_rsp { + uint32_t status; + uint32_t commandid; + uint32_t state; + uint32_t timeout; + uint32_t flag; + uint32_t msglen; + uint8_t msg[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_set_hw_key_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_set_hw_key_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_send_timeout_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_send_timeout_rsp { + uint32_t status; + uint32_t commandid; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_query_stream_type_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_query_stream_type_rsp { + uint32_t status; + uint32_t commandid; + uint32_t timeout; + uint32_t msglen; + uint8_t msg[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_set_stream_type_req { + uint32_t commandid; + uint32_t ctxhandle; + uint8_t streamtype; +}; + +struct __attribute__ ((__packed__)) hdcp_set_stream_type_rsp { + uint32_t status; + uint32_t commandid; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_update_srm_req { + uint32_t commandid; + uint32_t ctxhandle; + uint32_t srmoffset; + uint32_t srmlength; +}; + +struct __attribute__ ((__packed__)) hdcp_update_srm_rsp { + uint32_t status; + uint32_t commandid; +}; + +struct __attribute__ ((__packed__)) hdcp_srm_check_device_ids_req { + uint32_t commandid; + uint32_t num_device_ids; + uint8_t device_ids[1]; +}; + +struct __attribute__ ((__packed__)) hdcp_srm_check_device_ids_rsp { + uint32_t commandid; + int32_t retval; +}; + +struct __attribute__ ((__packed__)) hdcp_get_topology_req { + uint32_t commandid; + uint32_t ctxhandle; +}; + +struct __attribute__ ((__packed__)) hdcp_get_topology_rsp { + uint32_t status; + uint32_t commandid; + struct topology_info topologyinfo; +}; + +struct __attribute__ ((__packed__)) rxvr_info_struct { + uint8_t rcvrCert[522]; + uint8_t rrx[BITS_64_IN_BYTES]; + uint8_t rxcaps[RXCAPS_SIZE]; + bool repeater; +}; + +struct __attribute__ ((__packed__)) repeater_info_struct { + uint8_t RxInfo[RXINFO_SIZE]; + uint8_t seq_num_V[SEQ_NUM_V_SIZE]; + bool seq_num_V_Rollover_flag; + uint8_t ReceiverIDList[MAX_RCVR_ID_LIST_SIZE]; + uint32_t ReceiverIDListLen; +}; + +struct __attribute__ ((__packed__)) hdcp1_set_enc_req { + uint32_t commandid; + uint32_t enable; +}; + +struct __attribute__ ((__packed__)) hdcp1_set_enc_rsp { + uint32_t commandid; + uint32_t ret; +}; + +struct __attribute__ ((__packed__)) hdcp_start_auth_req { + uint32_t commandid; + uint32_t ctxHandle; +}; + +struct __attribute__ ((__packed__)) hdcp_start_auth_rsp { + uint32_t status; + uint32_t commandid; + uint32_t ctxhandle; + uint32_t timeout; + uint32_t msglen; + uint8_t message[MAX_TX_MESSAGE_SIZE]; +}; + +struct __attribute__ ((__packed__)) hdcp_rcv_id_list_req { + uint32_t commandid; + uint32_t ctxHandle; +}; +struct __attribute__ ((__packed__)) hdcp_rcv_id_list_rsp { + uint32_t status; + uint32_t commandid; +}; + +/* + * struct hdcp1_lib_handle - handle for hdcp1 client + * @qseecom_handle - for sending commands to hdcp1 TA + * @srm_handle - for sending commands to SRM TA + * @client_ops - handle to call APIs exposed by hdcp1 client + * @client_ctx - client context maintained 
by hdmi + */ +struct hdcp1_lib_handle { + struct qseecom_handle *qsee_handle; + struct qseecom_handle *srm_handle; + struct hdcp_client_ops *client_ops; + void *client_ctx; +}; + +/* + * struct hdcp_lib_handle - handle for hdcp client + * @qseecom_handle - for sending commands to qseecom + * @listener_buf - buffer containing message shared with the client + * @msglen - size message in the buffer + * @tz_ctxhandle - context handle shared with tz + * @hdcp_timeout - timeout in msecs shared for hdcp messages + * @client_ctx - client context maintained by hdmi + * @client_ops - handle to call APIs exposed by hdcp client + * @timeout_lock - this lock protects hdcp_timeout field + * @msg_lock - this lock protects the message buffer + */ +struct hdcp_lib_handle { + unsigned char *listener_buf; + uint32_t msglen; + uint32_t tz_ctxhandle; + uint32_t hdcp_timeout; + uint32_t timeout_left; + uint32_t wait_timeout; + bool no_stored_km_flag; + bool feature_supported; + bool authenticated; + void *client_ctx; + struct hdcp_client_ops *client_ops; + struct mutex msg_lock; + struct mutex wakeup_mutex; + enum hdcp_state hdcp_state; + enum hdcp_lib_wakeup_cmd wakeup_cmd; + bool repeater_flag; + bool non_2p2_present; + bool update_stream; + bool tethered; + struct qseecom_handle *qseecom_handle; + int last_msg_sent; + int last_msg; + char *last_msg_recvd_buf; + uint32_t last_msg_recvd_len; + atomic_t hdcp_off; + uint32_t session_id; + bool legacy_app; + enum hdcp_device_type device_type; + + struct task_struct *thread; + struct completion poll_wait; + + struct kthread_worker worker; + struct kthread_work wk_init; + struct kthread_work wk_msg_sent; + struct kthread_work wk_msg_recvd; + struct kthread_work wk_timeout; + struct kthread_work wk_clean; + struct kthread_work wk_wait; + struct kthread_work wk_stream; + + int (*hdcp_app_init)(struct hdcp_lib_handle *handle); + int (*hdcp_txmtr_init)(struct hdcp_lib_handle *handle); +}; + +struct hdcp_lib_message_map { + int msg_id; + const char *msg_name; +}; + +struct msm_hdcp_mgr { + struct platform_device *pdev; + dev_t dev_num; + struct cdev cdev; + struct class *class; + struct device *device; + struct HDCP_V2V1_MSG_TOPOLOGY cached_tp; + u32 tp_msgid; + void *client_ctx; + struct hdcp_lib_handle *handle; +}; + +#define CLASS_NAME "hdcp" +#define DRIVER_NAME "msm_hdcp" + +static struct msm_hdcp_mgr *hdcp_drv_mgr; +static struct hdcp_lib_handle *drv_client_handle; + +static void hdcp_lib_clean(struct hdcp_lib_handle *handle); +static void hdcp_lib_init(struct hdcp_lib_handle *handle); +static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle); +static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle); +static void hdcp_lib_timeout(struct hdcp_lib_handle *handle); +static void hdcp_lib_stream(struct hdcp_lib_handle *handle); +static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle); +static int hdcp_lib_txmtr_init_legacy(struct hdcp_lib_handle *handle); + +static struct qseecom_handle *hdcpsrm_handle; + +static struct hdcp1_lib_handle *hdcp1_handle; + +static bool hdcp1_supported = true; +static bool hdcp1_enc_enabled; +static struct mutex hdcp1_ta_cmd_lock; + +static const char *hdcp_lib_message_name(int msg_id) +{ + /* + * Message ID map. The first number indicates the message number + * assigned to the message by the HDCP 2.2 spec. This is also the first + * byte of every HDCP 2.2 authentication protocol message. 
+ */ + static struct hdcp_lib_message_map hdcp_lib_msg_map[] = { + {2, "AKE_INIT"}, + {3, "AKE_SEND_CERT"}, + {4, "AKE_NO_STORED_KM"}, + {5, "AKE_STORED_KM"}, + {7, "AKE_SEND_H_PRIME"}, + {8, "AKE_SEND_PAIRING_INFO"}, + {9, "LC_INIT"}, + {10, "LC_SEND_L_PRIME"}, + {11, "SKE_SEND_EKS"}, + {12, "REPEATER_AUTH_SEND_RECEIVERID_LIST"}, + {15, "REPEATER_AUTH_SEND_ACK"}, + {16, "REPEATER_AUTH_STREAM_MANAGE"}, + {17, "REPEATER_AUTH_STREAM_READY"}, + {18, "SKE_SEND_TYPE_ID"}, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(hdcp_lib_msg_map); i++) { + if (msg_id == hdcp_lib_msg_map[i].msg_id) + return hdcp_lib_msg_map[i].msg_name; + } + return "UNKNOWN"; +} + +static int hdcp_lib_get_next_message(struct hdcp_lib_handle *handle, + struct hdmi_hdcp_wakeup_data *data) +{ + switch (handle->last_msg) { + case INVALID_MESSAGE_ID: + return AKE_INIT_MESSAGE_ID; + case AKE_INIT_MESSAGE_ID: + return AKE_SEND_CERT_MESSAGE_ID; + case AKE_SEND_CERT_MESSAGE_ID: + if (handle->no_stored_km_flag) + return AKE_NO_STORED_KM_MESSAGE_ID; + else + return AKE_STORED_KM_MESSAGE_ID; + case AKE_STORED_KM_MESSAGE_ID: + case AKE_NO_STORED_KM_MESSAGE_ID: + return AKE_SEND_H_PRIME_MESSAGE_ID; + case AKE_SEND_H_PRIME_MESSAGE_ID: + if (handle->no_stored_km_flag) + return AKE_SEND_PAIRING_INFO_MESSAGE_ID; + else + return LC_INIT_MESSAGE_ID; + case AKE_SEND_PAIRING_INFO_MESSAGE_ID: + return LC_INIT_MESSAGE_ID; + case LC_INIT_MESSAGE_ID: + return LC_SEND_L_PRIME_MESSAGE_ID; + case LC_SEND_L_PRIME_MESSAGE_ID: + return SKE_SEND_EKS_MESSAGE_ID; + case SKE_SEND_EKS_MESSAGE_ID: + if (!handle->repeater_flag && + handle->device_type == HDCP_TXMTR_DP) + return SKE_SEND_TYPE_ID; + case SKE_SEND_TYPE_ID: + case REP_STREAM_READY_ID: + case REP_SEND_ACK_ID: + if (!handle->repeater_flag) + return INVALID_MESSAGE_ID; + + if (data->cmd == HDMI_HDCP_WKUP_CMD_SEND_MESSAGE) + return REP_STREAM_MANAGE_ID; + else + return REP_SEND_RECV_ID_LIST_ID; + case REP_SEND_RECV_ID_LIST_ID: + return REP_SEND_ACK_ID; + case REP_STREAM_MANAGE_ID: + return REP_STREAM_READY_ID; + default: + pr_err("Unknown message ID (%d)\n", handle->last_msg); + return -EINVAL; + } +} + +static void hdcp_lib_wait_for_response(struct hdcp_lib_handle *handle, + struct hdmi_hdcp_wakeup_data *data) +{ + switch (handle->last_msg) { + case AKE_SEND_H_PRIME_MESSAGE_ID: + if (handle->no_stored_km_flag) + handle->wait_timeout = HZ; + else + handle->wait_timeout = HZ / 4; + break; + case AKE_SEND_PAIRING_INFO_MESSAGE_ID: + handle->wait_timeout = HZ / 4; + break; + case REP_SEND_RECV_ID_LIST_ID: + if (!handle->authenticated) + handle->wait_timeout = HZ * 3; + else + handle->wait_timeout = 0; + break; + default: + handle->wait_timeout = 0; + } + + if (handle->wait_timeout) + queue_kthread_work(&handle->worker, &handle->wk_wait); +} + +static void hdcp_lib_wakeup_client(struct hdcp_lib_handle *handle, + struct hdmi_hdcp_wakeup_data *data) +{ + int rc = 0, i; + + if (!handle || !handle->client_ops || !handle->client_ops->wakeup || + !data || (data->cmd == HDMI_HDCP_WKUP_CMD_INVALID)) + return; + + data->abort_mask = REAUTH_REQ | LINK_INTEGRITY_FAILURE; + + if (data->cmd == HDMI_HDCP_WKUP_CMD_RECV_MESSAGE || + data->cmd == HDMI_HDCP_WKUP_CMD_LINK_POLL) + handle->last_msg = hdcp_lib_get_next_message(handle, data); + + if (handle->last_msg != INVALID_MESSAGE_ID && + data->cmd != HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS && + data->cmd != HDMI_HDCP_WKUP_CMD_STATUS_FAILED) { + u32 msg_num, rx_status; + const struct hdcp_msg_part *msg; + + pr_debug("lib->client: %s (%s)\n", + hdmi_hdcp_cmd_to_str(data->cmd), + 
hdcp_lib_message_name(handle->last_msg)); + + data->message_data = &hdcp_msg_lookup[handle->last_msg]; + + msg_num = data->message_data->num_messages; + msg = data->message_data->messages; + rx_status = data->message_data->rx_status; + + pr_debug("%10s | %6s | %4s\n", "name", "offset", "len"); + + for (i = 0; i < msg_num; i++) + pr_debug("%10s | %6x | %4d\n", + msg[i].name, msg[i].offset, + msg[i].length); + } else { + pr_debug("lib->client: %s\n", hdmi_hdcp_cmd_to_str(data->cmd)); + } + + rc = handle->client_ops->wakeup(data); + if (rc) + pr_err("error sending %s to client\n", + hdmi_hdcp_cmd_to_str(data->cmd)); + + hdcp_lib_wait_for_response(handle, data); +} + +static inline void hdcp_lib_send_message(struct hdcp_lib_handle *handle) +{ + char msg_name[50]; + struct hdmi_hdcp_wakeup_data cdata = { + HDMI_HDCP_WKUP_CMD_SEND_MESSAGE + }; + + cdata.context = handle->client_ctx; + cdata.send_msg_buf = handle->listener_buf; + cdata.send_msg_len = handle->msglen; + cdata.timeout = handle->hdcp_timeout; + + snprintf(msg_name, sizeof(msg_name), "%s: ", + hdcp_lib_message_name((int)cdata.send_msg_buf[0])); + + print_hex_dump(KERN_DEBUG, msg_name, + DUMP_PREFIX_NONE, 16, 1, cdata.send_msg_buf, + cdata.send_msg_len, false); + + hdcp_lib_wakeup_client(handle, &cdata); +} + +static int hdcp_lib_enable_encryption(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_set_hw_key_req *req_buf; + struct hdcp_set_hw_key_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto error; + } + + /* + * wait at least 200ms before enabling encryption + * as per hdcp2p2 specifications. + */ + msleep(SLEEP_SET_HW_KEY_MS); + + req_buf = (struct hdcp_set_hw_key_req *)(handle->qseecom_handle->sbuf); + req_buf->commandid = HDCP_TXMTR_SET_HW_KEY; + req_buf->ctxhandle = handle->tz_ctxhandle; + + rsp_buf = (struct hdcp_set_hw_key_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_set_hw_key_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_set_hw_key_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_set_hw_key_rsp))); + + if ((rc < 0) || (rsp_buf->status < 0)) { + pr_err("qseecom cmd failed with err = %d status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto error; + } + + /* reached an authenticated state */ + handle->hdcp_state |= HDCP_STATE_AUTHENTICATED; + + pr_debug("success\n"); + return 0; +error: + if (handle && !atomic_read(&handle->hdcp_off)) + HDCP_LIB_EXECUTE(clean); + + return rc; +} + +static int hdcp_lib_get_version(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_version_req *req_buf; + struct hdcp_version_rsp *rsp_buf; + uint32_t app_major_version = 0; + + if (!handle) { + pr_err("invalid input\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("library not loaded\n"); + return rc; + } + + /* get the TZ hdcp2p2 app version */ + req_buf = (struct hdcp_version_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_GET_VERSION; + + rsp_buf = (struct hdcp_version_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_version_req))); + + rc = qseecom_send_command(handle->qseecom_handle, + req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_init_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_init_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err = %d\n", rc); + goto exit; + } + + app_major_version = 
HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion); + + pr_debug("hdcp2p2 app major version %d, app version %d\n", + app_major_version, rsp_buf->appversion); + + if (app_major_version == 1) + handle->legacy_app = true; + +exit: + return rc; +} + +static int hdcp_lib_verify_keys(struct hdcp_lib_handle *handle) +{ + int rc = -EINVAL; + struct hdcp_verify_key_req *req_buf; + struct hdcp_verify_key_rsp *rsp_buf; + + if (!handle) { + pr_err("invalid input\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("app not loaded\n"); + goto exit; + } + + req_buf = (struct hdcp_verify_key_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_VERIFY_KEY; + + rsp_buf = (struct hdcp_verify_key_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_verify_key_req))); + + rc = qseecom_send_command(handle->qseecom_handle, + req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_verify_key_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_verify_key_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err = %d\n", rc); + goto exit; + } + + return rsp_buf->status; +exit: + return rc; +} + + +static int hdcp_app_init_legacy(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_lib_init_req_v1 *req_buf; + struct hdcp_lib_init_rsp_v1 *rsp_buf; + + if (!handle) { + pr_err("invalid input\n"); + goto exit; + } + + if (!handle->legacy_app) { + pr_err("wrong init function\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("library not loaded\n"); + goto exit; + } + + /* now load the app by sending hdcp_lib_init */ + req_buf = (struct hdcp_lib_init_req_v1 *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_LIB_INIT; + rsp_buf = (struct hdcp_lib_init_rsp_v1 *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_lib_init_req_v1))); + + rc = qseecom_send_command(handle->qseecom_handle, + req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_init_req_v1)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_init_rsp_v1))); + + if (rc < 0) { + pr_err("qseecom cmd failed err = %d\n", rc); + goto exit; + } + + pr_debug("success\n"); + +exit: + return rc; +} + +static int hdcp_app_init(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_lib_init_req *req_buf; + struct hdcp_lib_init_rsp *rsp_buf; + uint32_t app_minor_version = 0; + + if (!handle) { + pr_err("invalid input\n"); + goto exit; + } + + if (handle->legacy_app) { + pr_err("wrong init function\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("library not loaded\n"); + goto exit; + } + + /* now load the app by sending hdcp_lib_init */ + req_buf = (struct hdcp_lib_init_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_LIB_INIT; + req_buf->clientversion = + HDCP_CLIENT_MAKE_VERSION(HDCP_CLIENT_MAJOR_VERSION, + HDCP_CLIENT_MINOR_VERSION, + HDCP_CLIENT_PATCH_VERSION); + rsp_buf = (struct hdcp_lib_init_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_lib_init_req))); + + rc = qseecom_send_command(handle->qseecom_handle, + req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_init_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_init_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err = %d\n", rc); + goto exit; + } + + app_minor_version = HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion); + if (app_minor_version != HDCP_CLIENT_MINOR_VERSION) { + pr_err + ("client-app minor version mismatch app(%d), client(%d)\n", + app_minor_version, 
HDCP_CLIENT_MINOR_VERSION); + rc = -1; + goto exit; + } + pr_debug("success\n"); + pr_debug("client version major(%d), minor(%d), patch(%d)\n", + HDCP_CLIENT_MAJOR_VERSION, HDCP_CLIENT_MINOR_VERSION, + HDCP_CLIENT_PATCH_VERSION); + pr_debug("app version major(%d), minor(%d), patch(%d)\n", + HCDP_TXMTR_GET_MAJOR_VERSION(rsp_buf->appversion), + HCDP_TXMTR_GET_MINOR_VERSION(rsp_buf->appversion), + HCDP_TXMTR_GET_PATCH_VERSION(rsp_buf->appversion)); + +exit: + return rc; +} + +static int hdcp_lib_library_load(struct hdcp_lib_handle *handle) +{ + int rc = 0; + + if (!handle) { + pr_err("invalid input\n"); + goto exit; + } + + if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_err("library already loaded\n"); + goto exit; + } + + /* + * allocating resource for qseecom handle + * the app is not loaded here + */ + rc = qseecom_start_app(&(handle->qseecom_handle), + TZAPP_NAME, QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("qseecom_start_app failed %d\n", rc); + goto exit; + } + + if (!hdcpsrm_handle) { + rc = qseecom_start_app(&hdcpsrm_handle, + SRMAPP_NAME, QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("qseecom_start_app failed for SRM TA %d\n", rc); + goto exit; + } + } + + handle->hdcp_state |= HDCP_STATE_APP_LOADED; + pr_debug("qseecom_start_app success\n"); + + rc = hdcp_lib_get_version(handle); + if (rc) { + pr_err("library get version failed\n"); + goto exit; + } + + if (handle->legacy_app) { + handle->hdcp_app_init = hdcp_app_init_legacy; + handle->hdcp_txmtr_init = hdcp_lib_txmtr_init_legacy; + } else { + handle->hdcp_app_init = hdcp_app_init; + handle->hdcp_txmtr_init = hdcp_lib_txmtr_init; + } + + if (handle->hdcp_app_init == NULL) { + pr_err("invalid app init function pointer\n"); + goto exit; + } + + rc = handle->hdcp_app_init(handle); + if (rc) { + pr_err("app init failed\n"); + goto exit; + } +exit: + return rc; +} + +static int hdcp_lib_library_unload(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_lib_deinit_req *req_buf; + struct hdcp_lib_deinit_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("library not loaded\n"); + return rc; + } + + /* unloading app by sending hdcp_lib_deinit cmd */ + req_buf = (struct hdcp_lib_deinit_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_LIB_DEINIT; + rsp_buf = (struct hdcp_lib_deinit_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_lib_deinit_req))); + + rc = qseecom_send_command(handle->qseecom_handle, + req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_deinit_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_lib_deinit_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err = %d\n", rc); + goto exit; + } + + /* deallocate the resources for qseecom hdcp2p2 handle */ + rc = qseecom_shutdown_app(&handle->qseecom_handle); + if (rc) { + pr_err("hdcp2p2 qseecom_shutdown_app failed err: %d\n", rc); + goto exit; + } + + /* deallocate the resources for qseecom hdcpsrm handle */ + rc = qseecom_shutdown_app(&hdcpsrm_handle); + if (rc) { + pr_err("hdcpsrm qseecom_shutdown_app failed err: %d\n", rc); + goto exit; + } + + handle->hdcp_state &= ~HDCP_STATE_APP_LOADED; + pr_debug("success\n"); +exit: + return rc; +} + +static int hdcp_lib_session_init(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_lib_session_init_req *req_buf; + struct hdcp_lib_session_init_rsp *rsp_buf; + + if (!handle || 
!handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("app not loaded\n"); + goto exit; + } + + if (handle->hdcp_state & HDCP_STATE_SESSION_INIT) { + pr_err("session already initialized\n"); + goto exit; + } + + /* send HDCP_Session_Init command to TZ */ + req_buf = + (struct hdcp_lib_session_init_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_SESSION_INIT; + req_buf->deviceid = handle->device_type; + rsp_buf = (struct hdcp_lib_session_init_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_init_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_lib_session_init_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_lib_session_init_rsp))); + + if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) || + (rsp_buf->commandid != HDCP_SESSION_INIT)) { + pr_err("qseecom cmd failed with err = %d, status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + pr_debug("session id %d\n", rsp_buf->sessionid); + + handle->session_id = rsp_buf->sessionid; + handle->hdcp_state |= HDCP_STATE_SESSION_INIT; + + pr_debug("success\n"); +exit: + return rc; +} + +static int hdcp_lib_session_deinit(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_lib_session_deinit_req *req_buf; + struct hdcp_lib_session_deinit_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("app not loaded\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + /* unload library here */ + pr_err("session not initialized\n"); + goto exit; + } + + /* send command to TZ */ + req_buf = + (struct hdcp_lib_session_deinit_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_SESSION_DEINIT; + req_buf->sessionid = handle->session_id; + rsp_buf = (struct hdcp_lib_session_deinit_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_lib_session_deinit_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_lib_session_deinit_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_lib_session_deinit_rsp))); + + if ((rc < 0) || (rsp_buf->status < 0) || + (rsp_buf->commandid != HDCP_SESSION_DEINIT)) { + pr_err("qseecom cmd failed with err = %d status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + handle->hdcp_state &= ~HDCP_STATE_SESSION_INIT; + pr_debug("success\n"); +exit: + return rc; +} + +static int hdcp_lib_txmtr_init(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_tx_init_req *req_buf; + struct hdcp_tx_init_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("library not loaded\n"); + goto exit; + } + + /* send HDCP_Txmtr_Init command to TZ */ + req_buf = (struct hdcp_tx_init_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_INIT; + req_buf->sessionid = handle->session_id; + rsp_buf = (struct hdcp_tx_init_rsp *) + (handle->qseecom_handle->sbuf + + 
QSEECOM_ALIGN(sizeof(struct hdcp_tx_init_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_tx_init_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_tx_init_rsp))); + + if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) || + (rsp_buf->commandid != HDCP_TXMTR_INIT)) { + pr_err("qseecom cmd failed with err = %d, status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + handle->tz_ctxhandle = rsp_buf->ctxhandle; + handle->hdcp_state |= HDCP_STATE_TXMTR_INIT; + + pr_debug("success\n"); +exit: + return rc; +} + +static int hdcp_lib_txmtr_init_legacy(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_tx_init_req_v1 *req_buf; + struct hdcp_tx_init_rsp_v1 *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("app not loaded\n"); + goto exit; + } + + if (handle->hdcp_state & HDCP_STATE_TXMTR_INIT) { + pr_err("txmtr already initialized\n"); + goto exit; + } + + /* send HDCP_Txmtr_Init command to TZ */ + req_buf = (struct hdcp_tx_init_req_v1 *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_INIT; + rsp_buf = (struct hdcp_tx_init_rsp_v1 *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_tx_init_req_v1))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_tx_init_req_v1)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_tx_init_rsp_v1))); + + if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) || + (rsp_buf->commandid != HDCP_TXMTR_INIT) || + (rsp_buf->msglen <= 0) || (rsp_buf->message == NULL)) { + pr_err("qseecom cmd failed with err = %d, status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + pr_debug("recvd %s from TZ at %dms\n", + hdcp_lib_message_name((int)rsp_buf->message[0]), + jiffies_to_msecs(jiffies)); + + handle->last_msg = (int)rsp_buf->message[0]; + + /* send the response to HDMI driver */ + memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE); + memcpy(handle->listener_buf, (unsigned char *)rsp_buf->message, + rsp_buf->msglen); + handle->msglen = rsp_buf->msglen; + handle->hdcp_timeout = rsp_buf->timeout; + + handle->tz_ctxhandle = rsp_buf->ctxhandle; + handle->hdcp_state |= HDCP_STATE_TXMTR_INIT; + + pr_debug("success\n"); +exit: + return rc; +} + +static int hdcp_lib_txmtr_deinit(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_deinit_req *req_buf; + struct hdcp_deinit_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_err("app not loaded\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + /* unload library here */ + pr_err("txmtr not initialized\n"); + goto exit; + } + + /* send command to TZ */ + req_buf = (struct hdcp_deinit_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_DEINIT; + req_buf->ctxhandle = handle->tz_ctxhandle; + rsp_buf = (struct hdcp_deinit_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_deinit_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof(struct hdcp_deinit_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_deinit_rsp))); + + if ((rc < 0) || (rsp_buf->status < 0) || + (rsp_buf->commandid != 
HDCP_TXMTR_DEINIT)) { + pr_err("qseecom cmd failed with err = %d status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + handle->hdcp_state &= ~HDCP_STATE_TXMTR_INIT; + pr_debug("success\n"); +exit: + return rc; +} + +static int hdcp_lib_start_auth(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_start_auth_req *req_buf; + struct hdcp_start_auth_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + rc = -EINVAL; + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_SESSION_INIT)) { + pr_err("session not initialized\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_TXMTR_INIT)) { + pr_err("txmtr not initialized\n"); + goto exit; + } + + /* send HDCP_Txmtr_Start_Auth command to TZ */ + req_buf = (struct hdcp_start_auth_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_START_AUTHENTICATE; + req_buf->ctxHandle = handle->tz_ctxhandle; + rsp_buf = (struct hdcp_start_auth_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_start_auth_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_start_auth_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_start_auth_rsp))); + + if ((rc < 0) || (rsp_buf->status != HDCP_SUCCESS) || + (rsp_buf->commandid != HDCP_TXMTR_START_AUTHENTICATE) || + (rsp_buf->msglen <= 0) || (rsp_buf->message == NULL)) { + pr_err("qseecom cmd failed with err = %d, status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + pr_debug("recvd %s from TZ at %dms\n", + hdcp_lib_message_name((int)rsp_buf->message[0]), + jiffies_to_msecs(jiffies)); + + handle->last_msg = (int)rsp_buf->message[0]; + + /* send the response to HDMI driver */ + memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE); + memcpy(handle->listener_buf, (unsigned char *)rsp_buf->message, + rsp_buf->msglen); + handle->msglen = rsp_buf->msglen; + handle->hdcp_timeout = rsp_buf->timeout; + + handle->tz_ctxhandle = rsp_buf->ctxhandle; + + pr_debug("success\n"); +exit: + return rc; +} + +static void hdcp_lib_stream(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_query_stream_type_req *req_buf; + struct hdcp_query_stream_type_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + return; + } + + if (atomic_read(&handle->hdcp_off)) { + pr_debug("invalid state, hdcp off\n"); + return; + } + + if (!handle->repeater_flag) { + pr_debug("invalid state, not a repeater\n"); + return; + } + + /* send command to TZ */ + req_buf = + (struct hdcp_query_stream_type_req *)handle->qseecom_handle->sbuf; + req_buf->commandid = HDCP_TXMTR_QUERY_STREAM_TYPE; + req_buf->ctxhandle = handle->tz_ctxhandle; + rsp_buf = (struct hdcp_query_stream_type_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_query_stream_type_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_query_stream_type_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_query_stream_type_rsp))); + + if ((rc < 0) || (rsp_buf->status < 0) || (rsp_buf->msglen <= 0) || + (rsp_buf->commandid != HDCP_TXMTR_QUERY_STREAM_TYPE) || + (rsp_buf->msg == NULL)) { + pr_err("qseecom cmd failed with err=%d status=%d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + pr_debug("message received from TZ: %s\n", + hdcp_lib_message_name((int)rsp_buf->msg[0])); + + 
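+ /*
+ * Cache the message ID and copy the TZ response (typically a
+ * RepeaterAuth_Stream_Manage message) into the listener buffer so
+ * it can be forwarded to the sink below.
+ */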
handle->last_msg = (int)rsp_buf->msg[0]; + + memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE); + memcpy(handle->listener_buf, (unsigned char *)rsp_buf->msg, + rsp_buf->msglen); + handle->hdcp_timeout = rsp_buf->timeout; + handle->msglen = rsp_buf->msglen; +exit: + if (!rc && !atomic_read(&handle->hdcp_off)) + hdcp_lib_send_message(handle); +} + +static void hdcp_lib_query_stream_work(struct kthread_work *work) +{ + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, + wk_stream); + + hdcp_lib_stream(handle); +} + +static bool hdcp_lib_client_feature_supported(void *phdcpcontext) +{ + int rc = 0; + bool supported = false; + struct hdcp_lib_handle *handle = phdcpcontext; + + if (!handle) { + pr_err("invalid input\n"); + goto exit; + } + + if (handle->feature_supported) { + supported = true; + goto exit; + } + + rc = hdcp_lib_library_load(handle); + if (!rc) { + if (!hdcp_lib_verify_keys(handle)) { + pr_debug("HDCP2p2 supported\n"); + handle->feature_supported = true; + supported = true; + } + hdcp_lib_library_unload(handle); + } +exit: + return supported; +} + +static void hdcp_lib_check_worker_status(struct hdcp_lib_handle *handle) +{ + if (!list_empty(&handle->wk_init.node)) + pr_debug("init work queued\n"); + + if (handle->worker.current_work == &handle->wk_init) + pr_debug("init work executing\n"); + + if (!list_empty(&handle->wk_msg_sent.node)) + pr_debug("msg_sent work queued\n"); + + if (handle->worker.current_work == &handle->wk_msg_sent) + pr_debug("msg_sent work executing\n"); + + if (!list_empty(&handle->wk_msg_recvd.node)) + pr_debug("msg_recvd work queued\n"); + + if (handle->worker.current_work == &handle->wk_msg_recvd) + pr_debug("msg_recvd work executing\n"); + + if (!list_empty(&handle->wk_timeout.node)) + pr_debug("timeout work queued\n"); + + if (handle->worker.current_work == &handle->wk_timeout) + pr_debug("timeout work executing\n"); + + if (!list_empty(&handle->wk_clean.node)) + pr_debug("clean work queued\n"); + + if (handle->worker.current_work == &handle->wk_clean) + pr_debug("clean work executing\n"); + + if (!list_empty(&handle->wk_wait.node)) + pr_debug("wait work queued\n"); + + if (handle->worker.current_work == &handle->wk_wait) + pr_debug("wait work executing\n"); + + if (!list_empty(&handle->wk_stream.node)) + pr_debug("stream work queued\n"); + + if (handle->worker.current_work == &handle->wk_stream) + pr_debug("stream work executing\n"); +} + +static int hdcp_lib_check_valid_state(struct hdcp_lib_handle *handle) +{ + int rc = 0; + + if (!list_empty(&handle->worker.work_list)) + hdcp_lib_check_worker_status(handle); + + if (handle->wakeup_cmd == HDCP_LIB_WKUP_CMD_START) { + if (!list_empty(&handle->worker.work_list)) { + pr_debug("error: queue not empty\n"); + rc = -EBUSY; + goto exit; + } + + if (handle->hdcp_state & HDCP_STATE_APP_LOADED) { + pr_debug("library already loaded\n"); + rc = -EBUSY; + goto exit; + } + } else { + if (atomic_read(&handle->hdcp_off)) { + pr_debug("hdcp2.2 session tearing down\n"); + goto exit; + } + + if (!(handle->hdcp_state & HDCP_STATE_APP_LOADED)) { + pr_debug("hdcp 2.2 app not loaded\n"); + goto exit; + } + } +exit: + return rc; +} + +static void hdcp_lib_update_exec_type(void *ctx, bool tethered) +{ + struct hdcp_lib_handle *handle = ctx; + + if (!handle) + return; + + mutex_lock(&handle->wakeup_mutex); + + if (handle->tethered == tethered) { + pr_debug("exec mode same as %s\n", + tethered ? 
"tethered" : "threaded"); + } else { + handle->tethered = tethered; + + pr_debug("exec mode changed to %s\n", + tethered ? "tethered" : "threaded"); + } + + mutex_unlock(&handle->wakeup_mutex); +} + +static int hdcp_lib_wakeup_thread(struct hdcp_lib_wakeup_data *data) +{ + struct hdcp_lib_handle *handle; + int rc = 0; + + if (!data) + return -EINVAL; + + handle = data->context; + if (!handle) + return -EINVAL; + + mutex_lock(&handle->wakeup_mutex); + + handle->wakeup_cmd = data->cmd; + handle->timeout_left = data->timeout; + + pr_debug("client->lib: %s (%s)\n", + hdcp_lib_cmd_to_str(data->cmd), + hdcp_lib_message_name(handle->last_msg)); + + rc = hdcp_lib_check_valid_state(handle); + if (rc) + goto exit; + + mutex_lock(&handle->msg_lock); + if (data->recvd_msg_len) { + kzfree(handle->last_msg_recvd_buf); + + handle->last_msg_recvd_len = data->recvd_msg_len; + handle->last_msg_recvd_buf = kzalloc(data->recvd_msg_len, + GFP_KERNEL); + if (!handle->last_msg_recvd_buf) { + rc = -ENOMEM; + mutex_unlock(&handle->msg_lock); + goto exit; + } + + memcpy(handle->last_msg_recvd_buf, data->recvd_msg_buf, + data->recvd_msg_len); + } + mutex_unlock(&handle->msg_lock); + + if (!completion_done(&handle->poll_wait)) + complete_all(&handle->poll_wait); + + switch (handle->wakeup_cmd) { + case HDCP_LIB_WKUP_CMD_START: + handle->no_stored_km_flag = 0; + handle->repeater_flag = false; + handle->non_2p2_present = false; + handle->update_stream = false; + handle->last_msg_sent = 0; + handle->last_msg = INVALID_MESSAGE_ID; + handle->hdcp_timeout = 0; + handle->timeout_left = 0; + handle->legacy_app = false; + atomic_set(&handle->hdcp_off, 0); + handle->hdcp_state = HDCP_STATE_INIT; + + HDCP_LIB_EXECUTE(init); + break; + case HDCP_LIB_WKUP_CMD_STOP: + atomic_set(&handle->hdcp_off, 1); + + HDCP_LIB_EXECUTE(clean); + break; + case HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS: + handle->last_msg_sent = handle->listener_buf[0]; + + HDCP_LIB_EXECUTE(msg_sent); + break; + case HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED: + case HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED: + case HDCP_LIB_WKUP_CMD_LINK_FAILED: + handle->hdcp_state |= HDCP_STATE_ERROR; + HDCP_LIB_EXECUTE(clean); + break; + case HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS: + HDCP_LIB_EXECUTE(msg_recvd); + break; + case HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT: + HDCP_LIB_EXECUTE(timeout); + break; + case HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE: + HDCP_LIB_EXECUTE(stream); + break; + default: + pr_err("invalid wakeup command %d\n", handle->wakeup_cmd); + } +exit: + mutex_unlock(&handle->wakeup_mutex); + + return rc; +} + +static void hdcp_lib_prep_type_id(struct hdcp_lib_handle *handle, + struct hdmi_hdcp_wakeup_data *cdata) +{ + memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE); + handle->listener_buf[0] = SKE_SEND_TYPE_ID; + handle->msglen = 2; + cdata->cmd = HDMI_HDCP_WKUP_CMD_SEND_MESSAGE; + cdata->send_msg_buf = handle->listener_buf; + cdata->send_msg_len = handle->msglen; + handle->last_msg = hdcp_lib_get_next_message(handle, + cdata); +} + +static void hdcp_lib_msg_sent(struct hdcp_lib_handle *handle) +{ + struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID }; + + if (!handle) { + pr_err("invalid handle\n"); + return; + } + + cdata.context = handle->client_ctx; + + switch (handle->last_msg_sent) { + case SKE_SEND_TYPE_ID: + if (!hdcp_lib_enable_encryption(handle)) { + handle->authenticated = true; + + cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS; + hdcp_lib_wakeup_client(handle, &cdata); + } + + /* poll for link check */ + cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL; + break; + case 
SKE_SEND_EKS_MESSAGE_ID: + /* + * a) if its a repeater irrespective of device type we + * start CMD_LINK_POLL to trigger repeater auth + * b) if its not a repeater and device is DP we + * first send the SKE_SEND_TYPE_ID and upon success + * enable encryption + * c) if its not a repeater and device is HDMI we + * dont send SKE_SEND_TYPE_ID and enable encryption + * and start part III of authentication + */ + if (handle->repeater_flag) { + /* poll for link check */ + cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL; + } else if (handle->device_type == HDCP_TXMTR_DP) { + hdcp_lib_prep_type_id(handle, &cdata); + } else if (handle->device_type == HDCP_TXMTR_HDMI) { + if (!hdcp_lib_enable_encryption(handle)) { + handle->authenticated = true; + cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS; + hdcp_lib_wakeup_client(handle, &cdata); + } + /* poll for link check */ + cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL; + } + break; + case REP_SEND_ACK_ID: + pr_debug("Repeater authentication successful\n"); + + if (handle->update_stream) { + HDCP_LIB_EXECUTE(stream); + handle->update_stream = false; + } else { + cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL; + } + break; + default: + cdata.cmd = HDMI_HDCP_WKUP_CMD_RECV_MESSAGE; + cdata.timeout = handle->timeout_left; + } + + hdcp_lib_wakeup_client(handle, &cdata); +} + +static void hdcp_lib_msg_sent_work(struct kthread_work *work) +{ + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, + wk_msg_sent); + + if (handle->wakeup_cmd != HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS) { + pr_err("invalid wakeup command %d\n", handle->wakeup_cmd); + return; + } + + hdcp_lib_msg_sent(handle); +} + +static void hdcp_lib_init(struct hdcp_lib_handle *handle) +{ + int rc = 0; + + if (!handle) { + pr_err("invalid handle\n"); + return; + } + + if (handle->wakeup_cmd != HDCP_LIB_WKUP_CMD_START) { + pr_err("invalid wakeup command %d\n", handle->wakeup_cmd); + return; + } + + rc = hdcp_lib_library_load(handle); + if (rc) + goto exit; + + if (!handle->legacy_app) { + rc = hdcp_lib_session_init(handle); + if (rc) + goto exit; + } + + if (handle->hdcp_txmtr_init == NULL) { + pr_err("invalid txmtr init function pointer\n"); + return; + } + + rc = handle->hdcp_txmtr_init(handle); + if (rc) + goto exit; + + if (!handle->legacy_app) { + rc = hdcp_lib_start_auth(handle); + if (rc) + goto exit; + } + + hdcp_lib_send_message(handle); + + return; +exit: + HDCP_LIB_EXECUTE(clean); +} + +static void hdcp_lib_init_work(struct kthread_work *work) +{ + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, + wk_init); + + hdcp_lib_init(handle); +} + +static void hdcp_lib_timeout(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_send_timeout_req *req_buf; + struct hdcp_send_timeout_rsp *rsp_buf; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_debug("invalid handle\n"); + return; + } + + if (atomic_read(&handle->hdcp_off)) { + pr_debug("invalid state, hdcp off\n"); + return; + } + + req_buf = (struct hdcp_send_timeout_req *) + (handle->qseecom_handle->sbuf); + req_buf->commandid = HDCP_TXMTR_SEND_MESSAGE_TIMEOUT; + req_buf->ctxhandle = handle->tz_ctxhandle; + + rsp_buf = (struct hdcp_send_timeout_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_send_timeout_req))); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_send_timeout_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct + hdcp_send_timeout_rsp))); + + if ((rc < 0) || (rsp_buf->status != 
HDCP_SUCCESS)) { + pr_err("qseecom cmd failed for with err = %d status = %d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto error; + } + + if (rsp_buf->commandid == HDCP_TXMTR_SEND_MESSAGE_TIMEOUT) { + pr_err("HDCP_TXMTR_SEND_MESSAGE_TIMEOUT\n"); + rc = -EINVAL; + goto error; + } + + /* + * if the response contains LC_Init OR RepeaterAuth_Stream_Manage + * message send the message again to the sink as this means that + * TZ would like to try again + */ + if ((rsp_buf->commandid == HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) && + ((int)rsp_buf->message[0] == LC_INIT_MESSAGE_ID || + (int)rsp_buf->message[0] == REP_STREAM_MANAGE_ID)) { + if (!atomic_read(&handle->hdcp_off)) { + /* keep local copy of TZ response */ + memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE); + memcpy(handle->listener_buf, + (unsigned char *)rsp_buf->message, + rsp_buf->msglen); + handle->hdcp_timeout = rsp_buf->timeout; + handle->msglen = rsp_buf->msglen; + + hdcp_lib_send_message(handle); + } + } + + return; +error: + if (!atomic_read(&handle->hdcp_off)) + HDCP_LIB_EXECUTE(clean); +} + +static void hdcp_lib_manage_timeout_work(struct kthread_work *work) +{ + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, + wk_timeout); + + hdcp_lib_timeout(handle); +} + +static void hdcp_lib_clean(struct hdcp_lib_handle *handle) +{ + struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID }; + + if (!handle) { + pr_err("invalid input\n"); + return; + } + + handle->authenticated = false; + + /* AV mute the sink first to avoid artifacts */ + handle->client_ops->mute_sink(handle->client_ctx); + + hdcp_lib_txmtr_deinit(handle); + if (!handle->legacy_app) + hdcp_lib_session_deinit(handle); + hdcp_lib_library_unload(handle); + + cdata.context = handle->client_ctx; + cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_FAILED; + + if (!atomic_read(&handle->hdcp_off)) + hdcp_lib_wakeup_client(handle, &cdata); + + atomic_set(&handle->hdcp_off, 1); +} + +static void hdcp_lib_cleanup_work(struct kthread_work *work) +{ + + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, + wk_clean); + + hdcp_lib_clean(handle); +} + +static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdmi_hdcp_wakeup_data cdata = { HDMI_HDCP_WKUP_CMD_INVALID }; + struct hdcp_rcvd_msg_req *req_buf; + struct hdcp_rcvd_msg_rsp *rsp_buf; + uint32_t msglen; + char *msg = NULL; + char msg_name[50]; + uint32_t message_id_bytes = 0; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + return; + } + + if (atomic_read(&handle->hdcp_off)) { + pr_debug("invalid state, hdcp off\n"); + return; + } + + cdata.context = handle->client_ctx; + + mutex_lock(&handle->msg_lock); + msglen = handle->last_msg_recvd_len; + + if (msglen <= 0) { + pr_err("invalid msg len\n"); + mutex_unlock(&handle->msg_lock); + rc = -EINVAL; + goto exit; + } + + /* If the client is DP then allocate extra byte for message ID. 
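+ * On DisplayPort the message ID is implied by the DPCD offset the
+ * message was read from and is not carried in the received payload,
+ * so it is prepended here before the buffer is handed to the TZ app.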
*/ + if (handle->device_type == HDCP_TXMTR_DP) + message_id_bytes = 1; + + msglen += message_id_bytes; + + msg = kzalloc(msglen, GFP_KERNEL); + if (!msg) { + mutex_unlock(&handle->msg_lock); + rc = -ENOMEM; + goto exit; + } + + /* copy the message id if needed */ + if (message_id_bytes) + memcpy(msg, &handle->last_msg, message_id_bytes); + + memcpy(msg + message_id_bytes, + handle->last_msg_recvd_buf, + handle->last_msg_recvd_len); + + mutex_unlock(&handle->msg_lock); + + snprintf(msg_name, sizeof(msg_name), "%s: ", + hdcp_lib_message_name((int)msg[0])); + + print_hex_dump(KERN_DEBUG, msg_name, + DUMP_PREFIX_NONE, 16, 1, msg, msglen, false); + + /* send the message to QSEECOM */ + req_buf = (struct hdcp_rcvd_msg_req *)(handle->qseecom_handle->sbuf); + req_buf->commandid = HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE; + memcpy(req_buf->msg, msg, msglen); + req_buf->msglen = msglen; + req_buf->ctxhandle = handle->tz_ctxhandle; + + rsp_buf = + (struct hdcp_rcvd_msg_rsp *)(handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof + (struct + hdcp_rcvd_msg_req))); + + pr_debug("writing %s to TZ at %dms\n", + hdcp_lib_message_name((int)msg[0]), jiffies_to_msecs(jiffies)); + + rc = qseecom_send_command(handle->qseecom_handle, req_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_rcvd_msg_req)), + rsp_buf, + QSEECOM_ALIGN(sizeof + (struct hdcp_rcvd_msg_rsp))); + + if (msg[0] == REP_SEND_RECV_ID_LIST_ID) { + if ((msg[2] & HDCP2_0_REPEATER_DOWNSTREAM) || + (msg[2] & HDCP1_DEVICE_DOWNSTREAM)) + handle->non_2p2_present = true; + else + handle->non_2p2_present = false; + } + + /* get next message from sink if we receive H PRIME on no store km */ + if ((msg[0] == AKE_SEND_H_PRIME_MESSAGE_ID) && + handle->no_stored_km_flag) { + handle->hdcp_timeout = rsp_buf->timeout; + + cdata.cmd = HDMI_HDCP_WKUP_CMD_RECV_MESSAGE; + cdata.timeout = handle->hdcp_timeout; + + goto exit; + } + + if ((msg[0] == REP_STREAM_READY_ID) && + (rc == 0) && (rsp_buf->status == 0)) { + pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n"); + + if (!handle->authenticated && + !hdcp_lib_enable_encryption(handle)) { + handle->authenticated = true; + + cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS; + hdcp_lib_wakeup_client(handle, &cdata); + } + + cdata.cmd = HDMI_HDCP_WKUP_CMD_LINK_POLL; + goto exit; + } + + if ((rc < 0) || (rsp_buf->status != 0) || (rsp_buf->msglen <= 0) || + (rsp_buf->commandid != HDCP_TXMTR_PROCESS_RECEIVED_MESSAGE) || + (rsp_buf->msg == NULL)) { + pr_err("qseecom cmd failed with err=%d status=%d\n", + rc, rsp_buf->status); + rc = -EINVAL; + goto exit; + } + + pr_debug("recvd %s from TZ at %dms\n", + hdcp_lib_message_name((int)rsp_buf->msg[0]), + jiffies_to_msecs(jiffies)); + + handle->last_msg = (int)rsp_buf->msg[0]; + + /* set the flag if response is AKE_No_Stored_km */ + if (((int)rsp_buf->msg[0] == AKE_NO_STORED_KM_MESSAGE_ID)) { + pr_debug("Setting no_stored_km_flag\n"); + handle->no_stored_km_flag = 1; + } else { + handle->no_stored_km_flag = 0; + } + + /* check if it's a repeater */ + if ((rsp_buf->msg[0] == SKE_SEND_EKS_MESSAGE_ID) && + (rsp_buf->msglen == SKE_SEND_EKS_MESSAGE_SIZE)) { + if ((rsp_buf->flag == + HDCP_TXMTR_SUBSTATE_WAITING_FOR_RECIEVERID_LIST) && + (rsp_buf->timeout > 0)) + handle->repeater_flag = true; + handle->update_stream = true; + } + + memset(handle->listener_buf, 0, MAX_TX_MESSAGE_SIZE); + memcpy(handle->listener_buf, (unsigned char *)rsp_buf->msg, + rsp_buf->msglen); + handle->hdcp_timeout = rsp_buf->timeout; + handle->msglen = rsp_buf->msglen; + + if (!atomic_read(&handle->hdcp_off)) + 
hdcp_lib_send_message(handle); +exit: + kzfree(msg); + + hdcp_lib_wakeup_client(handle, &cdata); + + if (rc && !atomic_read(&handle->hdcp_off)) + HDCP_LIB_EXECUTE(clean); +} + +static void hdcp_lib_msg_recvd_work(struct kthread_work *work) +{ + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, + wk_msg_recvd); + + hdcp_lib_msg_recvd(handle); +} + +static void hdcp_lib_wait_work(struct kthread_work *work) +{ + u32 timeout; + struct hdcp_lib_handle *handle = container_of(work, + struct hdcp_lib_handle, wk_wait); + + if (!handle) { + pr_err("invalid input\n"); + return; + } + + if (atomic_read(&handle->hdcp_off)) { + pr_debug("invalid state: hdcp off\n"); + return; + } + + if (handle->hdcp_state & HDCP_STATE_ERROR) { + pr_debug("invalid state: hdcp error\n"); + return; + } + + reinit_completion(&handle->poll_wait); + timeout = wait_for_completion_timeout(&handle->poll_wait, + handle->wait_timeout); + if (!timeout) { + pr_err("wait timeout\n"); + + if (!atomic_read(&handle->hdcp_off)) + HDCP_LIB_EXECUTE(clean); + } + + handle->wait_timeout = 0; +} + +bool hdcp1_check_if_supported_load_app(void) +{ + int rc = 0; + bool hdcp1_srm_supported = true; + + /* start hdcp1 app */ + if (hdcp1_supported && !hdcp1_handle->qsee_handle) { + rc = qseecom_start_app(&hdcp1_handle->qsee_handle, + HDCP1_APP_NAME, + QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("hdcp1 qseecom_start_app failed %d\n", rc); + hdcp1_supported = false; + kfree(hdcp1_handle); + } + } + + /* if hdcp1 app succeeds load SRM TA as well */ + if (hdcp1_supported && !hdcp1_handle->srm_handle) { + mutex_init(&hdcp1_ta_cmd_lock); + rc = qseecom_start_app(&hdcp1_handle->srm_handle, + SRMAPP_NAME, + QSEECOM_SBUFF_SIZE); + if (rc) { + hdcp1_srm_supported = false; + pr_err("hdcp1_srm qseecom_start_app failed %d\n", rc); + } + } + + pr_debug("hdcp1 app %s loaded\n", + hdcp1_supported ? "successfully" : "not"); + pr_debug("hdcp1 srm app %s loaded\n", + hdcp1_srm_supported ? 
"successfully" : "not"); + + return hdcp1_supported; +} + +void hdcp1_client_register(void *client_ctx, struct hdcp_client_ops *ops) +{ + /* initialize the hdcp1 handle */ + hdcp1_handle = kzalloc(sizeof(*hdcp1_handle), GFP_KERNEL); + + if (hdcp1_handle) { + hdcp1_handle->client_ops = ops; + hdcp1_handle->client_ctx = client_ctx; + } +} + +void hdcp1_client_unregister(void) +{ + if (hdcp1_handle && hdcp1_handle->qsee_handle) + qseecom_shutdown_app(&hdcp1_handle->qsee_handle); + + if (hdcp1_handle && hdcp1_handle->srm_handle) + qseecom_shutdown_app(&hdcp1_handle->srm_handle); + + kfree(hdcp1_handle); +} + +/* APIs exposed to all clients */ +int hdcp1_set_keys(uint32_t *aksv_msb, uint32_t *aksv_lsb) +{ + int rc = 0; + struct hdcp1_key_set_req *key_set_req; + struct hdcp1_key_set_rsp *key_set_rsp; + struct qseecom_handle *hdcp1_qsee_handle; + + if (aksv_msb == NULL || aksv_lsb == NULL) + return -EINVAL; + + if (!hdcp1_supported || !hdcp1_handle) + return -EINVAL; + + hdcp1_qsee_handle = hdcp1_handle->qsee_handle; + + if (!hdcp1_qsee_handle) + return -EINVAL; + + /* set keys and request aksv */ + key_set_req = (struct hdcp1_key_set_req *)hdcp1_qsee_handle->sbuf; + key_set_req->commandid = HDCP1_SET_KEY_MESSAGE_ID; + key_set_rsp = (struct hdcp1_key_set_rsp *)(hdcp1_qsee_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp1_key_set_req))); + rc = qseecom_send_command(hdcp1_qsee_handle, key_set_req, + QSEECOM_ALIGN(sizeof + (struct hdcp1_key_set_req)), + key_set_rsp, + QSEECOM_ALIGN(sizeof + (struct hdcp1_key_set_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err=%d\n", rc); + return -ENOKEY; + } + + rc = key_set_rsp->ret; + if (rc) { + pr_err("set key cmd failed, rsp=%d\n", key_set_rsp->ret); + return -ENOKEY; + } + + /* copy bytes into msb and lsb */ + *aksv_msb = key_set_rsp->ksv[0] << 24; + *aksv_msb |= key_set_rsp->ksv[1] << 16; + *aksv_msb |= key_set_rsp->ksv[2] << 8; + *aksv_msb |= key_set_rsp->ksv[3]; + *aksv_lsb = key_set_rsp->ksv[4] << 24; + *aksv_lsb |= key_set_rsp->ksv[5] << 16; + *aksv_lsb |= key_set_rsp->ksv[6] << 8; + *aksv_lsb |= key_set_rsp->ksv[7]; + + return 0; +} + +int hdcp1_validate_receiver_ids(struct hdcp_srm_device_id_t *device_ids, + uint32_t device_id_cnt) +{ + int rc = 0; + struct hdcp_srm_check_device_ids_req *recv_id_req; + struct hdcp_srm_check_device_ids_rsp *recv_id_rsp; + uint32_t sbuf_len; + uint32_t rbuf_len; + int i = 0; + struct qseecom_handle *hdcp1_srmhandle; + + /* If client has not been registered return */ + if (!hdcp1_supported || !hdcp1_handle) + return -EINVAL; + + /* Start the hdcp srm app if not already started */ + if (hdcp1_handle && !hdcp1_handle->srm_handle) { + rc = qseecom_start_app(&hdcp1_handle->srm_handle, + SRMAPP_NAME, QSEECOM_SBUFF_SIZE); + if (rc) { + pr_err("qseecom_start_app failed for SRM TA %d\n", rc); + goto end; + } + } + + pr_debug("device_id_cnt = %d\n", device_id_cnt); + + hdcp1_srmhandle = hdcp1_handle->srm_handle; + + sbuf_len = sizeof(struct hdcp_srm_check_device_ids_req) + + sizeof(struct hdcp_srm_device_id_t) * device_id_cnt + - 1; + + rbuf_len = sizeof(struct hdcp_srm_check_device_ids_rsp); + + /* Create a SRM validate receiver ID request */ + recv_id_req = (struct hdcp_srm_check_device_ids_req *) + hdcp1_srmhandle->sbuf; + recv_id_req->commandid = HDCP_SRM_CMD_CHECK_DEVICE_ID; + recv_id_req->num_device_ids = device_id_cnt; + memcpy(recv_id_req->device_ids, device_ids, + device_id_cnt * sizeof(struct hdcp_srm_device_id_t)); + + for (i = 0; i < device_id_cnt * sizeof(struct hdcp_srm_device_id_t); + i++) { + 
pr_debug("recv_id_req->device_ids[%d] = 0x%x\n", i, + recv_id_req->device_ids[i]); + } + + recv_id_rsp = (struct hdcp_srm_check_device_ids_rsp *) + (hdcp1_srmhandle->sbuf + + QSEECOM_ALIGN(sbuf_len)); + + rc = qseecom_send_command(hdcp1_srmhandle, + recv_id_req, + QSEECOM_ALIGN(sbuf_len), + recv_id_rsp, + QSEECOM_ALIGN(rbuf_len)); + + if (rc < 0) { + pr_err("qseecom cmd failed err=%d\n", rc); + goto end; + } + + rc = recv_id_rsp->retval; + if (rc) { + pr_err("enc cmd failed, rsp=%d\n", recv_id_rsp->retval); + rc = -EINVAL; + goto end; + } + + pr_debug("rsp=%d\n", recv_id_rsp->retval); + pr_debug("commandid=%d\n", recv_id_rsp->commandid); + +end: + return rc; +} + + +static int hdcp_validate_recv_id(struct hdcp_lib_handle *handle) +{ + int rc = 0; + struct hdcp_rcv_id_list_req *recv_id_req; + struct hdcp_rcv_id_list_rsp *recv_id_rsp; + + if (!handle || !handle->qseecom_handle || + !handle->qseecom_handle->sbuf) { + pr_err("invalid handle\n"); + return -EINVAL; + } + + /* validate the receiver ID list against the new SRM blob */ + recv_id_req = (struct hdcp_rcv_id_list_req *) + handle->qseecom_handle->sbuf; + recv_id_req->commandid = HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST; + recv_id_req->ctxHandle = handle->tz_ctxhandle; + + recv_id_rsp = (struct hdcp_rcv_id_list_rsp *) + (handle->qseecom_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req))); + + rc = qseecom_send_command(handle->qseecom_handle, + recv_id_req, + QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_req)), + recv_id_rsp, + QSEECOM_ALIGN(sizeof(struct hdcp_rcv_id_list_rsp))); + + + if ((rc < 0) || (recv_id_rsp->status != HDCP_SUCCESS) || + (recv_id_rsp->commandid != + HDCP_TXMTR_VALIDATE_RECEIVER_ID_LIST)) { + pr_err("qseecom cmd failed with err = %d status = %d\n", + rc, recv_id_rsp->status); + rc = -EINVAL; + goto exit; + } + +exit: + return rc; +} + +int hdcp1_set_enc(bool enable) +{ + int rc = 0; + struct hdcp1_set_enc_req *set_enc_req; + struct hdcp1_set_enc_rsp *set_enc_rsp; + struct qseecom_handle *hdcp1_qsee_handle; + + mutex_lock(&hdcp1_ta_cmd_lock); + + if (!hdcp1_supported || !hdcp1_handle) { + rc = -EINVAL; + goto end; + } + + hdcp1_qsee_handle = hdcp1_handle->qsee_handle; + + if (!hdcp1_qsee_handle) + return -EINVAL; + + if (hdcp1_enc_enabled == enable) { + pr_info("already %s\n", enable ? "enabled" : "disabled"); + goto end; + } + + /* set keys and request aksv */ + set_enc_req = (struct hdcp1_set_enc_req *)hdcp1_qsee_handle->sbuf; + set_enc_req->commandid = HDCP1_SET_ENC_MESSAGE_ID; + set_enc_req->enable = enable; + set_enc_rsp = (struct hdcp1_set_enc_rsp *)(hdcp1_qsee_handle->sbuf + + QSEECOM_ALIGN(sizeof(struct hdcp1_set_enc_req))); + rc = qseecom_send_command(hdcp1_qsee_handle, set_enc_req, + QSEECOM_ALIGN(sizeof + (struct hdcp1_set_enc_req)), + set_enc_rsp, + QSEECOM_ALIGN(sizeof + (struct hdcp1_set_enc_rsp))); + + if (rc < 0) { + pr_err("qseecom cmd failed err=%d\n", rc); + goto end; + } + + rc = set_enc_rsp->ret; + if (rc) { + pr_err("enc cmd failed, rsp=%d\n", set_enc_rsp->ret); + rc = -EINVAL; + goto end; + } + + hdcp1_enc_enabled = enable; + pr_info("%s success\n", enable ? 
"enable" : "disable"); +end: + mutex_unlock(&hdcp1_ta_cmd_lock); + return rc; +} + +int hdcp_library_register(struct hdcp_register_data *data) +{ + int rc = 0; + struct hdcp_lib_handle *handle = NULL; + + if (!data) { + pr_err("invalid input\n"); + return -EINVAL; + } + + if (!data->txmtr_ops) { + pr_err("invalid input: txmtr context\n"); + return -EINVAL; + } + + if (!data->client_ops) { + pr_err("invalid input: client_ops\n"); + return -EINVAL; + } + + if (!data->hdcp_ctx) { + pr_err("invalid input: hdcp_ctx\n"); + return -EINVAL; + } + + /* populate ops to be called by client */ + data->txmtr_ops->feature_supported = hdcp_lib_client_feature_supported; + data->txmtr_ops->wakeup = hdcp_lib_wakeup_thread; + data->txmtr_ops->update_exec_type = hdcp_lib_update_exec_type; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + if (!handle) { + rc = -ENOMEM; + goto unlock; + } + + handle->client_ctx = data->client_ctx; + handle->client_ops = data->client_ops; + handle->tethered = data->tethered; + handle->hdcp_app_init = NULL; + handle->hdcp_txmtr_init = NULL; + handle->device_type = data->device_type; + + pr_debug("tethered %d\n", handle->tethered); + + atomic_set(&handle->hdcp_off, 0); + + mutex_init(&handle->msg_lock); + mutex_init(&handle->wakeup_mutex); + + init_kthread_worker(&handle->worker); + + init_kthread_work(&handle->wk_init, hdcp_lib_init_work); + init_kthread_work(&handle->wk_msg_sent, hdcp_lib_msg_sent_work); + init_kthread_work(&handle->wk_msg_recvd, hdcp_lib_msg_recvd_work); + init_kthread_work(&handle->wk_timeout, hdcp_lib_manage_timeout_work); + init_kthread_work(&handle->wk_clean, hdcp_lib_cleanup_work); + init_kthread_work(&handle->wk_wait, hdcp_lib_wait_work); + init_kthread_work(&handle->wk_stream, hdcp_lib_query_stream_work); + + init_completion(&handle->poll_wait); + + handle->listener_buf = kzalloc(MAX_TX_MESSAGE_SIZE, GFP_KERNEL); + if (!(handle->listener_buf)) { + rc = -ENOMEM; + goto error; + } + + *data->hdcp_ctx = handle; + /* Cache the client ctx to be used later + * HDCP driver probe happens earlier than + * SDE driver probe hence caching it to + * be used later. 
+ */ + + drv_client_handle = handle; + handle->thread = kthread_run(kthread_worker_fn, + &handle->worker, "hdcp_tz_lib"); + + if (IS_ERR(handle->thread)) { + pr_err("unable to start lib thread\n"); + rc = PTR_ERR(handle->thread); + handle->thread = NULL; + goto error; + } + + return 0; +error: + kzfree(handle->listener_buf); + handle->listener_buf = NULL; + kzfree(handle); + handle = NULL; +unlock: + return rc; +} +EXPORT_SYMBOL(hdcp_library_register); + +void hdcp_library_deregister(void *phdcpcontext) +{ + struct hdcp_lib_handle *handle = phdcpcontext; + + if (!handle) + return; + + kthread_stop(handle->thread); + + kzfree(handle->qseecom_handle); + kzfree(handle->last_msg_recvd_buf); + + mutex_destroy(&handle->wakeup_mutex); + + kzfree(handle->listener_buf); + kzfree(handle); +} +EXPORT_SYMBOL(hdcp_library_deregister); + +void hdcp1_notify_topology(void) +{ + char *envp[4]; + char *a; + char *b; + + a = kzalloc(SZ_16, GFP_KERNEL); + + if (!a) + return; + + b = kzalloc(SZ_16, GFP_KERNEL); + + if (!b) { + kfree(a); + return; + } + + envp[0] = "HDCP_MGR_EVENT=MSG_READY"; + envp[1] = a; + envp[2] = b; + envp[3] = NULL; + + snprintf(envp[1], 16, "%d", (int)DOWN_CHECK_TOPOLOGY); + snprintf(envp[2], 16, "%d", (int)HDCP_V1_TX); + + kobject_uevent_env(&hdcp_drv_mgr->device->kobj, KOBJ_CHANGE, envp); + kfree(a); + kfree(b); +} + +static ssize_t msm_hdcp_1x_sysfs_rda_tp(struct device *dev, +struct device_attribute *attr, char *buf) +{ + ssize_t ret = 0; + + if (!hdcp_drv_mgr) { + pr_err("invalid input\n"); + return -EINVAL; + } + + switch (hdcp_drv_mgr->tp_msgid) { + case DOWN_CHECK_TOPOLOGY: + case DOWN_REQUEST_TOPOLOGY: + buf[MSG_ID_IDX] = hdcp_drv_mgr->tp_msgid; + buf[RET_CODE_IDX] = HDCP_AUTHED; + ret = HEADER_LEN; + + memcpy(buf + HEADER_LEN, &hdcp_drv_mgr->cached_tp, + sizeof(struct HDCP_V2V1_MSG_TOPOLOGY)); + + ret += sizeof(struct HDCP_V2V1_MSG_TOPOLOGY); + + /* clear the flag once data is read back to user space*/ + hdcp_drv_mgr->tp_msgid = -1; + break; + default: + ret = -EINVAL; + } + + return ret; +} /* hdcp_1x_sysfs_rda_tp*/ + +static ssize_t msm_hdcp_1x_sysfs_wta_tp(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int msgid = 0; + ssize_t ret = count; + + if (!hdcp_drv_mgr || !buf) { + pr_err("invalid input\n"); + return -EINVAL; + } + + msgid = buf[0]; + + switch (msgid) { + case DOWN_CHECK_TOPOLOGY: + case DOWN_REQUEST_TOPOLOGY: + hdcp_drv_mgr->tp_msgid = msgid; + break; + /* more cases added here */ + default: + ret = -EINVAL; + } + + return ret; +} /* hdmi_tx_sysfs_wta_hpd */ + +static ssize_t hdmi_hdcp2p2_sysfs_wta_min_level_change(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int rc; + int min_enc_lvl; + struct hdcp_lib_handle *handle; + ssize_t ret = count; + + handle = hdcp_drv_mgr->handle; + + /* + * if the stream type from TZ is type 1 + * ignore subsequent writes to the min_enc_level + * to avoid state transitions which can potentially + * cause visual artifacts because the stream type + * is already at the highest level and for a HDCP 2.2 + * capable sink, we do not need to reduce the stream type + */ + if (handle && + !handle->non_2p2_present) { + pr_info("stream type is 1 returning\n"); + return ret; + } + + rc = kstrtoint(buf, 10, &min_enc_lvl); + if (rc) { + pr_err("%s: kstrtoint failed. 
rc=%d\n", __func__, rc); + return -EINVAL; + } + + if (handle && handle->client_ops->notify_lvl_change) { + handle->client_ops->notify_lvl_change(handle->client_ctx, + min_enc_lvl); + } + + return ret; +} + +static ssize_t hdmi_hdcp_srm_updated(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int rc; + int srm_updated; + struct hdcp_lib_handle *handle; + ssize_t ret = count; + struct hdcp_client_ops *client_ops; + void *hdcp_client_ctx; + + handle = hdcp_drv_mgr->handle; + + rc = kstrtoint(buf, 10, &srm_updated); + if (rc) { + pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc); + return -EINVAL; + } + + if (srm_updated) { + if (handle && handle->qseecom_handle) { + client_ops = handle->client_ops; + hdcp_client_ctx = handle->client_ctx; + if (hdcp_validate_recv_id(handle)) { + pr_debug("HDCP 2.2 SRM check FAILED\n"); + if (handle && client_ops->srm_cb) + client_ops->srm_cb(hdcp_client_ctx); + } else + pr_debug("HDCP 2.2 SRM check PASSED\n"); + } else if (hdcp1_handle && hdcp1_handle->qsee_handle) { + pr_debug("HDCP 1.4 SRM check\n"); + hdcp_client_ctx = hdcp1_handle->client_ctx; + client_ops = hdcp1_handle->client_ops; + if (client_ops->srm_cb) + client_ops->srm_cb(hdcp_client_ctx); + } + } + + return ret; +} + +static DEVICE_ATTR(tp, S_IRUGO | S_IWUSR, msm_hdcp_1x_sysfs_rda_tp, +msm_hdcp_1x_sysfs_wta_tp); + +static DEVICE_ATTR(min_level_change, S_IWUSR, NULL, +hdmi_hdcp2p2_sysfs_wta_min_level_change); + +static DEVICE_ATTR(srm_updated, S_IWUSR, NULL, +hdmi_hdcp_srm_updated); + +void hdcp1_cache_repeater_topology(void *hdcp1_cached_tp) +{ + memcpy((void *)&hdcp_drv_mgr->cached_tp, + hdcp1_cached_tp, + sizeof(struct HDCP_V2V1_MSG_TOPOLOGY)); +} + +static struct attribute *msm_hdcp_fs_attrs[] = { + &dev_attr_tp.attr, + &dev_attr_min_level_change.attr, + &dev_attr_srm_updated.attr, + NULL +}; + +static struct attribute_group msm_hdcp_fs_attr_group = { + .attrs = msm_hdcp_fs_attrs +}; + +static int msm_hdcp_open(struct inode *inode, struct file *file) +{ + return 0; +} + +static int msm_hdcp_close(struct inode *inode, struct file *file) +{ + return 0; +} + +static const struct file_operations msm_hdcp_fops = { + .owner = THIS_MODULE, + .open = msm_hdcp_open, + .release = msm_hdcp_close, +}; + +static const struct of_device_id msm_hdcp_dt_match[] = { + { .compatible = "qcom,msm-hdcp",}, + {} +}; + +MODULE_DEVICE_TABLE(of, msm_hdcp_dt_match); + +static int msm_hdcp_probe(struct platform_device *pdev) +{ + int ret; + + hdcp_drv_mgr = devm_kzalloc(&pdev->dev, sizeof(struct msm_hdcp_mgr), + GFP_KERNEL); + if (!hdcp_drv_mgr) + return -ENOMEM; + + hdcp_drv_mgr->pdev = pdev; + + platform_set_drvdata(pdev, hdcp_drv_mgr); + + ret = alloc_chrdev_region(&hdcp_drv_mgr->dev_num, 0, 1, DRIVER_NAME); + if (ret < 0) { + pr_err("alloc_chrdev_region failed ret = %d\n", ret); + goto error_get_dev_num; + } + + hdcp_drv_mgr->class = class_create(THIS_MODULE, CLASS_NAME); + if (IS_ERR(hdcp_drv_mgr->class)) { + ret = PTR_ERR(hdcp_drv_mgr->class); + pr_err("couldn't create class rc = %d\n", ret); + goto error_class_create; + } + + hdcp_drv_mgr->device = device_create(hdcp_drv_mgr->class, NULL, + hdcp_drv_mgr->dev_num, NULL, DRIVER_NAME); + if (IS_ERR(hdcp_drv_mgr->device)) { + ret = PTR_ERR(hdcp_drv_mgr->device); + pr_err("device_create failed %d\n", ret); + goto error_class_device_create; + } + + cdev_init(&hdcp_drv_mgr->cdev, &msm_hdcp_fops); + ret = cdev_add(&hdcp_drv_mgr->cdev, + MKDEV(MAJOR(hdcp_drv_mgr->dev_num), 0), 1); + if (ret < 0) { + pr_err("cdev_add failed %d\n", 
ret); + goto error_cdev_add; + } + + ret = sysfs_create_group(&hdcp_drv_mgr->device->kobj, + &msm_hdcp_fs_attr_group); + if (ret) + pr_err("unable to register rotator sysfs nodes\n"); + + /* Store the handle in the hdcp drv mgr + * to be used for the sysfs notifications + */ + hdcp_drv_mgr->handle = drv_client_handle; + + return 0; +error_cdev_add: + device_destroy(hdcp_drv_mgr->class, hdcp_drv_mgr->dev_num); +error_class_device_create: + class_destroy(hdcp_drv_mgr->class); +error_class_create: + unregister_chrdev_region(hdcp_drv_mgr->dev_num, 1); +error_get_dev_num: + devm_kfree(&pdev->dev, hdcp_drv_mgr); + hdcp_drv_mgr = NULL; + return ret; +} + +static int msm_hdcp_remove(struct platform_device *pdev) +{ + struct msm_hdcp_mgr *mgr; + + mgr = (struct msm_hdcp_mgr *)platform_get_drvdata(pdev); + if (!mgr) + return -ENODEV; + + sysfs_remove_group(&hdcp_drv_mgr->device->kobj, + &msm_hdcp_fs_attr_group); + cdev_del(&hdcp_drv_mgr->cdev); + device_destroy(hdcp_drv_mgr->class, hdcp_drv_mgr->dev_num); + class_destroy(hdcp_drv_mgr->class); + unregister_chrdev_region(hdcp_drv_mgr->dev_num, 1); + + devm_kfree(&pdev->dev, hdcp_drv_mgr); + hdcp_drv_mgr = NULL; + return 0; +} + +static struct platform_driver msm_hdcp_driver = { + .probe = msm_hdcp_probe, + .remove = msm_hdcp_remove, + .driver = { + .name = "msm_hdcp", + .of_match_table = msm_hdcp_dt_match, + .pm = NULL, + } +}; + +static int __init msm_hdcp_init(void) +{ + return platform_driver_register(&msm_hdcp_driver); +} + +static void __exit msm_hdcp_exit(void) +{ + return platform_driver_unregister(&msm_hdcp_driver); +} + +module_init(msm_hdcp_init); +module_exit(msm_hdcp_exit); + +MODULE_DESCRIPTION("MSM HDCP driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/profiler.c b/drivers/misc/profiler.c new file mode 100644 index 000000000000..92e6a6651b68 --- /dev/null +++ b/drivers/misc/profiler.c @@ -0,0 +1,462 @@ +/* + * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#define pr_fmt(fmt) "PROFILER: %s: " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/debugfs.h> +#include <linux/cdev.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/io.h> +#include <linux/types.h> +#include <soc/qcom/scm.h> +#include <soc/qcom/socinfo.h> +#include <asm/cacheflush.h> +#include <linux/delay.h> +#include <soc/qcom/profiler.h> + +#include <linux/compat.h> + +#define PROFILER_DEV "profiler" + +static struct class *driver_class; +static dev_t profiler_device_no; + +struct profiler_control { + struct device *pdev; + struct cdev cdev; +}; + +static struct profiler_control profiler; + +struct profiler_dev_handle { + bool released; + int abort; + atomic_t ioctl_count; +}; + + +static int profiler_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id, + const void *req_buf, void *resp_buf) +{ + int ret = 0; + uint32_t qseos_cmd_id = 0; + struct scm_desc desc = {0}; + + if (!req_buf || !resp_buf) { + pr_err("Invalid buffer pointer\n"); + return -EINVAL; + } + qseos_cmd_id = *(uint32_t *)req_buf; + + switch (svc_id) { + + case SCM_SVC_BW: + switch (qseos_cmd_id) { + case TZ_BW_SVC_START_ID: + case TZ_BW_SVC_GET_ID: + case TZ_BW_SVC_STOP_ID: + /* Send the command to TZ */ + desc.arginfo = SCM_ARGS(4, SCM_RW, SCM_VAL, + SCM_RW, SCM_VAL); + desc.args[0] = virt_to_phys(& + (((struct tz_bw_svc_buf *) + req_buf)->bwreq)); + desc.args[1] = ((struct tz_bw_svc_buf *) + req_buf)->req_size; + desc.args[2] = virt_to_phys(& + ((struct tz_bw_svc_buf *) + req_buf)->bwresp); + desc.args[3] = sizeof(struct tz_bw_svc_resp); + + ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, + TZ_SVC_BW_PROF_ID), &desc); + break; + default: + pr_err("cmd_id %d is not supported by scm_call2.\n", + qseos_cmd_id); + ret = -EINVAL; + } /*end of switch (qsee_cmd_id) */ + break; + default: + pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n", + svc_id); + ret = -EINVAL; + break; + } /*end of switch svc_id */ + return ret; +} + + +static int profiler_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len) +{ + if (!is_scm_armv8()) + return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len, + resp_buf, resp_len); + else + return profiler_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf); +} + +static int bw_profiling_command(void *req) +{ + struct tz_bw_svc_resp *bw_resp = NULL; + uint32_t cmd_id = 0; + int ret; + + cmd_id = *(uint32_t *)req; + bw_resp = &((struct tz_bw_svc_buf *)req)->bwresp; + /* Flush buffers from cache to memory. */ + dmac_flush_range(req, req + + PAGE_ALIGN(sizeof(union tz_bw_svc_req))); + dmac_flush_range((void *)bw_resp, ((void *)bw_resp) + + sizeof(struct tz_bw_svc_resp)); + ret = profiler_scm_call(SCM_SVC_BW, TZ_SVC_BW_PROF_ID, req, + sizeof(struct tz_bw_svc_buf), + bw_resp, sizeof(struct tz_bw_svc_resp)); + if (ret) { + pr_err("profiler_scm_call failed with err: %d\n", ret); + return -EINVAL; + } + /* Invalidate cache. 
*/ + dmac_inv_range((void *)bw_resp, ((void *)bw_resp) + + sizeof(struct tz_bw_svc_resp)); + /* Verify cmd id and Check that request succeeded.*/ + if ((bw_resp->status != E_BW_SUCCESS) || + (cmd_id != bw_resp->cmd_id)) { + ret = -1; + pr_err("Status: %d,Cmd: %d\n", + bw_resp->status, + bw_resp->cmd_id); + } + return ret; +} + +static int bw_profiling_start(struct tz_bw_svc_buf *bwbuf) +{ + struct tz_bw_svc_start_req *bwstartreq = NULL; + + bwstartreq = (struct tz_bw_svc_start_req *) &bwbuf->bwreq; + /* Populate request data */ + bwstartreq->cmd_id = TZ_BW_SVC_START_ID; + bwstartreq->version = TZ_BW_SVC_VERSION; + bwbuf->req_size = sizeof(struct tz_bw_svc_start_req); + return bw_profiling_command(bwbuf); +} + +static int bw_profiling_get(void __user *argp, struct tz_bw_svc_buf *bwbuf) +{ + struct tz_bw_svc_get_req *bwgetreq = NULL; + int ret; + char *buf = NULL; + const int numberofregs = 3; + struct profiler_bw_cntrs_req cnt_buf; + + memset(&cnt_buf, 0, sizeof(cnt_buf)); + bwgetreq = (struct tz_bw_svc_get_req *) &bwbuf->bwreq; + /* Allocate memory for get buffer */ + buf = kzalloc(PAGE_ALIGN(numberofregs * sizeof(uint32_t)), GFP_KERNEL); + if (buf == NULL) { + ret = -ENOMEM; + pr_err(" Failed to allocate memory\n"); + return ret; + } + /* Populate request data */ + bwgetreq->cmd_id = TZ_BW_SVC_GET_ID; + bwgetreq->buf_ptr = (uint64_t) virt_to_phys(buf); + bwgetreq->buf_size = numberofregs * sizeof(uint32_t); + bwbuf->req_size = sizeof(struct tz_bw_svc_get_req); + dmac_flush_range(buf, ((void *)buf) + PAGE_ALIGN(bwgetreq->buf_size)); + ret = bw_profiling_command(bwbuf); + if (ret) { + pr_err("bw_profiling_command failed\n"); + return ret; + } + dmac_inv_range(buf, ((void *)buf) + PAGE_ALIGN(bwgetreq->buf_size)); + cnt_buf.total = *(uint32_t *) (buf + 0 * sizeof(uint32_t)); + cnt_buf.cpu = *(uint32_t *) (buf + 1 * sizeof(uint32_t)); + cnt_buf.gpu = *(uint32_t *) (buf + 2 * sizeof(uint32_t)); + if (copy_to_user(argp, &cnt_buf, sizeof(struct profiler_bw_cntrs_req))) + pr_err("copy_to_user failed\n"); + /* Free memory for response */ + if (buf != NULL) { + kfree(buf); + buf = NULL; + } + return ret; +} + +static int bw_profiling_stop(struct tz_bw_svc_buf *bwbuf) +{ + struct tz_bw_svc_stop_req *bwstopreq = NULL; + + bwstopreq = (struct tz_bw_svc_stop_req *) &bwbuf->bwreq; + /* Populate request data */ + bwstopreq->cmd_id = TZ_BW_SVC_STOP_ID; + bwbuf->req_size = sizeof(struct tz_bw_svc_stop_req); + return bw_profiling_command(bwbuf); +} + + +static int profiler_get_bw_info(void __user *argp) +{ + int ret = 0; + struct tz_bw_svc_buf *bwbuf = NULL; + struct profiler_bw_cntrs_req cnt_buf; + + ret = copy_from_user(&cnt_buf, argp, + sizeof(struct profiler_bw_cntrs_req)); + if (ret) + return ret; + /* Allocate memory for request */ + bwbuf = kzalloc(PAGE_ALIGN(sizeof(struct tz_bw_svc_buf)), GFP_KERNEL); + if (bwbuf == NULL) + return -ENOMEM; + switch (cnt_buf.cmd) { + case TZ_BW_SVC_START_ID: + ret = bw_profiling_start(bwbuf); + if (ret) + pr_err("bw_profiling_start Failed with ret: %d\n", ret); + break; + case TZ_BW_SVC_GET_ID: + ret = bw_profiling_get(argp, bwbuf); + if (ret) + pr_err("bw_profiling_get Failed with ret: %d\n", ret); + break; + case TZ_BW_SVC_STOP_ID: + ret = bw_profiling_stop(bwbuf); + if (ret) + pr_err("bw_profiling_stop Failed with ret: %d\n", ret); + break; + default: + pr_err("Invalid IOCTL: 0x%x\n", cnt_buf.cmd); + ret = -EINVAL; + } + /* Free memory for command */ + if (bwbuf != NULL) { + kfree(bwbuf); + bwbuf = NULL; + } + return ret; +} + +static int profiler_open(struct inode 
*inode, struct file *file) +{ + int ret = 0; + struct profiler_dev_handle *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + file->private_data = data; + data->abort = 0; + data->released = false; + atomic_set(&data->ioctl_count, 0); + return ret; +} + +static int compat_get_profiler_bw_info( + struct compat_profiler_bw_cntrs_req __user *data32, + struct profiler_bw_cntrs_req __user *data) +{ + compat_uint_t total; + compat_uint_t cpu; + compat_uint_t gpu; + compat_uint_t cmd; + int err; + + err = get_user(total, &data32->total); + err |= put_user(total, &data->total); + err |= get_user(gpu, &data32->gpu); + err |= put_user(gpu, &data->gpu); + err |= get_user(cpu, &data32->cpu); + err |= put_user(cpu, &data->cpu); + err |= get_user(cmd, &data32->cmd); + err |= put_user(cmd, &data->cmd); + return err; +} + +static int compat_put_profiler_bw_info( + struct compat_profiler_bw_cntrs_req __user *data32, + struct profiler_bw_cntrs_req __user *data) +{ + compat_uint_t total; + compat_uint_t cpu; + compat_uint_t gpu; + compat_uint_t cmd; + int err; + + err = get_user(total, &data->total); + err |= put_user(total, &data32->total); + err |= get_user(gpu, &data->gpu); + err |= put_user(gpu, &data32->gpu); + err |= get_user(cpu, &data->cpu); + err |= put_user(cpu, &data32->cpu); + err |= get_user(cmd, &data->cmd); + err |= put_user(cmd, &data32->cmd); + return err; +} + +static unsigned int convert_cmd(unsigned int cmd) +{ + switch (cmd) { + case COMPAT_PROFILER_IOCTL_GET_BW_INFO: + return PROFILER_IOCTL_GET_BW_INFO; + + default: + return cmd; + } +} + + +long profiler_ioctl(struct file *file, unsigned cmd, unsigned long arg) +{ + int ret = 0; + struct profiler_dev_handle *data = file->private_data; + void __user *argp = (void __user *) arg; + + if (!data) { + pr_err("Invalid/uninitialized device handle\n"); + return -EINVAL; + } + + if (data->abort) { + pr_err("Aborting profiler driver\n"); + return -ENODEV; + } + + switch (cmd) { + case PROFILER_IOCTL_GET_BW_INFO: + atomic_inc(&data->ioctl_count); + ret = profiler_get_bw_info(argp); + if (ret) + pr_err("failed get system bandwidth info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + default: + pr_err("Invalid IOCTL: 0x%x\n", cmd); + return -EINVAL; + } + return ret; +} + +long compat_profiler_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + long ret; + + switch (cmd) { + case COMPAT_PROFILER_IOCTL_GET_BW_INFO:{ + struct compat_profiler_bw_cntrs_req __user *data32; + struct profiler_bw_cntrs_req __user *data; + int err; + + data32 = compat_ptr(arg); + data = compat_alloc_user_space(sizeof(*data)); + if (data == NULL) + return -EFAULT; + err = compat_get_profiler_bw_info(data32, data); + if (err) + return err; + ret = profiler_ioctl(file, convert_cmd(cmd), + (unsigned long)data); + err = compat_put_profiler_bw_info(data32, data); + return ret ? 
ret : err; + } + default: + return -ENOIOCTLCMD; + } + return 0; +} + + +static int profiler_release(struct inode *inode, struct file *file) +{ + pr_info("profiler release\n"); + return 0; +} + +static const struct file_operations profiler_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = profiler_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_profiler_ioctl, +#endif + .open = profiler_open, + .release = profiler_release +}; + +static int profiler_init(void) +{ + int rc; + struct device *class_dev; + + rc = alloc_chrdev_region(&profiler_device_no, 0, 1, PROFILER_DEV); + if (rc < 0) { + pr_err("alloc_chrdev_region failed %d\n", rc); + return rc; + } + + driver_class = class_create(THIS_MODULE, PROFILER_DEV); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + pr_err("class_create failed %d\n", rc); + goto exit_unreg_chrdev_region; + } + + class_dev = device_create(driver_class, NULL, profiler_device_no, NULL, + PROFILER_DEV); + if (IS_ERR(class_dev)) { + pr_err("class_device_create failed %d\n", rc); + rc = -ENOMEM; + goto exit_destroy_class; + } + + cdev_init(&profiler.cdev, &profiler_fops); + profiler.cdev.owner = THIS_MODULE; + + rc = cdev_add(&profiler.cdev, MKDEV(MAJOR(profiler_device_no), 0), 1); + if (rc < 0) { + pr_err("%s: cdev_add failed %d\n", __func__, rc); + goto exit_destroy_device; + } + + profiler.pdev = class_dev; + return 0; + +exit_destroy_device: + device_destroy(driver_class, profiler_device_no); +exit_destroy_class: + class_destroy(driver_class); +exit_unreg_chrdev_region: + unregister_chrdev_region(profiler_device_no, 1); + return rc; +} + +static void profiler_exit(void) +{ + pr_info("Exiting from profiler\n"); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. trustzone Communicator"); + +module_init(profiler_init); +module_exit(profiler_exit); diff --git a/drivers/misc/qcom/Kconfig b/drivers/misc/qcom/Kconfig new file mode 100644 index 000000000000..9c73960f01ff --- /dev/null +++ b/drivers/misc/qcom/Kconfig @@ -0,0 +1,20 @@ +config MSM_QDSP6V2_CODECS + bool "Audio QDSP6V2 APR support" + depends on MSM_SMD + select SND_SOC_QDSP6V2 + help + Enable Audio codecs with APR IPC protocol support between + application processor and QDSP6 for B-family. APR is + used by audio driver to configure QDSP6's + ASM, ADM and AFE. + +config MSM_ULTRASOUND + bool "QDSP6V2 HW Ultrasound support" + select SND_SOC_QDSP6V2 + help + Enable HW Ultrasound support in QDSP6V2. + QDSP6V2 can support HW encoder & decoder and + ultrasound processing. It will enable + ultrasound data paths between + HW and services, calculating input events + upon the ultrasound data. 
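
A usage note on the profiler character device added above: profiler_ioctl() exposes a single command, PROFILER_IOCTL_GET_BW_INFO, and the start/get/stop selection travels inside the cmd field of struct profiler_bw_cntrs_req, which profiler_get_bw_info() then maps to TZ_BW_SVC_START_ID, TZ_BW_SVC_GET_ID or TZ_BW_SVC_STOP_ID. The userspace sketch below is illustrative only; it assumes a udev-created /dev/profiler node (the class and device are registered under PROFILER_DEV, "profiler") and a userspace copy of the ioctl, struct and TZ_BW_SVC_*_ID definitions from the driver's header, none of which are shipped in this diff.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "profiler.h"	/* assumed userspace copy of the driver's ioctl/struct definitions */

static int bw_cmd(int fd, unsigned int cmd, struct profiler_bw_cntrs_req *cnt)
{
	cnt->cmd = cmd;		/* START/GET/STOP selector read by profiler_get_bw_info() */
	return ioctl(fd, PROFILER_IOCTL_GET_BW_INFO, cnt);
}

int main(void)
{
	struct profiler_bw_cntrs_req cnt = { 0 };
	int fd = open("/dev/profiler", O_RDWR);		/* node name assumed from PROFILER_DEV */

	if (fd < 0)
		return 1;
	if (bw_cmd(fd, TZ_BW_SVC_START_ID, &cnt) == 0) {	/* arm the TZ counters */
		sleep(1);					/* measurement window */
		if (bw_cmd(fd, TZ_BW_SVC_GET_ID, &cnt) == 0)	/* kernel fills total/cpu/gpu */
			printf("total=%u cpu=%u gpu=%u\n",
			       cnt.total, cnt.cpu, cnt.gpu);
		bw_cmd(fd, TZ_BW_SVC_STOP_ID, &cnt);		/* tear the session down */
	}
	close(fd);
	return 0;
}

A 32-bit caller takes the compat_profiler_ioctl() path instead, which performs the same field-for-field conversion shown in compat_get_profiler_bw_info() and compat_put_profiler_bw_info() before and after the underlying call.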
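
Likewise, the msm_hdcp sysfs nodes added earlier in this patch (tp, min_level_change and srm_updated) are the effective userspace control surface in that hunk; the character device's fops only implement open and release. min_level_change and srm_updated are write-only (S_IWUSR) and the store handlers parse the written string as an integer (srm_updated explicitly via kstrtoint(buf, 10, ...)) before invoking the client's notify_lvl_change() or srm_cb() callback. The sketch below is hypothetical: the real sysfs directory is /sys/class/<CLASS_NAME>/<DRIVER_NAME>, and both macros are defined in a part of msm_hdcp.c that lies outside this hunk.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Placeholder path; substitute the CLASS_NAME/DRIVER_NAME values from msm_hdcp.c. */
#define MSM_HDCP_SYSFS_DIR	"/sys/class/CLASS_NAME/DRIVER_NAME"

static int hdcp_set_min_enc_level(int level)
{
	char val[12];
	int fd, len, rc;

	fd = open(MSM_HDCP_SYSFS_DIR "/min_level_change", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(val, sizeof(val), "%d", level);		/* decimal text, as the handler expects */
	rc = (write(fd, val, len) == (ssize_t)len) ? 0 : -1;	/* handler forwards level via notify_lvl_change() */
	close(fd);
	return rc;
}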
diff --git a/drivers/misc/qcom/Makefile b/drivers/misc/qcom/Makefile new file mode 100644 index 000000000000..120bdddcbc84 --- /dev/null +++ b/drivers/misc/qcom/Makefile @@ -0,0 +1 @@ +obj-y += qdsp6v2/ diff --git a/drivers/misc/qcom/qdsp6v2/Makefile b/drivers/misc/qcom/qdsp6v2/Makefile new file mode 100644 index 000000000000..90a123adbb7f --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/Makefile @@ -0,0 +1,6 @@ +obj-$(CONFIG_MSM_QDSP6V2_CODECS) += aac_in.o qcelp_in.o evrc_in.o amrnb_in.o g711mlaw_in.o g711alaw_in.o audio_utils.o +obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_wma.o audio_wmapro.o audio_aac.o audio_multi_aac.o audio_alac.o audio_ape.o audio_utils_aio.o +obj-$(CONFIG_MSM_QDSP6V2_CODECS) += q6audio_v2.o q6audio_v2_aio.o +obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_g711mlaw.o audio_g711alaw.o +obj-$(CONFIG_MSM_QDSP6V2_CODECS) += audio_mp3.o audio_amrnb.o audio_amrwb.o audio_amrwbplus.o audio_evrc.o audio_qcelp.o amrwb_in.o audio_hwacc_effects.o +obj-$(CONFIG_MSM_ULTRASOUND) += ultrasound/ diff --git a/drivers/misc/qcom/qdsp6v2/aac_in.c b/drivers/misc/qcom/qdsp6v2/aac_in.c new file mode 100644 index 000000000000..ef963451a3f9 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/aac_in.c @@ -0,0 +1,709 @@ +/* + * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/msm_audio_aac.h> +#include <linux/compat.h> +#include <asm/atomic.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 5 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((1536+sizeof(struct meta_out_dsp)) * 5)) + +#define AAC_FORMAT_ADTS 65535 + +#define MAX_SAMPLE_RATE_384K 384000 + +static long aac_in_ioctl_shared(struct file *file, unsigned int cmd, void *arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_aac_enc_config *enc_cfg; + struct msm_audio_aac_config *aac_config; + uint32_t aac_mode = AAC_ENC_MODE_AAC_LC; + + enc_cfg = audio->enc_cfg; + aac_config = audio->codec_cfg; + /* ENCODE CFG (after new set of API's are published )bharath*/ + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + + if (audio->opened) { + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + } else { + if (audio->feedback == NON_TUNNEL_MODE) { + pr_debug("%s: starting in non_tunnel mode", + __func__); + rc = q6asm_open_read_write(audio->ac, + FORMAT_MPEG4_AAC, FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:open read write failed\n", + __func__); + break; + } + } + if (audio->feedback == 
TUNNEL_MODE) { + pr_debug("%s: starting in tunnel mode", + __func__); + rc = q6asm_open_read(audio->ac, + FORMAT_MPEG4_AAC); + + if (rc < 0) { + pr_err("%s:open read failed\n", + __func__); + break; + } + } + audio->stopped = 0; + } + + pr_debug("%s:sbr_ps_flag = %d, sbr_flag = %d\n", __func__, + aac_config->sbr_ps_on_flag, aac_config->sbr_on_flag); + if (aac_config->sbr_ps_on_flag) + aac_mode = AAC_ENC_MODE_EAAC_P; + else if (aac_config->sbr_on_flag) + aac_mode = AAC_ENC_MODE_AAC_P; + else + aac_mode = AAC_ENC_MODE_AAC_LC; + + rc = q6asm_enc_cfg_blk_aac(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->sample_rate, + enc_cfg->channels, + enc_cfg->bit_rate, + aac_mode, + enc_cfg->stream_format); + if (rc < 0) { + pr_err("%s:session id %d: cmd media format block" + "failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("%s:session id %d: media format block" + "failed\n", __func__, audio->ac->session); + break; + } + } + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure" + "failed rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: Rxed AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed" + "rc=%d\n", __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_GET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config *cfg; + struct msm_audio_aac_enc_config *enc_cfg; + + cfg = (struct msm_audio_aac_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer for %s\n", + __func__, "AUDIO_GET_AAC_CONFIG"); + rc = -EINVAL; + break; + } + memset(cfg, 0, sizeof(*cfg)); + enc_cfg = audio->enc_cfg; + if (enc_cfg->channels == CH_MODE_MONO) + cfg->channels = 1; + else + cfg->channels = 2; + + cfg->sample_rate = enc_cfg->sample_rate; + cfg->bit_rate = enc_cfg->bit_rate; + switch (enc_cfg->stream_format) { + case 0x00: + cfg->stream_format = AUDIO_AAC_FORMAT_ADTS; + break; + case 0x01: + cfg->stream_format = AUDIO_AAC_FORMAT_LOAS; + break; + case 0x02: + cfg->stream_format = AUDIO_AAC_FORMAT_ADIF; + break; + default: + case 0x03: + cfg->stream_format = AUDIO_AAC_FORMAT_RAW; + } + pr_debug("%s:session id %d: Get-aac-cfg: format=%d sr=%d" + "bitrate=%d\n", __func__, audio->ac->session, + cfg->stream_format, cfg->sample_rate, cfg->bit_rate); + break; + } + case AUDIO_SET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config *cfg; + struct msm_audio_aac_enc_config *enc_cfg; + uint32_t min_bitrate, max_bitrate; + + cfg = (struct msm_audio_aac_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer for %s\n", + "AUDIO_SET_AAC_ENC_CONFIG", __func__); + rc = -EINVAL; + break; + } + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: Set-aac-cfg: stream=%d\n", __func__, + audio->ac->session, cfg->stream_format); + + switch (cfg->stream_format) { + case AUDIO_AAC_FORMAT_ADTS: + enc_cfg->stream_format = 0x00; + break; + case AUDIO_AAC_FORMAT_LOAS: + enc_cfg->stream_format = 0x01; + break; + case AUDIO_AAC_FORMAT_ADIF: + enc_cfg->stream_format = 
0x02; + break; + case AUDIO_AAC_FORMAT_RAW: + enc_cfg->stream_format = 0x03; + break; + default: + pr_err("%s:session id %d: unsupported AAC format %d\n", + __func__, audio->ac->session, + cfg->stream_format); + rc = -EINVAL; + break; + } + + if (cfg->channels == 1) { + cfg->channels = CH_MODE_MONO; + } else if (cfg->channels == 2) { + cfg->channels = CH_MODE_STEREO; + } else { + rc = -EINVAL; + break; + } + + if (cfg->sample_rate > MAX_SAMPLE_RATE_384K) { + pr_err("%s: ERROR: invalid sample rate = %u", + __func__, cfg->sample_rate); + rc = -EINVAL; + break; + } + + min_bitrate = ((cfg->sample_rate)*(cfg->channels))/2; + /* This calculation should be based on AAC mode. But we cannot + * get AAC mode in this setconfig. min_bitrate's logical max + * value is 24000. So if min_bitrate is higher than 24000, + * choose 24000. + */ + if (min_bitrate > 24000) + min_bitrate = 24000; + max_bitrate = 6*(cfg->sample_rate)*(cfg->channels); + if (max_bitrate > 192000) + max_bitrate = 192000; + if ((cfg->bit_rate < min_bitrate) || + (cfg->bit_rate > max_bitrate)) { + pr_err("%s: bitrate permissible: max=%d, min=%d\n", + __func__, max_bitrate, min_bitrate); + pr_err("%s: ERROR in setting bitrate = %d\n", + __func__, cfg->bit_rate); + rc = -EINVAL; + break; + } + enc_cfg->sample_rate = cfg->sample_rate; + enc_cfg->channels = cfg->channels; + enc_cfg->bit_rate = cfg->bit_rate; + pr_debug("%s:session id %d: Set-aac-cfg:SR= 0x%x ch=0x%x" + "bitrate=0x%x, format(adts/raw) = %d\n", + __func__, audio->ac->session, enc_cfg->sample_rate, + enc_cfg->channels, enc_cfg->bit_rate, + enc_cfg->stream_format); + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config *aac_cfg; + struct msm_audio_aac_config *audio_aac_cfg; + struct msm_audio_aac_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + audio_aac_cfg = audio->codec_cfg; + aac_cfg = (struct msm_audio_aac_config *)arg; + + if (aac_cfg == NULL) { + pr_err("%s: NULL config pointer %s\n", + __func__, "AUDIO_SET_AAC_CONFIG"); + rc = -EINVAL; + break; + } + pr_debug("%s:session id %d: AUDIO_SET_AAC_CONFIG: sbr_flag = %d sbr_ps_flag = %d\n", + __func__, audio->ac->session, aac_cfg->sbr_on_flag, + aac_cfg->sbr_ps_on_flag); + audio_aac_cfg->sbr_on_flag = aac_cfg->sbr_on_flag; + audio_aac_cfg->sbr_ps_on_flag = aac_cfg->sbr_ps_on_flag; + if ((audio_aac_cfg->sbr_on_flag == 1) || + (audio_aac_cfg->sbr_ps_on_flag == 1)) { + if (enc_cfg->sample_rate < 24000) { + pr_err("%s: ERROR in setting samplerate = %d" + "\n", __func__, enc_cfg->sample_rate); + rc = -EINVAL; + break; + } + } + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +static long aac_in_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = aac_in_ioctl_shared(file, cmd, NULL); + break; + } + case AUDIO_GET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config cfg; + rc = aac_in_ioctl_shared(file, cmd, &cfg); + if (rc) { + pr_err("%s:AUDIO_GET_AAC_ENC_CONFIG failed. 
rc=%d\n", + __func__, rc); + break; + } + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_ENC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_AAC_ENC_CONFIG: { + struct msm_audio_aac_enc_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_ENC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = aac_in_ioctl_shared(file, cmd, &cfg); + if (rc) + pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG failed. rc=%d\n", + __func__, rc); + break; + } + case AUDIO_GET_AAC_CONFIG: { + if (copy_to_user((void *)arg, &audio->codec_cfg, + sizeof(struct msm_audio_aac_config))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config aac_cfg; + if (copy_from_user(&aac_cfg, (void *)arg, + sizeof(struct msm_audio_aac_config))) { + pr_err("%s: copy_to_user for AUDIO_SET_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = aac_in_ioctl_shared(file, cmd, &aac_cfg); + if (rc) + pr_err("%s:AUDIO_SET_AAC_CONFIG failed. rc=%d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd=%d\n", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_aac_enc_config32 { + u32 channels; + u32 sample_rate; + u32 bit_rate; + u32 stream_format; +}; + +struct msm_audio_aac_config32 { + s16 format; + u16 audio_object; + u16 ep_config; /* 0 ~ 3 useful only obj = ERLC */ + u16 aac_section_data_resilience_flag; + u16 aac_scalefactor_data_resilience_flag; + u16 aac_spectral_data_resilience_flag; + u16 sbr_on_flag; + u16 sbr_ps_on_flag; + u16 dual_mono_mode; + u16 channel_configuration; + u16 sample_rate; +}; + +enum { + AUDIO_SET_AAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config32), + AUDIO_GET_AAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config32), + AUDIO_SET_AAC_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_aac_enc_config32), + AUDIO_GET_AAC_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+4), struct msm_audio_aac_enc_config32) +}; + +static long aac_in_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = aac_in_ioctl_shared(file, cmd, NULL); + break; + } + case AUDIO_GET_AAC_ENC_CONFIG_32: { + struct msm_audio_aac_enc_config cfg; + struct msm_audio_aac_enc_config32 cfg_32; + + memset(&cfg_32, 0, sizeof(cfg_32)); + + cmd = AUDIO_GET_AAC_ENC_CONFIG; + rc = aac_in_ioctl_shared(file, cmd, &cfg); + if (rc) { + pr_err("%s:AUDIO_GET_AAC_ENC_CONFIG_32 failed. 
Rc= %d\n", + __func__, rc); + break; + } + cfg_32.channels = cfg.channels; + cfg_32.sample_rate = cfg.sample_rate; + cfg_32.bit_rate = cfg.bit_rate; + cfg_32.stream_format = cfg.stream_format; + if (copy_to_user((void *)arg, &cfg_32, sizeof(cfg_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_AAC_ENC_CONFIG_32: { + struct msm_audio_aac_enc_config cfg; + struct msm_audio_aac_enc_config32 cfg_32; + if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_GET_AAC_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.channels = cfg_32.channels; + cfg.sample_rate = cfg_32.sample_rate; + cfg.bit_rate = cfg_32.bit_rate; + cfg.stream_format = cfg_32.stream_format; + /* The command should be converted from 32 bit to normal + * before the shared ioctl is called as shared ioctl + * can process only normal commands */ + cmd = AUDIO_SET_AAC_ENC_CONFIG; + rc = aac_in_ioctl_shared(file, cmd, &cfg); + if (rc) + pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG_32 failed. rc=%d\n", + __func__, rc); + break; + } + case AUDIO_GET_AAC_CONFIG_32: { + struct msm_audio_aac_config *aac_config; + struct msm_audio_aac_config32 aac_config_32; + + aac_config = (struct msm_audio_aac_config *)audio->codec_cfg; + aac_config_32.format = aac_config->format; + aac_config_32.audio_object = aac_config->audio_object; + aac_config_32.ep_config = aac_config->ep_config; + aac_config_32.aac_section_data_resilience_flag = + aac_config->aac_section_data_resilience_flag; + aac_config_32.aac_scalefactor_data_resilience_flag = + aac_config->aac_scalefactor_data_resilience_flag; + aac_config_32.aac_spectral_data_resilience_flag = + aac_config->aac_spectral_data_resilience_flag; + aac_config_32.sbr_on_flag = aac_config->sbr_on_flag; + aac_config_32.sbr_ps_on_flag = aac_config->sbr_ps_on_flag; + aac_config_32.dual_mono_mode = aac_config->dual_mono_mode; + aac_config_32.channel_configuration = + aac_config->channel_configuration; + aac_config_32.sample_rate = aac_config->sample_rate; + + if (copy_to_user((void *)arg, &aac_config_32, + sizeof(aac_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG_32: { + struct msm_audio_aac_config aac_cfg; + struct msm_audio_aac_config32 aac_cfg_32; + if (copy_from_user(&aac_cfg_32, (void *)arg, + sizeof(aac_cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + aac_cfg.format = aac_cfg_32.format; + aac_cfg.audio_object = aac_cfg_32.audio_object; + aac_cfg.ep_config = aac_cfg_32.ep_config; + aac_cfg.aac_section_data_resilience_flag = + aac_cfg_32.aac_section_data_resilience_flag; + aac_cfg.aac_scalefactor_data_resilience_flag = + aac_cfg_32.aac_scalefactor_data_resilience_flag; + aac_cfg.aac_spectral_data_resilience_flag = + aac_cfg_32.aac_spectral_data_resilience_flag; + aac_cfg.sbr_on_flag = aac_cfg_32.sbr_on_flag; + aac_cfg.sbr_ps_on_flag = aac_cfg_32.sbr_ps_on_flag; + aac_cfg.dual_mono_mode = aac_cfg_32.dual_mono_mode; + aac_cfg.channel_configuration = + aac_cfg_32.channel_configuration; + aac_cfg.sample_rate = aac_cfg_32.sample_rate; + + cmd = AUDIO_SET_AAC_CONFIG; + rc = aac_in_ioctl_shared(file, cmd, &aac_cfg); + if (rc) + pr_err("%s:AUDIO_SET_AAC_CONFIG failed. 
Rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd); + rc = -EINVAL; + } + return rc; +} +#else +#define aac_in_compat_ioctl NULL +#endif + +static int aac_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_aac_enc_config *enc_cfg; + struct msm_audio_aac_config *aac_config; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_err("%s: Could not allocate memory for aac" + "driver\n", __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_aac_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_err("%s:session id %d: Could not allocate memory for aac" + "config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config), + GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_err("%s:session id %d: Could not allocate memory for aac" + "config\n", __func__, audio->ac->session); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + aac_config = audio->codec_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 1536; + audio->max_frames_per_buf = 5; + enc_cfg->sample_rate = 8000; + enc_cfg->channels = 1; + enc_cfg->bit_rate = 16000; + enc_cfg->stream_format = 0x00;/* 0:ADTS, 3:RAW */ + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + aac_config->format = AUDIO_AAC_FORMAT_ADTS; + aac_config->audio_object = AUDIO_AAC_OBJECT_LC; + aac_config->sbr_on_flag = 0; + aac_config->sbr_ps_on_flag = 0; + aac_config->channel_configuration = 1; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s: Could not allocate memory for" + "audio client\n", __func__); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + /* open aac encoder in tunnel mode */ + audio->buf_cfg.frames_per_buf = 0x01; + + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_MPEG4_AAC, + FORMAT_LINEAR_PCM); + + if (rc < 0) { + pr_err("%s:session id %d: NT Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + audio->buf_cfg.meta_info_enable = 0x01; + pr_info("%s:session id %d: NT mode encoder success\n", __func__, + audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_MPEG4_AAC); + + if (rc < 0) { + pr_err("%s:session id %d: Tunnel Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration" + "failed rc=%d\n", 
__func__, + audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + audio->buf_cfg.meta_info_enable = 0x00; + pr_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + audio->opened = 1; + audio->reset_event = false; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = aac_in_compat_ioctl; + audio->enc_ioctl = aac_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = aac_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, + .compat_ioctl = audio_in_compat_ioctl +}; + +struct miscdevice audio_aac_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_aac_in", + .fops = &audio_in_fops, +}; + +static int __init aac_in_init(void) +{ + return misc_register(&audio_aac_in_misc); +} +device_initcall(aac_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/amrnb_in.c b/drivers/misc/qcom/qdsp6v2/amrnb_in.c new file mode 100644 index 000000000000..1bb441bd2ff4 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/amrnb_in.c @@ -0,0 +1,404 @@ +/* Copyright (c) 2010-2012, 2014, 2016 The Linux Foundation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/msm_audio_amrnb.h> +#include <linux/compat.h> +#include <asm/atomic.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((32+sizeof(struct meta_out_dsp)) * 10)) + +static long amrnb_in_ioctl_shared(struct file *file, + unsigned int cmd, void *arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + rc = q6asm_enc_cfg_blk_amrnb(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->band_mode, + enc_cfg->dtx_enable); + + if (rc < 0) { + pr_err("%s:session id %d: cmd amrnb media format block" + "failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_err("%s:session id %d: media format block" + "failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, + audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure failed" + "rc=%d\n", __func__, + audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:AUDIO_STOP\n", __func__); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed" + "rc=%d\n", __func__, + audio->ac->session, rc); + break; + } + break; + } + case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { + struct msm_audio_amrnb_enc_config_v2 *cfg; + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + cfg = (struct msm_audio_amrnb_enc_config_v2 *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer for %s\n", + __func__, + "AUDIO_SET_AMRNB_ENC_CONFIG_V2"); + rc = -EINVAL; + break; + } + + enc_cfg = audio->enc_cfg; + if (cfg->band_mode > 8 || + cfg->band_mode < 1) { + pr_err("%s:session id %d: invalid band mode\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + /* AMR NB encoder accepts values between 0-7 + while openmax provides value between 1-8 + as per spec */ + enc_cfg->band_mode = (cfg->band_mode - 1); + enc_cfg->dtx_enable = (cfg->dtx_enable ? 
1 : 0); + enc_cfg->frame_format = 0; + pr_debug("%s:session id %d: band_mode = 0x%x dtx_enable=0x%x\n", + __func__, audio->ac->session, + enc_cfg->band_mode, enc_cfg->dtx_enable); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +static long amrnb_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = amrnb_in_ioctl_shared(file, cmd, NULL); + break; + } + case AUDIO_GET_AMRNB_ENC_CONFIG_V2: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_amrnb_enc_config_v2))) { + pr_err("%s: copy_to_user for AUDIO_GET_AMRNB_ENC_CONFIG_V2 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_AMRNB_ENC_CONFIG_V2: { + struct msm_audio_amrnb_enc_config_v2 cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(cfg))) { + pr_err("%s: copy_from_user for AUDIO_SET_AMRNB_ENC_CONFIG_V2 failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = amrnb_in_ioctl_shared(file, cmd, &cfg); + if (rc) + pr_err("%s: AUDIO_SET_AMRNB_ENC_CONFIG_V2 failed. rc=%d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd=%d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_amrnb_enc_config_v2_32 { + u32 band_mode; + u32 dtx_enable; + u32 frame_format; +}; + +enum { + AUDIO_GET_AMRNB_ENC_CONFIG_V2_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+2), + struct msm_audio_amrnb_enc_config_v2_32), + AUDIO_SET_AMRNB_ENC_CONFIG_V2_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+3), + struct msm_audio_amrnb_enc_config_v2_32) +}; + +static long amrnb_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = amrnb_in_ioctl_shared(file, cmd, NULL); + break; + } + case AUDIO_GET_AMRNB_ENC_CONFIG_V2_32: { + struct msm_audio_amrnb_enc_config_v2 *amrnb_config; + struct msm_audio_amrnb_enc_config_v2_32 amrnb_config_32; + + memset(&amrnb_config_32, 0, sizeof(amrnb_config_32)); + + amrnb_config = + (struct msm_audio_amrnb_enc_config_v2 *)audio->enc_cfg; + amrnb_config_32.band_mode = amrnb_config->band_mode; + amrnb_config_32.dtx_enable = amrnb_config->dtx_enable; + amrnb_config_32.frame_format = amrnb_config->frame_format; + + if (copy_to_user((void *)arg, &amrnb_config_32, + sizeof(amrnb_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_AMRNB_ENC_CONFIG_V2_32 failed", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_AMRNB_ENC_CONFIG_V2_32: { + struct msm_audio_amrnb_enc_config_v2_32 cfg_32; + if (copy_from_user(&cfg_32, (void *) arg, + sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_AMRNB_ENC_CONFIG_V2_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cmd = AUDIO_SET_AMRNB_ENC_CONFIG_V2; + rc = amrnb_in_ioctl_shared(file, cmd, &cfg_32); + if (rc) + pr_err("%s:AUDIO_SET_AMRNB_ENC_CONFIG_V2 failed rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} +#else +#define amrnb_in_compat_ioctl NULL +#endif + +static int amrnb_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_amrnb_enc_config_v2 *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if 
(audio == NULL) { + pr_err("%s Could not allocate memory for amrnb" + "driver\n", __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrnb_enc_config_v2), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_err("%s:session id %d: Could not allocate memory for aac" + "config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 32; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->band_mode = 7; + enc_cfg->dtx_enable = 0; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s: Could not allocate memory for audio" + "client\n", __func__); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open amrnb encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_AMRNB, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_AMRNB); + if (rc < 0) { + pr_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration" + "failed rc=%d\n", __func__, audio->ac->session, + rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: T mode encoder success\n", + __func__, audio->ac->session); + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = amrnb_in_compat_ioctl; + audio->enc_ioctl = amrnb_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = amrnb_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, + .compat_ioctl = audio_in_compat_ioctl +}; + +struct miscdevice audio_amrnb_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrnb_in", + .fops = &audio_in_fops, +}; + +static 
int __init amrnb_in_init(void) +{ + return misc_register(&audio_amrnb_in_misc); +} + +device_initcall(amrnb_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/amrwb_in.c b/drivers/misc/qcom/qdsp6v2/amrwb_in.c new file mode 100644 index 000000000000..5e9dbca420a7 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/amrwb_in.c @@ -0,0 +1,399 @@ +/* Copyright (c) 2011-2012, 2014, 2016 The Linux Foundation. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/dma-mapping.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/miscdevice.h> +#include <linux/msm_audio_amrwb.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/wait.h> +#include <linux/compat.h> +#include <asm/atomic.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((61+sizeof(struct meta_out_dsp)) * 10)) + +static long amrwb_in_ioctl_shared(struct file *file, + unsigned int cmd, void *arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_amrwb_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + rc = q6asm_enc_cfg_blk_amrwb(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->band_mode, + enc_cfg->dtx_enable); + + if (rc < 0) { + pr_err("%s:session id %d: cmd amrwb media format block" + "failed\n", __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_err("%s:session id %d: media format block" + "failed\n", __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, + audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure failed" + "rc=%d\n", __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:AUDIO_STOP\n", __func__); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed" + "rc=%d\n", __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_SET_AMRWB_ENC_CONFIG: { + struct msm_audio_amrwb_enc_config *cfg; + struct msm_audio_amrwb_enc_config 
*enc_cfg; + enc_cfg = audio->enc_cfg; + cfg = (struct msm_audio_amrwb_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer for %s\n", + __func__, "AUDIO_SET_AMRWB_ENC_CONFIG"); + rc = -EINVAL; + break; + } + + if (cfg->band_mode > 8) { + pr_err("%s:session id %d: invalid band mode\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + /* ToDo: AMR WB encoder accepts values between 0-8 + while openmax provides value between 9-17 + as per spec */ + enc_cfg->band_mode = cfg->band_mode; + enc_cfg->dtx_enable = (cfg->dtx_enable ? 1 : 0); + /* Currently DSP does not support different frameformat */ + enc_cfg->frame_format = 0; + pr_debug("%s:session id %d: band_mode = 0x%x dtx_enable=0x%x\n", + __func__, audio->ac->session, + enc_cfg->band_mode, enc_cfg->dtx_enable); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +static long amrwb_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = amrwb_in_ioctl_shared(file, cmd, NULL); + break; + } + case AUDIO_GET_AMRWB_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_amrwb_enc_config))) + pr_err("%s: copy_to_user for AUDIO_GET_AMRWB_ENC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + case AUDIO_SET_AMRWB_ENC_CONFIG: { + struct msm_audio_amrwb_enc_config cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(cfg))) { + pr_err("%s: copy_from_user for AUDIO_SET_AMRWB_ENC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = amrwb_in_ioctl_shared(file, cmd, &cfg); + if (rc) + pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG failed. 
rc=%d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_amrwb_enc_config_32 { + u32 band_mode; + u32 dtx_enable; + u32 frame_format; +}; + +enum { + AUDIO_GET_AMRWB_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), + struct msm_audio_amrwb_enc_config_32), + AUDIO_SET_AMRWB_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), + struct msm_audio_amrwb_enc_config_32) +}; + +static long amrwb_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = amrwb_in_ioctl_shared(file, cmd, NULL); + break; + } + case AUDIO_GET_AMRWB_ENC_CONFIG_32: { + struct msm_audio_amrwb_enc_config *amrwb_config; + struct msm_audio_amrwb_enc_config_32 amrwb_config_32; + + memset(&amrwb_config_32, 0, sizeof(amrwb_config_32)); + + amrwb_config = + (struct msm_audio_amrwb_enc_config *)audio->enc_cfg; + amrwb_config_32.band_mode = amrwb_config->band_mode; + amrwb_config_32.dtx_enable = amrwb_config->dtx_enable; + amrwb_config_32.frame_format = amrwb_config->frame_format; + + if (copy_to_user((void *)arg, &amrwb_config_32, + sizeof(struct msm_audio_amrwb_enc_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_AMRWB_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_AMRWB_ENC_CONFIG_32: { + struct msm_audio_amrwb_enc_config cfg_32; + if (copy_from_user(&cfg_32, (void *) arg, + sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_AMRWB_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cmd = AUDIO_SET_AMRWB_ENC_CONFIG; + rc = amrwb_in_ioctl_shared(file, cmd, &cfg_32); + if (rc) + pr_err("%s:AUDIO_SET_AAC_ENC_CONFIG failed. 
rc=%d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} +#else +#define amrwb_in_compat_ioctl NULL +#endif + +static int amrwb_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_amrwb_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_err("%s: Could not allocate memory for amrwb driver\n", + __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_amrwb_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_err("%s:session id %d: Could not allocate memory for amrwb" + "config param\n", __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 32; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->band_mode = 8; + enc_cfg->dtx_enable = 0; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 16000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s:audio[%pK]: Could not allocate memory for audio" + "client\n", __func__, audio); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open amrwb encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_AMRWB, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_AMRWB); + if (rc < 0) { + pr_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration" + "failed rc=%d\n", __func__, audio->ac->session, + rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: T mode encoder success\n", + __func__, audio->ac->session); + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = amrwb_in_compat_ioctl; + audio->enc_ioctl = amrwb_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); 
+ kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = amrwb_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, + .compat_ioctl = audio_in_compat_ioctl +}; + +struct miscdevice audio_amrwb_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrwb_in", + .fops = &audio_in_fops, +}; + +static int __init amrwb_in_init(void) +{ + return misc_register(&audio_amrwb_in_misc); +} + +device_initcall(amrwb_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_aac.c b/drivers/misc/qcom/qdsp6v2/audio_aac.c new file mode 100644 index 000000000000..1f02576a0848 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_aac.c @@ -0,0 +1,472 @@ +/* aac audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/msm_audio_aac.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +#define AUDIO_AAC_DUAL_MONO_INVALID -1 +#define PCM_BUFSZ_MIN_AAC ((8*1024) + sizeof(struct dec_meta_out)) + +static struct miscdevice audio_aac_misc; +static struct ws_mgr audio_aac_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_aac_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + switch (cmd) { + case AUDIO_START: { + struct asm_aac_cfg aac_cfg; + struct msm_audio_aac_config *aac_config; + uint32_t sbr_ps = 0x00; + pr_debug("%s: AUDIO_START session_id[%d]\n", __func__, + audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + /* turn on both sbr and ps */ + rc = q6asm_enable_sbrps(audio->ac, sbr_ps); + if (rc < 0) + pr_err("sbr-ps enable failed\n"); + aac_config = (struct msm_audio_aac_config *)audio->codec_cfg; + if (aac_config->sbr_ps_on_flag) + aac_cfg.aot = AAC_ENC_MODE_EAAC_P; + else if (aac_config->sbr_on_flag) + aac_cfg.aot = AAC_ENC_MODE_AAC_P; + else + aac_cfg.aot = AAC_ENC_MODE_AAC_LC; + + switch (aac_config->format) { + case AUDIO_AAC_FORMAT_ADTS: + aac_cfg.format = 0x00; + break; + case AUDIO_AAC_FORMAT_LOAS: + aac_cfg.format = 0x01; + break; + case AUDIO_AAC_FORMAT_ADIF: + aac_cfg.format = 0x02; + break; + default: + case AUDIO_AAC_FORMAT_RAW: + aac_cfg.format = 0x03; + } + aac_cfg.ep_config = aac_config->ep_config; + aac_cfg.section_data_resilience = + aac_config->aac_section_data_resilience_flag; + aac_cfg.scalefactor_data_resilience = + aac_config->aac_scalefactor_data_resilience_flag; + aac_cfg.spectral_data_resilience = + aac_config->aac_spectral_data_resilience_flag; + aac_cfg.ch_cfg = audio->pcm_cfg.channel_count; + if 
(audio->feedback == TUNNEL_MODE) { + aac_cfg.sample_rate = aac_config->sample_rate; + aac_cfg.ch_cfg = aac_config->channel_configuration; + } else { + aac_cfg.sample_rate = audio->pcm_cfg.sample_rate; + aac_cfg.ch_cfg = audio->pcm_cfg.channel_count; + } + + pr_debug("%s:format=%x aot=%d ch=%d sr=%d\n", + __func__, aac_cfg.format, + aac_cfg.aot, aac_cfg.ch_cfg, + aac_cfg.sample_rate); + + /* Configure Media format block */ + rc = q6asm_media_format_block_aac(audio->ac, &aac_cfg); + if (rc < 0) { + pr_err("cmd media format block failed\n"); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + rc = enable_volume_ramp(audio); + if (rc < 0) { + pr_err("%s: Failed to enable volume ramp\n", + __func__); + } + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_info("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config *aac_config; + uint16_t sce_left = 1, sce_right = 2; + + pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__); + aac_config = (struct msm_audio_aac_config *)arg; + if (aac_config == NULL) { + pr_err("%s: Invalid config pointer\n", __func__); + rc = -EINVAL; + break; + } + memcpy(audio->codec_cfg, aac_config, + sizeof(struct msm_audio_aac_config)); + /* PL_PR is 0 only need to check PL_SR */ + if (aac_config->dual_mono_mode > + AUDIO_AAC_DUAL_MONO_PL_SR) { + pr_err("%s:Invalid dual_mono mode =%d\n", __func__, + aac_config->dual_mono_mode); + } else { + /* convert the data from user into sce_left + * and sce_right based on the definitions + */ + pr_debug("%s: modify dual_mono mode =%d\n", __func__, + aac_config->dual_mono_mode); + switch (aac_config->dual_mono_mode) { + case AUDIO_AAC_DUAL_MONO_PL_PR: + sce_left = 1; + sce_right = 1; + break; + case AUDIO_AAC_DUAL_MONO_SL_SR: + sce_left = 2; + sce_right = 2; + break; + case AUDIO_AAC_DUAL_MONO_SL_PR: + sce_left = 2; + sce_right = 1; + break; + case AUDIO_AAC_DUAL_MONO_PL_SR: + default: + sce_left = 1; + sce_right = 2; + break; + } + rc = q6asm_cfg_dual_mono_aac(audio->ac, + sce_left, sce_right); + if (rc < 0) + pr_err("%s:asm cmd dualmono failed rc=%d\n", + __func__, rc); + } + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_AAC_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_aac_config))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config aac_config; + pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__); + if (copy_from_user(&aac_config, (void *)arg, + sizeof(aac_config))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = audio_ioctl_shared(file, cmd, &aac_config); + if (rc) + pr_err("%s:AUDIO_SET_AAC_CONFIG failed. 
Rc= %d\n", + __func__, rc); + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("%s[%pK]:Failed in utils_ioctl: %d\n", + __func__, audio, rc); + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_aac_config32 { + s16 format; + u16 audio_object; + u16 ep_config; /* 0 ~ 3 useful only obj = ERLC */ + u16 aac_section_data_resilience_flag; + u16 aac_scalefactor_data_resilience_flag; + u16 aac_spectral_data_resilience_flag; + u16 sbr_on_flag; + u16 sbr_ps_on_flag; + u16 dual_mono_mode; + u16 channel_configuration; + u16 sample_rate; +}; + +enum { + AUDIO_SET_AAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config32), + AUDIO_GET_AAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_AAC_CONFIG_32: { + struct msm_audio_aac_config *aac_config; + struct msm_audio_aac_config32 aac_config_32; + + aac_config = (struct msm_audio_aac_config *)audio->codec_cfg; + aac_config_32.format = aac_config->format; + aac_config_32.audio_object = aac_config->audio_object; + aac_config_32.ep_config = aac_config->ep_config; + aac_config_32.aac_section_data_resilience_flag = + aac_config->aac_section_data_resilience_flag; + aac_config_32.aac_scalefactor_data_resilience_flag = + aac_config->aac_scalefactor_data_resilience_flag; + aac_config_32.aac_spectral_data_resilience_flag = + aac_config->aac_spectral_data_resilience_flag; + aac_config_32.sbr_on_flag = aac_config->sbr_on_flag; + aac_config_32.sbr_ps_on_flag = aac_config->sbr_ps_on_flag; + aac_config_32.dual_mono_mode = aac_config->dual_mono_mode; + aac_config_32.channel_configuration = + aac_config->channel_configuration; + aac_config_32.sample_rate = aac_config->sample_rate; + + if (copy_to_user((void *)arg, &aac_config_32, + sizeof(aac_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG_32: { + struct msm_audio_aac_config aac_config; + struct msm_audio_aac_config32 aac_config_32; + pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__); + if (copy_from_user(&aac_config_32, (void *)arg, + sizeof(aac_config_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + aac_config.format = aac_config_32.format; + aac_config.audio_object = aac_config_32.audio_object; + aac_config.ep_config = aac_config_32.ep_config; + aac_config.aac_section_data_resilience_flag = + aac_config_32.aac_section_data_resilience_flag; + aac_config.aac_scalefactor_data_resilience_flag = + aac_config_32.aac_scalefactor_data_resilience_flag; + aac_config.aac_spectral_data_resilience_flag = + aac_config_32.aac_spectral_data_resilience_flag; + aac_config.sbr_on_flag = aac_config_32.sbr_on_flag; + aac_config.sbr_ps_on_flag = aac_config_32.sbr_ps_on_flag; + aac_config.dual_mono_mode = aac_config_32.dual_mono_mode; + aac_config.channel_configuration = + aac_config_32.channel_configuration; + aac_config.sample_rate = aac_config_32.sample_rate; + + cmd = AUDIO_SET_AAC_CONFIG; + rc = audio_ioctl_shared(file, cmd, &aac_config); + if (rc) + 
pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n", + __func__, rc); + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("%s[%pK]:Failed in utils_ioctl: %d\n", + __func__, audio, rc); + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + struct msm_audio_aac_config *aac_config = NULL; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_aac_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for aac decode driver\n"); + return -ENOMEM; + } + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config), + GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_err("%s:Could not allocate memory for aac" + "config\n", __func__); + kfree(audio); + return -ENOMEM; + } + aac_config = audio->codec_cfg; + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN_AAC; + audio->miscdevice = &audio_aac_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_aac_ws_mgr; + aac_config->dual_mono_mode = AUDIO_AAC_DUAL_MONO_INVALID; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_MPEG4_AAC); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open AAC decoder, expected frames is always 1 + audio->buf_cfg.frames_per_buf = 0x01;*/ + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_MPEG4_AAC); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_aac_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_aac_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:aacdec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_aac_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_aac_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_aac", + .fops = &audio_aac_fops, +}; + +static int __init audio_aac_init(void) +{ + int ret = 
misc_register(&audio_aac_misc); + + if (ret == 0) + device_init_wakeup(audio_aac_misc.this_device, true); + audio_aac_ws_mgr.ref_cnt = 0; + mutex_init(&audio_aac_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_aac_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_alac.c b/drivers/misc/qcom/qdsp6v2/audio_alac.c new file mode 100644 index 000000000000..f25c8ae47b4c --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_alac.c @@ -0,0 +1,438 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 and +* only version 2 as published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +*/ + +#include <linux/types.h> +#include <linux/msm_audio_alac.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_alac_misc; +static struct ws_mgr audio_alac_ws_mgr; + +static const struct file_operations audio_alac_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; + +static struct dentry *config_debugfs_create_file(const char *name, void *data) +{ + return debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)data, &audio_alac_debug_fops); +} + +static int alac_channel_map(u8 *channel_mapping, uint32_t channels); + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_alac_cfg alac_cfg; + struct msm_audio_alac_config *alac_config; + u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL]; + + memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); + + if (alac_channel_map(channel_mapping, + audio->pcm_cfg.channel_count)) { + pr_err("%s: setting channel map failed %d\n", + __func__, audio->pcm_cfg.channel_count); + } + + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count, + 16, /*bits per sample*/ + false, false, channel_mapping); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + alac_config = (struct msm_audio_alac_config *)audio->codec_cfg; + alac_cfg.frame_length = alac_config->frameLength; + alac_cfg.compatible_version = alac_config->compatVersion; + alac_cfg.bit_depth = alac_config->bitDepth; + alac_cfg.pb = alac_config->pb; + alac_cfg.mb = alac_config->mb; + alac_cfg.kb = alac_config->kb; + alac_cfg.num_channels = alac_config->channelCount; + alac_cfg.max_run = alac_config->maxRun; + alac_cfg.max_frame_bytes = alac_config->maxSize; + alac_cfg.avg_bit_rate = alac_config->averageBitRate; + alac_cfg.sample_rate = alac_config->sampleRate; + alac_cfg.channel_layout_tag = alac_config->channelLayout; + pr_debug("%s: frame_length %d compatible_version %d bit_depth %d pb %d mb %d kb %d num_channels %d max_run %d max_frame_bytes %d avg_bit_rate %d sample_rate %d channel_layout_tag %d\n", + __func__, alac_config->frameLength, + alac_config->compatVersion, + alac_config->bitDepth, alac_config->pb, + alac_config->mb, alac_config->kb, + 
alac_config->channelCount, alac_config->maxRun, + alac_config->maxSize, + alac_config->averageBitRate, + alac_config->sampleRate, + alac_config->channelLayout); + /* Configure Media format block */ + rc = q6asm_media_format_block_alac(audio->ac, &alac_cfg, + audio->ac->stream_id); + if (rc < 0) { + pr_err("cmd media format block failed\n"); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_ALAC_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_alac_config))) { + pr_err("%s:copy_to_user for AUDIO_GET_ALAC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_ALAC_CONFIG: { + if (copy_from_user(audio->codec_cfg, (void *)arg, + sizeof(struct msm_audio_alac_config))) { + pr_err("%s:copy_from_user for AUDIO_SET_ALAC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + default: { + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_alac_config_32 { + u32 frameLength; + u8 compatVersion; + u8 bitDepth; + u8 pb; + u8 mb; + u8 kb; + u8 channelCount; + u16 maxRun; + u32 maxSize; + u32 averageBitRate; + u32 sampleRate; + u32 channelLayout; +}; + +enum { + AUDIO_GET_ALAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_alac_config_32), + AUDIO_SET_ALAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_alac_config_32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_ALAC_CONFIG_32: { + struct msm_audio_alac_config *alac_config; + struct msm_audio_alac_config_32 alac_config_32; + + memset(&alac_config_32, 0, sizeof(alac_config_32)); + + alac_config = (struct msm_audio_alac_config *)audio->codec_cfg; + alac_config_32.frameLength = alac_config->frameLength; + alac_config_32.compatVersion = + alac_config->compatVersion; + alac_config_32.bitDepth = alac_config->bitDepth; + alac_config_32.pb = alac_config->pb; + alac_config_32.mb = alac_config->mb; + alac_config_32.kb = alac_config->kb; + alac_config_32.channelCount = alac_config->channelCount; + alac_config_32.maxRun = alac_config->maxRun; + alac_config_32.maxSize = alac_config->maxSize; + alac_config_32.averageBitRate = alac_config->averageBitRate; + alac_config_32.sampleRate = alac_config->sampleRate; + alac_config_32.channelLayout = alac_config->channelLayout; + + if (copy_to_user((void *)arg, &alac_config_32, + sizeof(alac_config_32))) { + pr_err("%s: copy_to_user for GET_ALAC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_ALAC_CONFIG_32: 
{ + struct msm_audio_alac_config *alac_config; + struct msm_audio_alac_config_32 alac_config_32; + + if (copy_from_user(&alac_config_32, (void *)arg, + sizeof(alac_config_32))) { + pr_err("%s: copy_from_user for SET_ALAC_CONFIG_32 failed\n" + , __func__); + rc = -EFAULT; + break; + } + alac_config = (struct msm_audio_alac_config *)audio->codec_cfg; + alac_config->frameLength = alac_config_32.frameLength; + alac_config->compatVersion = + alac_config_32.compatVersion; + alac_config->bitDepth = alac_config_32.bitDepth; + alac_config->pb = alac_config_32.pb; + alac_config->mb = alac_config_32.mb; + alac_config->kb = alac_config_32.kb; + alac_config->channelCount = alac_config_32.channelCount; + alac_config->maxRun = alac_config_32.maxRun; + alac_config->maxSize = alac_config_32.maxSize; + alac_config->averageBitRate = alac_config_32.averageBitRate; + alac_config->sampleRate = alac_config_32.sampleRate; + alac_config->channelLayout = alac_config_32.channelLayout; + + break; + } + default: { + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_alac_" + 5]; + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (!audio) { + pr_err("Could not allocate memory for alac decode driver\n"); + return -ENOMEM; + } + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_alac_config), + GFP_KERNEL); + if (!audio->codec_cfg) { + pr_err("%s:Could not allocate memory for alac config\n", + __func__); + kfree(audio); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_alac_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_alac_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_ALAC); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open ALAC decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_ALAC); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + + snprintf(name, sizeof(name), "msm_alac_%04x", audio->ac->session); + audio->dentry = config_debugfs_create_file(name, (void *)audio); + + if (IS_ERR_OR_NULL(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); + pr_debug("%s:alacdec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + 
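/*
 * Illustrative sketch only -- not part of the patch.  Minimal userspace
 * setup for tunnel-mode ALAC playback through the misc device registered
 * below as /dev/msm_alac.  A write-only open selects tunnel (T) mode in
 * audio_open() above; AUDIO_SET_ALAC_CONFIG and AUDIO_START are the ioctls
 * handled by audio_ioctl()/audio_ioctl_shared().  The config values are
 * placeholders that a real player would take from the stream's ALAC magic
 * cookie, and the header paths are assumptions for this tree.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_audio.h>		/* AUDIO_START */
#include <linux/msm_audio_alac.h>	/* msm_audio_alac_config, AUDIO_SET_ALAC_CONFIG */

static int start_alac_playback(void)
{
	struct msm_audio_alac_config cfg;
	int fd;

	fd = open("/dev/msm_alac", O_WRONLY);	/* tunnel (T) mode */
	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.frameLength    = 4096;	/* samples per frame (placeholder) */
	cfg.bitDepth       = 16;
	cfg.channelCount   = 2;
	cfg.sampleRate     = 44100;
	cfg.averageBitRate = 0;		/* unknown / VBR */

	if (ioctl(fd, AUDIO_SET_ALAC_CONFIG, &cfg) < 0 ||
	    ioctl(fd, AUDIO_START, 0) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* caller write()s raw ALAC frames on this fd */
}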
return rc; +} + +static int alac_channel_map(u8 *channel_mapping, uint32_t channels) +{ + u8 *lchannel_mapping; + + lchannel_mapping = channel_mapping; + pr_debug("%s: channels passed: %d\n", __func__, channels); + if (channels == 1) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + } else if (channels == 2) { + lchannel_mapping[0] = PCM_CHANNEL_FL; + lchannel_mapping[1] = PCM_CHANNEL_FR; + } else if (channels == 3) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + } else if (channels == 4) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_CS; + } else if (channels == 5) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + } else if (channels == 6) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + lchannel_mapping[5] = PCM_CHANNEL_LFE; + } else if (channels == 7) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + lchannel_mapping[5] = PCM_CHANNEL_CS; + lchannel_mapping[6] = PCM_CHANNEL_LFE; + } else if (channels == 8) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FLC; + lchannel_mapping[2] = PCM_CHANNEL_FRC; + lchannel_mapping[3] = PCM_CHANNEL_FL; + lchannel_mapping[4] = PCM_CHANNEL_FR; + lchannel_mapping[5] = PCM_CHANNEL_LS; + lchannel_mapping[6] = PCM_CHANNEL_RS; + lchannel_mapping[7] = PCM_CHANNEL_LFE; + } else { + pr_err("%s: ERROR.unsupported num_ch = %u\n", + __func__, channels); + return -EINVAL; + } + return 0; +} + +static const struct file_operations audio_alac_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_alac_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_alac", + .fops = &audio_alac_fops, +}; + +static int __init audio_alac_init(void) +{ + int ret = misc_register(&audio_alac_misc); + + if (ret == 0) + device_init_wakeup(audio_alac_misc.this_device, true); + audio_alac_ws_mgr.ref_cnt = 0; + mutex_init(&audio_alac_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_alac_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrnb.c b/drivers/misc/qcom/qdsp6v2/audio_amrnb.c new file mode 100644 index 000000000000..9e4f74bfacd9 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_amrnb.c @@ -0,0 +1,228 @@ +/* amrnb audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/types.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_amrnb_misc; +static struct ws_mgr audio_amrnb_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_amrnb_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + } + return rc; +} + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("%s: pcm output block config failed rc=%d\n", + __func__, rc); + break; + } + } + + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s: Audio Start procedure failed rc=%d\n", + __func__, rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s[%pK]: Calling compat ioctl\n", __func__, audio); + rc = audio->codec_compat_ioctl(file, cmd, arg); + } + return rc; +} + + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_amrnb_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for wma decode driver\n"); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_amrnb_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_amrnb_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_AMRNB); + if (rc < 
0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_AMRNB); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_amrnb_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_amrnb_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:amrnb decoder open success, session_id = %d\n", __func__, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_amrnb_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl, +}; + +static struct miscdevice audio_amrnb_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrnb", + .fops = &audio_amrnb_fops, +}; + +static int __init audio_amrnb_init(void) +{ + int ret = misc_register(&audio_amrnb_misc); + + if (ret == 0) + device_init_wakeup(audio_amrnb_misc.this_device, true); + audio_amrnb_ws_mgr.ref_cnt = 0; + mutex_init(&audio_amrnb_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_amrnb_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrwb.c b/drivers/misc/qcom/qdsp6v2/audio_amrwb.c new file mode 100644 index 000000000000..2403dbbe426b --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_amrwb.c @@ -0,0 +1,232 @@ +/* amrwb audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/compat.h> +#include <linux/types.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_amrwb_misc; +static struct ws_mgr audio_amrwb_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_amrwb_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + } + return rc; +} + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("%s: pcm output block config failed rc=%d\n", + __func__, rc); + break; + } + } + + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s: Audio Start procedure failed rc=%d\n", + __func__, rc); + break; + } + pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s[%pK]: Calling compat ioctl\n", __func__, audio); + rc = audio->codec_compat_ioctl(file, cmd, arg); + } + return rc; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_amrwb_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for aac decode driver\n"); + return -ENOMEM; + } + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_amrwb_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_amrwb_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + 
rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_AMRWB); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_AMRWB); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_amrwb_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_amrwb_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s: AMRWB dec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_amrwb_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl, +}; + +static struct miscdevice audio_amrwb_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrwb", + .fops = &audio_amrwb_fops, +}; + +static int __init audio_amrwb_init(void) +{ + int ret = misc_register(&audio_amrwb_misc); + + if (ret == 0) + device_init_wakeup(audio_amrwb_misc.this_device, true); + audio_amrwb_ws_mgr.ref_cnt = 0; + mutex_init(&audio_amrwb_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_amrwb_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c new file mode 100644 index 000000000000..727a5369c2a9 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_amrwbplus.c @@ -0,0 +1,399 @@ +/* amr-wbplus audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include <linux/msm_audio_amrwbplus.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_amrwbplus_misc; +static struct ws_mgr audio_amrwbplus_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_amrwbplus_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +static void config_debug_fs(struct q6audio_aio *audio) +{ + if (audio != NULL) { + char name[sizeof("msm_amrwbplus_") + 5]; + snprintf(name, sizeof(name), "msm_amrwbplus_%04x", + audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_amrwbplus_debug_fops); + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); + } +} +#else +static void config_debug_fs(struct q6audio_aio *audio) +{ +} +#endif + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct asm_amrwbplus_cfg q6_amrwbplus_cfg; + struct msm_audio_amrwbplus_config_v2 *amrwbplus_drv_config; + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + pr_err("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + amrwbplus_drv_config = + (struct msm_audio_amrwbplus_config_v2 *)audio->codec_cfg; + + q6_amrwbplus_cfg.size_bytes = + amrwbplus_drv_config->size_bytes; + q6_amrwbplus_cfg.version = + amrwbplus_drv_config->version; + q6_amrwbplus_cfg.num_channels = + amrwbplus_drv_config->num_channels; + q6_amrwbplus_cfg.amr_band_mode = + amrwbplus_drv_config->amr_band_mode; + q6_amrwbplus_cfg.amr_dtx_mode = + amrwbplus_drv_config->amr_dtx_mode; + q6_amrwbplus_cfg.amr_frame_fmt = + amrwbplus_drv_config->amr_frame_fmt; + q6_amrwbplus_cfg.amr_lsf_idx = + amrwbplus_drv_config->amr_lsf_idx; + + rc = q6asm_media_format_block_amrwbplus(audio->ac, + &q6_amrwbplus_cfg); + if (rc < 0) { + pr_err("q6asm_media_format_block_amrwb+ failed...\n"); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("%s:AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_AMRWBPLUS_CONFIG_V2: { + if ((audio) && (arg) && (audio->codec_cfg)) { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_amrwbplus_config_v2))) { + rc = -EFAULT; + pr_err("%s: copy_to_user for AUDIO_GET_AMRWBPLUS_CONFIG_V2 failed\n", + __func__); + break; + } + } else { + pr_err("%s: wb+ config v2 invalid parameters\n" + , __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AMRWBPLUS_CONFIG_V2: { + if ((audio) && (arg) && (audio->codec_cfg)) { + if (copy_from_user(audio->codec_cfg, (void *)arg, + 
sizeof(struct msm_audio_amrwbplus_config_v2))) { + rc = -EFAULT; + pr_err("%s: copy_from_user for AUDIO_SET_AMRWBPLUS_CONFIG_V2 failed\n", + __func__); + break; + } + } else { + pr_err("%s: wb+ config invalid parameters\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + break; + } + } + return rc; +} +#ifdef CONFIG_COMPAT +struct msm_audio_amrwbplus_config_v2_32 { + u32 size_bytes; + u32 version; + u32 num_channels; + u32 amr_band_mode; + u32 amr_dtx_mode; + u32 amr_frame_fmt; + u32 amr_lsf_idx; +}; + +enum { + AUDIO_GET_AMRWBPLUS_CONFIG_V2_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+2), + struct msm_audio_amrwbplus_config_v2_32), + AUDIO_SET_AMRWBPLUS_CONFIG_V2_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+3), + struct msm_audio_amrwbplus_config_v2_32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_AMRWBPLUS_CONFIG_V2_32: { + if (audio && arg && (audio->codec_cfg)) { + struct msm_audio_amrwbplus_config_v2 *amrwbplus_config; + struct msm_audio_amrwbplus_config_v2_32 + amrwbplus_config_32; + + memset(&amrwbplus_config_32, 0, + sizeof(amrwbplus_config_32)); + + amrwbplus_config = + (struct msm_audio_amrwbplus_config_v2 *) + audio->codec_cfg; + amrwbplus_config_32.size_bytes = + amrwbplus_config->size_bytes; + amrwbplus_config_32.version = + amrwbplus_config->version; + amrwbplus_config_32.num_channels = + amrwbplus_config->num_channels; + amrwbplus_config_32.amr_band_mode = + amrwbplus_config->amr_band_mode; + amrwbplus_config_32.amr_dtx_mode = + amrwbplus_config->amr_dtx_mode; + amrwbplus_config_32.amr_frame_fmt = + amrwbplus_config->amr_frame_fmt; + amrwbplus_config_32.amr_lsf_idx = + amrwbplus_config->amr_lsf_idx; + + if (copy_to_user((void *)arg, &amrwbplus_config_32, + sizeof(amrwbplus_config_32))) { + rc = -EFAULT; + pr_err("%s: copy_to_user for AUDIO_GET_AMRWBPLUS_CONFIG_V2_32 failed\n" + , __func__); + } + } else { + pr_err("%s: wb+ Get config v2 invalid parameters\n" + , __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_AMRWBPLUS_CONFIG_V2_32: { + if ((audio) && (arg) && (audio->codec_cfg)) { + struct msm_audio_amrwbplus_config_v2 *amrwbplus_config; + struct msm_audio_amrwbplus_config_v2_32 + amrwbplus_config_32; + + if (copy_from_user(&amrwbplus_config_32, (void *)arg, + sizeof(struct msm_audio_amrwbplus_config_v2_32))) { + rc = -EFAULT; + pr_err("%s: copy_from_user for AUDIO_SET_AMRWBPLUS_CONFIG_V2_32 failed\n" + , __func__); + break; + } + amrwbplus_config = + (struct msm_audio_amrwbplus_config_v2 *) + audio->codec_cfg; + amrwbplus_config->size_bytes = + amrwbplus_config_32.size_bytes; + amrwbplus_config->version = + amrwbplus_config_32.version; + amrwbplus_config->num_channels = + amrwbplus_config_32.num_channels; + amrwbplus_config->amr_band_mode = + amrwbplus_config_32.amr_band_mode; + amrwbplus_config->amr_dtx_mode = + amrwbplus_config_32.amr_dtx_mode; + amrwbplus_config->amr_frame_fmt = + amrwbplus_config_32.amr_frame_fmt; + amrwbplus_config->amr_lsf_idx = + amrwbplus_config_32.amr_lsf_idx; + } else { + pr_err("%s: wb+ config invalid parameters\n", + __func__); + rc = -EFAULT; + } + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = 
audio->codec_compat_ioctl(file, cmd, arg); + break; + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("kzalloc failed for amrwb+ decode driver\n"); + return -ENOMEM; + } + audio->codec_cfg = + kzalloc(sizeof(struct msm_audio_amrwbplus_config_v2), GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_err("%s:failed kzalloc for amrwb+ config structure", + __func__); + kfree(audio); + return -ENOMEM; + } + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_amrwbplus_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_amrwbplus_ws_mgr; + + audio->ac = + q6asm_audio_client_alloc((app_cb) q6_audio_cb, (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_AMR_WB_PLUS); + if (rc < 0) { + pr_err("amrwbplus NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_AMR_WB_PLUS); + if (rc < 0) { + pr_err("wb+ T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("audio_amrwbplus Not supported mode\n"); + rc = -EACCES; + goto fail; + } + + config_debug_fs(audio); + pr_debug("%s: AMRWBPLUS dec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_amrwbplus_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_amrwbplus_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_amrwbplus", + .fops = &audio_amrwbplus_fops, +}; + +static int __init audio_amrwbplus_init(void) +{ + int ret = misc_register(&audio_amrwbplus_misc); + + if (ret == 0) + device_init_wakeup(audio_amrwbplus_misc.this_device, true); + audio_amrwbplus_ws_mgr.ref_cnt = 0; + mutex_init(&audio_amrwbplus_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_amrwbplus_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_ape.c b/drivers/misc/qcom/qdsp6v2/audio_ape.c new file mode 100644 index 000000000000..d7d550c40dff --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_ape.c @@ -0,0 +1,361 @@ +/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 and +* only version 2 as published by the Free Software Foundation. 
+* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +*/ + +#include <linux/types.h> +#include <linux/msm_audio_ape.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_ape_misc; +static struct ws_mgr audio_ape_ws_mgr; + +static const struct file_operations audio_ape_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +static struct dentry *config_debugfs_create_file(const char *name, void *data) +{ + return debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)data, &audio_ape_debug_fops); +} + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_ape_cfg ape_cfg; + struct msm_audio_ape_config *ape_config; + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + ape_config = (struct msm_audio_ape_config *)audio->codec_cfg; + ape_cfg.compatible_version = ape_config->compatibleVersion; + ape_cfg.compression_level = ape_config->compressionLevel; + ape_cfg.format_flags = ape_config->formatFlags; + ape_cfg.blocks_per_frame = ape_config->blocksPerFrame; + ape_cfg.final_frame_blocks = ape_config->finalFrameBlocks; + ape_cfg.total_frames = ape_config->totalFrames; + ape_cfg.bits_per_sample = ape_config->bitsPerSample; + ape_cfg.num_channels = ape_config->numChannels; + ape_cfg.sample_rate = ape_config->sampleRate; + ape_cfg.seek_table_present = ape_config->seekTablePresent; + pr_debug("%s: compatibleVersion %d compressionLevel %d formatFlags %d blocksPerFrame %d finalFrameBlocks %d totalFrames %d bitsPerSample %d numChannels %d sampleRate %d seekTablePresent %d\n", + __func__, ape_config->compatibleVersion, + ape_config->compressionLevel, + ape_config->formatFlags, + ape_config->blocksPerFrame, + ape_config->finalFrameBlocks, + ape_config->totalFrames, + ape_config->bitsPerSample, + ape_config->numChannels, + ape_config->sampleRate, + ape_config->seekTablePresent); + /* Configure Media format block */ + rc = q6asm_media_format_block_ape(audio->ac, &ape_cfg, + audio->ac->stream_id); + if (rc < 0) { + pr_err("cmd media format block failed\n"); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_APE_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_ape_config))) { + pr_err("%s:copy_to_user for 
AUDIO_GET_APE_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_APE_CONFIG: { + if (copy_from_user(audio->codec_cfg, (void *)arg, + sizeof(struct msm_audio_ape_config))) { + pr_err("%s:copy_from_user for AUDIO_SET_APE_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_ape_config_32 { + u16 compatibleVersion; + u16 compressionLevel; + u32 formatFlags; + u32 blocksPerFrame; + u32 finalFrameBlocks; + u32 totalFrames; + u16 bitsPerSample; + u16 numChannels; + u32 sampleRate; + u32 seekTablePresent; + +}; + +enum { + AUDIO_GET_APE_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_ape_config_32), + AUDIO_SET_APE_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_ape_config_32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_APE_CONFIG_32: { + struct msm_audio_ape_config *ape_config; + struct msm_audio_ape_config_32 ape_config_32; + + memset(&ape_config_32, 0, sizeof(ape_config_32)); + + ape_config = (struct msm_audio_ape_config *)audio->codec_cfg; + ape_config_32.compatibleVersion = ape_config->compatibleVersion; + ape_config_32.compressionLevel = + ape_config->compressionLevel; + ape_config_32.formatFlags = ape_config->formatFlags; + ape_config_32.blocksPerFrame = ape_config->blocksPerFrame; + ape_config_32.finalFrameBlocks = ape_config->finalFrameBlocks; + ape_config_32.totalFrames = ape_config->totalFrames; + ape_config_32.bitsPerSample = ape_config->bitsPerSample; + ape_config_32.numChannels = ape_config->numChannels; + ape_config_32.sampleRate = ape_config->sampleRate; + ape_config_32.seekTablePresent = ape_config->seekTablePresent; + + if (copy_to_user((void *)arg, &ape_config_32, + sizeof(ape_config_32))) { + pr_err("%s: copy_to_user for GET_APE_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_APE_CONFIG_32: { + struct msm_audio_ape_config *ape_config; + struct msm_audio_ape_config_32 ape_config_32; + + if (copy_from_user(&ape_config_32, (void *)arg, + sizeof(ape_config_32))) { + pr_err("%s: copy_from_user for SET_APE_CONFIG_32 failed\n" + , __func__); + rc = -EFAULT; + break; + } + ape_config = (struct msm_audio_ape_config *)audio->codec_cfg; + ape_config->compatibleVersion = ape_config_32.compatibleVersion; + ape_config->compressionLevel = + ape_config_32.compressionLevel; + ape_config->formatFlags = ape_config_32.formatFlags; + ape_config->blocksPerFrame = ape_config_32.blocksPerFrame; + ape_config->finalFrameBlocks = ape_config_32.finalFrameBlocks; + ape_config->totalFrames = ape_config_32.totalFrames; + ape_config->bitsPerSample = ape_config_32.bitsPerSample; + ape_config->numChannels = ape_config_32.numChannels; + ape_config->sampleRate = ape_config_32.sampleRate; + ape_config->seekTablePresent = ape_config_32.seekTablePresent; + + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + 
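/*
 * Illustrative sketch only -- not part of the patch.  Non-tunnel (NT)
 * decode through the misc device registered below as /dev/msm_ape: an
 * O_RDWR open selects NON_TUNNEL_MODE in audio_open(), so compressed APE
 * frames are written in and decoded PCM is read back.  Because
 * meta_info_enable is set for NT mode, buffers in both directions carry
 * the aio framework's metadata headers (e.g. struct dec_meta_out on the
 * read side), which this sketch glosses over -- it only shows the ioctl
 * ordering.  Header paths are assumptions for this tree.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_audio.h>		/* AUDIO_START */
#include <linux/msm_audio_ape.h>	/* msm_audio_ape_config, AUDIO_SET_APE_CONFIG */

static int ape_nt_open(const struct msm_audio_ape_config *cfg)
{
	int dev;

	dev = open("/dev/msm_ape", O_RDWR);	/* non-tunnel (NT) mode */
	if (dev < 0)
		return -1;
	if (ioctl(dev, AUDIO_SET_APE_CONFIG, cfg) < 0 ||
	    ioctl(dev, AUDIO_START, 0) < 0) {
		close(dev);
		return -1;
	}
	/*
	 * From here the caller write()s APE frames and read()s decoded PCM,
	 * both framed with the aio metadata headers mentioned above.
	 */
	return dev;
}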
} + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_ape_" + 5]; + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (!audio) { + pr_err("Could not allocate memory for ape decode driver\n"); + return -ENOMEM; + } + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_ape_config), + GFP_KERNEL); + if (!audio->codec_cfg) { + pr_err("%s:Could not allocate memory for ape config\n", + __func__); + kfree(audio); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_ape_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_ape_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_APE); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open APE decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_APE); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + + snprintf(name, sizeof(name), "msm_ape_%04x", audio->ac->session); + audio->dentry = config_debugfs_create_file(name, (void *)audio); + + if (IS_ERR_OR_NULL(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); + pr_debug("%s:apedec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_ape_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_ape_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_ape", + .fops = &audio_ape_fops, +}; + +static int __init audio_ape_init(void) +{ + int ret = misc_register(&audio_ape_misc); + + if (ret == 0) + device_init_wakeup(audio_ape_misc.this_device, true); + audio_ape_ws_mgr.ref_cnt = 0; + mutex_init(&audio_ape_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_ape_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_evrc.c b/drivers/misc/qcom/qdsp6v2/audio_evrc.c new file mode 100644 index 000000000000..5a89f4e25a27 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_evrc.c @@ -0,0 +1,186 @@ +/* evrc audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. 
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "audio_utils_aio.h"
+
+static struct miscdevice audio_evrc_misc;
+static struct ws_mgr audio_evrc_ws_mgr;
+
+#ifdef CONFIG_DEBUG_FS
+static const struct file_operations audio_evrc_debug_fops = {
+	.read = audio_aio_debug_read,
+	.open = audio_aio_debug_open,
+};
+#endif
+
+static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct q6audio_aio *audio = file->private_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case AUDIO_START: {
+		pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__,
+			audio, audio->ac->session);
+		if (audio->feedback == NON_TUNNEL_MODE) {
+			/* Configure PCM output block */
+			rc = q6asm_enc_cfg_blk_pcm(audio->ac,
+					audio->pcm_cfg.sample_rate,
+					audio->pcm_cfg.channel_count);
+			if (rc < 0) {
+				pr_err("pcm output block config failed\n");
+				break;
+			}
+		}
+
+		rc = audio_aio_enable(audio);
+		audio->eos_rsp = 0;
+		audio->eos_flag = 0;
+		if (!rc) {
+			audio->enabled = 1;
+		} else {
+			audio->enabled = 0;
+			pr_err("Audio Start procedure failed rc=%d\n", rc);
+			break;
+		}
+		pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__,
+			audio->ac->session,
+			audio->enabled);
+		if (audio->stopped == 1)
+			audio->stopped = 0;
+		break;
+	}
+	default:
+		pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio);
+		rc = audio->codec_ioctl(file, cmd, arg);
+	}
+	return rc;
+}
+
+static int audio_open(struct inode *inode, struct file *file)
+{
+	struct q6audio_aio *audio = NULL;
+	int rc = 0;
+
+#ifdef CONFIG_DEBUG_FS
+	/* 4 bytes represents decoder number, 1 byte for terminate string */
+	char name[sizeof "msm_evrc_" + 5];
+#endif
+	audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL);
+
+	if (audio == NULL) {
+		pr_err("Could not allocate memory for evrc decode driver\n");
+		return -ENOMEM;
+	}
+
+	/* Settings will be reconfigured at AUDIO_SET_CONFIG,
+	 * but an initial configuration is needed here.
+	 */
+	audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN;
+	audio->miscdevice = &audio_evrc_misc;
+	audio->wakelock_voted = false;
+	audio->audio_ws_mgr = &audio_evrc_ws_mgr;
+
+	audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb,
+					     (void *)audio);
+
+	if (!audio->ac) {
+		pr_err("Could not allocate memory for audio client\n");
+		kfree(audio);
+		return -ENOMEM;
+	}
+	rc = audio_aio_open(audio, file);
+	if (rc < 0) {
+		pr_err("%s: audio_aio_open rc=%d\n",
+			__func__, rc);
+		goto fail;
+	}
+
+	/* open in T/NT mode */
+	if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM,
+					   FORMAT_EVRC);
+		if (rc < 0) {
+			pr_err("NT mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = NON_TUNNEL_MODE;
+		audio->buf_cfg.frames_per_buf = 0x01;
+		audio->buf_cfg.meta_info_enable = 0x01;
+	} else if ((file->f_mode & FMODE_WRITE) &&
+			!(file->f_mode & FMODE_READ)) {
+		rc = q6asm_open_write(audio->ac, FORMAT_EVRC);
+		if (rc < 0) {
+			pr_err("T mode Open failed rc=%d\n", rc);
+			rc = -ENODEV;
+			goto fail;
+		}
+		audio->feedback = TUNNEL_MODE;
+		audio->buf_cfg.meta_info_enable = 0x00;
+	} else {
+		pr_err("Not 
supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_evrc_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_evrc_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:dec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_evrc_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, +}; + +static struct miscdevice audio_evrc_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_evrc", + .fops = &audio_evrc_fops, +}; + +static int __init audio_evrc_init(void) +{ + int ret = misc_register(&audio_evrc_misc); + + if (ret == 0) + device_init_wakeup(audio_evrc_misc.this_device, true); + audio_evrc_ws_mgr.ref_cnt = 0; + mutex_init(&audio_evrc_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_evrc_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_g711alaw.c b/drivers/misc/qcom/qdsp6v2/audio_g711alaw.c new file mode 100644 index 000000000000..6f02654d3d4c --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_g711alaw.c @@ -0,0 +1,396 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 and +* only version 2 as published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. 
+* +*/ + +#include <linux/types.h> +#include <linux/msm_audio_g711_dec.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_g711alaw_misc; +static struct ws_mgr audio_g711_ws_mgr; + +static const struct file_operations audio_g711_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; + +static struct dentry *config_debugfs_create_file(const char *name, void *data) +{ + return debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)data, &audio_g711_debug_fops); +} + +static int g711_channel_map(u8 *channel_mapping, uint32_t channels); + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_g711_dec_cfg g711_dec_cfg; + struct msm_audio_g711_dec_config *g711_dec_config; + u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL]; + + memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); + memset(&g711_dec_cfg, 0, sizeof(g711_dec_cfg)); + + if (g711_channel_map(channel_mapping, + audio->pcm_cfg.channel_count)) { + pr_err("%s: setting channel map failed %d\n", + __func__, audio->pcm_cfg.channel_count); + } + + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count, + 16, /*bits per sample*/ + false, false, channel_mapping); + if (rc < 0) { + pr_err("%s: pcm output block config failed rc=%d\n", + __func__, rc); + break; + } + } + g711_dec_config = + (struct msm_audio_g711_dec_config *)audio->codec_cfg; + g711_dec_cfg.sample_rate = g711_dec_config->sample_rate; + /* Configure Media format block */ + rc = q6asm_media_format_block_g711(audio->ac, &g711_dec_cfg, + audio->ac->stream_id); + if (rc < 0) { + pr_err("%s: cmd media format block failed rc=%d\n", + __func__, rc); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s: Audio Start procedure failed rc=%d\n", + __func__, rc); + break; + } + pr_debug("%s: AUDIO_START success enable[%d]\n", + __func__, audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s: Unknown ioctl cmd = %d", __func__, cmd); + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_G711_DEC_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_g711_dec_config))) { + pr_err("%s: copy_to_user for AUDIO_GET_G711_DEC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_DEC_CONFIG: { + if (copy_from_user(audio->codec_cfg, (void *)arg, + sizeof(struct msm_audio_g711_dec_config))) { + pr_err("%s: copy_from_user for AUDIO_SET_G711_DEC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + default: { + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("%s: Failed in audio_aio_ioctl: %d cmd=%d\n", + __func__, rc, cmd); + break; + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_g711_dec_config_32 { + u32 sample_rate; +}; + +enum { + 
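+	/*
+	 * The compat ioctl numbers are re-encoded against the 32-bit struct
+	 * so the size packed into the _IOW/_IOR values matches what a 32-bit
+	 * userspace computes from its own msm_audio_g711_dec_config layout.
+	 */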
AUDIO_SET_G711_DEC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_dec_config_32), + AUDIO_GET_G711_DEC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_dec_config_32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_G711_DEC_CONFIG_32: { + struct msm_audio_g711_dec_config *g711_dec_config; + struct msm_audio_g711_dec_config_32 g711_dec_config_32; + + memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32)); + + g711_dec_config = + (struct msm_audio_g711_dec_config *)audio->codec_cfg; + g711_dec_config_32.sample_rate = g711_dec_config->sample_rate; + + if (copy_to_user((void *)arg, &g711_dec_config_32, + sizeof(g711_dec_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_G711_DEC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_DEC_CONFIG_32: { + struct msm_audio_g711_dec_config *g711_dec_config; + struct msm_audio_g711_dec_config_32 g711_dec_config_32; + + memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32)); + + if (copy_from_user(&g711_dec_config_32, (void *)arg, + sizeof(g711_dec_config_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_G711_DEC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + + g711_dec_config = + (struct msm_audio_g711_dec_config *)audio->codec_cfg; + g711_dec_config->sample_rate = g711_dec_config_32.sample_rate; + + break; + } + default: { + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("%s: Failed in audio_aio_compat_ioctl: %d cmd=%d\n", + __func__, rc, cmd); + break; + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_g711_" + 5]; + + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (!audio) + return -ENOMEM; + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_g711_dec_config), + GFP_KERNEL); + if (!audio->codec_cfg) { + kfree(audio); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_g711alaw_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_g711_ws_mgr; + + init_waitqueue_head(&audio->event_wait); + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s: Could not allocate memory for audio client\n", + __func__); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ /*foramt:G711_ALAW*/ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_G711_ALAW_FS); + if (rc < 0) { + pr_err("%s: NT mode Open failed rc=%d\n", __func__, rc); + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open G711 decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_G711_ALAW_FS); + 
if (rc < 0) { + pr_err("%s: T mode Open failed rc=%d\n", __func__, rc); + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("%s: %d mode is not supported mode\n", + __func__, file->f_mode); + rc = -EACCES; + goto fail; + } + + snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); + audio->dentry = config_debugfs_create_file(name, (void *)audio); + + if (IS_ERR_OR_NULL(audio->dentry)) + pr_debug("%s: debugfs_create_file failed\n", __func__); + pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static int g711_channel_map(u8 *channel_mapping, uint32_t channels) +{ + u8 *lchannel_mapping; + + lchannel_mapping = channel_mapping; + pr_debug("%s: channels passed: %d\n", __func__, channels); + if (channels == 1) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + } else if (channels == 2) { + lchannel_mapping[0] = PCM_CHANNEL_FL; + lchannel_mapping[1] = PCM_CHANNEL_FR; + } else if (channels == 3) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + } else if (channels == 4) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_CS; + } else if (channels == 5) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + } else if (channels == 6) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + lchannel_mapping[5] = PCM_CHANNEL_LFE; + } else if (channels == 7) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + lchannel_mapping[5] = PCM_CHANNEL_CS; + lchannel_mapping[6] = PCM_CHANNEL_LFE; + } else if (channels == 8) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FLC; + lchannel_mapping[2] = PCM_CHANNEL_FRC; + lchannel_mapping[3] = PCM_CHANNEL_FL; + lchannel_mapping[4] = PCM_CHANNEL_FR; + lchannel_mapping[5] = PCM_CHANNEL_LS; + lchannel_mapping[6] = PCM_CHANNEL_RS; + lchannel_mapping[7] = PCM_CHANNEL_LFE; + } else { + pr_err("%s: ERROR.unsupported num_ch = %u\n", + __func__, channels); + return -EINVAL; + } + return 0; +} + +static const struct file_operations audio_g711_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .compat_ioctl = audio_compat_ioctl, + .fsync = audio_aio_fsync, +}; + +static struct miscdevice audio_g711alaw_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_g711alaw", + .fops = &audio_g711_fops, +}; + +static int __init audio_g711alaw_init(void) +{ + int ret = misc_register(&audio_g711alaw_misc); + + if (ret == 0) + device_init_wakeup(audio_g711alaw_misc.this_device, true); + audio_g711_ws_mgr.ref_cnt = 0; + mutex_init(&audio_g711_ws_mgr.ws_lock); + + return ret; +} +static void __exit audio_g711alaw_exit(void) +{ + misc_deregister(&audio_g711alaw_misc); + mutex_destroy(&audio_g711_ws_mgr.ws_lock); +} + 
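+/*
+ * Registration boilerplate: audio_g711alaw_init() registers the
+ * "msm_g711alaw" misc device, marks it wakeup-capable and resets the
+ * shared wakelock manager state; audio_g711alaw_exit() deregisters the
+ * device and destroys the manager lock.
+ */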
+device_initcall(audio_g711alaw_init); +__exitcall(audio_g711alaw_exit); diff --git a/drivers/misc/qcom/qdsp6v2/audio_g711mlaw.c b/drivers/misc/qcom/qdsp6v2/audio_g711mlaw.c new file mode 100644 index 000000000000..cae2490feb7a --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_g711mlaw.c @@ -0,0 +1,396 @@ +/* Copyright (c) 2016, The Linux Foundation. All rights reserved. +* +* This program is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License version 2 and +* only version 2 as published by the Free Software Foundation. +* +* This program is distributed in the hope that it will be useful, +* but WITHOUT ANY WARRANTY; without even the implied warranty of +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +* GNU General Public License for more details. +* +*/ + +#include <linux/types.h> +#include <linux/msm_audio_g711_dec.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_g711mlaw_misc; +static struct ws_mgr audio_g711_ws_mgr; + +static const struct file_operations audio_g711_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; + +static struct dentry *config_debugfs_create_file(const char *name, void *data) +{ + return debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)data, &audio_g711_debug_fops); +} + +static int g711_channel_map(u8 *channel_mapping, uint32_t channels); + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_g711_dec_cfg g711_dec_cfg; + struct msm_audio_g711_dec_config *g711_dec_config; + u8 channel_mapping[PCM_FORMAT_MAX_NUM_CHANNEL]; + + memset(channel_mapping, 0, PCM_FORMAT_MAX_NUM_CHANNEL); + memset(&g711_dec_cfg, 0, sizeof(g711_dec_cfg)); + + if (g711_channel_map(channel_mapping, + audio->pcm_cfg.channel_count)) { + pr_err("%s: setting channel map failed %d\n", + __func__, audio->pcm_cfg.channel_count); + } + + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count, + 16, /*bits per sample*/ + false, false, channel_mapping); + if (rc < 0) { + pr_err("%s: pcm output block config failed rc=%d\n", + __func__, rc); + break; + } + } + g711_dec_config = + (struct msm_audio_g711_dec_config *)audio->codec_cfg; + g711_dec_cfg.sample_rate = g711_dec_config->sample_rate; + /* Configure Media format block */ + rc = q6asm_media_format_block_g711(audio->ac, &g711_dec_cfg, + audio->ac->stream_id); + if (rc < 0) { + pr_err("%s: cmd media format block failed rc=%d\n", + __func__, rc); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s: Audio Start procedure failed rc=%d\n", + __func__, rc); + break; + } + pr_debug("%s: AUDIO_START success enable[%d]\n", + __func__, audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s: Unknown ioctl cmd = %d", __func__, cmd); + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = 
audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_G711_DEC_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_g711_dec_config))) { + pr_err("%s: AUDIO_GET_G711_DEC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_DEC_CONFIG: { + if (copy_from_user(audio->codec_cfg, (void *)arg, + sizeof(struct msm_audio_g711_dec_config))) { + pr_err("%s: AUDIO_SET_G711_DEC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + default: { + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("%s: Failed in audio_aio_ioctl: %d cmd=%d\n", + __func__, rc, cmd); + break; + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_g711_dec_config_32 { + u32 sample_rate; +}; + +enum { + AUDIO_SET_G711_DEC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_dec_config_32), + AUDIO_GET_G711_DEC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_dec_config_32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_G711_DEC_CONFIG_32: { + struct msm_audio_g711_dec_config *g711_dec_config; + struct msm_audio_g711_dec_config_32 g711_dec_config_32; + + memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32)); + + g711_dec_config = + (struct msm_audio_g711_dec_config *)audio->codec_cfg; + g711_dec_config_32.sample_rate = g711_dec_config->sample_rate; + + if (copy_to_user((void *)arg, &g711_dec_config_32, + sizeof(g711_dec_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_G711_DEC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_DEC_CONFIG_32: { + struct msm_audio_g711_dec_config *g711_dec_config; + struct msm_audio_g711_dec_config_32 g711_dec_config_32; + + memset(&g711_dec_config_32, 0, sizeof(g711_dec_config_32)); + + if (copy_from_user(&g711_dec_config_32, (void *)arg, + sizeof(g711_dec_config_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_G711_DEC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + g711_dec_config = + (struct msm_audio_g711_dec_config *)audio->codec_cfg; + g711_dec_config->sample_rate = g711_dec_config_32.sample_rate; + + break; + } + default: { + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("%s: Failed in audio_aio_compat_ioctl: %d cmd=%d\n", + __func__, rc, cmd); + break; + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_g711_" + 5]; + + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (!audio) + return -ENOMEM; + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_g711_dec_config), + GFP_KERNEL); + if (!audio->codec_cfg) { + kfree(audio); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_g711mlaw_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_g711_ws_mgr; + + init_waitqueue_head(&audio->event_wait); + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s: Could not allocate memory for audio client\n", + __func__); + 
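+		/*
+		 * Unwind in reverse order of allocation here: codec_cfg, then
+		 * the q6audio_aio context.  Later failures jump to the fail:
+		 * label, which also releases the ASM audio client.
+		 */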
kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ /*foramt:G711_ALAW*/ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_G711_MLAW_FS); + if (rc < 0) { + pr_err("%s: NT mode Open failed rc=%d\n", __func__, rc); + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open G711 decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_G711_MLAW_FS); + if (rc < 0) { + pr_err("%s: T mode Open failed rc=%d\n", __func__, rc); + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("%s: %d mode is not supported\n", __func__, + file->f_mode); + rc = -EACCES; + goto fail; + } + + snprintf(name, sizeof(name), "msm_g711_%04x", audio->ac->session); + audio->dentry = config_debugfs_create_file(name, (void *)audio); + + if (IS_ERR_OR_NULL(audio->dentry)) + pr_debug("%s: debugfs_create_file failed\n", __func__); + pr_debug("%s: g711dec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static int g711_channel_map(u8 *channel_mapping, uint32_t channels) +{ + u8 *lchannel_mapping; + + lchannel_mapping = channel_mapping; + pr_debug("%s: channels passed: %d\n", __func__, channels); + if (channels == 1) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + } else if (channels == 2) { + lchannel_mapping[0] = PCM_CHANNEL_FL; + lchannel_mapping[1] = PCM_CHANNEL_FR; + } else if (channels == 3) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + } else if (channels == 4) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_CS; + } else if (channels == 5) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + } else if (channels == 6) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + lchannel_mapping[5] = PCM_CHANNEL_LFE; + } else if (channels == 7) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FL; + lchannel_mapping[2] = PCM_CHANNEL_FR; + lchannel_mapping[3] = PCM_CHANNEL_LS; + lchannel_mapping[4] = PCM_CHANNEL_RS; + lchannel_mapping[5] = PCM_CHANNEL_CS; + lchannel_mapping[6] = PCM_CHANNEL_LFE; + } else if (channels == 8) { + lchannel_mapping[0] = PCM_CHANNEL_FC; + lchannel_mapping[1] = PCM_CHANNEL_FLC; + lchannel_mapping[2] = PCM_CHANNEL_FRC; + lchannel_mapping[3] = PCM_CHANNEL_FL; + lchannel_mapping[4] = PCM_CHANNEL_FR; + lchannel_mapping[5] = PCM_CHANNEL_LS; + lchannel_mapping[6] = PCM_CHANNEL_RS; + lchannel_mapping[7] = PCM_CHANNEL_LFE; + } else { + pr_err("%s: ERROR.unsupported num_ch = %u\n", + __func__, channels); + return -EINVAL; + } + return 0; +} + +static const struct 
file_operations audio_g711_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .compat_ioctl = audio_compat_ioctl, + .fsync = audio_aio_fsync, +}; + +static struct miscdevice audio_g711mlaw_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_g711mlaw", + .fops = &audio_g711_fops, +}; + +static int __init audio_g711mlaw_init(void) +{ + int ret = misc_register(&audio_g711mlaw_misc); + + if (ret == 0) + device_init_wakeup(audio_g711mlaw_misc.this_device, true); + audio_g711_ws_mgr.ref_cnt = 0; + mutex_init(&audio_g711_ws_mgr.ws_lock); + + return ret; +} + +static void __exit audio_g711mlaw_exit(void) +{ + misc_deregister(&audio_g711mlaw_misc); + mutex_destroy(&audio_g711_ws_mgr.ws_lock); +} + +device_initcall(audio_g711mlaw_init); +__exitcall(audio_g711mlaw_exit); diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c new file mode 100644 index 000000000000..ebe9ab763a68 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c @@ -0,0 +1,776 @@ +/* + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/msm_audio.h> +#include <linux/compat.h> +#include "q6audio_common.h" +#include "audio_utils_aio.h" +#include <sound/msm-audio-effects-q6-v2.h> + +#define MAX_CHANNELS_SUPPORTED 8 +#define WAIT_TIMEDOUT_DURATION_SECS 1 + +struct q6audio_effects { + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + + struct audio_client *ac; + struct msm_hwacc_effects_config config; + + struct mutex lock; + + atomic_t in_count; + atomic_t out_count; + + int opened; + int started; + int buf_alloc; + struct msm_nt_eff_all_config audio_effects; +}; + +static void audio_effects_init_pp(struct audio_client *ac) +{ + int ret = 0; + struct asm_softvolume_params softvol = { + .period = SOFT_VOLUME_PERIOD, + .step = SOFT_VOLUME_STEP, + .rampingcurve = SOFT_VOLUME_CURVE_LINEAR, + }; + + if (!ac) { + pr_err("%s: audio client null to init pp\n", __func__); + return; + } + ret = q6asm_set_softvolume_v2(ac, &softvol, + SOFT_VOLUME_INSTANCE_1); + if (ret < 0) + pr_err("%s: Send SoftVolume Param failed ret=%d\n", + __func__, ret); +} + +static void audio_effects_deinit_pp(struct audio_client *ac) +{ + if (!ac) { + pr_err("%s: audio client null to deinit pp\n", __func__); + return; + } +} + +static void audio_effects_event_handler(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_effects *effects; + + if (!payload || !priv) { + pr_err("%s: invalid data to handle events, payload: %pK, priv: %pK\n", + __func__, payload, priv); + return; + } + + effects = (struct q6audio_effects *)priv; + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE_V2: { + atomic_inc(&effects->out_count); + wake_up(&effects->write_wait); + break; + } + case ASM_DATA_EVENT_READ_DONE_V2: { + atomic_inc(&effects->in_count); + wake_up(&effects->read_wait); + break; + } + case APR_BASIC_RSP_RESULT: { + pr_debug("%s: APR_BASIC_RSP_RESULT Cmd[0x%x] Status[0x%x]\n", + __func__, 
payload[0], payload[1]); + switch (payload[0]) { + case ASM_SESSION_CMD_RUN_V2: + pr_debug("ASM_SESSION_CMD_RUN_V2\n"); + break; + default: + pr_debug("%s: Payload = [0x%x] stat[0x%x]\n", + __func__, payload[0], payload[1]); + break; + } + break; + } + default: + pr_debug("%s: Unhandled Event 0x%x token = 0x%x\n", + __func__, opcode, token); + break; + } +} + +static int audio_effects_shared_ioctl(struct file *file, unsigned cmd, + unsigned long arg) +{ + struct q6audio_effects *effects = file->private_data; + int rc = 0; + switch (cmd) { + case AUDIO_START: { + pr_debug("%s: AUDIO_START\n", __func__); + + mutex_lock(&effects->lock); + + rc = q6asm_open_read_write_v2(effects->ac, + FORMAT_LINEAR_PCM, + FORMAT_MULTI_CHANNEL_LINEAR_PCM, + effects->config.meta_mode_enabled, + effects->config.output.bits_per_sample, + true /*overwrite topology*/, + ASM_STREAM_POSTPROC_TOPO_ID_HPX_MASTER); + if (rc < 0) { + pr_err("%s: Open failed for hw accelerated effects:rc=%d\n", + __func__, rc); + rc = -EINVAL; + mutex_unlock(&effects->lock); + goto ioctl_fail; + } + effects->opened = 1; + + pr_debug("%s: dec buf size: %d, num_buf: %d, enc buf size: %d, num_buf: %d\n", + __func__, effects->config.output.buf_size, + effects->config.output.num_buf, + effects->config.input.buf_size, + effects->config.input.num_buf); + rc = q6asm_audio_client_buf_alloc_contiguous(IN, effects->ac, + effects->config.output.buf_size, + effects->config.output.num_buf); + if (rc < 0) { + pr_err("%s: Write buffer Allocation failed rc = %d\n", + __func__, rc); + rc = -ENOMEM; + mutex_unlock(&effects->lock); + goto ioctl_fail; + } + atomic_set(&effects->in_count, effects->config.input.num_buf); + rc = q6asm_audio_client_buf_alloc_contiguous(OUT, effects->ac, + effects->config.input.buf_size, + effects->config.input.num_buf); + if (rc < 0) { + pr_err("%s: Read buffer Allocation failed rc = %d\n", + __func__, rc); + rc = -ENOMEM; + goto readbuf_fail; + } + atomic_set(&effects->out_count, effects->config.output.num_buf); + effects->buf_alloc = 1; + + pr_debug("%s: enc: sample_rate: %d, num_channels: %d\n", + __func__, effects->config.input.sample_rate, + effects->config.input.num_channels); + rc = q6asm_enc_cfg_blk_pcm(effects->ac, + effects->config.input.sample_rate, + effects->config.input.num_channels); + if (rc < 0) { + pr_err("%s: pcm read block config failed\n", __func__); + rc = -EINVAL; + goto cfg_fail; + } + pr_debug("%s: dec: sample_rate: %d, num_channels: %d, bit_width: %d\n", + __func__, effects->config.output.sample_rate, + effects->config.output.num_channels, + effects->config.output.bits_per_sample); + rc = q6asm_media_format_block_pcm_format_support( + effects->ac, effects->config.output.sample_rate, + effects->config.output.num_channels, + effects->config.output.bits_per_sample); + if (rc < 0) { + pr_err("%s: pcm write format block config failed\n", + __func__); + rc = -EINVAL; + goto cfg_fail; + } + + audio_effects_init_pp(effects->ac); + + rc = q6asm_run(effects->ac, 0x00, 0x00, 0x00); + if (!rc) + effects->started = 1; + else { + effects->started = 0; + pr_err("%s: ASM run state failed\n", __func__); + } + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_WRITE: { + char *bufptr = NULL; + uint32_t idx = 0; + uint32_t size = 0; + + mutex_lock(&effects->lock); + + if (!effects->started) { + rc = -EFAULT; + mutex_unlock(&effects->lock); + goto ioctl_fail; + } + + rc = wait_event_timeout(effects->write_wait, + atomic_read(&effects->out_count), + WAIT_TIMEDOUT_DURATION_SECS * HZ); + if (!rc) { + pr_err("%s: 
write wait_event_timeout\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+		if (!atomic_read(&effects->out_count)) {
+			pr_err("%s: pcm stopped out_count 0\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
+		bufptr = q6asm_is_cpu_buf_avail(IN, effects->ac, &size, &idx);
+		if (bufptr) {
+			if ((effects->config.buf_cfg.output_len > size) ||
+			    copy_from_user(bufptr, (void *)arg,
+					effects->config.buf_cfg.output_len)) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+			rc = q6asm_write(effects->ac,
+					effects->config.buf_cfg.output_len,
+					0, 0, NO_TIMESTAMP);
+			if (rc < 0) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+			atomic_dec(&effects->out_count);
+		} else {
+			pr_err("%s: AUDIO_EFFECTS_WRITE: Buffer dropped\n",
+				__func__);
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	case AUDIO_EFFECTS_READ: {
+		char *bufptr = NULL;
+		uint32_t idx = 0;
+		uint32_t size = 0;
+
+		mutex_lock(&effects->lock);
+
+		if (!effects->started) {
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
+		atomic_set(&effects->in_count, 0);
+
+		rc = q6asm_read_v2(effects->ac,
+				effects->config.buf_cfg.input_len);
+		/* Read might fail initially, don't error out */
+		if (rc < 0)
+			pr_err("%s: read failed\n", __func__);
+
+		rc = wait_event_timeout(effects->read_wait,
+					atomic_read(&effects->in_count),
+					WAIT_TIMEDOUT_DURATION_SECS * HZ);
+		if (!rc) {
+			pr_err("%s: read wait_event_timeout\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+		if (!atomic_read(&effects->in_count)) {
+			pr_err("%s: pcm stopped in_count 0\n", __func__);
+			rc = -EFAULT;
+			mutex_unlock(&effects->lock);
+			goto ioctl_fail;
+		}
+
+		bufptr = q6asm_is_cpu_buf_avail(OUT, effects->ac, &size, &idx);
+		if (bufptr) {
+			if (!((void *)arg)) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+			if ((effects->config.buf_cfg.input_len > size) ||
+			    copy_to_user((void *)arg, bufptr,
+					effects->config.buf_cfg.input_len)) {
+				rc = -EFAULT;
+				mutex_unlock(&effects->lock);
+				goto ioctl_fail;
+			}
+		}
+		mutex_unlock(&effects->lock);
+		break;
+	}
+	default:
+		pr_err("%s: Unhandled ioctl cmd = %d\n", __func__, cmd);
+		rc = -EINVAL;
+		break;
+	}
+ioctl_fail:
+	return rc;
+readbuf_fail:
+	q6asm_audio_client_buf_free_contiguous(IN,
+					effects->ac);
+	mutex_unlock(&effects->lock);
+	return rc;
+cfg_fail:
+	q6asm_audio_client_buf_free_contiguous(IN,
+					effects->ac);
+	q6asm_audio_client_buf_free_contiguous(OUT,
+					effects->ac);
+	effects->buf_alloc = 0;
+	mutex_unlock(&effects->lock);
+	return rc;
+}
+
+static long audio_effects_set_pp_param(struct q6audio_effects *effects,
+				long *values)
+{
+	int rc = 0;
+	int effects_module = values[0];
+	switch (effects_module) {
+	case VIRTUALIZER_MODULE:
+		pr_debug("%s: VIRTUALIZER_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+					effects_module, effects->ac->topology))
+			msm_audio_effects_virtualizer_handler(
+				effects->ac,
+				&(effects->audio_effects.virtualizer),
+				(long *)&values[1]);
+		break;
+	case REVERB_MODULE:
+		pr_debug("%s: REVERB_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+					effects_module, effects->ac->topology))
+			msm_audio_effects_reverb_handler(effects->ac,
+				&(effects->audio_effects.reverb),
+				(long *)&values[1]);
+		break;
+	case BASS_BOOST_MODULE:
+		pr_debug("%s: BASS_BOOST_MODULE\n", __func__);
+		if (msm_audio_effects_is_effmodule_supp_in_top(
+					effects_module, 
effects->ac->topology)) + msm_audio_effects_bass_boost_handler( + effects->ac, + &(effects->audio_effects.bass_boost), + (long *)&values[1]); + break; + case PBE_MODULE: + pr_debug("%s: PBE_MODULE\n", __func__); + if (msm_audio_effects_is_effmodule_supp_in_top( + effects_module, effects->ac->topology)) + msm_audio_effects_pbe_handler( + effects->ac, + &(effects->audio_effects.pbe), + (long *)&values[1]); + break; + case EQ_MODULE: + pr_debug("%s: EQ_MODULE\n", __func__); + if (msm_audio_effects_is_effmodule_supp_in_top( + effects_module, effects->ac->topology)) + msm_audio_effects_popless_eq_handler( + effects->ac, + &(effects->audio_effects.equalizer), + (long *)&values[1]); + break; + case SOFT_VOLUME_MODULE: + pr_debug("%s: SA PLUS VOLUME_MODULE\n", __func__); + msm_audio_effects_volume_handler_v2(effects->ac, + &(effects->audio_effects.saplus_vol), + (long *)&values[1], SOFT_VOLUME_INSTANCE_1); + break; + case SOFT_VOLUME2_MODULE: + pr_debug("%s: TOPOLOGY SWITCH VOLUME MODULE\n", + __func__); + if (msm_audio_effects_is_effmodule_supp_in_top( + effects_module, effects->ac->topology)) + msm_audio_effects_volume_handler_v2(effects->ac, + &(effects->audio_effects.topo_switch_vol), + (long *)&values[1], SOFT_VOLUME_INSTANCE_2); + break; + default: + pr_err("%s: Invalid effects config module\n", __func__); + rc = -EINVAL; + } + return rc; +} + +static long audio_effects_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_effects *effects = file->private_data; + int rc = 0; + long argvalues[MAX_PP_PARAMS_SZ] = {0}; + + switch (cmd) { + case AUDIO_SET_EFFECTS_CONFIG: { + pr_debug("%s: AUDIO_SET_EFFECTS_CONFIG\n", __func__); + mutex_lock(&effects->lock); + memset(&effects->config, 0, sizeof(effects->config)); + if (copy_from_user(&effects->config, (void *)arg, + sizeof(effects->config))) { + pr_err("%s: copy from user for AUDIO_SET_EFFECTS_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + pr_debug("%s: write buf_size: %d, num_buf: %d, sample_rate: %d, channel: %d\n", + __func__, effects->config.output.buf_size, + effects->config.output.num_buf, + effects->config.output.sample_rate, + effects->config.output.num_channels); + pr_debug("%s: read buf_size: %d, num_buf: %d, sample_rate: %d, channel: %d\n", + __func__, effects->config.input.buf_size, + effects->config.input.num_buf, + effects->config.input.sample_rate, + effects->config.input.num_channels); + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_SET_BUF_LEN: { + mutex_lock(&effects->lock); + if (copy_from_user(&effects->config.buf_cfg, (void *)arg, + sizeof(effects->config.buf_cfg))) { + pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n", + __func__); + rc = -EFAULT; + } + pr_debug("%s: write buf len: %d, read buf len: %d\n", + __func__, effects->config.buf_cfg.output_len, + effects->config.buf_cfg.input_len); + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_GET_BUF_AVAIL: { + struct msm_hwacc_buf_avail buf_avail; + + buf_avail.input_num_avail = atomic_read(&effects->in_count); + buf_avail.output_num_avail = atomic_read(&effects->out_count); + mutex_lock(&effects->lock); + pr_debug("%s: write buf avail: %d, read buf avail: %d\n", + __func__, buf_avail.output_num_avail, + buf_avail.input_num_avail); + if (copy_to_user((void *)arg, &buf_avail, + sizeof(buf_avail))) { + pr_err("%s: copy to user for AUDIO_EFFECTS_GET_NUM_BUF_AVAIL failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_SET_PP_PARAMS: { + 
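+		/*
+		 * Userspace passes an array of MAX_PP_PARAMS_SZ longs:
+		 * element 0 selects the effects module (virtualizer, reverb,
+		 * bass boost, PBE, EQ or soft volume) and the remaining
+		 * elements are handed to the matching
+		 * msm_audio_effects_*_handler() in
+		 * audio_effects_set_pp_param().
+		 */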
mutex_lock(&effects->lock); + if (copy_from_user(argvalues, (void *)arg, + MAX_PP_PARAMS_SZ*sizeof(long))) { + pr_err("%s: copy from user for pp params failed\n", + __func__); + mutex_unlock(&effects->lock); + return -EFAULT; + } + rc = audio_effects_set_pp_param(effects, argvalues); + mutex_unlock(&effects->lock); + break; + } + default: + pr_debug("%s: Calling shared ioctl\n", __func__); + rc = audio_effects_shared_ioctl(file, cmd, arg); + break; + } + if (rc) + pr_err("%s: cmd 0x%x failed\n", __func__, cmd); + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_hwacc_data_config32 { + __u32 buf_size; + __u32 num_buf; + __u32 num_channels; + __u8 channel_map[MAX_CHANNELS_SUPPORTED]; + __u32 sample_rate; + __u32 bits_per_sample; +}; + +struct msm_hwacc_buf_cfg32 { + __u32 input_len; + __u32 output_len; +}; + +struct msm_hwacc_buf_avail32 { + __u32 input_num_avail; + __u32 output_num_avail; +}; + +struct msm_hwacc_effects_config32 { + struct msm_hwacc_data_config32 input; + struct msm_hwacc_data_config32 output; + struct msm_hwacc_buf_cfg32 buf_cfg; + __u32 meta_mode_enabled; + __u32 overwrite_topology; + __s32 topology; +}; + +enum { + AUDIO_SET_EFFECTS_CONFIG32 = _IOW(AUDIO_IOCTL_MAGIC, 99, + struct msm_hwacc_effects_config32), + AUDIO_EFFECTS_SET_BUF_LEN32 = _IOW(AUDIO_IOCTL_MAGIC, 100, + struct msm_hwacc_buf_cfg32), + AUDIO_EFFECTS_GET_BUF_AVAIL32 = _IOW(AUDIO_IOCTL_MAGIC, 101, + struct msm_hwacc_buf_avail32), + AUDIO_EFFECTS_WRITE32 = _IOW(AUDIO_IOCTL_MAGIC, 102, compat_uptr_t), + AUDIO_EFFECTS_READ32 = _IOWR(AUDIO_IOCTL_MAGIC, 103, compat_uptr_t), + AUDIO_EFFECTS_SET_PP_PARAMS32 = _IOW(AUDIO_IOCTL_MAGIC, 104, + compat_uptr_t), + AUDIO_START32 = _IOW(AUDIO_IOCTL_MAGIC, 0, unsigned), +}; + +static long audio_effects_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_effects *effects = file->private_data; + int rc = 0, i; + + switch (cmd) { + case AUDIO_SET_EFFECTS_CONFIG32: { + struct msm_hwacc_effects_config32 config32; + struct msm_hwacc_effects_config *config = &effects->config; + mutex_lock(&effects->lock); + memset(&effects->config, 0, sizeof(effects->config)); + if (copy_from_user(&config32, (void *)arg, + sizeof(config32))) { + pr_err("%s: copy to user for AUDIO_SET_EFFECTS_CONFIG failed\n", + __func__); + rc = -EFAULT; + mutex_unlock(&effects->lock); + break; + } + config->input.buf_size = config32.input.buf_size; + config->input.num_buf = config32.input.num_buf; + config->input.num_channels = config32.input.num_channels; + config->input.sample_rate = config32.input.sample_rate; + config->input.bits_per_sample = config32.input.bits_per_sample; + config->input.buf_size = config32.input.buf_size; + for (i = 0; i < MAX_CHANNELS_SUPPORTED; i++) + config->input.channel_map[i] = + config32.input.channel_map[i]; + config->output.buf_size = config32.output.buf_size; + config->output.num_buf = config32.output.num_buf; + config->output.num_channels = config32.output.num_channels; + config->output.sample_rate = config32.output.sample_rate; + config->output.bits_per_sample = + config32.output.bits_per_sample; + config->output.buf_size = config32.output.buf_size; + for (i = 0; i < MAX_CHANNELS_SUPPORTED; i++) + config->output.channel_map[i] = + config32.output.channel_map[i]; + config->buf_cfg.input_len = config32.buf_cfg.input_len; + config->buf_cfg.output_len = config32.buf_cfg.output_len; + config->meta_mode_enabled = config32.meta_mode_enabled; + config->overwrite_topology = config32.overwrite_topology; + config->topology = config32.topology; + 
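+		/*
+		 * Copied member by member rather than with one bulk copy of
+		 * the native struct: the fixed-width 32-bit layout is not
+		 * guaranteed to match the kernel's msm_hwacc_effects_config,
+		 * so each field is assigned explicitly.
+		 */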
pr_debug("%s: write buf_size: %d, num_buf: %d, sample_rate: %d, channels: %d\n", + __func__, effects->config.output.buf_size, + effects->config.output.num_buf, + effects->config.output.sample_rate, + effects->config.output.num_channels); + pr_debug("%s: read buf_size: %d, num_buf: %d, sample_rate: %d, channels: %d\n", + __func__, effects->config.input.buf_size, + effects->config.input.num_buf, + effects->config.input.sample_rate, + effects->config.input.num_channels); + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_SET_BUF_LEN32: { + struct msm_hwacc_buf_cfg32 buf_cfg32; + struct msm_hwacc_effects_config *config = &effects->config; + mutex_lock(&effects->lock); + if (copy_from_user(&buf_cfg32, (void *)arg, + sizeof(buf_cfg32))) { + pr_err("%s: copy from user for AUDIO_EFFECTS_SET_BUF_LEN failed\n", + __func__); + rc = -EFAULT; + mutex_unlock(&effects->lock); + break; + } + config->buf_cfg.input_len = buf_cfg32.input_len; + config->buf_cfg.output_len = buf_cfg32.output_len; + pr_debug("%s: write buf len: %d, read buf len: %d\n", + __func__, effects->config.buf_cfg.output_len, + effects->config.buf_cfg.input_len); + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_GET_BUF_AVAIL32: { + struct msm_hwacc_buf_avail32 buf_avail; + + memset(&buf_avail, 0, sizeof(buf_avail)); + + mutex_lock(&effects->lock); + buf_avail.input_num_avail = atomic_read(&effects->in_count); + buf_avail.output_num_avail = atomic_read(&effects->out_count); + pr_debug("%s: write buf avail: %d, read buf avail: %d\n", + __func__, buf_avail.output_num_avail, + buf_avail.input_num_avail); + if (copy_to_user((void *)arg, &buf_avail, + sizeof(buf_avail))) { + pr_err("%s: copy to user for AUDIO_EFFECTS_GET_NUM_BUF_AVAIL failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&effects->lock); + break; + } + case AUDIO_EFFECTS_SET_PP_PARAMS32: { + long argvalues[MAX_PP_PARAMS_SZ] = {0}; + int argvalues32[MAX_PP_PARAMS_SZ] = {0}; + + mutex_lock(&effects->lock); + if (copy_from_user(argvalues32, (void *)arg, + MAX_PP_PARAMS_SZ*sizeof(int))) { + pr_err("%s: copy from user failed for pp params\n", + __func__); + mutex_unlock(&effects->lock); + return -EFAULT; + } + for (i = 0; i < MAX_PP_PARAMS_SZ; i++) + argvalues[i] = argvalues32[i]; + + rc = audio_effects_set_pp_param(effects, argvalues); + mutex_unlock(&effects->lock); + break; + } + case AUDIO_START32: { + rc = audio_effects_shared_ioctl(file, AUDIO_START, arg); + break; + } + case AUDIO_EFFECTS_WRITE32: { + rc = audio_effects_shared_ioctl(file, AUDIO_EFFECTS_WRITE, arg); + break; + } + case AUDIO_EFFECTS_READ32: { + rc = audio_effects_shared_ioctl(file, AUDIO_EFFECTS_READ, arg); + break; + } + default: + pr_debug("%s: unhandled ioctl\n", __func__); + rc = -EINVAL; + break; + } + return rc; +} +#endif + +static int audio_effects_release(struct inode *inode, struct file *file) +{ + struct q6audio_effects *effects = file->private_data; + int rc = 0; + if (!effects) { + pr_err("%s: effect is NULL\n", __func__); + return -EINVAL; + } + if (effects->opened) { + rc = wait_event_timeout(effects->write_wait, + atomic_read(&effects->out_count), + WAIT_TIMEDOUT_DURATION_SECS * HZ); + if (!rc) + pr_err("%s: write wait_event_timeout failed\n", + __func__); + rc = wait_event_timeout(effects->read_wait, + atomic_read(&effects->in_count), + WAIT_TIMEDOUT_DURATION_SECS * HZ); + if (!rc) + pr_err("%s: read wait_event_timeout failed\n", + __func__); + rc = q6asm_cmd(effects->ac, CMD_CLOSE); + if (rc < 0) + pr_err("%s[%pK]:Failed to close the session rc=%d\n", + 
__func__, effects, rc); + effects->opened = 0; + effects->started = 0; + + audio_effects_deinit_pp(effects->ac); + } + + if (effects->buf_alloc) { + q6asm_audio_client_buf_free_contiguous(IN, effects->ac); + q6asm_audio_client_buf_free_contiguous(OUT, effects->ac); + } + q6asm_audio_client_free(effects->ac); + + mutex_destroy(&effects->lock); + kfree(effects); + + pr_debug("%s: close session success\n", __func__); + return rc; +} + +static int audio_effects_open(struct inode *inode, struct file *file) +{ + struct q6audio_effects *effects; + int rc = 0; + + effects = kzalloc(sizeof(struct q6audio_effects), GFP_KERNEL); + if (!effects) { + pr_err("%s: Could not allocate memory for hw acc effects driver\n", + __func__); + return -ENOMEM; + } + + effects->ac = q6asm_audio_client_alloc( + (app_cb)audio_effects_event_handler, + (void *)effects); + if (!effects->ac) { + pr_err("%s: Could not allocate memory for audio client\n", + __func__); + kfree(effects); + return -ENOMEM; + } + + init_waitqueue_head(&effects->read_wait); + init_waitqueue_head(&effects->write_wait); + mutex_init(&effects->lock); + + effects->opened = 0; + effects->started = 0; + effects->buf_alloc = 0; + file->private_data = effects; + pr_debug("%s: open session success\n", __func__); + return rc; +} + +static const struct file_operations audio_effects_fops = { + .owner = THIS_MODULE, + .open = audio_effects_open, + .release = audio_effects_release, + .unlocked_ioctl = audio_effects_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = audio_effects_compat_ioctl, +#endif +}; + +struct miscdevice audio_effects_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_hweffects", + .fops = &audio_effects_fops, +}; + +static int __init audio_effects_init(void) +{ + return misc_register(&audio_effects_misc); +} + +device_initcall(audio_effects_init); +MODULE_DESCRIPTION("Audio hardware accelerated effects driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/misc/qcom/qdsp6v2/audio_mp3.c b/drivers/misc/qcom/qdsp6v2/audio_mp3.c new file mode 100644 index 000000000000..fa5132e83ff4 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_mp3.c @@ -0,0 +1,188 @@ +/* mp3 audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "audio_utils_aio.h" + +static struct miscdevice audio_mp3_misc; +static struct ws_mgr audio_mp3_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_mp3_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + switch (cmd) { + case AUDIO_START: { + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + rc = enable_volume_ramp(audio); + if (rc < 0) { + pr_err("%s: Failed to enable volume ramp\n", + __func__); + } + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_info("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + } + return rc; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_mp3_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for mp3 decode driver\n"); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_mp3_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_mp3_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_MP3); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open MP3 decoder, expected frames is always 1 + audio->buf_cfg.frames_per_buf = 0x01;*/ + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_MP3); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_mp3_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_mp3_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:mp3dec success mode[%d]session[%d]\n", __func__, + 
audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_mp3_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, +}; + +static struct miscdevice audio_mp3_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_mp3", + .fops = &audio_mp3_fops, +}; + +static int __init audio_mp3_init(void) +{ + int ret = misc_register(&audio_mp3_misc); + + if (ret == 0) + device_init_wakeup(audio_mp3_misc.this_device, true); + audio_mp3_ws_mgr.ref_cnt = 0; + mutex_init(&audio_mp3_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_mp3_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c new file mode 100644 index 000000000000..92faa1b899c9 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_multi_aac.c @@ -0,0 +1,521 @@ +/* aac audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/msm_audio_aac.h> +#include <linux/compat.h> +#include <soc/qcom/socinfo.h> +#include "audio_utils_aio.h" + +#define AUDIO_AAC_DUAL_MONO_INVALID -1 + + +/* Default number of pre-allocated event packets */ +#define PCM_BUFSZ_MIN_AACM ((8*1024) + sizeof(struct dec_meta_out)) +static struct miscdevice audio_multiaac_misc; +static struct ws_mgr audio_multiaac_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_aac_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_aac_cfg aac_cfg; + struct msm_audio_aac_config *aac_config; + uint32_t sbr_ps = 0x00; + aac_config = (struct msm_audio_aac_config *)audio->codec_cfg; + if (audio->feedback == TUNNEL_MODE) { + aac_cfg.sample_rate = aac_config->sample_rate; + aac_cfg.ch_cfg = aac_config->channel_configuration; + } else { + aac_cfg.sample_rate = audio->pcm_cfg.sample_rate; + aac_cfg.ch_cfg = audio->pcm_cfg.channel_count; + } + pr_debug("%s: AUDIO_START session_id[%d]\n", __func__, + audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm_native(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + /* turn on both sbr and ps */ + rc = q6asm_enable_sbrps(audio->ac, sbr_ps); + if (rc < 0) + pr_err("sbr-ps enable failed\n"); + if (aac_config->sbr_ps_on_flag) + aac_cfg.aot = AAC_ENC_MODE_EAAC_P; + else if (aac_config->sbr_on_flag) + aac_cfg.aot = AAC_ENC_MODE_AAC_P; + else + aac_cfg.aot = AAC_ENC_MODE_AAC_LC; + + switch (aac_config->format) { + case AUDIO_AAC_FORMAT_ADTS: + aac_cfg.format = 
0x00; + break; + case AUDIO_AAC_FORMAT_LOAS: + aac_cfg.format = 0x01; + break; + case AUDIO_AAC_FORMAT_ADIF: + aac_cfg.format = 0x02; + break; + default: + case AUDIO_AAC_FORMAT_RAW: + aac_cfg.format = 0x03; + } + aac_cfg.ep_config = aac_config->ep_config; + aac_cfg.section_data_resilience = + aac_config->aac_section_data_resilience_flag; + aac_cfg.scalefactor_data_resilience = + aac_config->aac_scalefactor_data_resilience_flag; + aac_cfg.spectral_data_resilience = + aac_config->aac_spectral_data_resilience_flag; + + pr_debug("%s:format=%x aot=%d ch=%d sr=%d\n", + __func__, aac_cfg.format, + aac_cfg.aot, aac_cfg.ch_cfg, + aac_cfg.sample_rate); + + /* Configure Media format block */ + rc = q6asm_media_format_block_multi_aac(audio->ac, &aac_cfg); + if (rc < 0) { + pr_err("cmd media format block failed\n"); + break; + } + rc = q6asm_set_encdec_chan_map(audio->ac, 2); + if (rc < 0) { + pr_err("%s: cmd set encdec_chan_map failed\n", + __func__); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_info("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config *aac_config; + uint16_t sce_left = 1, sce_right = 2; + + if (arg == NULL) { + pr_err("%s: NULL config pointer\n", __func__); + rc = -EINVAL; + break; + } + memcpy(audio->codec_cfg, arg, + sizeof(struct msm_audio_aac_config)); + aac_config = audio->codec_cfg; + if (aac_config->dual_mono_mode > + AUDIO_AAC_DUAL_MONO_PL_SR) { + pr_err("%s:AUDIO_SET_AAC_CONFIG: Invalid dual_mono mode =%d\n", + __func__, aac_config->dual_mono_mode); + } else { + /* convert the data from user into sce_left + * and sce_right based on the definitions + */ + pr_debug("%s: AUDIO_SET_AAC_CONFIG: modify dual_mono mode =%d\n", + __func__, aac_config->dual_mono_mode); + switch (aac_config->dual_mono_mode) { + case AUDIO_AAC_DUAL_MONO_PL_PR: + sce_left = 1; + sce_right = 1; + break; + case AUDIO_AAC_DUAL_MONO_SL_SR: + sce_left = 2; + sce_right = 2; + break; + case AUDIO_AAC_DUAL_MONO_SL_PR: + sce_left = 2; + sce_right = 1; + break; + case AUDIO_AAC_DUAL_MONO_PL_SR: + default: + sce_left = 1; + sce_right = 2; + break; + } + rc = q6asm_cfg_dual_mono_aac(audio->ac, + sce_left, sce_right); + if (rc < 0) + pr_err("%s: asm cmd dualmono failed rc=%d\n", + __func__, rc); + } + break; + } + case AUDIO_SET_AAC_MIX_CONFIG: { + u32 *mix_coeff = (u32 *)arg; + if (!arg) { + pr_err("%s: Invalid param for %s\n", + __func__, "AUDIO_SET_AAC_MIX_CONFIG"); + rc = -EINVAL; + break; + } + pr_debug("%s, AUDIO_SET_AAC_MIX_CONFIG\n", __func__); + pr_debug("%s, value of coeff = %d\n", + __func__, *mix_coeff); + rc = q6asm_cfg_aac_sel_mix_coef(audio->ac, *mix_coeff); + if (rc < 0) + pr_err("%s asm aac_sel_mix_coef failed rc=%d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d\n", __func__, cmd); + rc = -EINVAL; + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_AAC_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_aac_config))) { + pr_err("%s: copy_to_user 
for AUDIO_GET_AAC_CONFIG failed\n" + , __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG: { + struct msm_audio_aac_config aac_config; + if (copy_from_user(&aac_config, (void *)arg, + sizeof(aac_config))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_CONFIG failed\n" + , __func__); + rc = -EFAULT; + break; + } + rc = audio_ioctl_shared(file, cmd, &aac_config); + if (rc) + pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n", + __func__, rc); + break; + } + case AUDIO_SET_AAC_MIX_CONFIG: { + u32 mix_config; + pr_debug("%s, AUDIO_SET_AAC_MIX_CONFIG\n", __func__); + if (copy_from_user(&mix_config, (void *)arg, + sizeof(u32))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_MIX_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = audio_ioctl_shared(file, cmd, &mix_config); + if (rc) + pr_err("%s:AUDIO_SET_AAC_MIX_CONFIG failed. Rc= %d\n", + __func__, rc); + break; + } + default: { + pr_debug("Calling utils ioctl\n"); + rc = audio->codec_ioctl(file, cmd, arg); + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_aac_config32 { + s16 format; + u16 audio_object; + u16 ep_config; /* 0 ~ 3 useful only obj = ERLC */ + u16 aac_section_data_resilience_flag; + u16 aac_scalefactor_data_resilience_flag; + u16 aac_spectral_data_resilience_flag; + u16 sbr_on_flag; + u16 sbr_ps_on_flag; + u16 dual_mono_mode; + u16 channel_configuration; + u16 sample_rate; +}; + +enum { + AUDIO_SET_AAC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_aac_config32), + AUDIO_GET_AAC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_aac_config32), +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_AAC_CONFIG_32: { + struct msm_audio_aac_config *aac_config; + struct msm_audio_aac_config32 aac_config_32; + + memset(&aac_config_32, 0, sizeof(aac_config_32)); + + aac_config = (struct msm_audio_aac_config *)audio->codec_cfg; + aac_config_32.format = aac_config->format; + aac_config_32.audio_object = aac_config->audio_object; + aac_config_32.ep_config = aac_config->ep_config; + aac_config_32.aac_section_data_resilience_flag = + aac_config->aac_section_data_resilience_flag; + aac_config_32.aac_scalefactor_data_resilience_flag = + aac_config->aac_scalefactor_data_resilience_flag; + aac_config_32.aac_spectral_data_resilience_flag = + aac_config->aac_spectral_data_resilience_flag; + aac_config_32.sbr_on_flag = aac_config->sbr_on_flag; + aac_config_32.sbr_ps_on_flag = aac_config->sbr_ps_on_flag; + aac_config_32.dual_mono_mode = aac_config->dual_mono_mode; + aac_config_32.channel_configuration = + aac_config->channel_configuration; + aac_config_32.sample_rate = aac_config->sample_rate; + + if (copy_to_user((void *)arg, &aac_config_32, + sizeof(aac_config_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_AAC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_AAC_CONFIG_32: { + struct msm_audio_aac_config aac_config; + struct msm_audio_aac_config32 aac_config_32; + pr_debug("%s: AUDIO_SET_AAC_CONFIG\n", __func__); + + if (copy_from_user(&aac_config_32, (void *)arg, + sizeof(aac_config_32))) { + pr_err( + "%s: copy_from_user for AUDIO_SET_AAC_CONFIG_32 failed", + __func__); + rc = -EFAULT; + break; + } + aac_config.format = aac_config_32.format; + 
aac_config.audio_object = aac_config_32.audio_object; + aac_config.ep_config = aac_config_32.ep_config; + aac_config.aac_section_data_resilience_flag = + aac_config_32.aac_section_data_resilience_flag; + aac_config.aac_scalefactor_data_resilience_flag = + aac_config_32.aac_scalefactor_data_resilience_flag; + aac_config.aac_spectral_data_resilience_flag = + aac_config_32.aac_spectral_data_resilience_flag; + aac_config.sbr_on_flag = aac_config_32.sbr_on_flag; + aac_config.sbr_ps_on_flag = aac_config_32.sbr_ps_on_flag; + aac_config.dual_mono_mode = aac_config_32.dual_mono_mode; + aac_config.channel_configuration = + aac_config_32.channel_configuration; + aac_config.sample_rate = aac_config_32.sample_rate; + + cmd = AUDIO_SET_AAC_CONFIG; + rc = audio_ioctl_shared(file, cmd, &aac_config); + if (rc) + pr_err("%s:AUDIO_SET_AAC_CONFIG failed. rc= %d\n", + __func__, rc); + break; + } + case AUDIO_SET_AAC_MIX_CONFIG: { + u32 mix_config; + pr_debug("%s, AUDIO_SET_AAC_MIX_CONFIG\n", __func__); + if (copy_from_user(&mix_config, (void *)arg, + sizeof(u32))) { + pr_err("%s: copy_from_user for AUDIO_SET_AAC_MIX_CONFIG failed\n" + , __func__); + rc = -EFAULT; + break; + } + rc = audio_ioctl_shared(file, cmd, &mix_config); + if (rc) + pr_err("%s:AUDIO_SET_AAC_CONFIG failed. Rc= %d\n", + __func__, rc); + break; + } + default: { + pr_debug("Calling utils ioctl\n"); + rc = audio->codec_compat_ioctl(file, cmd, arg); + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + struct msm_audio_aac_config *aac_config = NULL; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_multi_aac_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for aac decode driver\n"); + return -ENOMEM; + } + + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_aac_config), + GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_err("%s: Could not allocate memory for aac config\n", + __func__); + kfree(audio); + return -ENOMEM; + } + + aac_config = audio->codec_cfg; + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN_AACM; + audio->miscdevice = &audio_multiaac_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_multiaac_ws_mgr; + aac_config->dual_mono_mode = AUDIO_AAC_DUAL_MONO_INVALID; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_MPEG4_MULTI_AAC); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open AAC decoder, expected frames is always 1 + audio->buf_cfg.frames_per_buf = 0x01;*/ + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_MPEG4_MULTI_AAC); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + 
audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_multi_aac_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_aac_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:AAC 5.1 Decoder OPEN success mode[%d]session[%d]\n", + __func__, audio->feedback, audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_aac_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_multiaac_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_multi_aac", + .fops = &audio_aac_fops, +}; + +static int __init audio_aac_init(void) +{ + int ret = misc_register(&audio_multiaac_misc); + + if (ret == 0) + device_init_wakeup(audio_multiaac_misc.this_device, true); + audio_multiaac_ws_mgr.ref_cnt = 0; + mutex_init(&audio_multiaac_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_aac_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_qcelp.c b/drivers/misc/qcom/qdsp6v2/audio_qcelp.c new file mode 100644 index 000000000000..508a95b7bf79 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_qcelp.c @@ -0,0 +1,193 @@ +/* qcelp(v13k) audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include "audio_utils_aio.h" + +#define FRAME_SIZE_DEC_QCELP ((32) + sizeof(struct dec_meta_in)) + +static struct miscdevice audio_qcelp_misc; +static struct ws_mgr audio_qcelp_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_qcelp_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("%s: AUDIO_START sessionid[%d]enable[%d]\n", __func__, + audio->ac->session, + audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + } + return rc; +} + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_qcelp_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for aac decode driver\n"); + return -ENOMEM; + } + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE_DEC_QCELP; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.sample_rate = 8000; + audio->pcm_cfg.channel_count = 1; + audio->miscdevice = &audio_qcelp_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_qcelp_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_V13K); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_V13K); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_qcelp_%04x", audio->ac->session); + audio->dentry = 
debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_qcelp_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:dec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio); + return rc; +} + +static const struct file_operations audio_qcelp_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, +}; + +static struct miscdevice audio_qcelp_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_qcelp", + .fops = &audio_qcelp_fops, +}; + +static int __init audio_qcelp_init(void) +{ + int ret = misc_register(&audio_qcelp_misc); + + if (ret == 0) + device_init_wakeup(audio_qcelp_misc.this_device, true); + audio_qcelp_ws_mgr.ref_cnt = 0; + mutex_init(&audio_qcelp_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_qcelp_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.c b/drivers/misc/qcom/qdsp6v2/audio_utils.c new file mode 100644 index 000000000000..15d82d126df7 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_utils.c @@ -0,0 +1,929 @@ +/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <linux/compat.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +/* + * Define maximum buffer size. Below values are chosen considering the higher + * values used among all native drivers. 
+ */ +#define MAX_FRAME_SIZE 1536 +#define MAX_FRAMES 5 +#define META_SIZE (sizeof(struct meta_out_dsp)) +#define MAX_BUFFER_SIZE (1 + ((MAX_FRAME_SIZE + META_SIZE) * MAX_FRAMES)) + +static int audio_in_pause(struct q6audio_in *audio) +{ + int rc; + + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_err("%s:session id %d: pause cmd failed rc=%d\n", __func__, + audio->ac->session, rc); + + return rc; +} + +static int audio_in_flush(struct q6audio_in *audio) +{ + int rc; + + pr_debug("%s:session id %d: flush\n", __func__, audio->ac->session); + /* Flush if session running */ + if (audio->enabled) { + /* Implicitly issue a pause to the encoder before flushing */ + rc = audio_in_pause(audio); + if (rc < 0) { + pr_err("%s:session id %d: pause cmd failed rc=%d\n", + __func__, audio->ac->session, rc); + return rc; + } + + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) { + pr_err("%s:session id %d: flush cmd failed rc=%d\n", + __func__, audio->ac->session, rc); + return rc; + } + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + q6asm_run(audio->ac, 0x00, 0x00, 0x00); + pr_debug("Rerun the session\n"); + } + audio->rflush = 1; + audio->wflush = 1; + memset(audio->out_frame_info, 0, sizeof(audio->out_frame_info)); + wake_up(&audio->read_wait); + /* get read_lock to ensure no more waiting read thread */ + mutex_lock(&audio->read_lock); + audio->rflush = 0; + mutex_unlock(&audio->read_lock); + wake_up(&audio->write_wait); + /* get write_lock to ensure no more waiting write thread */ + mutex_lock(&audio->write_lock); + audio->wflush = 0; + mutex_unlock(&audio->write_lock); + pr_debug("%s:session id %d: in_bytes %d\n", __func__, + audio->ac->session, atomic_read(&audio->in_bytes)); + pr_debug("%s:session id %d: in_samples %d\n", __func__, + audio->ac->session, atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 0); + atomic_set(&audio->in_samples, 0); + atomic_set(&audio->out_count, 0); + return 0; +} + +/* must be called with audio->lock held */ +int audio_in_enable(struct q6audio_in *audio) +{ + if (audio->enabled) + return 0; + + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +/* must be called with audio->lock held */ +int audio_in_disable(struct q6audio_in *audio) +{ + int rc = 0; + if (!audio->stopped) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s:session id %d: inbytes[%d] insamples[%d]\n", + __func__, audio->ac->session, + atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_err("%s:session id %d: Failed to close the session rc=%d\n", + __func__, audio->ac->session, + rc); + audio->stopped = 1; + memset(audio->out_frame_info, 0, + sizeof(audio->out_frame_info)); + wake_up(&audio->read_wait); + wake_up(&audio->write_wait); + } + pr_debug("%s:session id %d: enabled[%d]\n", __func__, + audio->ac->session, audio->enabled); + return rc; +} + +int audio_in_buf_alloc(struct q6audio_in *audio) +{ + int rc = 0; + + switch (audio->buf_alloc) { + case NO_BUF_ALLOC: + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, + audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_err("%s:session id %d: Buffer Alloc failed\n", + __func__, + audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + } + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + 
ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_err("%s:session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + break; + case BUF_ALLOC_IN: + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_err("%s:session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_OUT; + break; + case BUF_ALLOC_OUT: + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_err("%s:session id %d: Buffer Alloc failed\n", + __func__, + audio->ac->session); + rc = -ENOMEM; + break; + } + audio->buf_alloc |= BUF_ALLOC_IN; + } + break; + default: + pr_debug("%s:session id %d: buf[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + } + + return rc; +} + +int audio_in_set_config(struct file *file, + struct msm_audio_config *cfg) +{ + int rc = 0; + struct q6audio_in *audio = file->private_data; + + if (audio->feedback != NON_TUNNEL_MODE) { + pr_err("%s:session id %d: Not sufficient permission to change the record mode\n", + __func__, audio->ac->session); + rc = -EACCES; + goto ret; + } + if ((cfg->buffer_count > PCM_BUF_COUNT) || + (cfg->buffer_count == 1)) + cfg->buffer_count = PCM_BUF_COUNT; + + audio->pcm_cfg.buffer_count = cfg->buffer_count; + audio->pcm_cfg.buffer_size = cfg->buffer_size; + audio->pcm_cfg.channel_count = cfg->channel_count; + audio->pcm_cfg.sample_rate = cfg->sample_rate; + if (audio->opened && audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_audio_client_buf_alloc(IN, audio->ac, + ALIGN_BUF_SIZE(audio->pcm_cfg.buffer_size), + audio->pcm_cfg.buffer_count); + if (rc < 0) { + pr_err("%s:session id %d: Buffer Alloc failed\n", + __func__, audio->ac->session); + rc = -ENOMEM; + goto ret; + } + } + audio->buf_alloc |= BUF_ALLOC_IN; + rc = 0; + pr_debug("%s:session id %d: AUDIO_SET_CONFIG %d %d\n", __func__, + audio->ac->session, audio->pcm_cfg.buffer_count, + audio->pcm_cfg.buffer_size); +ret: + return rc; +} +/* ------------------- device --------------------- */ +static long audio_in_ioctl_shared(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_FLUSH: { + /* Make sure we're stopped and we wake any threads + * that might be blocked holding the read_lock. + * While audio->stopped read threads will always + * exit immediately. 
+ */ + rc = audio_in_flush(audio); + if (rc < 0) + pr_err("%s:session id %d: Flush Fail rc=%d\n", + __func__, audio->ac->session, rc); + else { /* Register back the flushed read buffer with DSP */ + int cnt = 0; + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + pr_debug("register the read buffer\n"); + } + break; + } + case AUDIO_PAUSE: { + pr_debug("%s:session id %d: AUDIO_PAUSE\n", __func__, + audio->ac->session); + if (audio->enabled) + audio_in_pause(audio); + break; + } + case AUDIO_GET_SESSION_ID: { + if (copy_to_user((void *) arg, &audio->ac->session, + sizeof(u16))) { + pr_err("%s: copy_to_user for AUDIO_GET_SESSION_ID failed\n", + __func__); + rc = -EFAULT; + } + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS) { + struct msm_audio_stats stats; + memset(&stats, 0, sizeof(stats)); + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *) arg, &stats, sizeof(stats))) + return -EFAULT; + return rc; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_FLUSH: + case AUDIO_PAUSE: + case AUDIO_GET_SESSION_ID: + rc = audio_in_ioctl_shared(file, cmd, arg); + break; + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) + rc = -EFAULT; + pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, cfg.buffer_size, + cfg.buffer_count); + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + pr_err("%s: copy_from_user for AUDIO_SET_STREAM_CONFIG failed\n" + , __func__); + rc = -EFAULT; + break; + } + /* Minimum single frame size, + but with in maximum frames number */ + if ((cfg.buffer_size < (audio->min_frame_size+ \ + sizeof(struct meta_out_dsp))) || + (cfg.buffer_count < FRAME_NUM)) { + rc = -EINVAL; + break; + } + if (cfg.buffer_size > MAX_BUFFER_SIZE) { + rc = -EINVAL; + break; + } + audio->str_cfg.buffer_size = cfg.buffer_size; + audio->str_cfg.buffer_count = cfg.buffer_count; + if (audio->opened) { + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_err("%s: session id %d: Buffer Alloc failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENOMEM; + break; + } + } + audio->buf_alloc |= BUF_ALLOC_OUT; + rc = 0; + pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, + audio->str_cfg.buffer_size, + audio->str_cfg.buffer_count); + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + rc = -EFAULT; + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + /* Restrict the num of frames per buf to coincide with + * default buf size */ + if (cfg.frames_per_buf > audio->max_frames_per_buf) { + rc = -EFAULT; + break; + } + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + audio->buf_cfg.frames_per_buf = cfg.frames_per_buf; + 
pr_debug("%s:session id %d: Set-buf-cfg: meta[%d] framesperbuf[%d]\n", + __func__, + audio->ac->session, cfg.meta_info_enable, + cfg.frames_per_buf); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", + __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) + rc = -EFAULT; + break; + } + case AUDIO_GET_CONFIG: { + if (copy_to_user((void *)arg, &audio->pcm_cfg, + sizeof(struct msm_audio_config))) + rc = -EFAULT; + break; + + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config cfg; + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + pr_err("%s: copy_from_user for AUDIO_SET_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = audio_in_set_config(file, &cfg); + break; + } + default: + /* call codec specific ioctl */ + rc = audio->enc_ioctl(file, cmd, arg); + } + mutex_unlock(&audio->lock); + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_stats32 { + u32 byte_count; + u32 sample_count; + u32 unused[2]; +}; + +struct msm_audio_stream_config32 { + u32 buffer_size; + u32 buffer_count; +}; + +struct msm_audio_config32 { + u32 buffer_size; + u32 buffer_count; + u32 channel_count; + u32 sample_rate; + u32 type; + u32 meta_field; + u32 bits; + u32 unused[3]; +}; + +struct msm_audio_buf_cfg32 { + u32 meta_info_enable; + u32 frames_per_buf; +}; + +enum { + AUDIO_GET_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 3, + struct msm_audio_config32), + AUDIO_SET_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 4, + struct msm_audio_config32), + AUDIO_GET_STATS_32 = _IOR(AUDIO_IOCTL_MAGIC, 5, + struct msm_audio_stats32), + AUDIO_SET_STREAM_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 80, + struct msm_audio_stream_config32), + AUDIO_GET_STREAM_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 81, + struct msm_audio_stream_config32), + AUDIO_SET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 94, + struct msm_audio_buf_cfg32), + AUDIO_GET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 93, + struct msm_audio_buf_cfg32), +}; + +long audio_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + if (cmd == AUDIO_GET_STATS_32) { + struct msm_audio_stats32 stats_32; + memset(&stats_32, 0, sizeof(stats_32)); + stats_32.byte_count = atomic_read(&audio->in_bytes); + stats_32.sample_count = atomic_read(&audio->in_samples); + if (copy_to_user((void *) arg, &stats_32, sizeof(stats_32))) { + pr_err("%s: copy_to_user failed for AUDIO_GET_STATS_32\n", + __func__); + return -EFAULT; + } + return rc; + } + + mutex_lock(&audio->lock); + switch (cmd) { + case AUDIO_FLUSH: + case AUDIO_PAUSE: + case AUDIO_GET_SESSION_ID: + rc = audio_in_ioctl_shared(file, cmd, arg); + break; + case AUDIO_GET_STREAM_CONFIG_32: { + struct msm_audio_stream_config32 cfg_32; + memset(&cfg_32, 0, sizeof(cfg_32)); + cfg_32.buffer_size = audio->str_cfg.buffer_size; + cfg_32.buffer_count = audio->str_cfg.buffer_count; + if (copy_to_user((void *)arg, &cfg_32, sizeof(cfg_32))) { + pr_err("%s: Copy to user failed\n", __func__); + rc = -EFAULT; + } + pr_debug("%s:session id %d: AUDIO_GET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, + cfg_32.buffer_size, + cfg_32.buffer_count); + break; + } + case AUDIO_SET_STREAM_CONFIG_32: { + struct msm_audio_stream_config32 cfg_32; + struct msm_audio_stream_config cfg; + if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { + pr_err("%s: copy_from_user for 
AUDIO_SET_STREAM_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.buffer_size = cfg_32.buffer_size; + cfg.buffer_count = cfg_32.buffer_count; + /* Minimum single frame size, + * but with in maximum frames number */ + if ((cfg.buffer_size < (audio->min_frame_size + + sizeof(struct meta_out_dsp))) || + (cfg.buffer_count < FRAME_NUM)) { + rc = -EINVAL; + break; + } + audio->str_cfg.buffer_size = cfg.buffer_size; + audio->str_cfg.buffer_count = cfg.buffer_count; + if (audio->opened) { + rc = q6asm_audio_client_buf_alloc(OUT, audio->ac, + ALIGN_BUF_SIZE(audio->str_cfg.buffer_size), + audio->str_cfg.buffer_count); + if (rc < 0) { + pr_err("%s: session id %d:\n", + __func__, audio->ac->session); + pr_err("Buffer Alloc failed rc=%d\n", rc); + rc = -ENOMEM; + break; + } + } + audio->buf_alloc |= BUF_ALLOC_OUT; + pr_debug("%s:session id %d: AUDIO_SET_STREAM_CONFIG %d %d\n", + __func__, audio->ac->session, + audio->str_cfg.buffer_size, + audio->str_cfg.buffer_count); + break; + } + case AUDIO_SET_BUF_CFG_32: { + struct msm_audio_buf_cfg32 cfg_32; + struct msm_audio_buf_cfg cfg; + if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_BUG_CFG_32 failed", + __func__); + rc = -EFAULT; + break; + } + cfg.meta_info_enable = cfg_32.meta_info_enable; + cfg.frames_per_buf = cfg_32.frames_per_buf; + + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + break; + } + + /* Restrict the num of frames per buf to coincide with + * default buf size */ + if (cfg.frames_per_buf > audio->max_frames_per_buf) { + rc = -EFAULT; + break; + } + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + audio->buf_cfg.frames_per_buf = cfg.frames_per_buf; + pr_debug("%s:session id %d: Set-buf-cfg: meta[%d] framesperbuf[%d]\n", + __func__, audio->ac->session, cfg.meta_info_enable, + cfg.frames_per_buf); + break; + } + case AUDIO_GET_BUF_CFG_32: { + struct msm_audio_buf_cfg32 cfg_32; + pr_debug("%s:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", + __func__, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + cfg_32.meta_info_enable = audio->buf_cfg.meta_info_enable; + cfg_32.frames_per_buf = audio->buf_cfg.frames_per_buf; + + if (copy_to_user((void *)arg, &cfg_32, + sizeof(struct msm_audio_buf_cfg32))) { + pr_err("%s: Copy to user failed\n", __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_GET_CONFIG_32: { + struct msm_audio_config32 cfg_32; + memset(&cfg_32, 0, sizeof(cfg_32)); + cfg_32.buffer_size = audio->pcm_cfg.buffer_size; + cfg_32.buffer_count = audio->pcm_cfg.buffer_count; + cfg_32.channel_count = audio->pcm_cfg.channel_count; + cfg_32.sample_rate = audio->pcm_cfg.sample_rate; + cfg_32.type = audio->pcm_cfg.type; + cfg_32.meta_field = audio->pcm_cfg.meta_field; + cfg_32.bits = audio->pcm_cfg.bits; + + if (copy_to_user((void *)arg, &cfg_32, + sizeof(struct msm_audio_config32))) { + pr_err("%s: Copy to user failed\n", __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_CONFIG_32: { + struct msm_audio_config32 cfg_32; + struct msm_audio_config cfg; + if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.buffer_size = cfg_32.buffer_size; + cfg.buffer_count = cfg_32.buffer_count; + cfg.channel_count = cfg_32.channel_count; + cfg.sample_rate = cfg_32.sample_rate; + cfg.type = cfg_32.type; + cfg.meta_field = cfg_32.meta_field; + cfg.bits = 
cfg_32.bits; + rc = audio_in_set_config(file, &cfg); + break; + } + default: + /* call codec specific ioctl */ + rc = audio->enc_compat_ioctl(file, cmd, arg); + } + mutex_unlock(&audio->lock); + return rc; +} +#endif + +ssize_t audio_in_read(struct file *file, + char __user *buf, + size_t count, loff_t *pos) +{ + struct q6audio_in *audio = file->private_data; + const char __user *start = buf; + unsigned char *data; + uint32_t offset = 0; + uint32_t size = 0; + int rc = 0; + uint32_t idx; + struct meta_out_dsp meta; + uint32_t bytes_to_copy = 0; + uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : + (sizeof(unsigned char) + + (sizeof(struct meta_out_dsp)*(audio->buf_cfg.frames_per_buf))); + + memset(&meta, 0, sizeof(meta)); + pr_debug("%s:session id %d: read - %zd\n", __func__, audio->ac->session, + count); + if (audio->reset_event) + return -ENETRESET; + + if (!audio->enabled) + return -EFAULT; + mutex_lock(&audio->read_lock); + while (count > 0) { + rc = wait_event_interruptible( + audio->read_wait, + ((atomic_read(&audio->out_count) > 0) || + (audio->stopped) || + audio->rflush || audio->eos_rsp || + audio->event_abort)); + + if (audio->event_abort) { + rc = -EIO; + break; + } + + + if (rc < 0) + break; + + if ((audio->stopped && !(atomic_read(&audio->out_count))) || + audio->rflush) { + pr_debug("%s:session id %d: driver in stop state or flush,No more buf to read", + __func__, + audio->ac->session); + rc = 0;/* End of File */ + break; + } + if (!(atomic_read(&audio->out_count)) && + (audio->eos_rsp == 1) && + (count >= (sizeof(unsigned char) + + sizeof(struct meta_out_dsp)))) { + unsigned char num_of_frames; + pr_info("%s:session id %d: eos %d at output\n", + __func__, audio->ac->session, audio->eos_rsp); + if (buf != start) + break; + num_of_frames = 0xFF; + if (copy_to_user(buf, &num_of_frames, + sizeof(unsigned char))) { + rc = -EFAULT; + break; + } + buf += sizeof(unsigned char); + meta.frame_size = 0xFFFF; + meta.encoded_pcm_samples = 0xFFFF; + meta.msw_ts = 0x00; + meta.lsw_ts = 0x00; + meta.nflags = AUD_EOS_SET; + audio->eos_rsp = 0; + if (copy_to_user(buf, &meta, sizeof(meta))) { + rc = -EFAULT; + break; + } + buf += sizeof(meta); + break; + } + data = (unsigned char *)q6asm_is_cpu_buf_avail(OUT, audio->ac, + &size, &idx); + if ((count >= (size + mfield_size)) && data) { + if (audio->buf_cfg.meta_info_enable) { + if (copy_to_user(buf, + &audio->out_frame_info[idx][0], + sizeof(unsigned char))) { + rc = -EFAULT; + break; + } + bytes_to_copy = + (size + audio->out_frame_info[idx][1]); + /* Number of frames information copied */ + buf += sizeof(unsigned char); + count -= sizeof(unsigned char); + } else { + offset = audio->out_frame_info[idx][1]; + bytes_to_copy = size; + } + + pr_debug("%s:session id %d: offset=%d nr of frames= %d\n", + __func__, audio->ac->session, + audio->out_frame_info[idx][1], + audio->out_frame_info[idx][0]); + + if (copy_to_user(buf, &data[offset], bytes_to_copy)) { + rc = -EFAULT; + break; + } + count -= bytes_to_copy; + buf += bytes_to_copy; + } else { + pr_err("%s:session id %d: short read data[%pK] bytesavail[%d]bytesrequest[%zd]\n", + __func__, + audio->ac->session, + data, size, count); + } + atomic_dec(&audio->out_count); + q6asm_read(audio->ac); + break; + } + mutex_unlock(&audio->read_lock); + + pr_debug("%s:session id %d: read: %zd bytes\n", __func__, + audio->ac->session, (buf-start)); + if (buf > start) + return buf - start; + return rc; +} + +static int extract_meta_info(char *buf, unsigned long *msw_ts, + unsigned long *lsw_ts, 
unsigned int *flags) +{ + struct meta_in *meta = (struct meta_in *)buf; + *msw_ts = meta->ntimestamp.highpart; + *lsw_ts = meta->ntimestamp.lowpart; + *flags = meta->nflags; + return 0; +} + +ssize_t audio_in_write(struct file *file, + const char __user *buf, + size_t count, loff_t *pos) +{ + struct q6audio_in *audio = file->private_data; + const char __user *start = buf; + size_t xfer = 0; + char *cpy_ptr; + int rc = 0; + unsigned char *data; + uint32_t size = 0; + uint32_t idx = 0; + uint32_t nflags = 0; + unsigned long msw_ts = 0; + unsigned long lsw_ts = 0; + uint32_t mfield_size = (audio->buf_cfg.meta_info_enable == 0) ? 0 : + sizeof(struct meta_in); + + pr_debug("%s:session id %d: to write[%zd]\n", __func__, + audio->ac->session, count); + if (audio->reset_event) + return -ENETRESET; + + if (!audio->enabled) + return -EFAULT; + mutex_lock(&audio->write_lock); + + while (count > 0) { + rc = wait_event_interruptible(audio->write_wait, + ((atomic_read(&audio->in_count) > 0) || + (audio->stopped) || + (audio->wflush) || (audio->event_abort))); + + if (audio->event_abort) { + rc = -EIO; + break; + } + + if (rc < 0) + break; + if (audio->stopped || audio->wflush) { + pr_debug("%s: session id %d: stop or flush\n", __func__, + audio->ac->session); + rc = -EBUSY; + break; + } + /* if no PCM data, might have only eos buffer + such case do not hold cpu buffer */ + if ((buf == start) && (count == mfield_size)) { + char eos_buf[sizeof(struct meta_in)]; + /* Processing begining of user buffer */ + if (copy_from_user(eos_buf, buf, mfield_size)) { + rc = -EFAULT; + break; + } + /* Check if EOS flag is set and buffer has + * contains just meta field + */ + extract_meta_info(eos_buf, &msw_ts, &lsw_ts, + &nflags); + buf += mfield_size; + /* send the EOS and return */ + pr_debug("%s:session id %d: send EOS 0x%8x\n", + __func__, + audio->ac->session, nflags); + break; + } + data = (unsigned char *)q6asm_is_cpu_buf_avail(IN, audio->ac, + &size, &idx); + if (!data) { + pr_debug("%s:session id %d: No buf available\n", + __func__, audio->ac->session); + continue; + } + cpy_ptr = data; + if (audio->buf_cfg.meta_info_enable) { + if (buf == start) { + /* Processing beginning of user buffer */ + if (copy_from_user(cpy_ptr, buf, mfield_size)) { + rc = -EFAULT; + break; + } + /* Check if EOS flag is set and buffer has + * contains just meta field + */ + extract_meta_info(cpy_ptr, &msw_ts, &lsw_ts, + &nflags); + buf += mfield_size; + count -= mfield_size; + } else { + pr_debug("%s:session id %d: continuous buffer\n", + __func__, audio->ac->session); + } + } + xfer = (count > (audio->pcm_cfg.buffer_size)) ? 
+ (audio->pcm_cfg.buffer_size) : count; + + if (copy_from_user(cpy_ptr, buf, xfer)) { + rc = -EFAULT; + break; + } + rc = q6asm_write(audio->ac, xfer, msw_ts, lsw_ts, 0x00); + if (rc < 0) { + rc = -EFAULT; + break; + } + atomic_dec(&audio->in_count); + count -= xfer; + buf += xfer; + } + mutex_unlock(&audio->write_lock); + pr_debug("%s:session id %d: eos_condition 0x%x buf[0x%pK] start[0x%pK]\n", + __func__, audio->ac->session, + nflags, buf, start); + if (nflags & AUD_EOS_SET) { + rc = q6asm_cmd(audio->ac, CMD_EOS); + pr_info("%s:session id %d: eos %d at input\n", __func__, + audio->ac->session, audio->eos_rsp); + } + pr_debug("%s:session id %d: Written %zd Avail Buf[%d]", __func__, + audio->ac->session, (buf - start - mfield_size), + atomic_read(&audio->in_count)); + if (!rc) { + if (buf > start) + return buf - start; + } + return rc; +} + +int audio_in_release(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = file->private_data; + pr_info("%s: session id %d\n", __func__, audio->ac->session); + mutex_lock(&audio->lock); + audio_in_disable(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->lock); + kfree(audio->enc_cfg); + kfree(audio->codec_cfg); + kfree(audio); + return 0; +} diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.h b/drivers/misc/qcom/qdsp6v2/audio_utils.h new file mode 100644 index 000000000000..c757207b42e6 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_utils.h @@ -0,0 +1,113 @@ +/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ +#include <linux/msm_audio.h> +#include <linux/compat.h> +#include "q6audio_common.h" + +#define FRAME_NUM (8) + +#define PCM_BUF_COUNT (2) + +#define AUD_EOS_SET 0x01 +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 + +#define NO_BUF_ALLOC 0x00 +#define BUF_ALLOC_IN 0x01 +#define BUF_ALLOC_OUT 0x02 +#define BUF_ALLOC_INOUT 0x03 +#define ALIGN_BUF_SIZE(size) ((size + 4095) & (~4095)) + +struct timestamp { + u32 lowpart; + u32 highpart; +} __packed; + +struct meta_in { + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __packed; + +struct meta_out_dsp { + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __packed; + +struct meta_out { + unsigned char num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __packed; + +struct q6audio_in { + spinlock_t dsp_lock; + atomic_t in_bytes; + atomic_t in_samples; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + wait_queue_head_t read_wait; + wait_queue_head_t write_wait; + + struct audio_client *ac; + struct msm_audio_stream_config str_cfg; + void *enc_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + void *codec_cfg; + + /* number of buffers available to read/write */ + atomic_t in_count; + atomic_t out_count; + + /* first idx: num of frames per buf, second idx: offset to frame */ + uint32_t out_frame_info[FRAME_NUM][2]; + int eos_rsp; + int opened; + int enabled; + int stopped; + int event_abort; + int feedback; /* Flag indicates whether used + in Non Tunnel mode */ + int rflush; + int wflush; + int buf_alloc; + uint16_t min_frame_size; + uint16_t max_frames_per_buf; + bool reset_event; + long (*enc_ioctl)(struct file *, unsigned int, unsigned long); + long (*enc_compat_ioctl)(struct file *, unsigned int, unsigned long); +}; + +int audio_in_enable(struct q6audio_in *audio); +int audio_in_disable(struct q6audio_in *audio); +int audio_in_buf_alloc(struct q6audio_in *audio); +long audio_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +#ifdef CONFIG_COMPAT +long audio_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg); +#else +#define audio_in_compat_ioctl NULL +#endif +ssize_t audio_in_read(struct file *file, char __user *buf, + size_t count, loff_t *pos); +ssize_t audio_in_write(struct file *file, const char __user *buf, + size_t count, loff_t *pos); +int audio_in_release(struct inode *inode, struct file *file); +int audio_in_set_config(struct file *file, struct msm_audio_config *cfg); diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c new file mode 100644 index 000000000000..7fa5e326fa0b --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c @@ -0,0 +1,2140 @@ +/* Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2017, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <asm/ioctls.h> +#include <linux/debugfs.h> +#include <linux/msm_audio_ion.h> +#include <linux/compat.h> +#include <linux/mutex.h> +#include "audio_utils_aio.h" +#ifdef CONFIG_USE_DEV_CTRL_VOLUME +#include <linux/qdsp6v2/audio_dev_ctl.h> +#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/ +static DEFINE_MUTEX(lock); +#ifdef CONFIG_DEBUG_FS + +int audio_aio_debug_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +ssize_t audio_aio_debug_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + const int debug_bufmax = 4096; + static char buffer[4096]; + int n = 0; + struct q6audio_aio *audio; + + mutex_lock(&lock); + if (file->private_data != NULL) { + audio = file->private_data; + mutex_lock(&audio->lock); + n = scnprintf(buffer, debug_bufmax, "opened %d\n", + audio->opened); + n += scnprintf(buffer + n, debug_bufmax - n, + "enabled %d\n", audio->enabled); + n += scnprintf(buffer + n, debug_bufmax - n, + "stopped %d\n", audio->stopped); + n += scnprintf(buffer + n, debug_bufmax - n, + "feedback %d\n", audio->feedback); + mutex_unlock(&audio->lock); + /* Following variables are only useful for debugging when + * when playback halts unexpectedly. Thus, no mutual exclusion + * enforced + */ + n += scnprintf(buffer + n, debug_bufmax - n, + "wflush %d\n", audio->wflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "rflush %d\n", audio->rflush); + n += scnprintf(buffer + n, debug_bufmax - n, + "inqueue empty %d\n", + list_empty(&audio->in_queue)); + n += scnprintf(buffer + n, debug_bufmax - n, + "outqueue empty %d\n", + list_empty(&audio->out_queue)); + } + mutex_unlock(&lock); + buffer[n] = 0; + return simple_read_from_buffer(buf, count, ppos, buffer, n); +} +#endif + +static long audio_aio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#ifdef CONFIG_COMPAT +static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg); +#else +#define audio_aio_compat_ioctl NULL +#endif +int insert_eos_buf(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node) +{ + struct dec_meta_out *eos_buf = buf_node->kvaddr; + pr_debug("%s[%pK]:insert_eos_buf\n", __func__, audio); + eos_buf->num_of_frames = 0xFFFFFFFF; + eos_buf->meta_out_dsp[0].offset_to_frame = 0x0; + eos_buf->meta_out_dsp[0].nflags = AUDIO_DEC_EOS_SET; + return sizeof(struct dec_meta_out) + + sizeof(eos_buf->meta_out_dsp[0]); +} + +/* Routine which updates read buffers of driver/dsp, + for flush operation as DSP output might not have proper + value set */ +static int insert_meta_data_flush(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node) +{ + struct dec_meta_out *meta_data = buf_node->kvaddr; + meta_data->num_of_frames = 0x0; + meta_data->meta_out_dsp[0].offset_to_frame = 0x0; + meta_data->meta_out_dsp[0].nflags = 0x0; + return sizeof(struct dec_meta_out) + + sizeof(meta_data->meta_out_dsp[0]); +} + +static int audio_aio_ion_lookup_vaddr(struct q6audio_aio *audio, void *addr, + unsigned long len, + struct audio_aio_ion_region **region) +{ + struct audio_aio_ion_region *region_elt; + + int match_count = 0; + + *region = NULL; + + /* returns physical address or zero */ + list_for_each_entry(region_elt, &audio->ion_region_queue, list) { + if (addr 
>= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len && + addr + len > addr) { + /* to avoid integer addition overflow */ + + /* offset since we could pass vaddr inside a registerd + * ion buffer + */ + + match_count++; + if (!*region) + *region = region_elt; + } + } + + if (match_count > 1) { + pr_err("%s[%pK]:multiple hits for vaddr %pK, len %ld\n", + __func__, audio, addr, len); + list_for_each_entry(region_elt, &audio->ion_region_queue, + list) { + if (addr >= region_elt->vaddr && + addr < region_elt->vaddr + region_elt->len && + addr + len <= region_elt->vaddr + region_elt->len && + addr + len > addr) + pr_err("\t%s[%pK]:%pK, %ld --> %pK\n", + __func__, audio, + region_elt->vaddr, + region_elt->len, + ®ion_elt->paddr); + } + } + + return *region ? 0 : -1; +} + +static phys_addr_t audio_aio_ion_fixup(struct q6audio_aio *audio, void *addr, + unsigned long len, int ref_up, void **kvaddr) +{ + struct audio_aio_ion_region *region; + phys_addr_t paddr; + int ret; + + ret = audio_aio_ion_lookup_vaddr(audio, addr, len, ®ion); + if (ret) { + pr_err("%s[%pK]:lookup (%pK, %ld) failed\n", + __func__, audio, addr, len); + return 0; + } + if (ref_up) + region->ref_cnt++; + else + region->ref_cnt--; + pr_debug("%s[%pK]:found region %pK ref_cnt %d\n", + __func__, audio, region, region->ref_cnt); + paddr = region->paddr + (addr - region->vaddr); + /* provide kernel virtual address for accessing meta information */ + if (kvaddr) + *kvaddr = (void *) (region->kvaddr + (addr - region->vaddr)); + return paddr; +} + +static int audio_aio_pause(struct q6audio_aio *audio) +{ + int rc = -EINVAL; + + pr_debug("%s[%pK], enabled = %d\n", __func__, audio, + audio->enabled); + if (audio->enabled) { + rc = q6asm_cmd(audio->ac, CMD_PAUSE); + if (rc < 0) + pr_err("%s[%pK]: pause cmd failed rc=%d\n", + __func__, audio, rc); + + if (rc == 0) { + /* Send suspend only if pause was successful */ + rc = q6asm_cmd(audio->ac, CMD_SUSPEND); + if (rc < 0) + pr_err("%s[%pK]: suspend cmd failed rc=%d\n", + __func__, audio, rc); + } else + pr_err("%s[%pK]: not sending suspend since pause failed\n", + __func__, audio); + + } else + pr_err("%s[%pK]: Driver not enabled\n", __func__, audio); + return rc; +} + +static int audio_aio_flush(struct q6audio_aio *audio) +{ + int rc = 0; + + if (audio->enabled) { + /* Implicitly issue a pause to the decoder before flushing if + it is not in pause state */ + if (!(audio->drv_status & ADRV_STATUS_PAUSE)) { + rc = audio_aio_pause(audio); + if (rc < 0) + pr_err("%s[%pK}: pause cmd failed rc=%d\n", + __func__, audio, + rc); + else + audio->drv_status |= ADRV_STATUS_PAUSE; + } + rc = q6asm_cmd(audio->ac, CMD_FLUSH); + if (rc < 0) + pr_err("%s[%pK]: flush cmd failed rc=%d\n", + __func__, audio, rc); + /* Not in stop state, reenable the stream */ + if (audio->stopped == 0) { + rc = audio_aio_enable(audio); + if (rc) + pr_err("%s[%pK]:audio re-enable failed\n", + __func__, audio); + else { + audio->enabled = 1; + if (audio->drv_status & ADRV_STATUS_PAUSE) + audio->drv_status &= ~ADRV_STATUS_PAUSE; + } + } + } + pr_debug("%s[%pK]:in_bytes %d\n", + __func__, audio, atomic_read(&audio->in_bytes)); + pr_debug("%s[%pK]:in_samples %d\n", + __func__, audio, atomic_read(&audio->in_samples)); + atomic_set(&audio->in_bytes, 0); + atomic_set(&audio->in_samples, 0); + return rc; +} + +static int audio_aio_outport_flush(struct q6audio_aio *audio) +{ + int rc; + + rc = q6asm_cmd(audio->ac, CMD_OUT_FLUSH); + if (rc < 0) + pr_err("%s[%pK}: output 
port flush cmd failed rc=%d\n", + __func__, audio, rc); + return rc; +} + +/* Write buffer to DSP / Handle Ack from DSP */ +void audio_aio_async_write_ack(struct q6audio_aio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audio_aio_buffer_node *used_buf; + + /* No active flush in progress */ + if (audio->wflush) + return; + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (list_empty(&audio->out_queue)) { + pr_warning("%s: ingore unexpected event from dsp\n", __func__); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + return; + } + used_buf = list_first_entry(&audio->out_queue, + struct audio_aio_buffer_node, list); + if (token == used_buf->token) { + list_del(&used_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + pr_debug("%s[%pK]:consumed buffer\n", __func__, audio); + event_payload.aio_buf = used_buf->buf; + audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(used_buf); + if (list_empty(&audio->out_queue) && + (audio->drv_status & ADRV_STATUS_FSYNC)) { + pr_debug("%s[%pK]: list is empty, reached EOS in Tunnel\n", + __func__, audio); + wake_up(&audio->write_wait); + } + } else { + pr_err("%s[%pK]:expected=%x ret=%x\n", + __func__, audio, used_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} + +/* ------------------- device --------------------- */ +void audio_aio_async_out_flush(struct q6audio_aio *audio) +{ + struct audio_aio_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + unsigned long flags; + + pr_debug("%s[%pK}\n", __func__, audio); + /* EOS followed by flush, EOS response not guranteed, free EOS i/p + buffer */ + spin_lock_irqsave(&audio->dsp_lock, flags); + + if (audio->eos_flag && (audio->eos_write_payload.aio_buf.buf_addr)) { + pr_debug("%s[%pK]: EOS followed by flush received,acknowledge" + " eos i/p buffer immediately\n", __func__, audio); + audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); + list_for_each_safe(ptr, next, &audio->out_queue) { + buf_node = list_entry(ptr, struct audio_aio_buffer_node, list); + list_del(&buf_node->list); + payload.aio_buf = buf_node->buf; + audio_aio_post_event(audio, AUDIO_EVENT_WRITE_DONE, payload); + kfree(buf_node); + pr_debug("%s[%pK]: Propagate WRITE_DONE during flush\n", + __func__, audio); + } +} + +void audio_aio_async_in_flush(struct q6audio_aio *audio) +{ + struct audio_aio_buffer_node *buf_node; + struct list_head *ptr, *next; + union msm_audio_event_payload payload; + + pr_debug("%s[%pK]\n", __func__, audio); + list_for_each_safe(ptr, next, &audio->in_queue) { + buf_node = list_entry(ptr, struct audio_aio_buffer_node, list); + list_del(&buf_node->list); + /* Forcefull send o/p eos buffer after flush, if no eos response + * received by dsp even after sending eos command */ + if ((audio->eos_rsp != 1) && audio->eos_flag) { + pr_debug("%s[%pK]: send eos on o/p buffer during flush\n", + __func__, audio); + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + audio->eos_flag = 0; + } else { + payload.aio_buf = buf_node->buf; + payload.aio_buf.data_len = + insert_meta_data_flush(audio, buf_node); + } + audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE, payload); + kfree(buf_node); + pr_debug("%s[%pK]: Propagate READ_DONE 
during flush\n", + __func__, audio); + } +} + +int audio_aio_enable(struct q6audio_aio *audio) +{ + /* 2nd arg: 0 -> run immediately + 3rd arg: 0 -> msw_ts, 4th arg: 0 ->lsw_ts */ + return q6asm_run(audio->ac, 0x00, 0x00, 0x00); +} + +int audio_aio_disable(struct q6audio_aio *audio) +{ + int rc = 0; + if (audio->opened) { + audio->enabled = 0; + audio->opened = 0; + pr_debug("%s[%pK]: inbytes[%d] insamples[%d]\n", __func__, + audio, atomic_read(&audio->in_bytes), + atomic_read(&audio->in_samples)); + /* Close the session */ + rc = q6asm_cmd(audio->ac, CMD_CLOSE); + if (rc < 0) + pr_err("%s[%pK]:Failed to close the session rc=%d\n", + __func__, audio, rc); + audio->stopped = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + pr_debug("%s[%pK]:enabled[%d]\n", __func__, audio, audio->enabled); + return rc; +} + +void audio_aio_reset_ion_region(struct q6audio_aio *audio) +{ + struct audio_aio_ion_region *region; + struct list_head *ptr, *next; + + list_for_each_safe(ptr, next, &audio->ion_region_queue) { + region = list_entry(ptr, struct audio_aio_ion_region, list); + list_del(®ion->list); + msm_audio_ion_free_legacy(audio->client, region->handle); + kfree(region); + } + + return; +} + +void audio_aio_reset_event_queue(struct q6audio_aio *audio) +{ + unsigned long flags; + struct audio_aio_event *drv_evt; + struct list_head *ptr, *next; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + list_for_each_safe(ptr, next, &audio->event_queue) { + drv_evt = list_first_entry(&audio->event_queue, + struct audio_aio_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + list_for_each_safe(ptr, next, &audio->free_event_queue) { + drv_evt = list_first_entry(&audio->free_event_queue, + struct audio_aio_event, list); + list_del(&drv_evt->list); + kfree(drv_evt); + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + return; +} + +static void audio_aio_unmap_ion_region(struct q6audio_aio *audio) +{ + struct audio_aio_ion_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_debug("%s[%pK]:\n", __func__, audio); + list_for_each_safe(ptr, next, &audio->ion_region_queue) { + region = list_entry(ptr, struct audio_aio_ion_region, list); + if (region != NULL) { + pr_debug("%s[%pK]: phy_address = 0x%pK\n", + __func__, audio, ®ion->paddr); + rc = q6asm_memory_unmap(audio->ac, + region->paddr, IN); + if (rc < 0) + pr_err("%s[%pK]: memory unmap failed\n", + __func__, audio); + } + } +} + +#ifdef CONFIG_USE_DEV_CTRL_VOLUME + +static void audio_aio_listner(u32 evt_id, union auddev_evt_data *evt_payload, + void *private_data) +{ + struct q6audio_aio *audio = (struct q6audio_aio *) private_data; + int rc = 0; + + switch (evt_id) { + case AUDDEV_EVT_STREAM_VOL_CHG: + audio->volume = evt_payload->session_vol; + pr_debug("%s[%pK]: AUDDEV_EVT_STREAM_VOL_CHG, stream vol %d, enabled = %d\n", + __func__, audio, audio->volume, audio->enabled); + if (audio->enabled == 1) { + if (audio->ac) { + rc = q6asm_set_volume(audio->ac, audio->volume); + if (rc < 0) { + pr_err("%s[%pK]: Send Volume command failed rc=%d\n", + __func__, audio, rc); + } + } + } + break; + default: + pr_err("%s[%pK]:ERROR:wrong event\n", __func__, audio); + break; + } +} + +int register_volume_listener(struct q6audio_aio *audio) +{ + int rc = 0; + audio->device_events = AUDDEV_EVT_STREAM_VOL_CHG; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + + rc = auddev_register_evt_listner(audio->device_events, + AUDDEV_CLNT_DEC, + audio->ac->session, + audio_aio_listner, + (void *)audio); + if (rc < 0) { + 
pr_err("%s[%pK]: Event listener failed\n", __func__, audio); + rc = -EACCES; + } + return rc; +} +void unregister_volume_listener(struct q6audio_aio *audio) +{ + auddev_unregister_evt_listner(AUDDEV_CLNT_DEC, audio->ac->session); +} + +int enable_volume_ramp(struct q6audio_aio *audio) +{ + int rc = 0; + struct asm_softpause_params softpause; + struct asm_softvolume_params softvol; + + if (audio->ac == NULL) + return -EINVAL; + pr_debug("%s[%pK]\n", __func__, audio); + softpause.enable = SOFT_PAUSE_ENABLE; + softpause.period = SOFT_PAUSE_PERIOD; + softpause.step = SOFT_PAUSE_STEP; + softpause.rampingcurve = SOFT_PAUSE_CURVE_LINEAR; + + softvol.period = SOFT_VOLUME_PERIOD; + softvol.step = SOFT_VOLUME_STEP; + softvol.rampingcurve = SOFT_VOLUME_CURVE_LINEAR; + + if (softpause.rampingcurve == SOFT_PAUSE_CURVE_LINEAR) + softpause.step = SOFT_PAUSE_STEP_LINEAR; + if (softvol.rampingcurve == SOFT_VOLUME_CURVE_LINEAR) + softvol.step = SOFT_VOLUME_STEP_LINEAR; + rc = q6asm_set_volume(audio->ac, audio->volume); + if (rc < 0) { + pr_err("%s: Send Volume command failed rc=%d\n", + __func__, rc); + return rc; + } + rc = q6asm_set_softpause(audio->ac, &softpause); + if (rc < 0) { + pr_err("%s: Send SoftPause Param failed rc=%d\n", + __func__, rc); + return rc; + } + rc = q6asm_set_softvolume(audio->ac, &softvol); + if (rc < 0) { + pr_err("%s: Send SoftVolume Param failed rc=%d\n", + __func__, rc); + return rc; + } + /* disable mute by default */ + rc = q6asm_set_mute(audio->ac, 0); + if (rc < 0) { + pr_err("%s: Send mute command failed rc=%d\n", + __func__, rc); + return rc; + } + return rc; +} + +#else /*CONFIG_USE_DEV_CTRL_VOLUME*/ +int register_volume_listener(struct q6audio_aio *audio) +{ + return 0;/* do nothing */ +} +void unregister_volume_listener(struct q6audio_aio *audio) +{ + return;/* do nothing */ +} +int enable_volume_ramp(struct q6audio_aio *audio) +{ + return 0; /* do nothing */ +} +#endif /*CONFIG_USE_DEV_CTRL_VOLUME*/ + +int audio_aio_release(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = file->private_data; + pr_debug("%s[%pK]\n", __func__, audio); + mutex_lock(&lock); + mutex_lock(&audio->lock); + mutex_lock(&audio->read_lock); + mutex_lock(&audio->write_lock); + audio->wflush = 1; + if (audio->wakelock_voted && + (audio->audio_ws_mgr != NULL) && + (audio->miscdevice != NULL)) { + audio->wakelock_voted = false; + mutex_lock(&audio->audio_ws_mgr->ws_lock); + if ((audio->audio_ws_mgr->ref_cnt > 0) && + (--audio->audio_ws_mgr->ref_cnt == 0)) { + pm_relax(audio->miscdevice->this_device); + } + mutex_unlock(&audio->audio_ws_mgr->ws_lock); + } + if (audio->enabled) + audio_aio_flush(audio); + audio->wflush = 0; + audio->drv_ops.out_flush(audio); + audio->drv_ops.in_flush(audio); + audio_aio_disable(audio); + audio_aio_unmap_ion_region(audio); + audio_aio_reset_ion_region(audio); + msm_audio_ion_client_destroy(audio->client); + audio->event_abort = 1; + wake_up(&audio->event_wait); + audio_aio_reset_event_queue(audio); + q6asm_audio_client_free(audio->ac); + mutex_unlock(&audio->write_lock); + mutex_unlock(&audio->read_lock); + mutex_unlock(&audio->lock); + mutex_destroy(&audio->lock); + mutex_destroy(&audio->read_lock); + mutex_destroy(&audio->write_lock); + mutex_destroy(&audio->get_event_lock); + unregister_volume_listener(audio); + +#ifdef CONFIG_DEBUG_FS + if (audio->dentry) + debugfs_remove(audio->dentry); +#endif + kfree(audio->codec_cfg); + kfree(audio); + file->private_data = NULL; + mutex_unlock(&lock); + return 0; +} + +int audio_aio_fsync(struct file 
*file, loff_t start, loff_t end, int datasync) +{ + int rc = 0; + struct q6audio_aio *audio = file->private_data; + + if (!audio->enabled || audio->feedback) + return -EINVAL; + + /* Blocking client sends more data */ + mutex_lock(&audio->lock); + audio->drv_status |= ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + pr_debug("%s[%pK]:\n", __func__, audio); + + audio->eos_rsp = 0; + + pr_debug("%s[%pK]Wait for write done from DSP\n", __func__, audio); + rc = wait_event_interruptible(audio->write_wait, + (list_empty(&audio->out_queue)) || + audio->wflush || audio->stopped); + + if (audio->stopped || audio->wflush) { + pr_debug("%s[%pK]: Audio Flushed or Stopped,this is not EOS\n" + , __func__, audio); + audio->wflush = 0; + rc = -EBUSY; + } + + if (rc < 0) { + pr_err("%s[%pK]: wait event for list_empty failed, rc = %d\n", + __func__, audio, rc); + goto done; + } + + rc = q6asm_cmd(audio->ac, CMD_EOS); + pr_debug("%s[%pK]: EOS cmd sent to DSP\n", __func__, audio); + + if (rc < 0) + pr_err("%s[%pK]: q6asm_cmd failed, rc = %d", + __func__, audio, rc); + + pr_debug("%s[%pK]: wait for RENDERED_EOS from DSP\n" + , __func__, audio); + rc = wait_event_interruptible(audio->write_wait, + (audio->eos_rsp || audio->wflush || + audio->stopped)); + + if (rc < 0) { + pr_err("%s[%pK]: wait event for eos_rsp failed, rc = %d\n", + __func__, audio, rc); + goto done; + } + + if (audio->stopped || audio->wflush) { + audio->wflush = 0; + pr_debug("%s[%pK]: Audio Flushed or Stopped,this is not EOS\n" + , __func__, audio); + rc = -EBUSY; + } + + if (audio->eos_rsp == 1) + pr_debug("%s[%pK]: EOS\n", __func__, audio); + + +done: + mutex_lock(&audio->lock); + audio->drv_status &= ~ADRV_STATUS_FSYNC; + mutex_unlock(&audio->lock); + + return rc; +} + +static int audio_aio_events_pending(struct q6audio_aio *audio) +{ + unsigned long flags; + int empty; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + empty = !list_empty(&audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return empty || audio->event_abort || audio->reset_event; +} + +static long audio_aio_process_event_req_common(struct q6audio_aio *audio, + struct msm_audio_event *usr_evt) +{ + long rc; + struct audio_aio_event *drv_evt = NULL; + int timeout; + unsigned long flags; + + timeout = usr_evt->timeout_ms; + + if (timeout > 0) { + rc = wait_event_interruptible_timeout(audio->event_wait, + audio_aio_events_pending + (audio), + msecs_to_jiffies + (timeout)); + if (rc == 0) + return -ETIMEDOUT; + } else { + rc = wait_event_interruptible(audio->event_wait, + audio_aio_events_pending(audio)); + } + if (rc < 0) + return rc; + + if (audio->reset_event) { + audio->reset_event = false; + pr_err("In SSR, post ENETRESET err\n"); + return -ENETRESET; + } + + if (audio->event_abort) { + audio->event_abort = 0; + return -ENODEV; + } + + rc = 0; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + if (!list_empty(&audio->event_queue)) { + drv_evt = list_first_entry(&audio->event_queue, + struct audio_aio_event, list); + list_del(&drv_evt->list); + } + if (drv_evt) { + usr_evt->event_type = drv_evt->event_type; + usr_evt->event_payload = drv_evt->payload; + list_add_tail(&drv_evt->list, &audio->free_event_queue); + } else { + pr_err("%s[%pK]:Unexpected path\n", __func__, audio); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return -EPERM; + } + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + + if (drv_evt->event_type == AUDIO_EVENT_WRITE_DONE) { + pr_debug("%s[%pK]:posted AUDIO_EVENT_WRITE_DONE to 
user\n", + __func__, audio); + mutex_lock(&audio->write_lock); + audio_aio_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->write_lock); + } else if (drv_evt->event_type == AUDIO_EVENT_READ_DONE) { + pr_debug("%s[%pK]:posted AUDIO_EVENT_READ_DONE to user\n", + __func__, audio); + mutex_lock(&audio->read_lock); + audio_aio_ion_fixup(audio, drv_evt->payload.aio_buf.buf_addr, + drv_evt->payload.aio_buf.buf_len, 0, 0); + mutex_unlock(&audio->read_lock); + } + + /* Some read buffer might be held up in DSP,release all + * Once EOS indicated + */ + if (audio->eos_rsp && !list_empty(&audio->in_queue)) { + pr_debug("%s[%pK]:Send flush command to release read buffers" + " held up in DSP\n", __func__, audio); + mutex_lock(&audio->lock); + audio_aio_flush(audio); + mutex_unlock(&audio->lock); + } + + return rc; +} + +static long audio_aio_process_event_req(struct q6audio_aio *audio, + void __user *arg) +{ + long rc; + struct msm_audio_event usr_evt; + + if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event))) { + pr_err("%s: copy_from_user failed\n", __func__); + return -EFAULT; + } + + rc = audio_aio_process_event_req_common(audio, &usr_evt); + + if (copy_to_user(arg, &usr_evt, sizeof(usr_evt))) { + pr_err("%s: copy_to_user failed\n", __func__); + rc = -EFAULT; + } + return rc; +} + +#ifdef CONFIG_COMPAT + +struct msm_audio_aio_buf32 { + compat_uptr_t buf_addr; + u32 buf_len; + u32 data_len; + compat_uptr_t private_data; + u16 mfield_sz; /*only useful for data has meta field */ +}; + +struct msm_audio_bitstream_info32 { + u32 codec_type; + u32 chan_info; + u32 sample_rate; + u32 bit_stream_info; + u32 bit_rate; + u32 unused[3]; +}; + +struct msm_audio_bitstream_error_info32 { + u32 dec_id; + u32 err_msg_indicator; + u32 err_type; +}; + +union msm_audio_event_payload32 { + struct msm_audio_aio_buf32 aio_buf; + struct msm_audio_bitstream_info32 stream_info; + struct msm_audio_bitstream_error_info32 error_info; + s32 reserved; +}; + +struct msm_audio_event32 { + s32 event_type; + s32 timeout_ms; + union msm_audio_event_payload32 event_payload; +}; + +static long audio_aio_process_event_req_compat(struct q6audio_aio *audio, + void __user *arg) +{ + long rc; + struct msm_audio_event32 usr_evt_32; + struct msm_audio_event usr_evt; + memset(&usr_evt, 0, sizeof(struct msm_audio_event)); + + if (copy_from_user(&usr_evt_32, arg, + sizeof(struct msm_audio_event32))) { + pr_err("%s: copy_from_user failed\n", __func__); + return -EFAULT; + } + usr_evt.timeout_ms = usr_evt_32.timeout_ms; + + rc = audio_aio_process_event_req_common(audio, &usr_evt); + if (rc < 0) { + pr_err("%s: audio process event failed, rc = %ld", + __func__, rc); + return rc; + } + + usr_evt_32.event_type = usr_evt.event_type; + switch (usr_evt_32.event_type) { + case AUDIO_EVENT_SUSPEND: + case AUDIO_EVENT_RESUME: + case AUDIO_EVENT_WRITE_DONE: + case AUDIO_EVENT_READ_DONE: + usr_evt_32.event_payload.aio_buf.buf_addr = + ptr_to_compat(usr_evt.event_payload.aio_buf.buf_addr); + usr_evt_32.event_payload.aio_buf.buf_len = + usr_evt.event_payload.aio_buf.buf_len; + usr_evt_32.event_payload.aio_buf.data_len = + usr_evt.event_payload.aio_buf.data_len; + usr_evt_32.event_payload.aio_buf.private_data = + ptr_to_compat(usr_evt.event_payload.aio_buf.private_data); + usr_evt_32.event_payload.aio_buf.mfield_sz = + usr_evt.event_payload.aio_buf.mfield_sz; + break; + case AUDIO_EVENT_STREAM_INFO: + usr_evt_32.event_payload.stream_info.codec_type = + 
usr_evt.event_payload.stream_info.codec_type; + usr_evt_32.event_payload.stream_info.chan_info = + usr_evt.event_payload.stream_info.chan_info; + usr_evt_32.event_payload.stream_info.sample_rate = + usr_evt.event_payload.stream_info.sample_rate; + usr_evt_32.event_payload.stream_info.bit_stream_info = + usr_evt.event_payload.stream_info.bit_stream_info; + usr_evt_32.event_payload.stream_info.bit_rate = + usr_evt.event_payload.stream_info.bit_rate; + break; + case AUDIO_EVENT_BITSTREAM_ERROR_INFO: + usr_evt_32.event_payload.error_info.dec_id = + usr_evt.event_payload.error_info.dec_id; + usr_evt_32.event_payload.error_info.err_msg_indicator = + usr_evt.event_payload.error_info.err_msg_indicator; + usr_evt_32.event_payload.error_info.err_type = + usr_evt.event_payload.error_info.err_type; + break; + default: + pr_debug("%s: unknown audio event type = %d rc = %ld", + __func__, usr_evt_32.event_type, rc); + return rc; + } + if (copy_to_user(arg, &usr_evt_32, sizeof(usr_evt_32))) { + pr_err("%s: copy_to_user failed\n", __func__); + rc = -EFAULT; + } + return rc; +} +#endif + +static int audio_aio_ion_check(struct q6audio_aio *audio, + void *vaddr, unsigned long len) +{ + struct audio_aio_ion_region *region_elt; + struct audio_aio_ion_region t = {.vaddr = vaddr, .len = len }; + + list_for_each_entry(region_elt, &audio->ion_region_queue, list) { + if (CONTAINS(region_elt, &t) || CONTAINS(&t, region_elt) || + OVERLAPS(region_elt, &t)) { + pr_err("%s[%pK]:region (vaddr %pK len %ld) clashes with registered region (vaddr %pK paddr %pK len %ld)\n", + __func__, audio, vaddr, len, + region_elt->vaddr, + ®ion_elt->paddr, region_elt->len); + return -EINVAL; + } + } + + return 0; +} + +static int audio_aio_ion_add(struct q6audio_aio *audio, + struct msm_audio_ion_info *info) +{ + ion_phys_addr_t paddr = 0; + size_t len = 0; + struct audio_aio_ion_region *region; + int rc = -EINVAL; + struct ion_handle *handle = NULL; + unsigned long ionflag; + void *kvaddr = NULL; + + pr_debug("%s[%pK]:\n", __func__, audio); + region = kmalloc(sizeof(*region), GFP_KERNEL); + + if (!region) { + rc = -ENOMEM; + goto end; + } + + rc = msm_audio_ion_import_legacy("Audio_Dec_Client", audio->client, + &handle, info->fd, &ionflag, + 0, &paddr, &len, &kvaddr); + if (rc) { + pr_err("%s: msm audio ion alloc failed\n", __func__); + goto import_error; + } + + rc = audio_aio_ion_check(audio, info->vaddr, len); + if (rc < 0) { + pr_err("%s: audio_aio_ion_check failed\n", __func__); + goto ion_error; + } + + region->handle = handle; + region->vaddr = info->vaddr; + region->fd = info->fd; + region->paddr = paddr; + region->kvaddr = kvaddr; + region->len = len; + region->ref_cnt = 0; + pr_debug("%s[%pK]:add region paddr %pK vaddr %pK, len %lu kvaddr %pK\n", + __func__, audio, + ®ion->paddr, region->vaddr, region->len, + region->kvaddr); + list_add_tail(®ion->list, &audio->ion_region_queue); + rc = q6asm_memory_map(audio->ac, paddr, IN, len, 1); + if (rc < 0) { + pr_err("%s[%pK]: memory map failed\n", __func__, audio); + goto mmap_error; + } else { + goto end; + } +mmap_error: + list_del(®ion->list); +ion_error: + msm_audio_ion_free_legacy(audio->client, handle); +import_error: + kfree(region); +end: + return rc; +} + +static int audio_aio_ion_remove(struct q6audio_aio *audio, + struct msm_audio_ion_info *info) +{ + struct audio_aio_ion_region *region; + struct list_head *ptr, *next; + int rc = -EINVAL; + + pr_debug("%s[%pK]:info fd %d vaddr %pK\n", + __func__, audio, info->fd, info->vaddr); + + list_for_each_safe(ptr, next, 
&audio->ion_region_queue) { + region = list_entry(ptr, struct audio_aio_ion_region, list); + + if ((region->fd == info->fd) && + (region->vaddr == info->vaddr)) { + if (region->ref_cnt) { + pr_debug("%s[%pK]:region %pK in use ref_cnt %d\n", + __func__, audio, region, + region->ref_cnt); + break; + } + pr_debug("%s[%pK]:remove region fd %d vaddr %pK\n", + __func__, audio, info->fd, info->vaddr); + rc = q6asm_memory_unmap(audio->ac, + region->paddr, IN); + if (rc < 0) + pr_err("%s[%pK]: memory unmap failed\n", + __func__, audio); + + list_del(®ion->list); + msm_audio_ion_free_legacy(audio->client, + region->handle); + kfree(region); + rc = 0; + break; + } + } + + return rc; +} + +static int audio_aio_async_write(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node) +{ + int rc; + struct audio_client *ac; + struct audio_aio_write_param param; + + memset(¶m, 0, sizeof(param)); + + if (!audio || !buf_node) { + pr_err("%s NULL pointer audio=[0x%pK], buf_node=[0x%pK]\n", + __func__, audio, buf_node); + return -EINVAL; + } + pr_debug("%s[%pK]: Send write buff %pK phy %pK len %d meta_enable = %d\n", + __func__, audio, buf_node, &buf_node->paddr, + buf_node->buf.data_len, + audio->buf_cfg.meta_info_enable); + pr_debug("%s[%pK]: flags = 0x%x\n", __func__, audio, + buf_node->meta_info.meta_in.nflags); + + ac = audio->ac; + /* Offset with appropriate meta */ + if (audio->feedback) { + /* Non Tunnel mode */ + param.paddr = buf_node->paddr + sizeof(struct dec_meta_in); + param.len = buf_node->buf.data_len - sizeof(struct dec_meta_in); + } else { + /* Tunnel mode */ + param.paddr = buf_node->paddr; + param.len = buf_node->buf.data_len; + } + param.msw_ts = buf_node->meta_info.meta_in.ntimestamp.highpart; + param.lsw_ts = buf_node->meta_info.meta_in.ntimestamp.lowpart; + param.flags = buf_node->meta_info.meta_in.nflags; + /* If no meta_info enaled, indicate no time stamp valid */ + if (!audio->buf_cfg.meta_info_enable) + param.flags = 0xFF00; + + if (buf_node->meta_info.meta_in.nflags & AUDIO_DEC_EOF_SET) + param.flags |= AUDIO_DEC_EOF_SET; + + param.uid = ac->session; + /* Read command will populate session id as token */ + buf_node->token = ac->session; + rc = q6asm_async_write(ac, ¶m); + if (rc < 0) + pr_err("%s[%pK]:failed\n", __func__, audio); + return rc; +} + +void audio_aio_post_event(struct q6audio_aio *audio, int type, + union msm_audio_event_payload payload) +{ + struct audio_aio_event *e_node = NULL; + unsigned long flags; + + spin_lock_irqsave(&audio->event_queue_lock, flags); + + if (!list_empty(&audio->free_event_queue)) { + e_node = list_first_entry(&audio->free_event_queue, + struct audio_aio_event, list); + list_del(&e_node->list); + } else { + e_node = kmalloc(sizeof(struct audio_aio_event), GFP_ATOMIC); + if (!e_node) { + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + return; + } + } + + e_node->event_type = type; + e_node->payload = payload; + + list_add_tail(&e_node->list, &audio->event_queue); + spin_unlock_irqrestore(&audio->event_queue_lock, flags); + wake_up(&audio->event_wait); +} + +static int audio_aio_async_read(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node) +{ + struct audio_client *ac; + struct audio_aio_read_param param; + int rc; + + pr_debug("%s[%pK]: Send read buff %pK phy %pK len %d\n", + __func__, audio, buf_node, + &buf_node->paddr, buf_node->buf.buf_len); + ac = audio->ac; + /* Provide address so driver can append nr frames information */ + param.paddr = buf_node->paddr + + sizeof(struct dec_meta_out); + param.len = 
buf_node->buf.buf_len - + sizeof(struct dec_meta_out); + param.uid = ac->session; + /* Write command will populate session_id as token */ + buf_node->token = ac->session; + rc = q6asm_async_read(ac, ¶m); + if (rc < 0) + pr_err("%s[%pK]:failed\n", __func__, audio); + return rc; +} + +static int audio_aio_buf_add_shared(struct q6audio_aio *audio, u32 dir, + struct audio_aio_buffer_node *buf_node) +{ + unsigned long flags; + int ret = 0; + pr_debug("%s[%pK]:node %pK dir %x buf_addr %pK buf_len %d data_len %d\n", + __func__, audio, buf_node, dir, buf_node->buf.buf_addr, + buf_node->buf.buf_len, buf_node->buf.data_len); + buf_node->paddr = audio_aio_ion_fixup(audio, buf_node->buf.buf_addr, + buf_node->buf.buf_len, 1, + &buf_node->kvaddr); + if (dir) { + /* write */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (!audio->feedback && !buf_node->buf.data_len)) { + kfree(buf_node); + return -EINVAL; + } + extract_meta_out_info(audio, buf_node, 1); + /* Not a EOS buffer */ + if (!(buf_node->meta_info.meta_in.nflags & AUDIO_DEC_EOS_SET)) { + spin_lock_irqsave(&audio->dsp_lock, flags); + ret = audio_aio_async_write(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->out_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } else if (buf_node->meta_info.meta_in.nflags + & AUDIO_DEC_EOS_SET) { + if (!audio->wflush) { + pr_debug("%s[%pK]:Send EOS cmd at i/p\n", + __func__, audio); + /* Driver will forcefully post writedone event + * once eos ack recived from DSP + */ + audio->eos_write_payload.aio_buf =\ + buf_node->buf; + audio->eos_flag = 1; + audio->eos_rsp = 0; + q6asm_cmd(audio->ac, CMD_EOS); + kfree(buf_node); + } else { /* Flush in progress, send back i/p + * EOS buffer as is + */ + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + audio_aio_post_event(audio, + AUDIO_EVENT_WRITE_DONE, + event_payload); + kfree(buf_node); + } + } + } else { + /* read */ + if (!buf_node->paddr || + (buf_node->paddr & 0x1) || + (buf_node->buf.buf_len < PCM_BUFSZ_MIN)) { + kfree(buf_node); + return -EINVAL; + } + /* No EOS reached */ + if (!audio->eos_rsp) { + spin_lock_irqsave(&audio->dsp_lock, flags); + ret = audio_aio_async_read(audio, buf_node); + /* EOS buffer handled in driver */ + list_add_tail(&buf_node->list, &audio->in_queue); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } + /* EOS reached at input side fake all upcoming read buffer to + * indicate the same + */ + else { + union msm_audio_event_payload event_payload; + event_payload.aio_buf = buf_node->buf; + event_payload.aio_buf.data_len = + insert_eos_buf(audio, buf_node); + pr_debug("%s[%pK]: propagate READ_DONE as EOS done\n", + __func__, audio); + audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(buf_node); + } + } + return ret; +} +#ifdef CONFIG_COMPAT +static int audio_aio_buf_add_compat(struct q6audio_aio *audio, u32 dir, + void __user *arg) +{ + struct audio_aio_buffer_node *buf_node; + struct msm_audio_aio_buf32 aio_buf_32; + + buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) { + pr_err("%s: Buffer node alloc failed\n", __func__); + return -ENOMEM; + } + + + if (copy_from_user(&aio_buf_32, arg, sizeof(aio_buf_32))) { + kfree(buf_node); + pr_err("%s: copy_from_user failed\n", __func__); + return -EFAULT; + } + + buf_node->buf.buf_addr = compat_ptr(aio_buf_32.buf_addr); + buf_node->buf.buf_len = aio_buf_32.buf_len; + buf_node->buf.data_len = aio_buf_32.data_len; + buf_node->buf.private_data = 
compat_ptr(aio_buf_32.private_data); + buf_node->buf.mfield_sz = aio_buf_32.mfield_sz; + + return audio_aio_buf_add_shared(audio, dir, buf_node); +} +#endif + +static int audio_aio_buf_add(struct q6audio_aio *audio, u32 dir, + void __user *arg) +{ + struct audio_aio_buffer_node *buf_node; + + buf_node = kzalloc(sizeof(*buf_node), GFP_KERNEL); + + if (!buf_node) { + pr_err("%s: Buffer node alloc failed\n", __func__); + return -ENOMEM; + } + + if (copy_from_user(&buf_node->buf, arg, sizeof(buf_node->buf))) { + kfree(buf_node); + pr_err("%s: copy_from_user failed\n", __func__); + return -EFAULT; + } + + return audio_aio_buf_add_shared(audio, dir, buf_node); +} + +void audio_aio_ioport_reset(struct q6audio_aio *audio) +{ + if (audio->drv_status & ADRV_STATUS_AIO_INTF) { + /* If fsync is in progress, make sure + * return value of fsync indicates + * abort due to flush + */ + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("%s[%pK]:fsync in progress\n", + __func__, audio); + audio->drv_ops.out_flush(audio); + } else + audio->drv_ops.out_flush(audio); + if (audio->feedback == NON_TUNNEL_MODE) + audio->drv_ops.in_flush(audio); + } +} + +int audio_aio_open(struct q6audio_aio *audio, struct file *file) +{ + int rc = 0; + int i; + struct audio_aio_event *e_node = NULL; + struct list_head *ptr, *next; + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + audio->pcm_cfg.sample_rate = 48000; + audio->pcm_cfg.channel_count = 2; + + /* Only AIO interface */ + if (file->f_flags & O_NONBLOCK) { + pr_debug("%s[%pK]:set to aio interface\n", __func__, audio); + audio->drv_status |= ADRV_STATUS_AIO_INTF; + audio->drv_ops.out_flush = audio_aio_async_out_flush; + audio->drv_ops.in_flush = audio_aio_async_in_flush; + q6asm_set_io_mode(audio->ac, ASYNC_IO_MODE); + } else { + pr_err("%s[%pK]:SIO interface not supported\n", + __func__, audio); + rc = -EACCES; + goto fail; + } + + /* Initialize all locks of audio instance */ + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + mutex_init(&audio->get_event_lock); + spin_lock_init(&audio->dsp_lock); + spin_lock_init(&audio->event_queue_lock); + init_waitqueue_head(&audio->cmd_wait); + init_waitqueue_head(&audio->write_wait); + init_waitqueue_head(&audio->event_wait); + INIT_LIST_HEAD(&audio->out_queue); + INIT_LIST_HEAD(&audio->in_queue); + INIT_LIST_HEAD(&audio->ion_region_queue); + INIT_LIST_HEAD(&audio->free_event_queue); + INIT_LIST_HEAD(&audio->event_queue); + + audio->drv_ops.out_flush(audio); + audio->opened = 1; + audio->reset_event = false; + file->private_data = audio; + audio->codec_ioctl = audio_aio_ioctl; + audio->codec_compat_ioctl = audio_aio_compat_ioctl; + for (i = 0; i < AUDIO_EVENT_NUM; i++) { + e_node = kmalloc(sizeof(struct audio_aio_event), GFP_KERNEL); + if (e_node) + list_add_tail(&e_node->list, &audio->free_event_queue); + else { + pr_err("%s[%pK]:event pkt alloc failed\n", + __func__, audio); + rc = -ENOMEM; + goto cleanup; + } + } + audio->client = msm_audio_ion_client_create("Audio_Dec_Client"); + if (IS_ERR_OR_NULL(audio->client)) { + pr_err("Unable to create ION client\n"); + rc = -ENOMEM; + goto cleanup; + } + pr_debug("Ion client create in audio_aio_open %pK", audio->client); + + rc = register_volume_listener(audio); + if (rc < 0) + goto ion_cleanup; + + return 0; +ion_cleanup: + 
msm_audio_ion_client_destroy(audio->client); + audio->client = NULL; +cleanup: + list_for_each_safe(ptr, next, &audio->free_event_queue) { + e_node = list_first_entry(&audio->free_event_queue, + struct audio_aio_event, list); + list_del(&e_node->list); + kfree(e_node); + } +fail: + return rc; +} + +static long audio_aio_shared_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_ABORT_GET_EVENT: { + audio->event_abort = 1; + wake_up(&audio->event_wait); + break; + } + case AUDIO_OUTPORT_FLUSH: { + pr_debug("%s[%pK]:AUDIO_OUTPORT_FLUSH\n", __func__, audio); + mutex_lock(&audio->read_lock); + rc = audio_aio_outport_flush(audio); + if (rc < 0) { + pr_err("%s[%pK]: AUDIO_OUTPORT_FLUSH failed\n", + __func__, audio); + rc = -EINTR; + } + mutex_unlock(&audio->read_lock); + break; + } + case AUDIO_STOP: { + pr_debug("%s[%pK]: AUDIO_STOP session_id[%d]\n", __func__, + audio, audio->ac->session); + mutex_lock(&audio->lock); + audio->stopped = 1; + rc = audio_aio_flush(audio); + if (rc < 0) { + pr_err("%s[%pK]:Audio Stop procedure failed rc=%d\n", + __func__, audio, rc); + mutex_unlock(&audio->lock); + break; + } + audio->enabled = 0; + audio->drv_status &= ~ADRV_STATUS_PAUSE; + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("%s[%pK] Waking up the audio_aio_fsync\n", + __func__, audio); + wake_up(&audio->write_wait); + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_PAUSE: { + pr_debug("%s[%pK]:AUDIO_PAUSE %ld\n", __func__, audio, arg); + mutex_lock(&audio->lock); + if (arg == 1) { + rc = audio_aio_pause(audio); + if (rc < 0) { + pr_err("%s[%pK]: pause FAILED rc=%d\n", + __func__, audio, rc); + mutex_unlock(&audio->lock); + break; + } + audio->drv_status |= ADRV_STATUS_PAUSE; + } else if (arg == 0) { + if (audio->drv_status & ADRV_STATUS_PAUSE) { + rc = audio_aio_enable(audio); + if (rc) + pr_err("%s[%pK]: audio enable failed\n", + __func__, audio); + else { + audio->drv_status &= ~ADRV_STATUS_PAUSE; + audio->enabled = 1; + } + } + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_FLUSH: { + pr_debug("%s[%pK]: AUDIO_FLUSH sessionid[%d]\n", __func__, + audio, audio->ac->session); + mutex_lock(&audio->lock); + audio->rflush = 1; + audio->wflush = 1; + if (audio->drv_status & ADRV_STATUS_FSYNC) { + pr_debug("%s[%pK] Waking up the audio_aio_fsync\n", + __func__, audio); + wake_up(&audio->write_wait); + } + /* Flush DSP */ + rc = audio_aio_flush(audio); + /* Flush input / Output buffer in software*/ + audio_aio_ioport_reset(audio); + if (rc < 0) { + pr_err("%s[%pK]:AUDIO_FLUSH interrupted\n", + __func__, audio); + rc = -EINTR; + } else { + audio->rflush = 0; + if (audio->drv_status & ADRV_STATUS_FSYNC) + wake_up(&audio->write_wait); + else + audio->wflush = 0; + + } + audio->eos_flag = 0; + audio->eos_rsp = 0; + mutex_unlock(&audio->lock); + break; + } + case AUDIO_GET_SESSION_ID: { + mutex_lock(&audio->lock); + if (copy_to_user((void *)arg, &audio->ac->session, + sizeof(u16))) { + pr_err("%s: copy_to_user for AUDIO_GET_SESSION_ID failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_PM_AWAKE: { + if ((audio->audio_ws_mgr == NULL) || + (audio->miscdevice == NULL)) { + pr_err("%s[%pK]: invalid ws_mgr or miscdevice", + __func__, audio); + rc = -EACCES; + break; + } + pr_debug("%s[%pK]:AUDIO_PM_AWAKE\n", __func__, audio); + mutex_lock(&audio->lock); + if (!audio->wakelock_voted) { + audio->wakelock_voted = true; + 
mutex_lock(&audio->audio_ws_mgr->ws_lock); + if (audio->audio_ws_mgr->ref_cnt++ == 0) + pm_stay_awake(audio->miscdevice->this_device); + mutex_unlock(&audio->audio_ws_mgr->ws_lock); + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_PM_RELAX: { + if ((audio->audio_ws_mgr == NULL) || + (audio->miscdevice == NULL)) { + pr_err("%s[%pK]: invalid ws_mgr or miscdevice", + __func__, audio); + rc = -EACCES; + break; + } + pr_debug("%s[%pK]:AUDIO_PM_RELAX\n", __func__, audio); + mutex_lock(&audio->lock); + if (audio->wakelock_voted) { + audio->wakelock_voted = false; + mutex_lock(&audio->audio_ws_mgr->ws_lock); + if ((audio->audio_ws_mgr->ref_cnt > 0) && + (--audio->audio_ws_mgr->ref_cnt == 0)) { + pm_relax(audio->miscdevice->this_device); + } + mutex_unlock(&audio->audio_ws_mgr->ws_lock); + } + mutex_unlock(&audio->lock); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; + + +} + +static long audio_aio_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_ABORT_GET_EVENT: + case AUDIO_OUTPORT_FLUSH: + case AUDIO_STOP: + case AUDIO_PAUSE: + case AUDIO_FLUSH: + case AUDIO_GET_SESSION_ID: + case AUDIO_PM_AWAKE: + case AUDIO_PM_RELAX: + rc = audio_aio_shared_ioctl(file, cmd, arg); + break; + case AUDIO_GET_STATS: { + struct msm_audio_stats stats; + uint64_t timestamp; + memset(&stats, 0, sizeof(struct msm_audio_stats)); + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + rc = q6asm_get_session_time(audio->ac, ×tamp); + if (rc >= 0) + memcpy(&stats.unused[0], ×tamp, sizeof(timestamp)); + else + pr_debug("Error while getting timestamp\n"); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) { + pr_err("%s: copy_frm_user for AUDIO_GET_STATS failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_GET_EVENT: { + pr_debug("%s[%pK]:AUDIO_GET_EVENT\n", __func__, audio); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audio_aio_process_event_req(audio, + (void __user *)arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + break; + } + case AUDIO_ASYNC_WRITE: { + mutex_lock(&audio->write_lock); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else { + if (audio->enabled) + rc = audio_aio_buf_add(audio, 1, + (void __user *)arg); + else + rc = -EPERM; + } + mutex_unlock(&audio->write_lock); + break; + } + case AUDIO_ASYNC_READ: { + mutex_lock(&audio->read_lock); + if (audio->feedback) + rc = audio_aio_buf_add(audio, 0, + (void __user *)arg); + else + rc = -EPERM; + mutex_unlock(&audio->read_lock); + break; + } + + case AUDIO_GET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + mutex_lock(&audio->lock); + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + pr_debug("%s[%pK]:GET STREAM CFG %d %d\n", + __func__, audio, cfg.buffer_size, cfg.buffer_count); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_STREAM_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_SET_STREAM_CONFIG: { + struct msm_audio_stream_config cfg; + pr_debug("%s[%pK]:SET STREAM CONFIG\n", __func__, audio); + mutex_lock(&audio->lock); + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + pr_err( + "%s: copy_from_user for AUDIO_SET_STREAM_CONFIG failed\n", + __func__); 
+ rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + rc = 0; + mutex_unlock(&audio->lock); + break; + } + case AUDIO_GET_CONFIG: { + struct msm_audio_config cfg; + mutex_lock(&audio->lock); + if (copy_to_user((void *)arg, &audio->pcm_cfg, sizeof(cfg))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_SET_CONFIG: { + struct msm_audio_config config; + pr_err("%s[%pK]:AUDIO_SET_CONFIG\n", __func__, audio); + mutex_lock(&audio->lock); + if (copy_from_user(&config, (void *)arg, sizeof(config))) { + pr_err( + "%s: copy_from_user for AUDIO_SET_CONFIG failed\n", + __func__); + rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + if (audio->feedback != NON_TUNNEL_MODE) { + pr_err("%s[%pK]:Not sufficient permission to change the playback mode\n", + __func__, audio); + rc = -EACCES; + mutex_unlock(&audio->lock); + break; + } + if ((config.buffer_count > PCM_BUF_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + audio->pcm_cfg.buffer_count = config.buffer_count; + audio->pcm_cfg.buffer_size = config.buffer_size; + audio->pcm_cfg.channel_count = config.channel_count; + audio->pcm_cfg.sample_rate = config.sample_rate; + rc = 0; + mutex_unlock(&audio->lock); + break; + } + case AUDIO_SET_BUF_CFG: { + struct msm_audio_buf_cfg cfg; + mutex_lock(&audio->lock); + if (copy_from_user(&cfg, (void *)arg, sizeof(cfg))) { + pr_err( + "%s: copy_from_user for AUDIO_GET_BUF CONFIG failed\n", + __func__); + rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + pr_debug("%s[%pK]:session id %d: Set-buf-cfg: meta[%d]", + __func__, audio, + audio->ac->session, cfg.meta_info_enable); + mutex_unlock(&audio->lock); + break; + } + case AUDIO_GET_BUF_CFG: { + pr_debug("%s[%pK]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", + __func__, audio, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + mutex_lock(&audio->lock); + if (copy_to_user((void *)arg, &audio->buf_cfg, + sizeof(struct msm_audio_buf_cfg))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_BUF_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_REGISTER_ION: { + struct msm_audio_ion_info info; + pr_debug("%s[%pK]:AUDIO_REGISTER_ION\n", __func__, audio); + mutex_lock(&audio->lock); + if (copy_from_user(&info, (void *)arg, sizeof(info))) { + pr_err( + "%s: copy_from_user for AUDIO_REGISTER_ION failed\n", + __func__); + rc = -EFAULT; + } else { + mutex_lock(&audio->read_lock); + mutex_lock(&audio->write_lock); + rc = audio_aio_ion_add(audio, &info); + mutex_unlock(&audio->write_lock); + mutex_unlock(&audio->read_lock); + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_DEREGISTER_ION: { + struct msm_audio_ion_info info; + mutex_lock(&audio->lock); + pr_debug("%s[%pK]:AUDIO_DEREGISTER_ION\n", __func__, audio); + if (copy_from_user(&info, (void *)arg, sizeof(info))) { + pr_err( + "%s: copy_from_user for AUDIO_DEREGISTER_ION failed\n", + __func__); + rc = -EFAULT; + } else { + mutex_lock(&audio->read_lock); + mutex_lock(&audio->write_lock); + rc = 
audio_aio_ion_remove(audio, &info); + mutex_unlock(&audio->write_lock); + mutex_unlock(&audio->read_lock); + } + mutex_unlock(&audio->lock); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_stream_config32 { + u32 buffer_size; + u32 buffer_count; +}; + +struct msm_audio_stats32 { + u32 byte_count; + u32 sample_count; + u32 unused[2]; +}; + +struct msm_audio_config32 { + u32 buffer_size; + u32 buffer_count; + u32 channel_count; + u32 sample_rate; + u32 type; + u32 meta_field; + u32 bits; + u32 unused[3]; +}; + +struct msm_audio_buf_cfg32 { + u32 meta_info_enable; + u32 frames_per_buf; +}; + +struct msm_audio_ion_info32 { + int fd; + compat_uptr_t vaddr; +}; + +enum { + AUDIO_GET_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 3, + struct msm_audio_config32), + AUDIO_SET_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 4, + struct msm_audio_config32), + AUDIO_GET_STATS_32 = _IOR(AUDIO_IOCTL_MAGIC, 5, + struct msm_audio_stats32), + AUDIO_GET_EVENT_32 = _IOR(AUDIO_IOCTL_MAGIC, 13, + struct msm_audio_event32), + AUDIO_ASYNC_WRITE_32 = _IOW(AUDIO_IOCTL_MAGIC, 17, + struct msm_audio_aio_buf32), + AUDIO_ASYNC_READ_32 = _IOW(AUDIO_IOCTL_MAGIC, 18, + struct msm_audio_aio_buf32), + AUDIO_SET_STREAM_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, 80, + struct msm_audio_stream_config32), + AUDIO_GET_STREAM_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, 81, + struct msm_audio_stream_config32), + AUDIO_GET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 93, + struct msm_audio_buf_cfg32), + AUDIO_SET_BUF_CFG_32 = _IOW(AUDIO_IOCTL_MAGIC, 94, + struct msm_audio_buf_cfg32), + AUDIO_REGISTER_ION_32 = _IOW(AUDIO_IOCTL_MAGIC, 97, + struct msm_audio_ion_info32), + AUDIO_DEREGISTER_ION_32 = _IOW(AUDIO_IOCTL_MAGIC, 98, + struct msm_audio_ion_info32), +}; + +static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_ABORT_GET_EVENT: + case AUDIO_OUTPORT_FLUSH: + case AUDIO_STOP: + case AUDIO_PAUSE: + case AUDIO_FLUSH: + case AUDIO_GET_SESSION_ID: + case AUDIO_PM_AWAKE: + case AUDIO_PM_RELAX: + rc = audio_aio_shared_ioctl(file, cmd, arg); + break; + case AUDIO_GET_STATS_32: { + struct msm_audio_stats32 stats; + uint64_t timestamp; + memset(&stats, 0, sizeof(struct msm_audio_stats32)); + stats.byte_count = atomic_read(&audio->in_bytes); + stats.sample_count = atomic_read(&audio->in_samples); + rc = q6asm_get_session_time(audio->ac, ×tamp); + if (rc >= 0) + memcpy(&stats.unused[0], ×tamp, sizeof(timestamp)); + else + pr_debug("Error while getting timestamp\n"); + if (copy_to_user((void *)arg, &stats, sizeof(stats))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_STATS_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_GET_EVENT_32: { + pr_debug("%s[%pK]:AUDIO_GET_EVENT\n", __func__, audio); + if (mutex_trylock(&audio->get_event_lock)) { + rc = audio_aio_process_event_req_compat(audio, + (void __user *)arg); + mutex_unlock(&audio->get_event_lock); + } else + rc = -EBUSY; + break; + } + case AUDIO_ASYNC_WRITE_32: { + mutex_lock(&audio->write_lock); + if (audio->drv_status & ADRV_STATUS_FSYNC) + rc = -EBUSY; + else { + if (audio->enabled) + rc = audio_aio_buf_add_compat(audio, 1, + (void __user *)arg); + else + rc = -EPERM; + } + mutex_unlock(&audio->write_lock); + break; + } + case AUDIO_ASYNC_READ_32: { + mutex_lock(&audio->read_lock); + if (audio->feedback) + rc = audio_aio_buf_add_compat(audio, 0, + (void __user 
*)arg); + else + rc = -EPERM; + mutex_unlock(&audio->read_lock); + break; + } + + case AUDIO_GET_STREAM_CONFIG_32: { + struct msm_audio_stream_config32 cfg; + mutex_lock(&audio->lock); + memset(&cfg, 0, sizeof(cfg)); + cfg.buffer_size = audio->str_cfg.buffer_size; + cfg.buffer_count = audio->str_cfg.buffer_count; + pr_debug("%s[%pK]:GET STREAM CFG %d %d\n", + __func__, audio, cfg.buffer_size, cfg.buffer_count); + if (copy_to_user((void *)arg, &cfg, sizeof(cfg))) { + pr_err("%s: copy_to_user for AUDIO_GET_STREAM_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_SET_STREAM_CONFIG_32: { + struct msm_audio_stream_config32 cfg_32; + struct msm_audio_stream_config cfg; + pr_debug("%s[%pK]:SET STREAM CONFIG\n", __func__, audio); + mutex_lock(&audio->lock); + if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_STREAM_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + cfg.buffer_size = cfg_32.buffer_size; + cfg.buffer_count = cfg_32.buffer_count; + + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + rc = 0; + mutex_unlock(&audio->lock); + break; + } + case AUDIO_GET_CONFIG_32: { + struct msm_audio_config32 cfg_32; + mutex_lock(&audio->lock); + memset(&cfg_32, 0, sizeof(cfg_32)); + cfg_32.buffer_size = audio->pcm_cfg.buffer_size; + cfg_32.buffer_count = audio->pcm_cfg.buffer_count; + cfg_32.channel_count = audio->pcm_cfg.channel_count; + cfg_32.sample_rate = audio->pcm_cfg.sample_rate; + cfg_32.type = audio->pcm_cfg.type; + cfg_32.meta_field = audio->pcm_cfg.meta_field; + cfg_32.bits = audio->pcm_cfg.bits; + + if (copy_to_user((void *)arg, &cfg_32, sizeof(cfg_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_SET_CONFIG_32: { + struct msm_audio_config config; + struct msm_audio_config32 config_32; + mutex_lock(&audio->lock); + + if (audio->feedback != NON_TUNNEL_MODE) { + pr_err("%s[%pK]:Not sufficient permission to change the playback mode\n", + __func__, audio); + rc = -EACCES; + mutex_unlock(&audio->lock); + break; + } + pr_err("%s[%pK]:AUDIO_SET_CONFIG\n", __func__, audio); + if (copy_from_user(&config_32, (void *)arg, + sizeof(config_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + config.buffer_size = config_32.buffer_size; + config.buffer_count = config_32.buffer_count; + config.channel_count = config_32.channel_count; + config.sample_rate = config_32.sample_rate; + config.type = config_32.type; + config.meta_field = config_32.meta_field; + config.bits = config_32.bits; + + if ((config.buffer_count > PCM_BUF_COUNT) || + (config.buffer_count == 1)) + config.buffer_count = PCM_BUF_COUNT; + + if (config.buffer_size < PCM_BUFSZ_MIN) + config.buffer_size = PCM_BUFSZ_MIN; + + audio->pcm_cfg.buffer_count = config.buffer_count; + audio->pcm_cfg.buffer_size = config.buffer_size; + audio->pcm_cfg.channel_count = config.channel_count; + audio->pcm_cfg.sample_rate = config.sample_rate; + rc = 0; + mutex_unlock(&audio->lock); + break; + } + case AUDIO_SET_BUF_CFG_32: { + struct msm_audio_buf_cfg cfg; + struct msm_audio_buf_cfg32 cfg_32; + mutex_lock(&audio->lock); + if (copy_from_user(&cfg_32, (void *)arg, sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; 
+ mutex_unlock(&audio->lock); + break; + } + cfg.meta_info_enable = cfg_32.meta_info_enable; + cfg.frames_per_buf = cfg_32.frames_per_buf; + + if ((audio->feedback == NON_TUNNEL_MODE) && + !cfg.meta_info_enable) { + rc = -EFAULT; + mutex_unlock(&audio->lock); + break; + } + + audio->buf_cfg.meta_info_enable = cfg.meta_info_enable; + pr_debug("%s[%pK]:session id %d: Set-buf-cfg: meta[%d]", + __func__, audio, + audio->ac->session, cfg.meta_info_enable); + mutex_unlock(&audio->lock); + break; + } + case AUDIO_GET_BUF_CFG_32: { + struct msm_audio_buf_cfg32 cfg_32; + pr_debug("%s[%pK]:session id %d: Get-buf-cfg: meta[%d] framesperbuf[%d]\n", + __func__, audio, + audio->ac->session, audio->buf_cfg.meta_info_enable, + audio->buf_cfg.frames_per_buf); + + mutex_lock(&audio->lock); + memset(&cfg_32, 0, sizeof(cfg_32)); + cfg_32.meta_info_enable = audio->buf_cfg.meta_info_enable; + cfg_32.frames_per_buf = audio->buf_cfg.frames_per_buf; + if (copy_to_user((void *)arg, &cfg_32, + sizeof(struct msm_audio_buf_cfg32))) { + pr_err("%s: copy_to_user for AUDIO_GET_BUF_CFG_32 failed\n", + __func__); + rc = -EFAULT; + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_REGISTER_ION_32: { + struct msm_audio_ion_info32 info_32; + struct msm_audio_ion_info info; + pr_debug("%s[%pK]:AUDIO_REGISTER_ION\n", __func__, audio); + mutex_lock(&audio->lock); + if (copy_from_user(&info_32, (void *)arg, sizeof(info_32))) { + pr_err("%s: copy_from_user for AUDIO_REGISTER_ION_32 failed\n", + __func__); + rc = -EFAULT; + } else { + info.fd = info_32.fd; + info.vaddr = compat_ptr(info_32.vaddr); + mutex_lock(&audio->read_lock); + mutex_lock(&audio->write_lock); + rc = audio_aio_ion_add(audio, &info); + mutex_unlock(&audio->write_lock); + mutex_unlock(&audio->read_lock); + } + mutex_unlock(&audio->lock); + break; + } + case AUDIO_DEREGISTER_ION_32: { + struct msm_audio_ion_info32 info_32; + struct msm_audio_ion_info info; + mutex_lock(&audio->lock); + pr_debug("%s[%pK]:AUDIO_DEREGISTER_ION\n", __func__, audio); + if (copy_from_user(&info_32, (void *)arg, sizeof(info_32))) { + pr_err("%s: copy_from_user for AUDIO_DEREGISTER_ION_32 failed\n", + __func__); + rc = -EFAULT; + } else { + info.fd = info_32.fd; + info.vaddr = compat_ptr(info_32.vaddr); + mutex_lock(&audio->read_lock); + mutex_lock(&audio->write_lock); + rc = audio_aio_ion_remove(audio, &info); + mutex_unlock(&audio->write_lock); + mutex_unlock(&audio->read_lock); + } + mutex_unlock(&audio->lock); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} +#endif diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h new file mode 100644 index 000000000000..9c53f58b746f --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.h @@ -0,0 +1,233 @@ +/* Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/miscdevice.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <linux/wait.h> +#include <linux/wakelock.h> +#include <linux/msm_audio.h> +#include <linux/debugfs.h> +#include <linux/list.h> +#include <linux/slab.h> +#include <linux/msm_ion.h> +#include <asm/ioctls.h> +#include <asm/atomic.h> +#include "q6audio_common.h" + +#define TUNNEL_MODE 0x0000 +#define NON_TUNNEL_MODE 0x0001 + +#define ADRV_STATUS_AIO_INTF 0x00000001 /* AIO interface */ +#define ADRV_STATUS_FSYNC 0x00000008 +#define ADRV_STATUS_PAUSE 0x00000010 +#define AUDIO_DEC_EOS_SET 0x00000001 +#define AUDIO_DEC_EOF_SET 0x00000010 +#define AUDIO_EVENT_NUM 10 + +#define __CONTAINS(r, v, l) ({ \ + typeof(r) __r = r; \ + typeof(v) __v = v; \ + typeof(v) __e = __v + l; \ + int res = ((__v >= __r->vaddr) && \ + (__e <= __r->vaddr + __r->len)); \ + res; \ +}) + +#define CONTAINS(r1, r2) ({ \ + typeof(r2) __r2 = r2; \ + __CONTAINS(r1, __r2->vaddr, __r2->len); \ +}) + +#define IN_RANGE(r, v) ({ \ + typeof(r) __r = r; \ + typeof(v) __vv = v; \ + int res = ((__vv >= __r->vaddr) && \ + (__vv < (__r->vaddr + __r->len))); \ + res; \ +}) + +#define OVERLAPS(r1, r2) ({ \ + typeof(r1) __r1 = r1; \ + typeof(r2) __r2 = r2; \ + typeof(__r2->vaddr) __v = __r2->vaddr; \ + typeof(__v) __e = __v + __r2->len - 1; \ + int res = (IN_RANGE(__r1, __v) || IN_RANGE(__r1, __e)); \ + res; \ +}) + +struct timestamp { + u32 lowpart; + u32 highpart; +} __packed; + +struct meta_out_dsp { + u32 offset_to_frame; + u32 frame_size; + u32 encoded_pcm_samples; + u32 msw_ts; + u32 lsw_ts; + u32 nflags; +} __packed; + +struct dec_meta_in { + unsigned char reserved[18]; + unsigned short offset; + struct timestamp ntimestamp; + unsigned int nflags; +} __packed; + +struct dec_meta_out { + unsigned int reserved[7]; + unsigned int num_of_frames; + struct meta_out_dsp meta_out_dsp[]; +} __packed; + +/* General meta field to store meta info +locally */ +union meta_data { + struct dec_meta_out meta_out; + struct dec_meta_in meta_in; +} __packed; + +/* per device wakeup source manager */ +struct ws_mgr { + struct mutex ws_lock; + uint32_t ref_cnt; +}; + +#define PCM_BUF_COUNT (2) +/* Buffer with meta */ +#define PCM_BUFSZ_MIN ((4*1024) + sizeof(struct dec_meta_out)) + +/* FRAME_NUM must be a power of two */ +#define FRAME_NUM (2) +#define FRAME_SIZE ((4*1536) + sizeof(struct dec_meta_in)) + +struct audio_aio_ion_region { + struct list_head list; + struct ion_handle *handle; + int fd; + void *vaddr; + phys_addr_t paddr; + void *kvaddr; + unsigned long len; + unsigned ref_cnt; +}; + +struct audio_aio_event { + struct list_head list; + int event_type; + union msm_audio_event_payload payload; +}; + +struct audio_aio_buffer_node { + struct list_head list; + struct msm_audio_aio_buf buf; + unsigned long paddr; + uint32_t token; + void *kvaddr; + union meta_data meta_info; +}; + +struct q6audio_aio; +struct audio_aio_drv_operations { + void (*out_flush) (struct q6audio_aio *); + void (*in_flush) (struct q6audio_aio *); +}; + +struct q6audio_aio { + atomic_t in_bytes; + atomic_t in_samples; + + struct msm_audio_stream_config str_cfg; + struct msm_audio_buf_cfg buf_cfg; + struct msm_audio_config pcm_cfg; + void *codec_cfg; + + struct audio_client *ac; + + struct mutex lock; + struct mutex read_lock; + struct mutex write_lock; + struct mutex get_event_lock; + wait_queue_head_t cmd_wait; + wait_queue_head_t write_wait; + wait_queue_head_t event_wait; + spinlock_t dsp_lock; + 
spinlock_t event_queue_lock; + + struct miscdevice *miscdevice; + uint32_t wakelock_voted; + struct ws_mgr *audio_ws_mgr; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + struct list_head out_queue; /* queue to retain output buffers */ + struct list_head in_queue; /* queue to retain input buffers */ + struct list_head free_event_queue; + struct list_head event_queue; + struct list_head ion_region_queue; /* protected by lock */ + struct ion_client *client; + struct audio_aio_drv_operations drv_ops; + union msm_audio_event_payload eos_write_payload; + uint32_t device_events; + uint16_t volume; + uint32_t drv_status; + int event_abort; + int eos_rsp; + int eos_flag; + int opened; + int enabled; + int stopped; + int feedback; + int rflush; /* Read flush */ + int wflush; /* Write flush */ + bool reset_event; + long (*codec_ioctl)(struct file *, unsigned int, unsigned long); + long (*codec_compat_ioctl)(struct file *, unsigned int, unsigned long); +}; + +void audio_aio_async_write_ack(struct q6audio_aio *audio, uint32_t token, + uint32_t *payload); + +void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token, + uint32_t *payload); + +int insert_eos_buf(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node); + +void extract_meta_out_info(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node, int dir); + +int audio_aio_open(struct q6audio_aio *audio, struct file *file); +int audio_aio_enable(struct q6audio_aio *audio); +void audio_aio_post_event(struct q6audio_aio *audio, int type, + union msm_audio_event_payload payload); +int audio_aio_release(struct inode *inode, struct file *file); +int audio_aio_fsync(struct file *file, loff_t start, loff_t end, int datasync); +void audio_aio_async_out_flush(struct q6audio_aio *audio); +void audio_aio_async_in_flush(struct q6audio_aio *audio); +void audio_aio_ioport_reset(struct q6audio_aio *audio); +int enable_volume_ramp(struct q6audio_aio *audio); +#ifdef CONFIG_DEBUG_FS +int audio_aio_debug_open(struct inode *inode, struct file *file); +ssize_t audio_aio_debug_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos); +#endif diff --git a/drivers/misc/qcom/qdsp6v2/audio_wma.c b/drivers/misc/qcom/qdsp6v2/audio_wma.c new file mode 100644 index 000000000000..b7dfdf23bec7 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_wma.c @@ -0,0 +1,347 @@ +/* wma audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/types.h> +#include <linux/msm_audio_wma.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_wma_misc; +static struct ws_mgr audio_wma_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_wma_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_wma_cfg wma_cfg; + struct msm_audio_wma_config_v2 *wma_config; + pr_debug("%s[%pK]: AUDIO_START session_id[%d]\n", __func__, + audio, audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg; + wma_cfg.format_tag = wma_config->format_tag; + wma_cfg.ch_cfg = wma_config->numchannels; + wma_cfg.sample_rate = wma_config->samplingrate; + wma_cfg.avg_bytes_per_sec = wma_config->avgbytespersecond; + wma_cfg.block_align = wma_config->block_align; + wma_cfg.valid_bits_per_sample = + wma_config->validbitspersample; + wma_cfg.ch_mask = wma_config->channelmask; + wma_cfg.encode_opt = wma_config->encodeopt; + /* Configure Media format block */ + rc = q6asm_media_format_block_wma(audio->ac, &wma_cfg, + audio->ac->stream_id); + if (rc < 0) { + pr_err("cmd media format block failed\n"); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + break; + } + return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_WMA_CONFIG_V2: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_wma_config_v2))) { + pr_err("%s:copy_to_user for AUDIO_SET_WMA_CONFIG_V2 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_WMA_CONFIG_V2: { + if (copy_from_user(audio->codec_cfg, (void *)arg, + sizeof(struct msm_audio_wma_config_v2))) { + pr_err("%s:copy_from_user for AUDIO_SET_WMA_CONFIG_V2 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_wma_config_v2_32 { + u16 format_tag; + u16 numchannels; + u32 samplingrate; + u32 avgbytespersecond; + u16 block_align; + u16 validbitspersample; + u32 channelmask; + u16 encodeopt; +}; + +enum { + AUDIO_GET_WMA_CONFIG_V2_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+2), struct msm_audio_wma_config_v2_32), + AUDIO_SET_WMA_CONFIG_V2_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+3), struct msm_audio_wma_config_v2_32) +}; 
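For reference, the compat handler below only repacks struct msm_audio_wma_config_v2_32 field by field into the native layout before reusing the same logic, so the call sequence seen from userspace is identical on either ABI. A minimal, hypothetical sketch of a client configuring and starting this decoder (the device node and ioctl names are the ones registered in this file; the UAPI include paths are assumptions, and error handling is trimmed):

	/* Hypothetical userspace sketch - illustrates the ioctl sequence only. */
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/msm_audio.h>      /* AUDIO_START */
	#include <linux/msm_audio_wma.h>  /* AUDIO_SET_WMA_CONFIG_V2 (assumed UAPI header) */

	static int start_wma(const struct msm_audio_wma_config_v2 *cfg)
	{
		/* O_RDWR selects non-tunnel mode in audio_open() */
		int fd = open("/dev/msm_wma", O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, AUDIO_SET_WMA_CONFIG_V2, cfg) < 0 ||
		    ioctl(fd, AUDIO_START, 0) < 0) {
			close(fd);
			return -1;
		}
		return fd;	/* bitstream is then queued through the AIO write path */
	}

Tunnel-mode playback would instead open the same node write-only, matching the FMODE_WRITE-only branch in audio_open().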
+ +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + case AUDIO_GET_WMA_CONFIG_V2_32: { + struct msm_audio_wma_config_v2 *wma_config; + struct msm_audio_wma_config_v2_32 wma_config_32; + + memset(&wma_config_32, 0, sizeof(wma_config_32)); + + wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg; + wma_config_32.format_tag = wma_config->format_tag; + wma_config_32.numchannels = wma_config->numchannels; + wma_config_32.samplingrate = wma_config->samplingrate; + wma_config_32.avgbytespersecond = wma_config->avgbytespersecond; + wma_config_32.block_align = wma_config->block_align; + wma_config_32.validbitspersample = + wma_config->validbitspersample; + wma_config_32.channelmask = wma_config->channelmask; + wma_config_32.encodeopt = wma_config->encodeopt; + if (copy_to_user((void *)arg, &wma_config_32, + sizeof(wma_config_32))) { + pr_err("%s: copy_to_user for GET_WMA_CONFIG_V2_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_SET_WMA_CONFIG_V2_32: { + struct msm_audio_wma_config_v2 *wma_config; + struct msm_audio_wma_config_v2_32 wma_config_32; + + if (copy_from_user(&wma_config_32, (void *)arg, + sizeof(wma_config_32))) { + pr_err("%s: copy_from_user for SET_WMA_CONFIG_V2_32 failed\n" + , __func__); + rc = -EFAULT; + break; + } + wma_config = (struct msm_audio_wma_config_v2 *)audio->codec_cfg; + wma_config->format_tag = wma_config_32.format_tag; + wma_config->numchannels = wma_config_32.numchannels; + wma_config->samplingrate = wma_config_32.samplingrate; + wma_config->avgbytespersecond = wma_config_32.avgbytespersecond; + wma_config->block_align = wma_config_32.block_align; + wma_config->validbitspersample = + wma_config_32.validbitspersample; + wma_config->channelmask = wma_config_32.channelmask; + wma_config->encodeopt = wma_config_32.encodeopt; + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_wma_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for wma decode driver\n"); + return -ENOMEM; + } + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wma_config_v2), + GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_err("%s:Could not allocate memory for wma" + "config\n", __func__); + kfree(audio); + return -ENOMEM; + } + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_wma_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_wma_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && 
(file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_WMA_V9); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open WMA decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_WMA_V9); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_wma_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_wma_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:wmadec success mode[%d]session[%d]\n", __func__, + audio->feedback, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_wma_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_wma_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_wma", + .fops = &audio_wma_fops, +}; + +static int __init audio_wma_init(void) +{ + int ret = misc_register(&audio_wma_misc); + + if (ret == 0) + device_init_wakeup(audio_wma_misc.this_device, true); + audio_wma_ws_mgr.ref_cnt = 0; + mutex_init(&audio_wma_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_wma_init); diff --git a/drivers/misc/qcom/qdsp6v2/audio_wmapro.c b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c new file mode 100644 index 000000000000..d37a5789391c --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/audio_wmapro.c @@ -0,0 +1,420 @@ +/* wmapro audio output device + * + * Copyright (C) 2008 Google, Inc. + * Copyright (C) 2008 HTC Corporation + * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/types.h> +#include <linux/msm_audio_wmapro.h> +#include <linux/compat.h> +#include "audio_utils_aio.h" + +static struct miscdevice audio_wmapro_misc; +static struct ws_mgr audio_wmapro_ws_mgr; + +#ifdef CONFIG_DEBUG_FS +static const struct file_operations audio_wmapro_debug_fops = { + .read = audio_aio_debug_read, + .open = audio_aio_debug_open, +}; +#endif + +static long audio_ioctl_shared(struct file *file, unsigned int cmd, + void *arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: { + struct asm_wmapro_cfg wmapro_cfg; + struct msm_audio_wmapro_config *wmapro_config; + pr_debug("%s: AUDIO_START session_id[%d]\n", __func__, + audio->ac->session); + if (audio->feedback == NON_TUNNEL_MODE) { + /* Configure PCM output block */ + rc = q6asm_enc_cfg_blk_pcm_v2(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count, + 16, /* bits per sample */ + true, /* use default channel map */ + true, /* use back channel map flavor */ + NULL); + if (rc < 0) { + pr_err("pcm output block config failed\n"); + break; + } + } + wmapro_config = (struct msm_audio_wmapro_config *) + audio->codec_cfg; + if ((wmapro_config->formattag == 0x162) || + (wmapro_config->formattag == 0x163) || + (wmapro_config->formattag == 0x166) || + (wmapro_config->formattag == 0x167)) { + wmapro_cfg.format_tag = wmapro_config->formattag; + } else { + pr_err("%s:AUDIO_START failed: formattag = %d\n", + __func__, wmapro_config->formattag); + rc = -EINVAL; + break; + } + if (wmapro_config->numchannels > 0) { + wmapro_cfg.ch_cfg = wmapro_config->numchannels; + } else { + pr_err("%s:AUDIO_START failed: channels = %d\n", + __func__, wmapro_config->numchannels); + rc = -EINVAL; + break; + } + if (wmapro_config->samplingrate > 0) { + wmapro_cfg.sample_rate = wmapro_config->samplingrate; + } else { + pr_err("%s:AUDIO_START failed: sample_rate = %d\n", + __func__, wmapro_config->samplingrate); + rc = -EINVAL; + break; + } + wmapro_cfg.avg_bytes_per_sec = + wmapro_config->avgbytespersecond; + if ((wmapro_config->asfpacketlength <= 13376) || + (wmapro_config->asfpacketlength > 0)) { + wmapro_cfg.block_align = + wmapro_config->asfpacketlength; + } else { + pr_err("%s:AUDIO_START failed: block_align = %d\n", + __func__, wmapro_config->asfpacketlength); + rc = -EINVAL; + break; + } + if ((wmapro_config->validbitspersample == 16) || + (wmapro_config->validbitspersample == 24)) { + wmapro_cfg.valid_bits_per_sample = + wmapro_config->validbitspersample; + } else { + pr_err("%s:AUDIO_START failed: bitspersample = %d\n", + __func__, wmapro_config->validbitspersample); + rc = -EINVAL; + break; + } + wmapro_cfg.ch_mask = wmapro_config->channelmask; + wmapro_cfg.encode_opt = wmapro_config->encodeopt; + wmapro_cfg.adv_encode_opt = + wmapro_config->advancedencodeopt; + wmapro_cfg.adv_encode_opt2 = + wmapro_config->advancedencodeopt2; + /* Configure Media format block */ + rc = q6asm_media_format_block_wmapro(audio->ac, &wmapro_cfg, + audio->ac->stream_id); + if (rc < 0) { + pr_err("cmd media format block failed\n"); + break; + } + rc = audio_aio_enable(audio); + audio->eos_rsp = 0; + audio->eos_flag = 0; + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("Audio Start procedure failed rc=%d\n", rc); + break; + } + pr_debug("AUDIO_START success enable[%d]\n", audio->enabled); + if (audio->stopped == 1) + audio->stopped = 0; + break; + } + default: + pr_err("%s: Unkown ioctl cmd %d\n", __func__, cmd); + rc = -EINVAL; + break; + } + 
return rc; +} + +static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_GET_WMAPRO_CONFIG: { + if (copy_to_user((void *)arg, audio->codec_cfg, + sizeof(struct msm_audio_wmapro_config))) { + pr_err("%s: copy_to_user for AUDIO_GET_WMAPRO_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_WMAPRO_CONFIG: { + if (copy_from_user(audio->codec_cfg, (void *)arg, + sizeof(struct msm_audio_wmapro_config))) { + pr_err("%s: copy_from_user for AUDIO_SET_WMAPRO_CONFIG_V2 failed\n", + __func__); + rc = -EFAULT; + break; + } + break; + } + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} + +#ifdef CONFIG_COMPAT + +struct msm_audio_wmapro_config32 { + u16 armdatareqthr; + u8 validbitspersample; + u8 numchannels; + u16 formattag; + u32 samplingrate; + u32 avgbytespersecond; + u16 asfpacketlength; + u32 channelmask; + u16 encodeopt; + u16 advancedencodeopt; + u32 advancedencodeopt2; +}; + +enum { + AUDIO_GET_WMAPRO_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_wmapro_config32), + AUDIO_SET_WMAPRO_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_wmapro_config32) +}; + +static long audio_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct q6audio_aio *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_GET_WMAPRO_CONFIG_32: { + struct msm_audio_wmapro_config *wmapro_config; + struct msm_audio_wmapro_config32 wmapro_config_32; + + memset(&wmapro_config_32, 0, sizeof(wmapro_config_32)); + + wmapro_config = + (struct msm_audio_wmapro_config *)audio->codec_cfg; + wmapro_config_32.armdatareqthr = wmapro_config->armdatareqthr; + wmapro_config_32.validbitspersample = + wmapro_config->validbitspersample; + wmapro_config_32.numchannels = wmapro_config->numchannels; + wmapro_config_32.formattag = wmapro_config->formattag; + wmapro_config_32.samplingrate = wmapro_config->samplingrate; + wmapro_config_32.avgbytespersecond = + wmapro_config->avgbytespersecond; + wmapro_config_32.asfpacketlength = + wmapro_config->asfpacketlength; + wmapro_config_32.channelmask = wmapro_config->channelmask; + wmapro_config_32.encodeopt = wmapro_config->encodeopt; + wmapro_config_32.advancedencodeopt = + wmapro_config->advancedencodeopt; + wmapro_config_32.advancedencodeopt2 = + wmapro_config->advancedencodeopt2; + + if (copy_to_user((void *)arg, &wmapro_config_32, + sizeof(struct msm_audio_wmapro_config32))) { + pr_err("%s: copy_to_user for AUDIO_GET_WMAPRO_CONFIG_V2_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_WMAPRO_CONFIG_32: { + struct msm_audio_wmapro_config *wmapro_config; + struct msm_audio_wmapro_config32 wmapro_config_32; + + if (copy_from_user(&wmapro_config_32, (void *)arg, + sizeof(struct msm_audio_wmapro_config32))) { + pr_err( + "%s: copy_from_user for AUDIO_SET_WMAPRO_CONFG_V2_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + wmapro_config = + (struct msm_audio_wmapro_config *)audio->codec_cfg; + wmapro_config->armdatareqthr = wmapro_config_32.armdatareqthr; + wmapro_config->validbitspersample = + wmapro_config_32.validbitspersample; + wmapro_config->numchannels = 
wmapro_config_32.numchannels; + wmapro_config->formattag = wmapro_config_32.formattag; + wmapro_config->samplingrate = wmapro_config_32.samplingrate; + wmapro_config->avgbytespersecond = + wmapro_config_32.avgbytespersecond; + wmapro_config->asfpacketlength = + wmapro_config_32.asfpacketlength; + wmapro_config->channelmask = wmapro_config_32.channelmask; + wmapro_config->encodeopt = wmapro_config_32.encodeopt; + wmapro_config->advancedencodeopt = + wmapro_config_32.advancedencodeopt; + wmapro_config->advancedencodeopt2 = + wmapro_config_32.advancedencodeopt2; + break; + } + case AUDIO_START: { + rc = audio_ioctl_shared(file, cmd, (void *)arg); + break; + } + default: { + pr_debug("%s[%pK]: Calling utils ioctl\n", __func__, audio); + rc = audio->codec_compat_ioctl(file, cmd, arg); + if (rc) + pr_err("Failed in utils_ioctl: %d\n", rc); + break; + } + } + return rc; +} +#else +#define audio_compat_ioctl NULL +#endif + +static int audio_open(struct inode *inode, struct file *file) +{ + struct q6audio_aio *audio = NULL; + int rc = 0; + +#ifdef CONFIG_DEBUG_FS + /* 4 bytes represents decoder number, 1 byte for terminate string */ + char name[sizeof "msm_wmapro_" + 5]; +#endif + audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); + + if (audio == NULL) { + pr_err("Could not allocate memory for wma decode driver\n"); + return -ENOMEM; + } + audio->codec_cfg = kzalloc(sizeof(struct msm_audio_wmapro_config), + GFP_KERNEL); + if (audio->codec_cfg == NULL) { + pr_err("%s: Could not allocate memory for wmapro" + "config\n", __func__); + kfree(audio); + return -ENOMEM; + } + + + audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; + audio->miscdevice = &audio_wmapro_misc; + audio->wakelock_voted = false; + audio->audio_ws_mgr = &audio_wmapro_ws_mgr; + + audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("Could not allocate memory for audio client\n"); + kfree(audio->codec_cfg); + kfree(audio); + return -ENOMEM; + } + + rc = audio_aio_open(audio, file); + if (rc < 0) { + pr_err("%s: audio_aio_open rc=%d\n", + __func__, rc); + goto fail; + } + /* open in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { + rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, + FORMAT_WMA_V10PRO); + if (rc < 0) { + pr_err("NT mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = NON_TUNNEL_MODE; + /* open WMA decoder, expected frames is always 1*/ + audio->buf_cfg.frames_per_buf = 0x01; + audio->buf_cfg.meta_info_enable = 0x01; + } else if ((file->f_mode & FMODE_WRITE) && + !(file->f_mode & FMODE_READ)) { + rc = q6asm_open_write(audio->ac, FORMAT_WMA_V10PRO); + if (rc < 0) { + pr_err("T mode Open failed rc=%d\n", rc); + rc = -ENODEV; + goto fail; + } + audio->feedback = TUNNEL_MODE; + audio->buf_cfg.meta_info_enable = 0x00; + } else { + pr_err("Not supported mode\n"); + rc = -EACCES; + goto fail; + } + +#ifdef CONFIG_DEBUG_FS + snprintf(name, sizeof name, "msm_wmapro_%04x", audio->ac->session); + audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, + NULL, (void *)audio, + &audio_wmapro_debug_fops); + + if (IS_ERR(audio->dentry)) + pr_debug("debugfs_create_file failed\n"); +#endif + pr_info("%s:wmapro decoder open success, session_id = %d\n", __func__, + audio->ac->session); + return rc; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->codec_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_wmapro_fops = { + .owner = THIS_MODULE, + .open = audio_open, + 
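+ /* No .read/.write here: audio data is exchanged through the asynchronous
+  * buffer ioctls handled by the shared audio_utils_aio helpers. */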
.release = audio_aio_release, + .unlocked_ioctl = audio_ioctl, + .fsync = audio_aio_fsync, + .compat_ioctl = audio_compat_ioctl +}; + +static struct miscdevice audio_wmapro_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_wmapro", + .fops = &audio_wmapro_fops, +}; + +static int __init audio_wmapro_init(void) +{ + int ret = misc_register(&audio_wmapro_misc); + + if (ret == 0) + device_init_wakeup(audio_wmapro_misc.this_device, true); + audio_wmapro_ws_mgr.ref_cnt = 0; + mutex_init(&audio_wmapro_ws_mgr.ws_lock); + + return ret; +} + +device_initcall(audio_wmapro_init); diff --git a/drivers/misc/qcom/qdsp6v2/evrc_in.c b/drivers/misc/qcom/qdsp6v2/evrc_in.c new file mode 100644 index 000000000000..aab8e27c0094 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/evrc_in.c @@ -0,0 +1,410 @@ +/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/msm_audio_qcp.h> +#include <linux/atomic.h> +#include <linux/compat.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((23+sizeof(struct meta_out_dsp)) * 10)) + +static long evrc_in_ioctl_shared(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_evrc_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + /* rate_modulation_cmd set to zero + currently not configurable from user space */ + rc = q6asm_enc_cfg_blk_evrc(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate, 0); + + if (rc < 0) { + pr_err("%s:session id %d: cmd evrc media format block failed\n", + __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_err("%s:session id %d: media format block failed\n", + __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + 
q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + break; + } + case AUDIO_SET_EVRC_ENC_CONFIG: { + struct msm_audio_evrc_enc_config *cfg; + struct msm_audio_evrc_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + + cfg = (struct msm_audio_evrc_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer for %s\n", + __func__, "AUDIO_SET_EVRC_ENC_CONFIG"); + rc = -EINVAL; + break; + } + if (cfg->min_bit_rate > 4 || + cfg->min_bit_rate < 1 || + (cfg->min_bit_rate == 2)) { + pr_err("%s:session id %d: invalid min bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + if (cfg->max_bit_rate > 4 || + cfg->max_bit_rate < 1 || + (cfg->max_bit_rate == 2)) { + pr_err("%s:session id %d: invalid max bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->min_bit_rate = cfg->min_bit_rate; + enc_cfg->max_bit_rate = cfg->max_bit_rate; + pr_debug("%s:session id %d: min_bit_rate= 0x%x max_bit_rate=0x%x\n", + __func__, + audio->ac->session, enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +static long evrc_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = evrc_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_EVRC_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_evrc_enc_config))) { + pr_err("%s: copy_to_user for AUDIO_GET_EVRC_ENC_CONFIG failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_EVRC_ENC_CONFIG: { + struct msm_audio_evrc_enc_config cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(struct msm_audio_evrc_enc_config))) { + pr_err("%s: copy_from_user for AUDIO_SET_EVRC_ENC_CONFIG failed\n", + __func__); + rc = -EFAULT; + break; + } + rc = evrc_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_SET_EVRC_ENC_CONFIG failed. 
rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_evrc_enc_config32 { + u32 cdma_rate; + u32 min_bit_rate; + u32 max_bit_rate; +}; + +enum { + AUDIO_SET_EVRC_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + 2, struct msm_audio_evrc_enc_config32), + AUDIO_GET_EVRC_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + 3, struct msm_audio_evrc_enc_config32) +}; + +static long evrc_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = evrc_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_EVRC_ENC_CONFIG_32: { + struct msm_audio_evrc_enc_config32 cfg_32; + struct msm_audio_evrc_enc_config *enc_cfg; + + memset(&cfg_32, 0, sizeof(cfg_32)); + + enc_cfg = audio->enc_cfg; + cfg_32.cdma_rate = enc_cfg->cdma_rate; + cfg_32.min_bit_rate = enc_cfg->min_bit_rate; + cfg_32.max_bit_rate = enc_cfg->max_bit_rate; + + if (copy_to_user((void *)arg, &cfg_32, + sizeof(cfg_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_EVRC_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_EVRC_ENC_CONFIG_32: { + struct msm_audio_evrc_enc_config cfg; + struct msm_audio_evrc_enc_config32 cfg_32; + if (copy_from_user(&cfg_32, (void *) arg, + sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_EVRC_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.cdma_rate = cfg_32.cdma_rate; + cfg.min_bit_rate = cfg_32.min_bit_rate; + cfg.max_bit_rate = cfg_32.max_bit_rate; + cmd = AUDIO_SET_EVRC_ENC_CONFIG; + rc = evrc_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_SET_EVRC_ENC_CONFIG failed. 
rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} +#else +#define evrc_in_compat_ioctl NULL +#endif + +static int evrc_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_evrc_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_err("%s: Could not allocate memory for evrc driver\n", + __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_evrc_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_err("%s:session id %d: Could not allocate memory for aac config param\n", + __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 23; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->min_bit_rate = 4; + enc_cfg->max_bit_rate = 4; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->event_abort = 0; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s: Could not allocate memory for audio client\n", + __func__); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open evrc encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_EVRC, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: NT mode encoder success\n", + __func__, audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_EVRC); + if (rc < 0) { + pr_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n", + __func__, + audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + audio->reset_event = false; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = evrc_in_compat_ioctl; + audio->enc_ioctl = evrc_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + 
kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = evrc_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, + .compat_ioctl = audio_in_compat_ioctl +}; + +struct miscdevice audio_evrc_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_evrc_in", + .fops = &audio_in_fops, +}; + +static int __init evrc_in_init(void) +{ + return misc_register(&audio_evrc_in_misc); +} + +device_initcall(evrc_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/g711alaw_in.c b/drivers/misc/qcom/qdsp6v2/g711alaw_in.c new file mode 100644 index 000000000000..ac720b53ff5b --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/g711alaw_in.c @@ -0,0 +1,382 @@ +/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/msm_audio_g711.h> +#include <linux/atomic.h> +#include <linux/compat.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((320+sizeof(struct meta_out_dsp)) * 10)) +static long g711_in_ioctl_shared(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_g711_enc_config *enc_cfg; + + enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + pr_debug("%s: sample rate %d", __func__, enc_cfg->sample_rate); + rc = q6asm_enc_cfg_blk_g711(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->sample_rate); + + if (rc < 0) { + pr_err("%s:session id %d: cmd g711 media format block failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_err("%s:session id %d: media format block failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__, + audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer 
to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n", + __func__, audio->ac->session, + rc); + break; + } + break; + } + case AUDIO_SET_G711_ENC_CONFIG: { + struct msm_audio_g711_enc_config *cfg; + struct msm_audio_g711_enc_config *enc_cfg; + + enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg; + + cfg = (struct msm_audio_g711_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer\n", __func__); + rc = -EINVAL; + break; + } + if (cfg->sample_rate != 8000 && + cfg->sample_rate != 16000) { + pr_err("%s:session id %d: invalid sample rate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->sample_rate = cfg->sample_rate; + pr_debug("%s:session id %d: sample_rate= 0x%x", + __func__, + audio->ac->session, enc_cfg->sample_rate); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -ENOIOCTLCMD; + } + return rc; +} + +static long g711_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = g711_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_G711_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_g711_enc_config))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_g711_ENC_CONFIG failed", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_ENC_CONFIG: { + struct msm_audio_g711_enc_config cfg; + + if (copy_from_user(&cfg, (void *) arg, + sizeof(cfg))) { + pr_err( + "%s: copy_from_user for AUDIO_GET_G711_ENC_CONFIG failed", + __func__); + rc = -EFAULT; + break; + } + rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_GET_G711_ENC_CONFIG failed. 
Rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -ENOIOCTLCMD; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_g711_enc_config32 { + uint32_t sample_rate; +}; + +enum { + AUDIO_SET_G711_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_enc_config32), + AUDIO_GET_G711_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_enc_config32) +}; + +static long g711_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = g711_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_G711_ENC_CONFIG_32: { + struct msm_audio_g711_enc_config32 cfg_32; + struct msm_audio_g711_enc_config32 *enc_cfg; + + enc_cfg = (struct msm_audio_g711_enc_config32 *)audio->enc_cfg; + cfg_32.sample_rate = enc_cfg->sample_rate; + if (copy_to_user((void *)arg, &cfg_32, + sizeof(cfg_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_G711_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_ENC_CONFIG_32: { + struct msm_audio_g711_enc_config32 cfg_32; + struct msm_audio_g711_enc_config32 cfg; + + if (copy_from_user(&cfg_32, (void *) arg, + sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_G711_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.sample_rate = cfg_32.sample_rate; + cmd = AUDIO_SET_G711_ENC_CONFIG; + rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_SET_G711_ENC_CONFIG failed. rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -ENOIOCTLCMD; + } + return rc; +} +#else +#define g711_in_compat_ioctl NULL +#endif + +static int g711_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_g711_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) + return -ENOMEM; + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_g711_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* + * Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 320; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->sample_rate = 8000; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->event_abort = 0; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open g711 encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, 
FORMAT_G711_ALAW_FS, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_G711_ALAW_FS); + if (rc < 0) { + pr_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + audio->reset_event = false; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = g711_in_compat_ioctl; + audio->enc_ioctl = g711_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = g711_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = audio_in_compat_ioctl, +#endif +}; + +struct miscdevice audio_g711alaw_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_g711alaw_in", + .fops = &audio_in_fops, +}; + +static int __init g711alaw_in_init(void) +{ + return misc_register(&audio_g711alaw_in_misc); +} + +device_initcall(g711alaw_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/g711mlaw_in.c b/drivers/misc/qcom/qdsp6v2/g711mlaw_in.c new file mode 100644 index 000000000000..6660f83683f8 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/g711mlaw_in.c @@ -0,0 +1,385 @@ +/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/msm_audio_g711.h> +#include <linux/atomic.h> +#include <linux/compat.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +#ifdef CONFIG_COMPAT +#undef PROC_ADD +#endif +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((320+sizeof(struct meta_out_dsp)) * 10)) +static long g711_in_ioctl_shared(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_g711_enc_config *enc_cfg; + + enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + pr_debug("%s: sample rate %d", __func__, enc_cfg->sample_rate); + rc = q6asm_enc_cfg_blk_g711(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->sample_rate); + + if (rc < 0) { + pr_err("%s:session id %d: cmd g711 media format block failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_err("%s:session id %d: media format block failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__, + audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n", + __func__, audio->ac->session, + rc); + break; + } + break; + } + case AUDIO_SET_G711_ENC_CONFIG: { + struct msm_audio_g711_enc_config *cfg; + struct msm_audio_g711_enc_config *enc_cfg; + + enc_cfg = (struct msm_audio_g711_enc_config *)audio->enc_cfg; + + cfg = (struct msm_audio_g711_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer\n", __func__); + rc = -EINVAL; + break; + } + if (cfg->sample_rate != 8000 && + cfg->sample_rate != 16000) { + pr_err("%s:session id %d: invalid sample rate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->sample_rate = cfg->sample_rate; + pr_debug("%s:session id %d: sample_rate= 0x%x", + __func__, + audio->ac->session, enc_cfg->sample_rate); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -ENOIOCTLCMD; + } + return rc; +} + +static long g711_in_ioctl(struct file *file, + unsigned int cmd, unsigned long 
arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = g711_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_G711_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_g711_enc_config))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_g711_ENC_CONFIG failed", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_ENC_CONFIG: { + struct msm_audio_g711_enc_config cfg; + + if (copy_from_user(&cfg, (void *) arg, + sizeof(cfg))) { + pr_err( + "%s: copy_from_user for AUDIO_GET_G711_ENC_CONFIG failed", + __func__); + rc = -EFAULT; + break; + } + rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_GET_G711_ENC_CONFIG failed. Rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -ENOIOCTLCMD; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_g711_enc_config32 { + uint32_t sample_rate; +}; + +enum { + AUDIO_SET_G711_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+0), struct msm_audio_g711_enc_config32), + AUDIO_GET_G711_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + (AUDIO_MAX_COMMON_IOCTL_NUM+1), struct msm_audio_g711_enc_config32) +}; + +static long g711_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = g711_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_G711_ENC_CONFIG_32: { + struct msm_audio_g711_enc_config32 cfg_32; + struct msm_audio_g711_enc_config32 *enc_cfg; + + enc_cfg = (struct msm_audio_g711_enc_config32 *)audio->enc_cfg; + cfg_32.sample_rate = enc_cfg->sample_rate; + if (copy_to_user((void *)arg, &cfg_32, + sizeof(cfg_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_G711_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_G711_ENC_CONFIG_32: { + struct msm_audio_g711_enc_config32 cfg_32; + struct msm_audio_g711_enc_config32 cfg; + + if (copy_from_user(&cfg_32, (void *) arg, + sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_G711_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.sample_rate = cfg_32.sample_rate; + cmd = AUDIO_SET_G711_ENC_CONFIG; + rc = g711_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_SET_G711_ENC_CONFIG failed. 
rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -ENOIOCTLCMD; + } + return rc; +} +#else +#define g711_in_compat_ioctl NULL +#endif + +static int g711_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_g711_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) + return -ENOMEM; + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_g711_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* + * Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 320; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->sample_rate = 8000; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->event_abort = 0; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open g711 encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_G711_MLAW_FS, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_G711_MLAW_FS); + if (rc < 0) { + pr_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + audio->reset_event = false; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = g711_in_compat_ioctl; + audio->enc_ioctl = g711_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = g711_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = audio_in_compat_ioctl, +#endif +}; + +struct miscdevice audio_g711mlaw_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = 
"msm_g711mlaw_in", + .fops = &audio_in_fops, +}; + +static int __init g711mlaw_in_init(void) +{ + return misc_register(&audio_g711mlaw_in_misc); +} + +device_initcall(g711mlaw_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/q6audio_common.h b/drivers/misc/qcom/qdsp6v2/q6audio_common.h new file mode 100644 index 000000000000..c41d0b651656 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/q6audio_common.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + + +/* For Decoders */ +#ifndef __Q6_AUDIO_COMMON_H__ +#define __Q6_AUDIO_COMMON_H__ + +#include <sound/apr_audio-v2.h> +#include <sound/q6asm-v2.h> + + +void q6_audio_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv); + +void audio_aio_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *audio); + + +/* For Encoders */ +void q6asm_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv); + +void audio_in_get_dsp_frames(void *audio, + uint32_t token, uint32_t *payload); + +#endif /*__Q6_AUDIO_COMMON_H__*/ diff --git a/drivers/misc/qcom/qdsp6v2/q6audio_v2.c b/drivers/misc/qcom/qdsp6v2/q6audio_v2.c new file mode 100644 index 000000000000..51ba23da1270 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/q6audio_v2.c @@ -0,0 +1,106 @@ +/* Copyright (c) 2012-2013, 2015-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +void q6asm_in_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_in *audio = (struct q6audio_in *)priv; + unsigned long flags; + + pr_debug("%s:session id %d: opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + + spin_lock_irqsave(&audio->dsp_lock, flags); + switch (opcode) { + case ASM_DATA_EVENT_READ_DONE_V2: + audio_in_get_dsp_frames(audio, token, payload); + break; + case ASM_DATA_EVENT_WRITE_DONE_V2: + atomic_inc(&audio->in_count); + wake_up(&audio->write_wait); + break; + case ASM_DATA_EVENT_RENDERED_EOS: + audio->eos_rsp = 1; + wake_up(&audio->read_wait); + break; + case ASM_STREAM_CMDRSP_GET_PP_PARAMS_V2: + break; + case ASM_SESSION_EVENTX_OVERFLOW: + pr_err("%s:session id %d: ASM_SESSION_EVENT_TX_OVERFLOW\n", + __func__, audio->ac->session); + break; + case RESET_EVENTS: + pr_debug("%s:received RESET EVENTS\n", __func__); + audio->enabled = 0; + audio->stopped = 1; + audio->event_abort = 1; + audio->reset_event = true; + wake_up(&audio->read_wait); + wake_up(&audio->write_wait); + break; + default: + pr_debug("%s:session id %d: Ignore opcode[0x%x]\n", __func__, + audio->ac->session, opcode); + break; + } + spin_unlock_irqrestore(&audio->dsp_lock, flags); +} + +void audio_in_get_dsp_frames(void *priv, + uint32_t token, uint32_t *payload) +{ + struct q6audio_in *audio = (struct q6audio_in *)priv; + uint32_t index; + + index = q6asm_get_buf_index_from_token(token); + pr_debug("%s:session id %d: index=%d nr frames=%d offset[%d]\n", + __func__, audio->ac->session, token, payload[9], + payload[5]); + pr_debug("%s:session id %d: timemsw=%d lsw=%d\n", __func__, + audio->ac->session, payload[7], payload[6]); + pr_debug("%s:session id %d: uflags=0x%8x uid=0x%8x\n", __func__, + audio->ac->session, payload[8], payload[10]); + pr_debug("%s:session id %d: enc_framesotal_size=0x%8x\n", __func__, + audio->ac->session, payload[4]); + + /* Ensure the index is within max array size: FRAME_NUM */ + if (index >= FRAME_NUM) { + pr_err("%s: Invalid index %d\n", + __func__, index); + return; + } + + audio->out_frame_info[index][0] = payload[9]; + audio->out_frame_info[index][1] = payload[5]; + + /* statistics of read */ + atomic_add(payload[4], &audio->in_bytes); + atomic_add(payload[9], &audio->in_samples); + + if (atomic_read(&audio->out_count) <= audio->str_cfg.buffer_count) { + atomic_inc(&audio->out_count); + wake_up(&audio->read_wait); + } +} diff --git a/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c b/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c new file mode 100644 index 000000000000..09b83f354406 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/q6audio_v2_aio.c @@ -0,0 +1,220 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <asm/ioctls.h> +#include "audio_utils_aio.h" + +void q6_audio_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct q6audio_aio *audio = (struct q6audio_aio *)priv; + + pr_debug("%s:opcode = %x token = 0x%x\n", __func__, opcode, token); + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE_V2: + case ASM_DATA_EVENT_READ_DONE_V2: + case ASM_DATA_EVENT_RENDERED_EOS: + case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2: + case ASM_STREAM_CMD_SET_ENCDEC_PARAM: + case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY: + case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: + case RESET_EVENTS: + audio_aio_cb(opcode, token, payload, audio); + break; + default: + pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); + break; + } +} + +void audio_aio_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv/*struct q6audio_aio *audio*/) +{ + struct q6audio_aio *audio = (struct q6audio_aio *)priv; + union msm_audio_event_payload e_payload; + + switch (opcode) { + case ASM_DATA_EVENT_WRITE_DONE_V2: + pr_debug("%s[%pK]:ASM_DATA_EVENT_WRITE_DONE token = 0x%x\n", + __func__, audio, token); + audio_aio_async_write_ack(audio, token, payload); + break; + case ASM_DATA_EVENT_READ_DONE_V2: + pr_debug("%s[%pK]:ASM_DATA_EVENT_READ_DONE token = 0x%x\n", + __func__, audio, token); + audio_aio_async_read_ack(audio, token, payload); + break; + case ASM_DATA_EVENT_RENDERED_EOS: + /* EOS Handle */ + pr_debug("%s[%pK]:ASM_DATA_CMDRSP_EOS\n", __func__, audio); + if (audio->feedback) { /* Non-Tunnel mode */ + audio->eos_rsp = 1; + /* propagate input EOS i/p buffer, + after receiving DSP acknowledgement */ + if (audio->eos_flag && + (audio->eos_write_payload.aio_buf.buf_addr)) { + audio_aio_post_event(audio, + AUDIO_EVENT_WRITE_DONE, + audio->eos_write_payload); + memset(&audio->eos_write_payload , 0, + sizeof(union msm_audio_event_payload)); + audio->eos_flag = 0; + } + } else { /* Tunnel mode */ + audio->eos_rsp = 1; + wake_up(&audio->write_wait); + wake_up(&audio->cmd_wait); + } + break; + case ASM_DATA_CMD_MEDIA_FMT_UPDATE_V2: + case ASM_STREAM_CMD_SET_ENCDEC_PARAM: + pr_debug("%s[%pK]:payload0[%x] payloa1d[%x]opcode= 0x%x\n", + __func__, audio, payload[0], payload[1], opcode); + break; + case ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY: + case ASM_DATA_EVENT_ENC_SR_CM_CHANGE_NOTIFY: + pr_debug("%s[%pK]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, payload[0]-sr = %d, payload[1]-chl = %d, payload[2] = %d, payload[3] = %d\n", + __func__, audio, payload[0], + payload[1], payload[2], payload[3]); + + pr_debug("%s[%pK]: ASM_DATA_EVENT_SR_CM_CHANGE_NOTIFY, sr(prev) = %d, chl(prev) = %d,", + __func__, audio, audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + audio->pcm_cfg.sample_rate = payload[0]; + audio->pcm_cfg.channel_count = payload[1] & 0xFFFF; + e_payload.stream_info.chan_info = audio->pcm_cfg.channel_count; + e_payload.stream_info.sample_rate = audio->pcm_cfg.sample_rate; + audio_aio_post_event(audio, AUDIO_EVENT_STREAM_INFO, e_payload); + break; + case RESET_EVENTS: + pr_err("%s: Received opcode:0x%x\n", __func__, opcode); + audio->stopped = 1; + audio->reset_event = true; + wake_up(&audio->event_wait); + break; + default: + break; + } +} + +void extract_meta_out_info(struct q6audio_aio *audio, + struct audio_aio_buffer_node *buf_node, int dir) +{ + 
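(Editorial sketch, not part of the patch.) extract_meta_out_info() branches on dir: a non-zero dir marks an input (write) buffer whose first bytes carry a user-supplied struct dec_meta_in, while dir == 0 marks an output (read) buffer whose head is overwritten with the driver's struct dec_meta_out, exchanging the 32-bit halves of the first frame's DSP timestamp. A minimal, self-contained illustration of that exchange, assuming only that the timestamp arrives as two 32-bit words:

#include <stdint.h>

/* Swap the most- and least-significant 32-bit words of a split timestamp,
 * mirroring the msw_ts/lsw_ts exchange done for output buffers. */
static void swap_ts_halves(uint32_t *msw_ts, uint32_t *lsw_ts)
{
        uint32_t tmp = *msw_ts;

        *msw_ts = *lsw_ts;
        *lsw_ts = tmp;
}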
struct dec_meta_out *meta_data = buf_node->kvaddr; + uint32_t temp; + + if (dir) { /* input buffer - Write */ + if (audio->buf_cfg.meta_info_enable) + memcpy(&buf_node->meta_info.meta_in, + (char *)buf_node->kvaddr, sizeof(struct dec_meta_in)); + else + memset(&buf_node->meta_info.meta_in, + 0, sizeof(struct dec_meta_in)); + pr_debug("%s[%pK]:i/p: msw_ts %d lsw_ts %d nflags 0x%8x\n", + __func__, audio, + buf_node->meta_info.meta_in.ntimestamp.highpart, + buf_node->meta_info.meta_in.ntimestamp.lowpart, + buf_node->meta_info.meta_in.nflags); + } else { /* output buffer - Read */ + memcpy((char *)buf_node->kvaddr, + &buf_node->meta_info.meta_out, + sizeof(struct dec_meta_out)); + meta_data->meta_out_dsp[0].nflags = 0x00000000; + temp = meta_data->meta_out_dsp[0].msw_ts; + meta_data->meta_out_dsp[0].msw_ts = + meta_data->meta_out_dsp[0].lsw_ts; + meta_data->meta_out_dsp[0].lsw_ts = temp; + + pr_debug("%s[%pK]:o/p: msw_ts %d lsw_ts %d nflags 0x%8x, num_frames = %d\n", + __func__, audio, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].msw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].lsw_ts, + ((struct dec_meta_out *)buf_node->kvaddr)->\ + meta_out_dsp[0].nflags, + ((struct dec_meta_out *)buf_node->kvaddr)->num_of_frames); + } +} + +/* Read buffer from DSP / Handle Ack from DSP */ +void audio_aio_async_read_ack(struct q6audio_aio *audio, uint32_t token, + uint32_t *payload) +{ + unsigned long flags; + union msm_audio_event_payload event_payload; + struct audio_aio_buffer_node *filled_buf; + pr_debug("%s\n", __func__); + + /* No active flush in progress */ + if (audio->rflush) + return; + + /* Statistics of read */ + atomic_add(payload[4], &audio->in_bytes); + atomic_add(payload[9], &audio->in_samples); + + spin_lock_irqsave(&audio->dsp_lock, flags); + if (list_empty(&audio->in_queue)) { + spin_unlock_irqrestore(&audio->dsp_lock, flags); + pr_warning("%s unexpected ack from dsp\n", __func__); + return; + } + filled_buf = list_first_entry(&audio->in_queue, + struct audio_aio_buffer_node, list); + + pr_debug("%s token: 0x[%x], filled_buf->token: 0x[%x]", + __func__, token, filled_buf->token); + if (token == (filled_buf->token)) { + list_del(&filled_buf->list); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + event_payload.aio_buf = filled_buf->buf; + /* Read done Buffer due to flush/normal condition + after EOS event, so append EOS buffer */ + if (audio->eos_rsp == 0x1) { + event_payload.aio_buf.data_len = + insert_eos_buf(audio, filled_buf); + /* Reset flag back to indicate eos intimated */ + audio->eos_rsp = 0; + } else { + filled_buf->meta_info.meta_out.num_of_frames\ + = payload[9]; + event_payload.aio_buf.data_len = payload[4]\ + + payload[5] + sizeof(struct dec_meta_out); + pr_debug("%s[%pK]:nr of frames 0x%8x len=%d\n", + __func__, audio, + filled_buf->meta_info.meta_out.num_of_frames, + event_payload.aio_buf.data_len); + extract_meta_out_info(audio, filled_buf, 0); + audio->eos_rsp = 0; + } + pr_debug("%s, posting read done to the app here\n", __func__); + audio_aio_post_event(audio, AUDIO_EVENT_READ_DONE, + event_payload); + kfree(filled_buf); + } else { + pr_err("%s[%pK]:expected=%x ret=%x\n", + __func__, audio, filled_buf->token, token); + spin_unlock_irqrestore(&audio->dsp_lock, flags); + } +} diff --git a/drivers/misc/qcom/qdsp6v2/qcelp_in.c b/drivers/misc/qcom/qdsp6v2/qcelp_in.c new file mode 100644 index 000000000000..aabf5d33a507 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/qcelp_in.c @@ -0,0 +1,410 @@ +/* Copyright (c) 2010-2016, The 
Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * +*/ + +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/msm_audio_qcp.h> +#include <linux/atomic.h> +#include <linux/compat.h> +#include <asm/ioctls.h> +#include "audio_utils.h" + +/* Buffer with meta*/ +#define PCM_BUF_SIZE (4096 + sizeof(struct meta_in)) + +/* Maximum 10 frames in buffer with meta */ +#define FRAME_SIZE (1 + ((35+sizeof(struct meta_out_dsp)) * 10)) + +static long qcelp_in_ioctl_shared(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + int cnt = 0; + + switch (cmd) { + case AUDIO_START: { + struct msm_audio_qcelp_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + pr_debug("%s:session id %d: default buf alloc[%d]\n", __func__, + audio->ac->session, audio->buf_alloc); + if (audio->enabled == 1) { + pr_info("%s:AUDIO_START already over\n", __func__); + rc = 0; + break; + } + rc = audio_in_buf_alloc(audio); + if (rc < 0) { + pr_err("%s:session id %d: buffer allocation failed\n", + __func__, audio->ac->session); + break; + } + + /* reduced_rate_level, rate_modulation_cmd set to zero + currently not configurable from user space */ + rc = q6asm_enc_cfg_blk_qcelp(audio->ac, + audio->buf_cfg.frames_per_buf, + enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate, 0, 0); + + if (rc < 0) { + pr_err("%s:session id %d: cmd qcelp media format block failed\n", + __func__, audio->ac->session); + break; + } + if (audio->feedback == NON_TUNNEL_MODE) { + rc = q6asm_media_format_block_pcm(audio->ac, + audio->pcm_cfg.sample_rate, + audio->pcm_cfg.channel_count); + + if (rc < 0) { + pr_err("%s:session id %d: media format block failed\n", + __func__, audio->ac->session); + break; + } + } + pr_debug("%s:session id %d: AUDIO_START enable[%d]\n", __func__, + audio->ac->session, audio->enabled); + rc = audio_in_enable(audio); + if (!rc) { + audio->enabled = 1; + } else { + audio->enabled = 0; + pr_err("%s:session id %d: Audio Start procedure failed rc=%d\n", + __func__, audio->ac->session, rc); + break; + } + while (cnt++ < audio->str_cfg.buffer_count) + q6asm_read(audio->ac); /* Push buffer to DSP */ + rc = 0; + pr_debug("%s:session id %d: AUDIO_START success enable[%d]\n", + __func__, audio->ac->session, audio->enabled); + break; + } + case AUDIO_STOP: { + pr_debug("%s:session id %d: AUDIO_STOP\n", __func__, + audio->ac->session); + rc = audio_in_disable(audio); + if (rc < 0) { + pr_err("%s:session id %d: Audio Stop procedure failed rc=%d\n", + __func__, audio->ac->session, + rc); + break; + } + break; + } + case AUDIO_SET_QCELP_ENC_CONFIG: { + struct msm_audio_qcelp_enc_config *cfg; + struct msm_audio_qcelp_enc_config *enc_cfg; + enc_cfg = audio->enc_cfg; + + cfg = (struct msm_audio_qcelp_enc_config *)arg; + if (cfg == NULL) { + pr_err("%s: NULL config pointer\n", __func__); + rc = -EINVAL; + break; + } + if (cfg->min_bit_rate > 4 || + 
cfg->min_bit_rate < 1) { + pr_err("%s:session id %d: invalid min bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + if (cfg->max_bit_rate > 4 || + cfg->max_bit_rate < 1) { + pr_err("%s:session id %d: invalid max bitrate\n", + __func__, audio->ac->session); + rc = -EINVAL; + break; + } + enc_cfg->cdma_rate = cfg->cdma_rate; + enc_cfg->min_bit_rate = cfg->min_bit_rate; + enc_cfg->max_bit_rate = cfg->max_bit_rate; + pr_debug("%s:session id %d: min_bit_rate= 0x%x max_bit_rate=0x%x\n", + __func__, + audio->ac->session, enc_cfg->min_bit_rate, + enc_cfg->max_bit_rate); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +static long qcelp_in_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = qcelp_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_QCELP_ENC_CONFIG: { + if (copy_to_user((void *)arg, audio->enc_cfg, + sizeof(struct msm_audio_qcelp_enc_config))) { + pr_err( + "%s: copy_to_user for AUDIO_GET_QCELP_ENC_CONFIG failed", + __func__); + rc = -EFAULT; + } + break; + } + case AUDIO_SET_QCELP_ENC_CONFIG: { + struct msm_audio_qcelp_enc_config cfg; + if (copy_from_user(&cfg, (void *) arg, + sizeof(cfg))) { + pr_err( + "%s: copy_from_user for AUDIO_SET_QCELP_ENC_CONFIG failed", + __func__); + rc = -EFAULT; + break; + } + rc = qcelp_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_SET_QCELP_ENC_CONFIG failed. Rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} + +#ifdef CONFIG_COMPAT +struct msm_audio_qcelp_enc_config32 { + u32 cdma_rate; + u32 min_bit_rate; + u32 max_bit_rate; +}; + +enum { + AUDIO_SET_QCELP_ENC_CONFIG_32 = _IOW(AUDIO_IOCTL_MAGIC, + 0, struct msm_audio_qcelp_enc_config32), + AUDIO_GET_QCELP_ENC_CONFIG_32 = _IOR(AUDIO_IOCTL_MAGIC, + 1, struct msm_audio_qcelp_enc_config32) +}; + +static long qcelp_in_compat_ioctl(struct file *file, + unsigned int cmd, unsigned long arg) +{ + struct q6audio_in *audio = file->private_data; + int rc = 0; + + switch (cmd) { + case AUDIO_START: + case AUDIO_STOP: { + rc = qcelp_in_ioctl_shared(file, cmd, arg); + break; + } + case AUDIO_GET_QCELP_ENC_CONFIG_32: { + struct msm_audio_qcelp_enc_config32 cfg_32; + struct msm_audio_qcelp_enc_config *enc_cfg; + + memset(&cfg_32, 0, sizeof(cfg_32)); + + enc_cfg = (struct msm_audio_qcelp_enc_config *)audio->enc_cfg; + cfg_32.cdma_rate = enc_cfg->cdma_rate; + cfg_32.min_bit_rate = enc_cfg->min_bit_rate; + cfg_32.max_bit_rate = enc_cfg->max_bit_rate; + if (copy_to_user((void *)arg, &cfg_32, + sizeof(cfg_32))) { + pr_err("%s: copy_to_user for AUDIO_GET_QCELP_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; +} + break; + } + case AUDIO_SET_QCELP_ENC_CONFIG_32: { + struct msm_audio_qcelp_enc_config32 cfg_32; + struct msm_audio_qcelp_enc_config cfg; + if (copy_from_user(&cfg_32, (void *) arg, + sizeof(cfg_32))) { + pr_err("%s: copy_from_user for AUDIO_SET_QCELP_ENC_CONFIG_32 failed\n", + __func__); + rc = -EFAULT; + break; + } + cfg.cdma_rate = cfg_32.cdma_rate; + cfg.min_bit_rate = cfg_32.min_bit_rate; + cfg.max_bit_rate = cfg_32.max_bit_rate; + cmd = AUDIO_SET_QCELP_ENC_CONFIG; + rc = qcelp_in_ioctl_shared(file, cmd, (unsigned long)&cfg); + if (rc) + pr_err("%s:AUDIO_SET_QCELP_ENC_CONFIG failed. 
rc= %d\n", + __func__, rc); + break; + } + default: + pr_err("%s: Unknown ioctl cmd = %d", __func__, cmd); + rc = -EINVAL; + } + return rc; +} +#else +#define qcelp_in_compat_ioctl NULL +#endif + +static int qcelp_in_open(struct inode *inode, struct file *file) +{ + struct q6audio_in *audio = NULL; + struct msm_audio_qcelp_enc_config *enc_cfg; + int rc = 0; + + audio = kzalloc(sizeof(struct q6audio_in), GFP_KERNEL); + + if (audio == NULL) { + pr_err("%s: Could not allocate memory for qcelp driver\n", + __func__); + return -ENOMEM; + } + /* Allocate memory for encoder config param */ + audio->enc_cfg = kzalloc(sizeof(struct msm_audio_qcelp_enc_config), + GFP_KERNEL); + if (audio->enc_cfg == NULL) { + pr_err("%s:session id %d: Could not allocate memory for aac config param\n", + __func__, audio->ac->session); + kfree(audio); + return -ENOMEM; + } + enc_cfg = audio->enc_cfg; + + mutex_init(&audio->lock); + mutex_init(&audio->read_lock); + mutex_init(&audio->write_lock); + spin_lock_init(&audio->dsp_lock); + init_waitqueue_head(&audio->read_wait); + init_waitqueue_head(&audio->write_wait); + + /* Settings will be re-config at AUDIO_SET_CONFIG, + * but at least we need to have initial config + */ + audio->str_cfg.buffer_size = FRAME_SIZE; + audio->str_cfg.buffer_count = FRAME_NUM; + audio->min_frame_size = 35; + audio->max_frames_per_buf = 10; + audio->pcm_cfg.buffer_size = PCM_BUF_SIZE; + audio->pcm_cfg.buffer_count = PCM_BUF_COUNT; + enc_cfg->min_bit_rate = 4; + enc_cfg->max_bit_rate = 4; + audio->pcm_cfg.channel_count = 1; + audio->pcm_cfg.sample_rate = 8000; + audio->buf_cfg.meta_info_enable = 0x01; + audio->buf_cfg.frames_per_buf = 0x01; + audio->event_abort = 0; + + audio->ac = q6asm_audio_client_alloc((app_cb)q6asm_in_cb, + (void *)audio); + + if (!audio->ac) { + pr_err("%s: Could not allocate memory for audio client\n", + __func__); + kfree(audio->enc_cfg); + kfree(audio); + return -ENOMEM; + } + + /* open qcelp encoder in T/NT mode */ + if ((file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = NON_TUNNEL_MODE; + rc = q6asm_open_read_write(audio->ac, FORMAT_V13K, + FORMAT_LINEAR_PCM); + if (rc < 0) { + pr_err("%s:session id %d: NT mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: NT mode encoder success\n", __func__, + audio->ac->session); + } else if (!(file->f_mode & FMODE_WRITE) && + (file->f_mode & FMODE_READ)) { + audio->feedback = TUNNEL_MODE; + rc = q6asm_open_read(audio->ac, FORMAT_V13K); + if (rc < 0) { + pr_err("%s:session id %d: T mode Open failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + /* register for tx overflow (valid for tunnel mode only) */ + rc = q6asm_reg_tx_overflow(audio->ac, 0x01); + if (rc < 0) { + pr_err("%s:session id %d: TX Overflow registration failed rc=%d\n", + __func__, audio->ac->session, rc); + rc = -ENODEV; + goto fail; + } + pr_info("%s:session id %d: T mode encoder success\n", __func__, + audio->ac->session); + } else { + pr_err("%s:session id %d: Unexpected mode\n", __func__, + audio->ac->session); + rc = -EACCES; + goto fail; + } + + audio->opened = 1; + audio->reset_event = false; + atomic_set(&audio->in_count, PCM_BUF_COUNT); + atomic_set(&audio->out_count, 0x00); + audio->enc_compat_ioctl = qcelp_in_compat_ioctl; + audio->enc_ioctl = qcelp_in_ioctl; + file->private_data = audio; + + pr_info("%s:session id %d: success\n", __func__, audio->ac->session); + return 0; +fail: + 
q6asm_audio_client_free(audio->ac); + kfree(audio->enc_cfg); + kfree(audio); + return rc; +} + +static const struct file_operations audio_in_fops = { + .owner = THIS_MODULE, + .open = qcelp_in_open, + .release = audio_in_release, + .read = audio_in_read, + .write = audio_in_write, + .unlocked_ioctl = audio_in_ioctl, + .compat_ioctl = audio_in_compat_ioctl +}; + +struct miscdevice audio_qcelp_in_misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "msm_qcelp_in", + .fops = &audio_in_fops, +}; + +static int __init qcelp_in_init(void) +{ + return misc_register(&audio_qcelp_in_misc); +} + +device_initcall(qcelp_in_init); diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/Makefile b/drivers/misc/qcom/qdsp6v2/ultrasound/Makefile new file mode 100644 index 000000000000..41f614aa4eb3 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/Makefile @@ -0,0 +1,2 @@ +ccflags-y := -I$(src)/.. +obj-$(CONFIG_MSM_ULTRASOUND) += usf.o usfcdev.o q6usm.o diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c b/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c new file mode 100644 index 000000000000..334e705ca8f1 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.c @@ -0,0 +1,1468 @@ +/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/mutex.h> +#include <linux/wait.h> +#include <linux/msm_audio_ion.h> +#include <linux/sched.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/msm_audio.h> +#include <sound/apr_audio-v2.h> +#include <linux/qdsp6v2/apr_us.h> +#include "q6usm.h" + +#define ADSP_MEMORY_MAP_SHMEM8_4K_POOL 3 + +#define MEM_4K_OFFSET 4095 +#define MEM_4K_MASK 0xfffff000 + +#define USM_SESSION_MAX 0x02 /* aDSP:USM limit */ + +#define READDONE_IDX_STATUS 0 + +#define WRITEDONE_IDX_STATUS 0 + +/* Standard timeout in the asynchronous ops */ +#define Q6USM_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */ + +static DEFINE_MUTEX(session_lock); + +static struct us_client *session[USM_SESSION_MAX]; +static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv); +static int32_t q6usm_callback(struct apr_client_data *data, void *priv); +static void q6usm_add_hdr(struct us_client *usc, struct apr_hdr *hdr, + uint32_t pkt_size, bool cmd_flg); + +struct usm_mmap { + atomic_t ref_cnt; + atomic_t cmd_state; + wait_queue_head_t cmd_wait; + void *apr; + int mem_handle; +}; + +static struct usm_mmap this_mmap; + +static void q6usm_add_mmaphdr(struct apr_hdr *hdr, + uint32_t pkt_size, bool cmd_flg, u32 token) +{ + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(APR_HDR_SIZE), APR_PKT_VER); + hdr->src_port = 0; + hdr->dest_port = 0; + if (cmd_flg) { + hdr->token = token; + atomic_set(&this_mmap.cmd_state, 1); + } + hdr->pkt_size = pkt_size; + return; +} + +static int q6usm_memory_map(phys_addr_t buf_add, int dir, uint32_t bufsz, + uint32_t bufcnt, uint32_t session, uint32_t *mem_handle) +{ + struct usm_cmd_memory_map_region mem_region_map; + int rc = 0; + + if (this_mmap.apr == NULL) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + 
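(Editorial sketch, not part of the patch.) The shared-memory map and unmap commands in this file identify their completion through the APR token: judging from the callers, the session index is packed into the upper byte and the buffer direction (IN/OUT) into the lower byte, and the waiter is released from q6usm_mmapcallback() once cmd_state drops back to zero. A hedged sketch of the assumed token packing:

#include <stdint.h>

/* Pack session and direction the way the map/unmap callers do,
 * e.g. session 1, OUT (1) -> 0x101. */
static uint32_t usm_mmap_token(uint32_t session, uint32_t dir)
{
        return (session << 8) | dir;
}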
q6usm_add_mmaphdr(&mem_region_map.hdr, + sizeof(struct usm_cmd_memory_map_region), true, + ((session << 8) | dir)); + + mem_region_map.hdr.opcode = USM_CMD_SHARED_MEM_MAP_REGION; + mem_region_map.mempool_id = ADSP_MEMORY_MAP_SHMEM8_4K_POOL; + + mem_region_map.num_regions = 1; + mem_region_map.flags = 0; + + mem_region_map.shm_addr_lsw = lower_32_bits(buf_add); + mem_region_map.shm_addr_msw = + msm_audio_populate_upper_32_bits(buf_add); + mem_region_map.mem_size_bytes = bufsz * bufcnt; + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_region_map); + if (rc < 0) { + pr_err("%s: mem_map op[0x%x]rc[%d]\n", + __func__, mem_region_map.hdr.opcode, rc); + rc = -EINVAL; + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: timeout. waited for memory_map\n", __func__); + } else { + *mem_handle = this_mmap.mem_handle; + rc = 0; + } +fail_cmd: + return rc; +} + +int q6usm_memory_unmap(phys_addr_t buf_add, int dir, uint32_t session, + uint32_t mem_handle) +{ + struct usm_cmd_memory_unmap_region mem_unmap; + int rc = 0; + + if (this_mmap.apr == NULL) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + q6usm_add_mmaphdr(&mem_unmap.hdr, + sizeof(struct usm_cmd_memory_unmap_region), true, + ((session << 8) | dir)); + mem_unmap.hdr.opcode = USM_CMD_SHARED_MEM_UNMAP_REGION; + mem_unmap.mem_map_handle = mem_handle; + + rc = apr_send_pkt(this_mmap.apr, (uint32_t *) &mem_unmap); + if (rc < 0) { + pr_err("%s: mem_unmap op[0x%x] rc[%d]\n", + __func__, mem_unmap.hdr.opcode, rc); + goto fail_cmd; + } + + rc = wait_event_timeout(this_mmap.cmd_wait, + (atomic_read(&this_mmap.cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: timeout. 
waited for memory_unmap\n", __func__); + } else + rc = 0; +fail_cmd: + return rc; +} + +static int q6usm_session_alloc(struct us_client *usc) +{ + int ind = 0; + + mutex_lock(&session_lock); + for (ind = 0; ind < USM_SESSION_MAX; ++ind) { + if (!session[ind]) { + session[ind] = usc; + mutex_unlock(&session_lock); + ++ind; /* session id: 0 reserved */ + pr_debug("%s: session[%d] was allocated\n", + __func__, ind); + return ind; + } + } + mutex_unlock(&session_lock); + return -ENOMEM; +} + +static void q6usm_session_free(struct us_client *usc) +{ + /* Session index was incremented during allocation */ + uint16_t ind = (uint16_t)usc->session - 1; + + pr_debug("%s: to free session[%d]\n", __func__, ind); + if (ind < USM_SESSION_MAX) { + mutex_lock(&session_lock); + session[ind] = NULL; + mutex_unlock(&session_lock); + } +} + +static int q6usm_us_client_buf_free(unsigned int dir, + struct us_client *usc) +{ + struct us_port_data *port; + int rc = 0; + + if ((usc == NULL) || + ((dir != IN) && (dir != OUT))) + return -EINVAL; + + mutex_lock(&usc->cmd_lock); + port = &usc->port[dir]; + if (port == NULL) { + mutex_unlock(&usc->cmd_lock); + return -EINVAL; + } + + if (port->data == NULL) { + mutex_unlock(&usc->cmd_lock); + return 0; + } + + rc = q6usm_memory_unmap(port->phys, dir, usc->session, + *((uint32_t *)port->ext)); + pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__, + (void *)port->data, (u64)port->phys, (void *)&port->phys); + + msm_audio_ion_free(port->client, port->handle); + + port->data = NULL; + port->phys = 0; + port->buf_size = 0; + port->buf_cnt = 0; + port->client = NULL; + port->handle = NULL; + + mutex_unlock(&usc->cmd_lock); + return rc; +} + +int q6usm_us_param_buf_free(unsigned int dir, + struct us_client *usc) +{ + struct us_port_data *port; + int rc = 0; + + if ((usc == NULL) || + ((dir != IN) && (dir != OUT))) + return -EINVAL; + + mutex_lock(&usc->cmd_lock); + port = &usc->port[dir]; + if (port == NULL) { + mutex_unlock(&usc->cmd_lock); + return -EINVAL; + } + + if (port->param_buf == NULL) { + mutex_unlock(&usc->cmd_lock); + return 0; + } + + rc = q6usm_memory_unmap(port->param_phys, dir, usc->session, + *((uint32_t *)port->param_buf_mem_handle)); + pr_debug("%s: data[%pK]phys[%llx][%pK]\n", __func__, + (void *)port->param_buf, (u64)port->param_phys, + (void *)&port->param_phys); + + msm_audio_ion_free(port->param_client, port->param_handle); + + port->param_buf = NULL; + port->param_phys = 0; + port->param_buf_size = 0; + port->param_client = NULL; + port->param_handle = NULL; + + mutex_unlock(&usc->cmd_lock); + return rc; +} + +void q6usm_us_client_free(struct us_client *usc) +{ + int loopcnt = 0; + struct us_port_data *port; + uint32_t *p_mem_handle = NULL; + + if ((usc == NULL) || + !(usc->session)) + return; + + for (loopcnt = 0; loopcnt <= OUT; ++loopcnt) { + port = &usc->port[loopcnt]; + if (port->data == NULL) + continue; + pr_debug("%s: loopcnt = %d\n", __func__, loopcnt); + q6usm_us_client_buf_free(loopcnt, usc); + q6usm_us_param_buf_free(loopcnt, usc); + } + q6usm_session_free(usc); + apr_deregister(usc->apr); + + pr_debug("%s: APR De-Register\n", __func__); + + if (atomic_read(&this_mmap.ref_cnt) <= 0) { + pr_err("%s: APR Common Port Already Closed\n", __func__); + goto done; + } + + atomic_dec(&this_mmap.ref_cnt); + if (atomic_read(&this_mmap.ref_cnt) == 0) { + apr_deregister(this_mmap.apr); + pr_debug("%s: APR De-Register common port\n", __func__); + } + +done: + p_mem_handle = (uint32_t *)usc->port[IN].ext; + kfree(p_mem_handle); + kfree(usc); + 
pr_debug("%s:\n", __func__); + return; +} + +struct us_client *q6usm_us_client_alloc( + void (*cb)(uint32_t, uint32_t, uint32_t *, void *), + void *priv) +{ + struct us_client *usc; + uint32_t *p_mem_handle = NULL; + int n; + int lcnt = 0; + + usc = kzalloc(sizeof(struct us_client), GFP_KERNEL); + if (usc == NULL) { + pr_err("%s: us_client allocation failed\n", __func__); + return NULL; + } + p_mem_handle = kzalloc(sizeof(uint32_t) * 4, GFP_KERNEL); + if (p_mem_handle == NULL) { + pr_err("%s: p_mem_handle allocation failed\n", __func__); + kfree(usc); + return NULL; + } + + n = q6usm_session_alloc(usc); + if (n <= 0) + goto fail_session; + usc->session = n; + usc->cb = cb; + usc->priv = priv; + usc->apr = apr_register("ADSP", "USM", \ + (apr_fn)q6usm_callback,\ + ((usc->session) << 8 | 0x0001),\ + usc); + + if (usc->apr == NULL) { + pr_err("%s: Registration with APR failed\n", __func__); + goto fail; + } + pr_debug("%s: Registering the common port with APR\n", __func__); + if (atomic_read(&this_mmap.ref_cnt) == 0) { + this_mmap.apr = apr_register("ADSP", "USM", + (apr_fn)q6usm_mmapcallback, + 0x0FFFFFFFF, &this_mmap); + if (this_mmap.apr == NULL) { + pr_err("%s: USM port registration failed\n", + __func__); + goto fail; + } + } + + atomic_inc(&this_mmap.ref_cnt); + init_waitqueue_head(&usc->cmd_wait); + mutex_init(&usc->cmd_lock); + for (lcnt = 0; lcnt <= OUT; ++lcnt) { + mutex_init(&usc->port[lcnt].lock); + spin_lock_init(&usc->port[lcnt].dsp_lock); + usc->port[lcnt].ext = (void *)p_mem_handle++; + usc->port[lcnt].param_buf_mem_handle = (void *)p_mem_handle++; + pr_err("%s: usc->port[%d].ext=%pK;\n", + __func__, lcnt, usc->port[lcnt].ext); + } + atomic_set(&usc->cmd_state, 0); + + return usc; +fail: + kfree(p_mem_handle); + q6usm_us_client_free(usc); + return NULL; +fail_session: + kfree(p_mem_handle); + kfree(usc); + return NULL; +} + +int q6usm_us_client_buf_alloc(unsigned int dir, + struct us_client *usc, + unsigned int bufsz, + unsigned int bufcnt) +{ + int rc = 0; + struct us_port_data *port = NULL; + unsigned int size = bufsz*bufcnt; + size_t len; + + if ((usc == NULL) || + ((dir != IN) && (dir != OUT)) || (size == 0) || + (usc->session <= 0 || usc->session > USM_SESSION_MAX)) { + pr_err("%s: wrong parameters: size=%d; bufcnt=%d\n", + __func__, size, bufcnt); + return -EINVAL; + } + + mutex_lock(&usc->cmd_lock); + + port = &usc->port[dir]; + + /* The size to allocate should be multiple of 4K bytes */ + size = PAGE_ALIGN(size); + + rc = msm_audio_ion_alloc("ultrasound_client", + &port->client, &port->handle, + size, &port->phys, + &len, &port->data); + + if (rc) { + pr_err("%s: US ION allocation failed, rc = %d\n", + __func__, rc); + mutex_unlock(&usc->cmd_lock); + return -ENOMEM; + } + + port->buf_cnt = bufcnt; + port->buf_size = bufsz; + pr_debug("%s: data[%pK]; phys[%llx]; [%pK]\n", __func__, + (void *)port->data, + (u64)port->phys, + (void *)&port->phys); + + rc = q6usm_memory_map(port->phys, dir, size, 1, usc->session, + (uint32_t *)port->ext); + if (rc < 0) { + pr_err("%s: CMD Memory_map failed\n", __func__); + mutex_unlock(&usc->cmd_lock); + q6usm_us_client_buf_free(dir, usc); + q6usm_us_param_buf_free(dir, usc); + } else { + mutex_unlock(&usc->cmd_lock); + rc = 0; + } + + return rc; +} + +int q6usm_us_param_buf_alloc(unsigned int dir, + struct us_client *usc, + unsigned int bufsz) +{ + int rc = 0; + struct us_port_data *port = NULL; + unsigned int size = bufsz; + size_t len; + + if ((usc == NULL) || + ((dir != IN) && (dir != OUT)) || + (usc->session <= 0 || usc->session > 
USM_SESSION_MAX)) { + pr_err("%s: wrong parameters: direction=%d, bufsz=%d\n", + __func__, dir, bufsz); + return -EINVAL; + } + + mutex_lock(&usc->cmd_lock); + + port = &usc->port[dir]; + + if (bufsz == 0) { + pr_debug("%s: bufsz=0, get/set param commands are forbidden\n", + __func__); + port->param_buf = NULL; + mutex_unlock(&usc->cmd_lock); + return rc; + } + + /* The size to allocate should be multiple of 4K bytes */ + size = PAGE_ALIGN(size); + + rc = msm_audio_ion_alloc("ultrasound_client", + &port->param_client, &port->param_handle, + size, &port->param_phys, + &len, &port->param_buf); + + if (rc) { + pr_err("%s: US ION allocation failed, rc = %d\n", + __func__, rc); + mutex_unlock(&usc->cmd_lock); + return -ENOMEM; + } + + port->param_buf_size = bufsz; + pr_debug("%s: param_buf[%pK]; param_phys[%llx]; [%pK]\n", __func__, + (void *)port->param_buf, + (u64)port->param_phys, + (void *)&port->param_phys); + + rc = q6usm_memory_map(port->param_phys, (IN | OUT), size, 1, + usc->session, (uint32_t *)port->param_buf_mem_handle); + if (rc < 0) { + pr_err("%s: CMD Memory_map failed\n", __func__); + mutex_unlock(&usc->cmd_lock); + q6usm_us_client_buf_free(dir, usc); + q6usm_us_param_buf_free(dir, usc); + } else { + mutex_unlock(&usc->cmd_lock); + rc = 0; + } + + return rc; +} + +static int32_t q6usm_mmapcallback(struct apr_client_data *data, void *priv) +{ + uint32_t token; + uint32_t *payload = data->payload; + + pr_debug("%s: ptr0[0x%x]; ptr1[0x%x]; opcode[0x%x]\n", + __func__, payload[0], payload[1], data->opcode); + pr_debug("%s: token[0x%x]; payload_size[%d]; src[%d]; dest[%d];\n", + __func__, data->token, data->payload_size, + data->src_port, data->dest_port); + + if (data->opcode == APR_BASIC_RSP_RESULT) { + /* status field check */ + if (payload[1]) { + pr_err("%s: wrong response[%d] on cmd [%d]\n", + __func__, payload[1], payload[0]); + } else { + token = data->token; + switch (payload[0]) { + case USM_CMD_SHARED_MEM_UNMAP_REGION: + if (atomic_read(&this_mmap.cmd_state)) { + atomic_set(&this_mmap.cmd_state, 0); + wake_up(&this_mmap.cmd_wait); + } + case USM_CMD_SHARED_MEM_MAP_REGION: + /* For MEM_MAP, additional answer is waited, */ + /* therfore, no wake-up here */ + pr_debug("%s: cmd[0x%x]; result[0x%x]\n", + __func__, payload[0], payload[1]); + break; + default: + pr_debug("%s: wrong command[0x%x]\n", + __func__, payload[0]); + break; + } + } + } else { + if (data->opcode == USM_CMDRSP_SHARED_MEM_MAP_REGION) { + this_mmap.mem_handle = payload[0]; + pr_debug("%s: memory map handle = 0x%x", + __func__, payload[0]); + if (atomic_read(&this_mmap.cmd_state)) { + atomic_set(&this_mmap.cmd_state, 0); + wake_up(&this_mmap.cmd_wait); + } + } + } + return 0; +} + + +static int32_t q6usm_callback(struct apr_client_data *data, void *priv) +{ + struct us_client *usc = (struct us_client *)priv; + unsigned long dsp_flags; + uint32_t *payload = data->payload; + uint32_t token = data->token; + uint32_t opcode = Q6USM_EVENT_UNDEF; + + if (usc == NULL) { + pr_err("%s: client info is NULL\n", __func__); + return -EINVAL; + } + + if (data->opcode == APR_BASIC_RSP_RESULT) { + /* status field check */ + if (payload[1]) { + pr_err("%s: wrong response[%d] on cmd [%d]\n", + __func__, payload[1], payload[0]); + if (usc->cb) + usc->cb(data->opcode, token, + (uint32_t *)data->payload, usc->priv); + } else { + switch (payload[0]) { + case USM_SESSION_CMD_RUN: + case USM_STREAM_CMD_CLOSE: + if (token != usc->session) { + pr_err("%s: wrong token[%d]", + __func__, token); + break; + } + case 
USM_STREAM_CMD_OPEN_READ: + case USM_STREAM_CMD_OPEN_WRITE: + case USM_STREAM_CMD_SET_ENC_PARAM: + case USM_DATA_CMD_MEDIA_FORMAT_UPDATE: + case USM_SESSION_CMD_SIGNAL_DETECT_MODE: + case USM_STREAM_CMD_SET_PARAM: + case USM_STREAM_CMD_GET_PARAM: + if (atomic_read(&usc->cmd_state)) { + atomic_set(&usc->cmd_state, 0); + wake_up(&usc->cmd_wait); + } + if (usc->cb) + usc->cb(data->opcode, token, + (uint32_t *)data->payload, + usc->priv); + break; + default: + break; + } + } + return 0; + } + + switch (data->opcode) { + case RESET_EVENTS: { + pr_err("%s: Reset event is received: %d %d\n", + __func__, + data->reset_event, + data->reset_proc); + + opcode = RESET_EVENTS; + + apr_reset(this_mmap.apr); + this_mmap.apr = NULL; + + apr_reset(usc->apr); + usc->apr = NULL; + + break; + } + + + case USM_DATA_EVENT_READ_DONE: { + struct us_port_data *port = &usc->port[OUT]; + + opcode = Q6USM_EVENT_READ_DONE; + spin_lock_irqsave(&port->dsp_lock, dsp_flags); + if (payload[READDONE_IDX_STATUS]) { + pr_err("%s: wrong READDONE[%d]; token[%d]\n", + __func__, + payload[READDONE_IDX_STATUS], + token); + token = USM_WRONG_TOKEN; + spin_unlock_irqrestore(&port->dsp_lock, + dsp_flags); + break; + } + + if (port->expected_token != token) { + u32 cpu_buf = port->cpu_buf; + pr_err("%s: expected[%d] != token[%d]\n", + __func__, port->expected_token, token); + pr_debug("%s: dsp_buf=%d; cpu_buf=%d;\n", + __func__, port->dsp_buf, cpu_buf); + + token = USM_WRONG_TOKEN; + /* To prevent data handle continiue */ + port->expected_token = USM_WRONG_TOKEN; + spin_unlock_irqrestore(&port->dsp_lock, + dsp_flags); + break; + } /* port->expected_token != data->token */ + + port->expected_token = token + 1; + if (port->expected_token == port->buf_cnt) + port->expected_token = 0; + + /* gap support */ + if (port->expected_token != port->cpu_buf) { + port->dsp_buf = port->expected_token; + token = port->dsp_buf; /* for callback */ + } else + port->dsp_buf = token; + + spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); + break; + } /* case USM_DATA_EVENT_READ_DONE */ + + case USM_DATA_EVENT_WRITE_DONE: { + struct us_port_data *port = &usc->port[IN]; + + opcode = Q6USM_EVENT_WRITE_DONE; + if (payload[WRITEDONE_IDX_STATUS]) { + pr_err("%s: wrong WRITEDONE_IDX_STATUS[%d]\n", + __func__, + payload[WRITEDONE_IDX_STATUS]); + break; + } + + spin_lock_irqsave(&port->dsp_lock, dsp_flags); + port->dsp_buf = token + 1; + if (port->dsp_buf == port->buf_cnt) + port->dsp_buf = 0; + spin_unlock_irqrestore(&port->dsp_lock, dsp_flags); + + break; + } /* case USM_DATA_EVENT_WRITE_DONE */ + + case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT: { + pr_debug("%s: US detect result: result=%d", + __func__, + payload[0]); + opcode = Q6USM_EVENT_SIGNAL_DETECT_RESULT; + + break; + } /* case USM_SESSION_EVENT_SIGNAL_DETECT_RESULT */ + + default: + return 0; + + } /* switch */ + + if (usc->cb) + usc->cb(opcode, token, + data->payload, usc->priv); + + return 0; +} + +uint32_t q6usm_get_virtual_address(int dir, + struct us_client *usc, + struct vm_area_struct *vms) +{ + uint32_t ret = 0xffffffff; + + if (vms && (usc != NULL) && ((dir == IN) || (dir == OUT))) { + struct us_port_data *port = &usc->port[dir]; + int size = PAGE_ALIGN(port->buf_size * port->buf_cnt); + struct audio_buffer ab; + + ab.phys = port->phys; + ab.data = port->data; + ab.used = 1; + ab.size = size; + ab.actual_size = size; + ab.handle = port->handle; + ab.client = port->client; + + ret = msm_audio_ion_mmap(&ab, vms); + + } + return ret; +} + +static void q6usm_add_hdr(struct us_client *usc, struct 
apr_hdr *hdr, + uint32_t pkt_size, bool cmd_flg) +{ + mutex_lock(&usc->cmd_lock); + hdr->hdr_field = APR_HDR_FIELD(APR_MSG_TYPE_SEQ_CMD, \ + APR_HDR_LEN(sizeof(struct apr_hdr)),\ + APR_PKT_VER); + hdr->src_svc = ((struct apr_svc *)usc->apr)->id; + hdr->src_domain = APR_DOMAIN_APPS; + hdr->dest_svc = APR_SVC_USM; + hdr->dest_domain = APR_DOMAIN_ADSP; + hdr->src_port = (usc->session << 8) | 0x0001; + hdr->dest_port = (usc->session << 8) | 0x0001; + if (cmd_flg) { + hdr->token = usc->session; + atomic_set(&usc->cmd_state, 1); + } + hdr->pkt_size = pkt_size; + mutex_unlock(&usc->cmd_lock); + return; +} + +static uint32_t q6usm_ext2int_format(uint32_t ext_format) +{ + uint32_t int_format = INVALID_FORMAT; + switch (ext_format) { + case FORMAT_USPS_EPOS: + int_format = US_POINT_EPOS_FORMAT_V2; + break; + case FORMAT_USRAW: + int_format = US_RAW_FORMAT_V2; + break; + case FORMAT_USPROX: + int_format = US_PROX_FORMAT_V4; + break; + case FORMAT_USGES_SYNC: + int_format = US_GES_SYNC_FORMAT; + break; + case FORMAT_USRAW_SYNC: + int_format = US_RAW_SYNC_FORMAT; + break; + default: + pr_err("%s: Invalid format[%d]\n", __func__, ext_format); + break; + } + + return int_format; +} + +int q6usm_open_read(struct us_client *usc, + uint32_t format) +{ + uint32_t int_format = INVALID_FORMAT; + int rc = 0x00; + struct usm_stream_cmd_open_read open; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: client or its apr is NULL\n", __func__); + return -EINVAL; + } + + pr_debug("%s: session[%d]", __func__, usc->session); + + q6usm_add_hdr(usc, &open.hdr, sizeof(open), true); + open.hdr.opcode = USM_STREAM_CMD_OPEN_READ; + open.src_endpoint = 0; /* AFE */ + open.pre_proc_top = 0; /* No preprocessing required */ + + int_format = q6usm_ext2int_format(format); + if (int_format == INVALID_FORMAT) + return -EINVAL; + + open.uMode = STREAM_PRIORITY_NORMAL; + open.format = int_format; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &open); + if (rc < 0) { + pr_err("%s: open failed op[0x%x]rc[%d]\n", + __func__, open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: timeout, waited for OPEN_READ rc[%d]\n", + __func__, rc); + goto fail_cmd; + } else + rc = 0; +fail_cmd: + return rc; +} + + +int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg) +{ + uint32_t int_format = INVALID_FORMAT; + struct usm_stream_cmd_encdec_cfg_blk enc_cfg_obj; + struct usm_stream_cmd_encdec_cfg_blk *enc_cfg = &enc_cfg_obj; + int rc = 0; + uint32_t total_cfg_size = + sizeof(struct usm_stream_cmd_encdec_cfg_blk); + uint32_t round_params_size = 0; + uint8_t is_allocated = 0; + + + if ((usc == NULL) || (us_cfg == NULL)) { + pr_err("%s: wrong input", __func__); + return -EINVAL; + } + + int_format = q6usm_ext2int_format(us_cfg->format_id); + if (int_format == INVALID_FORMAT) { + pr_err("%s: wrong input format[%d]", + __func__, us_cfg->format_id); + return -EINVAL; + } + + /* Transparent configuration data is after enc_cfg */ + /* Integer number of u32s is requred */ + round_params_size = ((us_cfg->params_size + 3)/4) * 4; + if (round_params_size > USM_MAX_CFG_DATA_SIZE) { + /* Dynamic allocated encdec_cfg_blk is required */ + /* static part use */ + round_params_size -= USM_MAX_CFG_DATA_SIZE; + total_cfg_size += round_params_size; + enc_cfg = kzalloc(total_cfg_size, GFP_KERNEL); + if (enc_cfg == NULL) { + pr_err("%s: enc_cfg[%d] allocation failed\n", + __func__, total_cfg_size); + return 
-ENOMEM; + } + is_allocated = 1; + } else + round_params_size = 0; + + q6usm_add_hdr(usc, &enc_cfg->hdr, total_cfg_size, true); + + enc_cfg->hdr.opcode = USM_STREAM_CMD_SET_ENC_PARAM; + enc_cfg->param_id = USM_PARAM_ID_ENCDEC_ENC_CFG_BLK; + enc_cfg->param_size = sizeof(struct usm_encode_cfg_blk)+ + round_params_size; + enc_cfg->enc_blk.frames_per_buf = 1; + enc_cfg->enc_blk.format_id = int_format; + enc_cfg->enc_blk.cfg_size = sizeof(struct usm_cfg_common)+ + USM_MAX_CFG_DATA_SIZE + + round_params_size; + memcpy(&(enc_cfg->enc_blk.cfg_common), &(us_cfg->cfg_common), + sizeof(struct usm_cfg_common)); + + /* Transparent data copy */ + memcpy(enc_cfg->enc_blk.transp_data, us_cfg->params, + us_cfg->params_size); + pr_debug("%s: cfg_size[%d], params_size[%d]\n", + __func__, + enc_cfg->enc_blk.cfg_size, + us_cfg->params_size); + pr_debug("%s: params[%d,%d,%d,%d, %d,%d,%d,%d]\n", + __func__, + enc_cfg->enc_blk.transp_data[0], + enc_cfg->enc_blk.transp_data[1], + enc_cfg->enc_blk.transp_data[2], + enc_cfg->enc_blk.transp_data[3], + enc_cfg->enc_blk.transp_data[4], + enc_cfg->enc_blk.transp_data[5], + enc_cfg->enc_blk.transp_data[6], + enc_cfg->enc_blk.transp_data[7] + ); + pr_debug("%s: srate:%d, ch=%d, bps= %d;\n", + __func__, enc_cfg->enc_blk.cfg_common.sample_rate, + enc_cfg->enc_blk.cfg_common.ch_cfg, + enc_cfg->enc_blk.cfg_common.bits_per_sample); + pr_debug("dmap:[0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x]; dev_id=0x%x\n", + enc_cfg->enc_blk.cfg_common.data_map[0], + enc_cfg->enc_blk.cfg_common.data_map[1], + enc_cfg->enc_blk.cfg_common.data_map[2], + enc_cfg->enc_blk.cfg_common.data_map[3], + enc_cfg->enc_blk.cfg_common.data_map[4], + enc_cfg->enc_blk.cfg_common.data_map[5], + enc_cfg->enc_blk.cfg_common.data_map[6], + enc_cfg->enc_blk.cfg_common.data_map[7], + enc_cfg->enc_blk.cfg_common.dev_id); + + rc = apr_send_pkt(usc->apr, (uint32_t *) enc_cfg); + if (rc < 0) { + pr_err("%s:Comamnd open failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: timeout opcode[0x%x]\n", + __func__, enc_cfg->hdr.opcode); + } else + rc = 0; + +fail_cmd: + if (is_allocated == 1) + kfree(enc_cfg); + + return rc; +} + +int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg) +{ + + uint32_t int_format = INVALID_FORMAT; + struct usm_stream_media_format_update dec_cfg_obj; + struct usm_stream_media_format_update *dec_cfg = &dec_cfg_obj; + + int rc = 0; + uint32_t total_cfg_size = sizeof(struct usm_stream_media_format_update); + uint32_t round_params_size = 0; + uint8_t is_allocated = 0; + + + if ((usc == NULL) || (us_cfg == NULL)) { + pr_err("%s: wrong input", __func__); + return -EINVAL; + } + + int_format = q6usm_ext2int_format(us_cfg->format_id); + if (int_format == INVALID_FORMAT) { + pr_err("%s: wrong input format[%d]", + __func__, us_cfg->format_id); + return -EINVAL; + } + + /* Transparent configuration data is after enc_cfg */ + /* Integer number of u32s is requred */ + round_params_size = ((us_cfg->params_size + 3)/4) * 4; + if (round_params_size > USM_MAX_CFG_DATA_SIZE) { + /* Dynamic allocated encdec_cfg_blk is required */ + /* static part use */ + round_params_size -= USM_MAX_CFG_DATA_SIZE; + total_cfg_size += round_params_size; + dec_cfg = kzalloc(total_cfg_size, GFP_KERNEL); + if (dec_cfg == NULL) { + pr_err("%s:dec_cfg[%d] allocation failed\n", + __func__, total_cfg_size); + return -ENOMEM; + } + is_allocated = 1; + } else { /* 
static transp_data is enough */ + round_params_size = 0; + } + + q6usm_add_hdr(usc, &dec_cfg->hdr, total_cfg_size, true); + + dec_cfg->hdr.opcode = USM_DATA_CMD_MEDIA_FORMAT_UPDATE; + dec_cfg->format_id = int_format; + dec_cfg->cfg_size = sizeof(struct usm_cfg_common) + + USM_MAX_CFG_DATA_SIZE + + round_params_size; + memcpy(&(dec_cfg->cfg_common), &(us_cfg->cfg_common), + sizeof(struct usm_cfg_common)); + /* Transparent data copy */ + memcpy(dec_cfg->transp_data, us_cfg->params, us_cfg->params_size); + pr_debug("%s: cfg_size[%d], params_size[%d]; parambytes[%d,%d,%d,%d]\n", + __func__, + dec_cfg->cfg_size, + us_cfg->params_size, + dec_cfg->transp_data[0], + dec_cfg->transp_data[1], + dec_cfg->transp_data[2], + dec_cfg->transp_data[3] + ); + + rc = apr_send_pkt(usc->apr, (uint32_t *) dec_cfg); + if (rc < 0) { + pr_err("%s:Comamnd open failed\n", __func__); + rc = -EINVAL; + goto fail_cmd; + } + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: timeout opcode[0x%x]\n", + __func__, dec_cfg->hdr.opcode); + } else + rc = 0; + +fail_cmd: + if (is_allocated == 1) + kfree(dec_cfg); + + return rc; +} + +int q6usm_open_write(struct us_client *usc, + uint32_t format) +{ + int rc = 0; + uint32_t int_format = INVALID_FORMAT; + struct usm_stream_cmd_open_write open; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + + pr_debug("%s: session[%d]", __func__, usc->session); + + q6usm_add_hdr(usc, &open.hdr, sizeof(open), true); + open.hdr.opcode = USM_STREAM_CMD_OPEN_WRITE; + + int_format = q6usm_ext2int_format(format); + if (int_format == INVALID_FORMAT) { + pr_err("%s: wrong format[%d]", __func__, format); + return -EINVAL; + } + + open.format = int_format; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &open); + if (rc < 0) { + pr_err("%s:open failed op[0x%x]rc[%d]\n", \ + __func__, open.hdr.opcode, rc); + goto fail_cmd; + } + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s:timeout. waited for OPEN_WRITR rc[%d]\n", + __func__, rc); + goto fail_cmd; + } else + rc = 0; + +fail_cmd: + return rc; +} + +int q6usm_run(struct us_client *usc, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts) +{ + struct usm_stream_cmd_run run; + int rc = 0; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + q6usm_add_hdr(usc, &run.hdr, sizeof(run), true); + + run.hdr.opcode = USM_SESSION_CMD_RUN; + run.flags = flags; + run.msw_ts = msw_ts; + run.lsw_ts = lsw_ts; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &run); + if (rc < 0) { + pr_err("%s: Commmand run failed[%d]\n", __func__, rc); + goto fail_cmd; + } + + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: timeout. 
waited for run success rc[%d]\n", + __func__, rc); + } else + rc = 0; + +fail_cmd: + return rc; +} + + + +int q6usm_read(struct us_client *usc, uint32_t read_ind) +{ + struct usm_stream_cmd_read read; + struct us_port_data *port = NULL; + int rc = 0; + u32 read_counter = 0; + u32 loop_ind = 0; + u64 buf_addr = 0; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + port = &usc->port[OUT]; + + if (read_ind > port->buf_cnt) { + pr_err("%s: wrong read_ind[%d]\n", + __func__, read_ind); + return -EINVAL; + } + if (read_ind == port->cpu_buf) { + pr_err("%s: no free region\n", __func__); + return 0; + } + + if (read_ind > port->cpu_buf) { /* 1 range */ + read_counter = read_ind - port->cpu_buf; + } else { /* 2 ranges */ + read_counter = (port->buf_cnt - port->cpu_buf) + read_ind; + } + + q6usm_add_hdr(usc, &read.hdr, sizeof(read), false); + + read.hdr.opcode = USM_DATA_CMD_READ; + read.buf_size = port->buf_size; + buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf); + read.buf_addr_lsw = lower_32_bits(buf_addr); + read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr); + read.mem_map_handle = *((uint32_t *)(port->ext)); + + for (loop_ind = 0; loop_ind < read_counter; ++loop_ind) { + u32 temp_cpu_buf = port->cpu_buf; + + buf_addr = (u64)(port->phys) + + port->buf_size * (port->cpu_buf); + read.buf_addr_lsw = lower_32_bits(buf_addr); + read.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr); + read.seq_id = port->cpu_buf; + read.hdr.token = port->cpu_buf; + read.counter = 1; + + ++(port->cpu_buf); + if (port->cpu_buf == port->buf_cnt) + port->cpu_buf = 0; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &read); + + if (rc < 0) { + port->cpu_buf = temp_cpu_buf; + + pr_err("%s:read op[0x%x]rc[%d]\n", + __func__, read.hdr.opcode, rc); + break; + } else + rc = 0; + } /* bufs loop */ + + return rc; +} + +int q6usm_write(struct us_client *usc, uint32_t write_ind) +{ + int rc = 0; + struct usm_stream_cmd_write cmd_write; + struct us_port_data *port = NULL; + u32 current_dsp_buf = 0; + u64 buf_addr = 0; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + port = &usc->port[IN]; + + current_dsp_buf = port->dsp_buf; + /* free region, caused by new dsp_buf report from DSP, */ + /* can be only extended */ + if (port->cpu_buf >= current_dsp_buf) { + /* 2 -part free region, including empty buffer */ + if ((write_ind <= port->cpu_buf) && + (write_ind > current_dsp_buf)) { + pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n", + __func__, write_ind, + current_dsp_buf, port->cpu_buf); + return -EINVAL; + } + } else { + /* 1 -part free region */ + if ((write_ind <= port->cpu_buf) || + (write_ind > current_dsp_buf)) { + pr_err("%s: wrong w_ind[%d]; d_buf=%d; c_buf=%d\n", + __func__, write_ind, + current_dsp_buf, port->cpu_buf); + return -EINVAL; + } + } + + q6usm_add_hdr(usc, &cmd_write.hdr, sizeof(cmd_write), false); + + cmd_write.hdr.opcode = USM_DATA_CMD_WRITE; + cmd_write.buf_size = port->buf_size; + buf_addr = (u64)(port->phys) + port->buf_size * (port->cpu_buf); + cmd_write.buf_addr_lsw = lower_32_bits(buf_addr); + cmd_write.buf_addr_msw = msm_audio_populate_upper_32_bits(buf_addr); + cmd_write.mem_map_handle = *((uint32_t *)(port->ext)); + cmd_write.res0 = 0; + cmd_write.res1 = 0; + cmd_write.res2 = 0; + + while (port->cpu_buf != write_ind) { + u32 temp_cpu_buf = port->cpu_buf; + + buf_addr = (u64)(port->phys) + + port->buf_size * (port->cpu_buf); + 
cmd_write.buf_addr_lsw = lower_32_bits(buf_addr); + cmd_write.buf_addr_msw = + msm_audio_populate_upper_32_bits(buf_addr); + cmd_write.seq_id = port->cpu_buf; + cmd_write.hdr.token = port->cpu_buf; + + ++(port->cpu_buf); + if (port->cpu_buf == port->buf_cnt) + port->cpu_buf = 0; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_write); + + if (rc < 0) { + port->cpu_buf = temp_cpu_buf; + pr_err("%s:write op[0x%x];rc[%d];cpu_buf[%d]\n", + __func__, cmd_write.hdr.opcode, + rc, port->cpu_buf); + break; + } + + rc = 0; + } + + return rc; +} + +bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region) +{ + struct us_port_data *port = NULL; + u32 cpu_buf = 0; + + if ((usc == NULL) || !free_region) { + pr_err("%s: input data wrong\n", __func__); + return false; + } + port = &usc->port[IN]; + cpu_buf = port->cpu_buf + 1; + if (cpu_buf == port->buf_cnt) + cpu_buf = 0; + + *free_region = port->dsp_buf; + + return cpu_buf == *free_region; +} + +int q6usm_cmd(struct us_client *usc, int cmd) +{ + struct apr_hdr hdr; + int rc = 0; + atomic_t *state; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + q6usm_add_hdr(usc, &hdr, sizeof(hdr), true); + switch (cmd) { + case CMD_CLOSE: + hdr.opcode = USM_STREAM_CMD_CLOSE; + state = &usc->cmd_state; + break; + + default: + pr_err("%s:Invalid format[%d]\n", __func__, cmd); + goto fail_cmd; + } + + rc = apr_send_pkt(usc->apr, (uint32_t *) &hdr); + if (rc < 0) { + pr_err("%s: Command 0x%x failed\n", __func__, hdr.opcode); + goto fail_cmd; + } + rc = wait_event_timeout(usc->cmd_wait, (atomic_read(state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s:timeout. waited for response opcode[0x%x]\n", + __func__, hdr.opcode); + } else + rc = 0; +fail_cmd: + return rc; +} + +int q6usm_set_us_detection(struct us_client *usc, + struct usm_session_cmd_detect_info *detect_info, + uint16_t detect_info_size) +{ + int rc = 0; + + if ((usc == NULL) || + (detect_info_size == 0) || + (detect_info == NULL)) { + pr_err("%s: wrong input: usc=0x%pK, inf_size=%d; info=0x%pK", + __func__, + usc, + detect_info_size, + detect_info); + return -EINVAL; + } + + q6usm_add_hdr(usc, &detect_info->hdr, detect_info_size, true); + + detect_info->hdr.opcode = USM_SESSION_CMD_SIGNAL_DETECT_MODE; + + rc = apr_send_pkt(usc->apr, (uint32_t *)detect_info); + if (rc < 0) { + pr_err("%s:Comamnd signal detect failed\n", __func__); + return -EINVAL; + } + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: CMD_SIGNAL_DETECT_MODE: timeout=%d\n", + __func__, Q6USM_TIMEOUT_JIFFIES); + } else + rc = 0; + + return rc; +} + +int q6usm_set_us_stream_param(int dir, struct us_client *usc, + uint32_t module_id, uint32_t param_id, uint32_t buf_size) +{ + int rc = 0; + struct usm_stream_cmd_set_param cmd_set_param; + struct us_port_data *port = NULL; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + port = &usc->port[dir]; + + q6usm_add_hdr(usc, &cmd_set_param.hdr, sizeof(cmd_set_param), true); + + cmd_set_param.hdr.opcode = USM_STREAM_CMD_SET_PARAM; + cmd_set_param.buf_size = buf_size; + cmd_set_param.buf_addr_msw = + msm_audio_populate_upper_32_bits(port->param_phys); + cmd_set_param.buf_addr_lsw = lower_32_bits(port->param_phys); + cmd_set_param.mem_map_handle = + *((uint32_t *)(port->param_buf_mem_handle)); + cmd_set_param.module_id = module_id; + 
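(Editorial sketch, not part of the patch.) The get/set parameter commands, like the data read/write commands above, pass their shared buffer out of band: a 64-bit physical address split into LSW/MSW words plus the handle obtained from the earlier shared-memory map. A plain-C sketch of that split, assuming only that the upper word is the high 32 bits (the real helper, msm_audio_populate_upper_32_bits(), may additionally depend on the platform's addressing mode):

#include <stdint.h>

static void split_phys_addr(uint64_t phys, uint32_t *lsw, uint32_t *msw)
{
        *lsw = (uint32_t)(phys & 0xffffffffu);
        *msw = (uint32_t)(phys >> 32);
}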
cmd_set_param.param_id = param_id; + cmd_set_param.hdr.token = 0; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_set_param); + + if (rc < 0) { + pr_err("%s:write op[0x%x];rc[%d]\n", + __func__, cmd_set_param.hdr.opcode, rc); + } + + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: CMD_SET_PARAM: timeout=%d\n", + __func__, Q6USM_TIMEOUT_JIFFIES); + } else + rc = 0; + + return rc; +} + +int q6usm_get_us_stream_param(int dir, struct us_client *usc, + uint32_t module_id, uint32_t param_id, uint32_t buf_size) +{ + int rc = 0; + struct usm_stream_cmd_get_param cmd_get_param; + struct us_port_data *port = NULL; + + if ((usc == NULL) || (usc->apr == NULL)) { + pr_err("%s: APR handle NULL\n", __func__); + return -EINVAL; + } + port = &usc->port[dir]; + + q6usm_add_hdr(usc, &cmd_get_param.hdr, sizeof(cmd_get_param), true); + + cmd_get_param.hdr.opcode = USM_STREAM_CMD_GET_PARAM; + cmd_get_param.buf_size = buf_size; + cmd_get_param.buf_addr_msw = + msm_audio_populate_upper_32_bits(port->param_phys); + cmd_get_param.buf_addr_lsw = lower_32_bits(port->param_phys); + cmd_get_param.mem_map_handle = + *((uint32_t *)(port->param_buf_mem_handle)); + cmd_get_param.module_id = module_id; + cmd_get_param.param_id = param_id; + cmd_get_param.hdr.token = 0; + + rc = apr_send_pkt(usc->apr, (uint32_t *) &cmd_get_param); + + if (rc < 0) { + pr_err("%s:write op[0x%x];rc[%d]\n", + __func__, cmd_get_param.hdr.opcode, rc); + } + + rc = wait_event_timeout(usc->cmd_wait, + (atomic_read(&usc->cmd_state) == 0), + Q6USM_TIMEOUT_JIFFIES); + if (!rc) { + rc = -ETIME; + pr_err("%s: CMD_GET_PARAM: timeout=%d\n", + __func__, Q6USM_TIMEOUT_JIFFIES); + } else + rc = 0; + + return rc; +} + +static int __init q6usm_init(void) +{ + pr_debug("%s\n", __func__); + init_waitqueue_head(&this_mmap.cmd_wait); + memset(session, 0, sizeof(session)); + return 0; +} + +device_initcall(q6usm_init); diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.h b/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.h new file mode 100644 index 000000000000..d45d1657c924 --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/q6usm.h @@ -0,0 +1,130 @@ +/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#ifndef __Q6_USM_H__ +#define __Q6_USM_H__ + +#include <linux/qdsp6v2/apr_us.h> + +#define Q6USM_EVENT_UNDEF 0 +#define Q6USM_EVENT_READ_DONE 1 +#define Q6USM_EVENT_WRITE_DONE 2 +#define Q6USM_EVENT_SIGNAL_DETECT_RESULT 3 + +/* cyclic buffer with 1 gap support */ +#define USM_MIN_BUF_CNT 3 + +#define FORMAT_USPS_EPOS 0x00000000 +#define FORMAT_USRAW 0x00000001 +#define FORMAT_USPROX 0x00000002 +#define FORMAT_USGES_SYNC 0x00000003 +#define FORMAT_USRAW_SYNC 0x00000004 +#define INVALID_FORMAT 0xffffffff + +#define IN 0x000 +#define OUT 0x001 + +#define USM_WRONG_TOKEN 0xffffffff +#define USM_UNDEF_TOKEN 0xfffffffe + +#define CMD_CLOSE 0x0004 + +/* bit 0:1 represents priority of stream */ +#define STREAM_PRIORITY_NORMAL 0x0000 +#define STREAM_PRIORITY_LOW 0x0001 +#define STREAM_PRIORITY_HIGH 0x0002 + +/* bit 4 represents META enable of encoded data buffer */ +#define BUFFER_META_ENABLE 0x0010 + +struct us_port_data { + dma_addr_t phys; + /* cyclic region of buffers with 1 gap */ + void *data; + /* number of buffers in the region */ + uint32_t buf_cnt; + /* size of buffer */ + size_t buf_size; + /* write index */ + uint32_t dsp_buf; + /* read index */ + uint32_t cpu_buf; + /* expected token from dsp */ + uint32_t expected_token; + /* read or write locks */ + struct mutex lock; + spinlock_t dsp_lock; + /* ION memory handle */ + struct ion_handle *handle; + /* ION memory client */ + struct ion_client *client; + /* extended parameters, related to q6 variants */ + void *ext; + /* physical address of parameter buffer */ + dma_addr_t param_phys; + /* buffer which stores the parameter data */ + void *param_buf; + /* size of parameter buffer */ + uint32_t param_buf_size; + /* parameter buffer memory handle */ + void *param_buf_mem_handle; + /* ION memory handle for parameter buffer */ + struct ion_handle *param_handle; + /* ION memory client for parameter buffer */ + struct ion_client *param_client; +}; + +struct us_client { + int session; + /* idx:1 out port, 0: in port*/ + struct us_port_data port[2]; + + struct apr_svc *apr; + struct mutex cmd_lock; + + atomic_t cmd_state; + atomic_t eos_state; + wait_queue_head_t cmd_wait; + + void (*cb)(uint32_t, uint32_t, uint32_t *, void *); + void *priv; +}; + +int q6usm_run(struct us_client *usc, uint32_t flags, + uint32_t msw_ts, uint32_t lsw_ts); +int q6usm_cmd(struct us_client *usc, int cmd); +int q6usm_us_client_buf_alloc(unsigned int dir, struct us_client *usc, + unsigned int bufsz, unsigned int bufcnt); +int q6usm_us_param_buf_alloc(unsigned int dir, struct us_client *usc, + unsigned int bufsz); +int q6usm_enc_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg); +int q6usm_dec_cfg_blk(struct us_client *usc, struct us_encdec_cfg *us_cfg); +int q6usm_read(struct us_client *usc, uint32_t read_ind); +struct us_client *q6usm_us_client_alloc( + void (*cb)(uint32_t, uint32_t, uint32_t *, void *), + void *priv); +int q6usm_open_read(struct us_client *usc, uint32_t format); +void q6usm_us_client_free(struct us_client *usc); +uint32_t q6usm_get_virtual_address(int dir, struct us_client *usc, + struct vm_area_struct *vms); +int q6usm_open_write(struct us_client *usc, uint32_t format); +int q6usm_write(struct us_client *usc, uint32_t write_ind); +bool q6usm_is_write_buf_full(struct us_client *usc, uint32_t *free_region); +int q6usm_set_us_detection(struct us_client *usc, + struct usm_session_cmd_detect_info *detect_info, + uint16_t detect_info_size); +int q6usm_set_us_stream_param(int dir, struct us_client *usc, + uint32_t module_id, uint32_t 
param_id, uint32_t buf_size);
+int q6usm_get_us_stream_param(int dir, struct us_client *usc,
+ uint32_t module_id, uint32_t param_id, uint32_t buf_size);
+
+#endif /* __Q6_USM_H__ */
diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c
new file mode 100644
index 000000000000..52f7d3d2f268
--- /dev/null
+++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usf.c
@@ -0,0 +1,2456 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+#include <linux/input.h>
+#include <linux/uaccess.h>
+#include <linux/time.h>
+#include <linux/kmemleak.h>
+#include <linux/wakelock.h>
+#include <linux/mutex.h>
+#include <sound/apr_audio.h>
+#include <linux/qdsp6v2/usf.h>
+#include "q6usm.h"
+#include "usfcdev.h"
+
+/* The driver version */
+#define DRV_VERSION "1.7.1"
+#define USF_VERSION_ID 0x0171
+
+/* Standard timeout in the asynchronous ops */
+#define USF_TIMEOUT_JIFFIES (1*HZ) /* 1 sec */
+
+/* Undefined USF device */
+#define USF_UNDEF_DEV_ID 0xffff
+
+/* TX memory mapping flag */
+#define USF_VM_READ 1
+/* RX memory mapping flag */
+#define USF_VM_WRITE 2
+
+/* Number of events, copied from the user space to kernel one */
+#define USF_EVENTS_PORTION_SIZE 20
+
+/* Indexes in range definitions */
+#define MIN_IND 0
+#define MAX_IND 1
+
+/* The coordinates indexes */
+#define X_IND 0
+#define Y_IND 1
+#define Z_IND 2
+
+/* Shared memory limits */
+/* max_buf_size = port_size(65535*2) * port_num(8) * group_size(3) = 3145680 */
+#define USF_MAX_BUF_SIZE 3145680
+#define USF_MAX_BUF_NUM 32
+
+/* max size for buffer set from user space */
+#define USF_MAX_USER_BUF_SIZE 100000
+
+/* Place for operation result, received from QDSP6 */
+#define APR_RESULT_IND 1
+
+/* Place for US detection result, received from QDSP6 */
+#define APR_US_DETECT_RESULT_IND 0
+
+#define BITS_IN_BYTE 8
+
+/* Time to stay awake after tx read event (e.g., proximity) */
+#define STAY_AWAKE_AFTER_READ_MSECS 3000
+
+/* The driver states */
+enum usf_state_type {
+ USF_IDLE_STATE,
+ USF_OPENED_STATE,
+ USF_CONFIGURED_STATE,
+ USF_WORK_STATE,
+ USF_ADSP_RESTART_STATE,
+ USF_ERROR_STATE
+};
+
+/* The US detection status upon FW/HW based US detection results */
+enum usf_us_detect_type {
+ USF_US_DETECT_UNDEF,
+ USF_US_DETECT_YES,
+ USF_US_DETECT_NO
+};
+
+struct usf_xx_type {
+ /* Name of the client - event calculator */
+ char client_name[USF_MAX_CLIENT_NAME_SIZE];
+ /* The driver state in TX or RX direction */
+ enum usf_state_type usf_state;
+ /* wait for q6 events mechanism */
+ wait_queue_head_t wait;
+ /* IF with q6usm info */
+ struct us_client *usc;
+ /* Q6:USM' Encoder/decoder configuration */
+ struct us_encdec_cfg encdec_cfg;
+ /* Shared buffer (with Q6:USM) size */
+ uint32_t buffer_size;
+ /* Number of the shared buffers (with Q6:USM) */
+ uint32_t buffer_count;
+ /* Shared memory (Cyclic buffer with 1 gap) control */
+ uint32_t
new_region; + uint32_t prev_region; + /* Q6:USM's events handler */ + void (*cb)(uint32_t, uint32_t, uint32_t *, void *); + /* US detection result */ + enum usf_us_detect_type us_detect_type; + /* User's update info isn't acceptable */ + u8 user_upd_info_na; +}; + +struct usf_type { + /* TX device component configuration & control */ + struct usf_xx_type usf_tx; + /* RX device component configuration & control */ + struct usf_xx_type usf_rx; + /* Index into the opened device container */ + /* To prevent mutual usage of the same device */ + uint16_t dev_ind; + /* Event types, supported by device */ + uint16_t event_types; + /* The input devices are "input" module registered clients */ + struct input_dev *input_ifs[USF_MAX_EVENT_IND]; + /* Bitmap of types of events, conflicting to USF's ones */ + uint16_t conflicting_event_types; + /* Bitmap of types of events from devs, conflicting with USF */ + uint16_t conflicting_event_filters; + /* The requested buttons bitmap */ + uint16_t req_buttons_bitmap; + /* Mutex for exclusive operations (all public APIs) */ + struct mutex mutex; +}; + +struct usf_input_dev_type { + /* Input event type, supported by the input device */ + uint16_t event_type; + /* Input device name */ + const char *input_dev_name; + /* Input device registration function */ + int (*prepare_dev)(uint16_t, struct usf_type *, + struct us_input_info_type *, + const char *); + /* Input event notification function */ + void (*notify_event)(struct usf_type *, + uint16_t, + struct usf_event_type * + ); +}; + + +/* The MAX number of the supported devices */ +#define MAX_DEVS_NUMBER 1 + +/* + * code for a special button that is used to show/hide a + * hovering cursor in the input framework. Must be in + * sync with the button code definition in the framework + * (EventHub.h) + */ +#define BTN_USF_HOVERING_CURSOR 0x230 + +/* Supported buttons container */ +static const int s_button_map[] = { + BTN_STYLUS, + BTN_STYLUS2, + BTN_TOOL_PEN, + BTN_TOOL_RUBBER, + BTN_TOOL_FINGER, + BTN_USF_HOVERING_CURSOR +}; + +/* The opened devices container */ +static atomic_t s_opened_devs[MAX_DEVS_NUMBER]; + +static struct wakeup_source usf_wakeup_source; + +#define USF_NAME_PREFIX "usf_" +#define USF_NAME_PREFIX_SIZE 4 + + +static struct input_dev *allocate_dev(uint16_t ind, const char *name) +{ + struct input_dev *in_dev = input_allocate_device(); + + if (in_dev == NULL) { + pr_err("%s: input_allocate_device() failed\n", __func__); + } else { + /* Common part configuration */ + in_dev->name = name; + in_dev->phys = NULL; + in_dev->id.bustype = BUS_HOST; + in_dev->id.vendor = 0x0001; + in_dev->id.product = 0x0001; + in_dev->id.version = USF_VERSION_ID; + } + return in_dev; +} + +static int prepare_tsc_input_device(uint16_t ind, + struct usf_type *usf_info, + struct us_input_info_type *input_info, + const char *name) +{ + int i = 0; + + int num_buttons = min(ARRAY_SIZE(s_button_map), + sizeof(input_info->req_buttons_bitmap) * + BITS_IN_BYTE); + uint16_t max_buttons_bitmap = ((1 << ARRAY_SIZE(s_button_map)) - 1); + + struct input_dev *in_dev = allocate_dev(ind, name); + if (in_dev == NULL) + return -ENOMEM; + + if (input_info->req_buttons_bitmap > max_buttons_bitmap) { + pr_err("%s: Requested buttons[%d] exceeds max buttons available[%d]\n", + __func__, + input_info->req_buttons_bitmap, + max_buttons_bitmap); + input_free_device(in_dev); + return -EINVAL; + } + + usf_info->input_ifs[ind] = in_dev; + usf_info->req_buttons_bitmap = + input_info->req_buttons_bitmap; + in_dev->evbit[0] = BIT_MASK(EV_KEY) | 
BIT_MASK(EV_ABS); + in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); + + for (i = 0; i < num_buttons; i++) + if (input_info->req_buttons_bitmap & (1 << i)) + in_dev->keybit[BIT_WORD(s_button_map[i])] |= + BIT_MASK(s_button_map[i]); + + input_set_abs_params(in_dev, ABS_X, + input_info->tsc_x_dim[MIN_IND], + input_info->tsc_x_dim[MAX_IND], + 0, 0); + input_set_abs_params(in_dev, ABS_Y, + input_info->tsc_y_dim[MIN_IND], + input_info->tsc_y_dim[MAX_IND], + 0, 0); + input_set_abs_params(in_dev, ABS_DISTANCE, + input_info->tsc_z_dim[MIN_IND], + input_info->tsc_z_dim[MAX_IND], + 0, 0); + + input_set_abs_params(in_dev, ABS_PRESSURE, + input_info->tsc_pressure[MIN_IND], + input_info->tsc_pressure[MAX_IND], + 0, 0); + + input_set_abs_params(in_dev, ABS_TILT_X, + input_info->tsc_x_tilt[MIN_IND], + input_info->tsc_x_tilt[MAX_IND], + 0, 0); + input_set_abs_params(in_dev, ABS_TILT_Y, + input_info->tsc_y_tilt[MIN_IND], + input_info->tsc_y_tilt[MAX_IND], + 0, 0); + + return 0; +} + +static int prepare_mouse_input_device(uint16_t ind, struct usf_type *usf_info, + struct us_input_info_type *input_info, + const char *name) +{ + struct input_dev *in_dev = allocate_dev(ind, name); + + if (in_dev == NULL) + return -ENOMEM; + + usf_info->input_ifs[ind] = in_dev; + in_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + + in_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | + BIT_MASK(BTN_RIGHT) | + BIT_MASK(BTN_MIDDLE); + in_dev->relbit[0] = BIT_MASK(REL_X) | + BIT_MASK(REL_Y) | + BIT_MASK(REL_Z); + + return 0; +} + +static int prepare_keyboard_input_device( + uint16_t ind, + struct usf_type *usf_info, + struct us_input_info_type *input_info, + const char *name) +{ + struct input_dev *in_dev = allocate_dev(ind, name); + + if (in_dev == NULL) + return -ENOMEM; + + usf_info->input_ifs[ind] = in_dev; + in_dev->evbit[0] |= BIT_MASK(EV_KEY); + /* All keys are permitted */ + memset(in_dev->keybit, 0xff, sizeof(in_dev->keybit)); + + return 0; +} + +static void notify_tsc_event(struct usf_type *usf_info, + uint16_t if_ind, + struct usf_event_type *event) + +{ + int i = 0; + int num_buttons = min(ARRAY_SIZE(s_button_map), + sizeof(usf_info->req_buttons_bitmap) * + BITS_IN_BYTE); + + struct input_dev *input_if = usf_info->input_ifs[if_ind]; + struct point_event_type *pe = &(event->event_data.point_event); + + input_report_abs(input_if, ABS_X, pe->coordinates[X_IND]); + input_report_abs(input_if, ABS_Y, pe->coordinates[Y_IND]); + input_report_abs(input_if, ABS_DISTANCE, pe->coordinates[Z_IND]); + + input_report_abs(input_if, ABS_TILT_X, pe->inclinations[X_IND]); + input_report_abs(input_if, ABS_TILT_Y, pe->inclinations[Y_IND]); + + input_report_abs(input_if, ABS_PRESSURE, pe->pressure); + input_report_key(input_if, BTN_TOUCH, !!(pe->pressure)); + + for (i = 0; i < num_buttons; i++) { + uint16_t mask = (1 << i), + btn_state = !!(pe->buttons_state_bitmap & mask); + if (usf_info->req_buttons_bitmap & mask) + input_report_key(input_if, s_button_map[i], btn_state); + } + + input_sync(input_if); + + pr_debug("%s: TSC event: xyz[%d;%d;%d], incl[%d;%d], pressure[%d], buttons[%d]\n", + __func__, + pe->coordinates[X_IND], + pe->coordinates[Y_IND], + pe->coordinates[Z_IND], + pe->inclinations[X_IND], + pe->inclinations[Y_IND], + pe->pressure, + pe->buttons_state_bitmap); +} + +static void notify_mouse_event(struct usf_type *usf_info, + uint16_t if_ind, + struct usf_event_type *event) +{ + struct input_dev *input_if = usf_info->input_ifs[if_ind]; + struct mouse_event_type *me = &(event->event_data.mouse_event); + + 
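+ /*
+  * The ultrasound framework hands over relative motion plus a button
+  * bitmask; this handler only forwards them through the standard input
+  * API. As an illustrative example (values invented), an event with
+  * rels = {5, -3, 0} and buttons_states = USF_BUTTON_LEFT_MASK is
+  * reported as REL_X=5, REL_Y=-3, REL_Z=0, BTN_LEFT=1, BTN_MIDDLE=0,
+  * BTN_RIGHT=0 within a single input_sync() frame.
+  */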
input_report_rel(input_if, REL_X, me->rels[X_IND]); + input_report_rel(input_if, REL_Y, me->rels[Y_IND]); + input_report_rel(input_if, REL_Z, me->rels[Z_IND]); + + input_report_key(input_if, BTN_LEFT, + me->buttons_states & USF_BUTTON_LEFT_MASK); + input_report_key(input_if, BTN_MIDDLE, + me->buttons_states & USF_BUTTON_MIDDLE_MASK); + input_report_key(input_if, BTN_RIGHT, + me->buttons_states & USF_BUTTON_RIGHT_MASK); + + input_sync(input_if); + + pr_debug("%s: mouse event: dx[%d], dy[%d], buttons_states[%d]\n", + __func__, me->rels[X_IND], + me->rels[Y_IND], me->buttons_states); +} + +static void notify_key_event(struct usf_type *usf_info, + uint16_t if_ind, + struct usf_event_type *event) +{ + struct input_dev *input_if = usf_info->input_ifs[if_ind]; + struct key_event_type *ke = &(event->event_data.key_event); + + input_report_key(input_if, ke->key, ke->key_state); + input_sync(input_if); + pr_debug("%s: key event: key[%d], state[%d]\n", + __func__, + ke->key, + ke->key_state); + +} + +static struct usf_input_dev_type s_usf_input_devs[] = { + {USF_TSC_EVENT, "usf_tsc", + prepare_tsc_input_device, notify_tsc_event}, + {USF_TSC_PTR_EVENT, "usf_tsc_ptr", + prepare_tsc_input_device, notify_tsc_event}, + {USF_MOUSE_EVENT, "usf_mouse", + prepare_mouse_input_device, notify_mouse_event}, + {USF_KEYBOARD_EVENT, "usf_kb", + prepare_keyboard_input_device, notify_key_event}, + {USF_TSC_EXT_EVENT, "usf_tsc_ext", + prepare_tsc_input_device, notify_tsc_event}, +}; + +static void usf_rx_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct usf_xx_type *usf_xx = (struct usf_xx_type *) priv; + + if (usf_xx == NULL) { + pr_err("%s: the private data is NULL\n", __func__); + return; + } + + switch (opcode) { + case Q6USM_EVENT_WRITE_DONE: + wake_up(&usf_xx->wait); + break; + + case RESET_EVENTS: + pr_err("%s: received RESET_EVENTS\n", __func__); + usf_xx->usf_state = USF_ADSP_RESTART_STATE; + wake_up(&usf_xx->wait); + break; + + default: + break; + } +} + +static void usf_tx_cb(uint32_t opcode, uint32_t token, + uint32_t *payload, void *priv) +{ + struct usf_xx_type *usf_xx = (struct usf_xx_type *) priv; + + if (usf_xx == NULL) { + pr_err("%s: the private data is NULL\n", __func__); + return; + } + + switch (opcode) { + case Q6USM_EVENT_READ_DONE: + pr_debug("%s: acquiring %d msec wake lock\n", __func__, + STAY_AWAKE_AFTER_READ_MSECS); + __pm_wakeup_event(&usf_wakeup_source, + STAY_AWAKE_AFTER_READ_MSECS); + if (token == USM_WRONG_TOKEN) + usf_xx->usf_state = USF_ERROR_STATE; + usf_xx->new_region = token; + wake_up(&usf_xx->wait); + break; + + case Q6USM_EVENT_SIGNAL_DETECT_RESULT: + usf_xx->us_detect_type = (payload[APR_US_DETECT_RESULT_IND]) ? 
+ USF_US_DETECT_YES : + USF_US_DETECT_NO; + + wake_up(&usf_xx->wait); + break; + + case APR_BASIC_RSP_RESULT: + if (payload[APR_RESULT_IND]) { + usf_xx->usf_state = USF_ERROR_STATE; + usf_xx->new_region = USM_WRONG_TOKEN; + wake_up(&usf_xx->wait); + } + break; + + case RESET_EVENTS: + pr_err("%s: received RESET_EVENTS\n", __func__); + usf_xx->usf_state = USF_ADSP_RESTART_STATE; + wake_up(&usf_xx->wait); + break; + + default: + break; + } +} + +static void release_xx(struct usf_xx_type *usf_xx) +{ + if (usf_xx != NULL) { + if (usf_xx->usc) { + q6usm_us_client_free(usf_xx->usc); + usf_xx->usc = NULL; + } + + if (usf_xx->encdec_cfg.params != NULL) { + kfree(usf_xx->encdec_cfg.params); + usf_xx->encdec_cfg.params = NULL; + } + } +} + +static void usf_disable(struct usf_xx_type *usf_xx) +{ + if (usf_xx != NULL) { + if ((usf_xx->usf_state != USF_IDLE_STATE) && + (usf_xx->usf_state != USF_OPENED_STATE)) { + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + usf_xx->usf_state = USF_OPENED_STATE; + wake_up(&usf_xx->wait); + } + release_xx(usf_xx); + } +} + +static int config_xx(struct usf_xx_type *usf_xx, struct us_xx_info_type *config) +{ + int rc = 0; + uint16_t data_map_size = 0; + uint16_t min_map_size = 0; + + if ((usf_xx == NULL) || + (config == NULL)) + return -EINVAL; + + if ((config->buf_size == 0) || + (config->buf_size > USF_MAX_BUF_SIZE) || + (config->buf_num == 0) || + (config->buf_num > USF_MAX_BUF_NUM)) { + pr_err("%s: wrong params: buf_size=%d; buf_num=%d\n", + __func__, config->buf_size, config->buf_num); + return -EINVAL; + } + + data_map_size = sizeof(usf_xx->encdec_cfg.cfg_common.data_map); + min_map_size = min(data_map_size, config->port_cnt); + + if (config->client_name != NULL) { + if (strncpy_from_user(usf_xx->client_name, + (char __user *)config->client_name, + sizeof(usf_xx->client_name) - 1) < 0) { + pr_err("%s: get client name failed\n", __func__); + return -EINVAL; + } + } + + pr_debug("%s: name=%s; buf_size:%d; dev_id:0x%x; sample_rate:%d\n", + __func__, usf_xx->client_name, config->buf_size, + config->dev_id, config->sample_rate); + + pr_debug("%s: buf_num:%d; format:%d; port_cnt:%d; data_size=%d\n", + __func__, config->buf_num, config->stream_format, + config->port_cnt, config->params_data_size); + + pr_debug("%s: id[0]=%d, id[1]=%d, id[2]=%d, id[3]=%d, id[4]=%d,\n", + __func__, + config->port_id[0], + config->port_id[1], + config->port_id[2], + config->port_id[3], + config->port_id[4]); + + pr_debug("id[5]=%d, id[6]=%d, id[7]=%d\n", + config->port_id[5], + config->port_id[6], + config->port_id[7]); + + /* q6usm allocation & configuration */ + usf_xx->buffer_size = config->buf_size; + usf_xx->buffer_count = config->buf_num; + usf_xx->encdec_cfg.cfg_common.bits_per_sample = + config->bits_per_sample; + usf_xx->encdec_cfg.cfg_common.sample_rate = config->sample_rate; + /* AFE port e.g. 
AFE_PORT_ID_SLIMBUS_MULTI_CHAN_1_RX */ + usf_xx->encdec_cfg.cfg_common.dev_id = config->dev_id; + + usf_xx->encdec_cfg.cfg_common.ch_cfg = config->port_cnt; + memcpy((void *)&usf_xx->encdec_cfg.cfg_common.data_map, + (void *)config->port_id, + min_map_size); + + usf_xx->encdec_cfg.format_id = config->stream_format; + usf_xx->encdec_cfg.params_size = config->params_data_size; + usf_xx->user_upd_info_na = 1; /* it's used in US_GET_TX_UPDATE */ + + if (config->params_data_size > 0) { /* transparent data copy */ + usf_xx->encdec_cfg.params = kzalloc(config->params_data_size, + GFP_KERNEL); + /* False memory leak here - pointer in packed struct * + * is undetected by kmemleak tool */ + kmemleak_ignore(usf_xx->encdec_cfg.params); + if (usf_xx->encdec_cfg.params == NULL) { + pr_err("%s: params memory alloc[%d] failure\n", + __func__, + config->params_data_size); + return -ENOMEM; + } + rc = copy_from_user(usf_xx->encdec_cfg.params, + (uint8_t __user *)config->params_data, + config->params_data_size); + if (rc) { + pr_err("%s: transparent data copy failure\n", + __func__); + kfree(usf_xx->encdec_cfg.params); + usf_xx->encdec_cfg.params = NULL; + return -EFAULT; + } + pr_debug("%s: params_size[%d]; params[%d,%d,%d,%d, %d]\n", + __func__, + config->params_data_size, + usf_xx->encdec_cfg.params[0], + usf_xx->encdec_cfg.params[1], + usf_xx->encdec_cfg.params[2], + usf_xx->encdec_cfg.params[3], + usf_xx->encdec_cfg.params[4] + ); + } + + usf_xx->usc = q6usm_us_client_alloc(usf_xx->cb, (void *)usf_xx); + if (!usf_xx->usc) { + pr_err("%s: Could not allocate q6usm client\n", __func__); + rc = -EFAULT; + } + + return rc; +} + +static bool usf_match(uint16_t event_type_ind, struct input_dev *dev) +{ + bool rc = false; + + rc = (event_type_ind < MAX_EVENT_TYPE_NUM) && + ((dev->name == NULL) || + strncmp(dev->name, USF_NAME_PREFIX, USF_NAME_PREFIX_SIZE)); + pr_debug("%s: name=[%s]; rc=%d\n", + __func__, dev->name, rc); + + return rc; +} + +static bool usf_register_conflicting_events(uint16_t event_types) +{ + bool rc = true; + uint16_t ind = 0; + uint16_t mask = 1; + + for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) { + if (event_types & mask) { + rc = usfcdev_register(ind, usf_match); + if (!rc) + break; + } + mask = mask << 1; + } + + return rc; +} + +static void usf_unregister_conflicting_events(uint16_t event_types) +{ + uint16_t ind = 0; + uint16_t mask = 1; + + for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) { + if (event_types & mask) + usfcdev_unregister(ind); + mask = mask << 1; + } +} + +static void usf_set_event_filters(struct usf_type *usf, uint16_t event_filters) +{ + uint16_t ind = 0; + uint16_t mask = 1; + + if (usf->conflicting_event_filters != event_filters) { + for (ind = 0; ind < MAX_EVENT_TYPE_NUM; ++ind) { + if (usf->conflicting_event_types & mask) + usfcdev_set_filter(ind, event_filters&mask); + mask = mask << 1; + } + usf->conflicting_event_filters = event_filters; + } +} + +static int register_input_device(struct usf_type *usf_info, + struct us_input_info_type *input_info) +{ + int rc = 0; + bool ret = true; + uint16_t ind = 0; + + if ((usf_info == NULL) || + (input_info == NULL) || + !(input_info->event_types & USF_ALL_EVENTS)) { + pr_err("%s: wrong input parameter(s)\n", __func__); + return -EINVAL; + } + + for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) { + if (usf_info->input_ifs[ind] != NULL) { + pr_err("%s: input_if[%d] is already allocated\n", + __func__, ind); + return -EFAULT; + } + if ((input_info->event_types & + s_usf_input_devs[ind].event_type) && + 
s_usf_input_devs[ind].prepare_dev) { + rc = (*s_usf_input_devs[ind].prepare_dev)( + ind, + usf_info, + input_info, + s_usf_input_devs[ind].input_dev_name); + if (rc) + return rc; + + rc = input_register_device(usf_info->input_ifs[ind]); + if (rc) { + pr_err("%s: input_reg_dev() failed; rc=%d\n", + __func__, rc); + input_free_device(usf_info->input_ifs[ind]); + usf_info->input_ifs[ind] = NULL; + } else { + usf_info->event_types |= + s_usf_input_devs[ind].event_type; + pr_debug("%s: input device[%s] was registered\n", + __func__, + s_usf_input_devs[ind].input_dev_name); + } + } /* supported event */ + } /* event types loop */ + + ret = usf_register_conflicting_events( + input_info->conflicting_event_types); + if (ret) + usf_info->conflicting_event_types = + input_info->conflicting_event_types; + + return 0; +} + + +static void handle_input_event(struct usf_type *usf_info, + uint16_t event_counter, + struct usf_event_type __user *event) +{ + uint16_t ind = 0; + uint16_t events_num = 0; + struct usf_event_type usf_events[USF_EVENTS_PORTION_SIZE]; + int rc = 0; + + if ((usf_info == NULL) || + (event == NULL) || (!event_counter)) { + return; + } + + while (event_counter > 0) { + if (event_counter > USF_EVENTS_PORTION_SIZE) { + events_num = USF_EVENTS_PORTION_SIZE; + event_counter -= USF_EVENTS_PORTION_SIZE; + } else { + events_num = event_counter; + event_counter = 0; + } + rc = copy_from_user(usf_events, + (struct usf_event_type __user *)event, + events_num * sizeof(struct usf_event_type)); + if (rc) { + pr_err("%s: copy upd_rx_info from user; rc=%d\n", + __func__, rc); + return; + } + for (ind = 0; ind < events_num; ++ind) { + struct usf_event_type *p_event = &usf_events[ind]; + uint16_t if_ind = p_event->event_type_ind; + + if ((if_ind >= USF_MAX_EVENT_IND) || + (usf_info->input_ifs[if_ind] == NULL)) + continue; /* event isn't supported */ + + if (s_usf_input_devs[if_ind].notify_event) + (*s_usf_input_devs[if_ind].notify_event)( + usf_info, + if_ind, + p_event); + } /* loop in the portion */ + } /* all events loop */ +} + +static int usf_start_tx(struct usf_xx_type *usf_xx) +{ + int rc = q6usm_run(usf_xx->usc, 0, 0, 0); + + pr_debug("%s: tx: q6usm_run; rc=%d\n", __func__, rc); + if (!rc) { + if (usf_xx->buffer_count >= USM_MIN_BUF_CNT) { + /* supply all buffers */ + rc = q6usm_read(usf_xx->usc, + usf_xx->buffer_count); + pr_debug("%s: q6usm_read[%d]\n", + __func__, rc); + + if (rc) + pr_err("%s: buf read failed", + __func__); + else + usf_xx->usf_state = + USF_WORK_STATE; + } else + usf_xx->usf_state = + USF_WORK_STATE; + } + + return rc; +} /* usf_start_tx */ + +static int usf_start_rx(struct usf_xx_type *usf_xx) +{ + int rc = q6usm_run(usf_xx->usc, 0, 0, 0); + + pr_debug("%s: rx: q6usm_run; rc=%d\n", + __func__, rc); + if (!rc) + usf_xx->usf_state = USF_WORK_STATE; + + return rc; +} /* usf_start_rx */ + +static int __usf_set_us_detection(struct usf_type *usf, + struct us_detect_info_type *detect_info) +{ + uint32_t timeout = 0; + struct usm_session_cmd_detect_info *p_allocated_memory = NULL; + struct usm_session_cmd_detect_info usm_detect_info; + struct usm_session_cmd_detect_info *p_usm_detect_info = + &usm_detect_info; + uint32_t detect_info_size = sizeof(struct usm_session_cmd_detect_info); + struct usf_xx_type *usf_xx = &usf->usf_tx; + int rc = 0; + + if (detect_info->us_detector != US_DETECT_FW) { + pr_err("%s: unsupported detector: %d\n", + __func__, detect_info->us_detector); + return -EINVAL; + } + + if ((detect_info->params_data_size != 0) && + (detect_info->params_data != NULL)) { 
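+ /*
+  * When the caller supplies algorithm configuration, the detection
+  * command is built as one variable-length APR payload: the fixed
+  * usm_session_cmd_detect_info header followed directly by the user
+  * blob. A rough layout sketch (not a separate struct in this driver):
+  *
+  *	| usm_session_cmd_detect_info | params_data_size bytes of cfg |
+  *
+  * so the detect_info_size passed to q6usm_set_us_detection() covers
+  * both parts.
+  */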
+ uint8_t *p_data = NULL; + + detect_info_size += detect_info->params_data_size; + p_allocated_memory = kzalloc(detect_info_size, GFP_KERNEL); + if (p_allocated_memory == NULL) { + pr_err("%s: detect_info[%d] allocation failed\n", + __func__, detect_info_size); + return -ENOMEM; + } + p_usm_detect_info = p_allocated_memory; + p_data = (uint8_t *)p_usm_detect_info + + sizeof(struct usm_session_cmd_detect_info); + + rc = copy_from_user(p_data, + (uint8_t __user *)(detect_info->params_data), + detect_info->params_data_size); + if (rc) { + pr_err("%s: copy params from user; rc=%d\n", + __func__, rc); + kfree(p_allocated_memory); + return -EFAULT; + } + p_usm_detect_info->algorithm_cfg_size = + detect_info->params_data_size; + } else + usm_detect_info.algorithm_cfg_size = 0; + + p_usm_detect_info->detect_mode = detect_info->us_detect_mode; + p_usm_detect_info->skip_interval = detect_info->skip_time; + + usf_xx->us_detect_type = USF_US_DETECT_UNDEF; + + rc = q6usm_set_us_detection(usf_xx->usc, + p_usm_detect_info, + detect_info_size); + if (rc || (detect_info->detect_timeout == USF_NO_WAIT_TIMEOUT)) { + kfree(p_allocated_memory); + return rc; + } + + /* Get US detection result */ + if (detect_info->detect_timeout == USF_INFINITIVE_TIMEOUT) { + rc = wait_event_interruptible(usf_xx->wait, + (usf_xx->us_detect_type != + USF_US_DETECT_UNDEF) || + (usf_xx->usf_state == + USF_ADSP_RESTART_STATE)); + } else { + if (detect_info->detect_timeout == USF_DEFAULT_TIMEOUT) + timeout = USF_TIMEOUT_JIFFIES; + else + timeout = detect_info->detect_timeout * HZ; + } + rc = wait_event_interruptible_timeout(usf_xx->wait, + (usf_xx->us_detect_type != + USF_US_DETECT_UNDEF) || + (usf_xx->usf_state == + USF_ADSP_RESTART_STATE), timeout); + + /* In the case of aDSP restart, "no US" is assumed */ + if (usf_xx->usf_state == USF_ADSP_RESTART_STATE) { + rc = -EFAULT; + } + /* In the case of timeout, "no US" is assumed */ + if (rc < 0) + pr_err("%s: Getting US detection failed rc[%d]\n", + __func__, rc); + else { + usf->usf_rx.us_detect_type = usf->usf_tx.us_detect_type; + detect_info->is_us = + (usf_xx->us_detect_type == USF_US_DETECT_YES); + } + + kfree(p_allocated_memory); + + return rc; +} /* __usf_set_us_detection */ + +static int usf_set_us_detection(struct usf_type *usf, unsigned long arg) +{ + struct us_detect_info_type detect_info; + + int rc = copy_from_user(&detect_info, + (struct us_detect_info_type __user *) arg, + sizeof(detect_info)); + + if (rc) { + pr_err("%s: copy detect_info from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + if (detect_info.params_data_size > USF_MAX_USER_BUF_SIZE) { + pr_err("%s: user buffer size exceeds maximum\n", + __func__); + return -EFAULT; + } + + rc = __usf_set_us_detection(usf, &detect_info); + if (rc < 0) { + pr_err("%s: set us detection failed; rc=%d\n", + __func__, rc); + return rc; + } + + rc = copy_to_user((void __user *)arg, + &detect_info, + sizeof(detect_info)); + if (rc) { + pr_err("%s: copy detect_info to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_set_us_detection */ + +static int __usf_set_tx_info(struct usf_type *usf, + struct us_tx_info_type *config_tx) +{ + struct usf_xx_type *usf_xx = &usf->usf_tx; + int rc = 0; + + usf_xx->new_region = USM_UNDEF_TOKEN; + usf_xx->prev_region = USM_UNDEF_TOKEN; + usf_xx->cb = usf_tx_cb; + + init_waitqueue_head(&usf_xx->wait); + + if (config_tx->us_xx_info.client_name != NULL) { + int res = strncpy_from_user( + usf_xx->client_name, + (char __user *)(config_tx->us_xx_info.client_name), + 
sizeof(usf_xx->client_name)-1); + if (res < 0) { + pr_err("%s: get client name failed\n", + __func__); + return -EINVAL; + } + } + + rc = config_xx(usf_xx, &(config_tx->us_xx_info)); + if (rc) + return rc; + + rc = q6usm_open_read(usf_xx->usc, + usf_xx->encdec_cfg.format_id); + if (rc) + return rc; + + rc = q6usm_us_client_buf_alloc(OUT, usf_xx->usc, + usf_xx->buffer_size, + usf_xx->buffer_count); + if (rc) { + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + return rc; + } + + rc = q6usm_us_param_buf_alloc(OUT, usf_xx->usc, + config_tx->us_xx_info.max_get_set_param_buf_size); + if (rc) { + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + return rc; + } + + rc = q6usm_enc_cfg_blk(usf_xx->usc, + &usf_xx->encdec_cfg); + if (!rc && + (config_tx->input_info.event_types != USF_NO_EVENT)) { + rc = register_input_device(usf, + &(config_tx->input_info)); + } + + if (rc) + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + else + usf_xx->usf_state = USF_CONFIGURED_STATE; + + return rc; +} /* __usf_set_tx_info */ + +static int usf_set_tx_info(struct usf_type *usf, unsigned long arg) +{ + struct us_tx_info_type config_tx; + + int rc = copy_from_user(&config_tx, + (struct us_tx_info_type __user *) arg, + sizeof(config_tx)); + + if (rc) { + pr_err("%s: copy config_tx from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + if (config_tx.us_xx_info.params_data_size > USF_MAX_USER_BUF_SIZE) { + pr_err("%s: user buffer size exceeds maximum\n", + __func__); + return -EFAULT; + } + + return __usf_set_tx_info(usf, &config_tx); +} /* usf_set_tx_info */ + +static int __usf_set_rx_info(struct usf_type *usf, + struct us_rx_info_type *config_rx) +{ + struct usf_xx_type *usf_xx = &usf->usf_rx; + int rc = 0; + + usf_xx->new_region = USM_UNDEF_TOKEN; + usf_xx->prev_region = USM_UNDEF_TOKEN; + + usf_xx->cb = usf_rx_cb; + + rc = config_xx(usf_xx, &(config_rx->us_xx_info)); + if (rc) + return rc; + + rc = q6usm_open_write(usf_xx->usc, + usf_xx->encdec_cfg.format_id); + if (rc) + return rc; + + rc = q6usm_us_client_buf_alloc( + IN, + usf_xx->usc, + usf_xx->buffer_size, + usf_xx->buffer_count); + if (rc) { + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + return rc; + } + + rc = q6usm_us_param_buf_alloc(IN, usf_xx->usc, + config_rx->us_xx_info.max_get_set_param_buf_size); + if (rc) { + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + return rc; + } + + rc = q6usm_dec_cfg_blk(usf_xx->usc, + &usf_xx->encdec_cfg); + if (rc) + (void)q6usm_cmd(usf_xx->usc, CMD_CLOSE); + else { + init_waitqueue_head(&usf_xx->wait); + usf_xx->usf_state = USF_CONFIGURED_STATE; + } + + return rc; +} /* __usf_set_rx_info */ + +static int usf_set_rx_info(struct usf_type *usf, unsigned long arg) +{ + struct us_rx_info_type config_rx; + + int rc = copy_from_user(&config_rx, + (struct us_rx_info_type __user *) arg, + sizeof(config_rx)); + + if (rc) { + pr_err("%s: copy config_rx from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + if (config_rx.us_xx_info.params_data_size > USF_MAX_USER_BUF_SIZE) { + pr_err("%s: user buffer size exceeds maximum\n", + __func__); + return -EFAULT; + } + + return __usf_set_rx_info(usf, &config_rx); +} /* usf_set_rx_info */ + +static int __usf_get_tx_update(struct usf_type *usf, + struct us_tx_update_info_type *upd_tx_info) +{ + unsigned long prev_jiffies = 0; + uint32_t timeout = 0; + struct usf_xx_type *usf_xx = &usf->usf_tx; + int rc = 0; + + if (!usf_xx->user_upd_info_na) { + usf_set_event_filters(usf, upd_tx_info->event_filters); + handle_input_event(usf, + upd_tx_info->event_counter, + upd_tx_info->event); + + /* Release 
available regions */ + rc = q6usm_read(usf_xx->usc, + upd_tx_info->free_region); + if (rc) + return rc; + } else + usf_xx->user_upd_info_na = 0; + + /* Get data ready regions */ + if (upd_tx_info->timeout == USF_INFINITIVE_TIMEOUT) { + rc = wait_event_interruptible(usf_xx->wait, + (usf_xx->prev_region != + usf_xx->new_region) || + (usf_xx->usf_state != + USF_WORK_STATE)); + } else { + if (upd_tx_info->timeout == USF_NO_WAIT_TIMEOUT) + rc = (usf_xx->prev_region != usf_xx->new_region); + else { + prev_jiffies = jiffies; + if (upd_tx_info->timeout == USF_DEFAULT_TIMEOUT) { + timeout = USF_TIMEOUT_JIFFIES; + rc = wait_event_timeout( + usf_xx->wait, + (usf_xx->prev_region != + usf_xx->new_region) || + (usf_xx->usf_state != + USF_WORK_STATE), + timeout); + } else { + timeout = upd_tx_info->timeout * HZ; + rc = wait_event_interruptible_timeout( + usf_xx->wait, + (usf_xx->prev_region != + usf_xx->new_region) || + (usf_xx->usf_state != + USF_WORK_STATE), + timeout); + } + } + if (!rc) { + pr_debug("%s: timeout. prev_j=%lu; j=%lu\n", + __func__, prev_jiffies, jiffies); + pr_debug("%s: timeout. prev=%d; new=%d\n", + __func__, usf_xx->prev_region, + usf_xx->new_region); + pr_debug("%s: timeout. free_region=%d;\n", + __func__, upd_tx_info->free_region); + if (usf_xx->prev_region == + usf_xx->new_region) { + pr_err("%s:read data: timeout\n", + __func__); + return -ETIME; + } + } + } + + if ((usf_xx->usf_state != USF_WORK_STATE) || + (rc == -ERESTARTSYS)) { + pr_err("%s: Get ready region failure; state[%d]; rc[%d]\n", + __func__, usf_xx->usf_state, rc); + return -EINTR; + } + + upd_tx_info->ready_region = usf_xx->new_region; + usf_xx->prev_region = upd_tx_info->ready_region; + + if (upd_tx_info->ready_region == USM_WRONG_TOKEN) { + pr_err("%s: TX path corrupted; prev=%d\n", + __func__, usf_xx->prev_region); + return -EIO; + } + + return rc; +} /* __usf_get_tx_update */ + +static int usf_get_tx_update(struct usf_type *usf, unsigned long arg) +{ + struct us_tx_update_info_type upd_tx_info; + + int rc = copy_from_user(&upd_tx_info, + (struct us_tx_update_info_type __user *) arg, + sizeof(upd_tx_info)); + + if (rc < 0) { + pr_err("%s: copy upd_tx_info from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + rc = __usf_get_tx_update(usf, &upd_tx_info); + if (rc < 0) { + pr_err("%s: get tx update failed; rc=%d\n", + __func__, rc); + return rc; + } + + rc = copy_to_user((void __user *)arg, + &upd_tx_info, + sizeof(upd_tx_info)); + if (rc) { + pr_err("%s: copy upd_tx_info to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_get_tx_update */ + +static int __usf_set_rx_update(struct usf_xx_type *usf_xx, + struct us_rx_update_info_type *upd_rx_info) +{ + int rc = 0; + + /* Send available data regions */ + if (upd_rx_info->ready_region != + usf_xx->buffer_count) { + rc = q6usm_write( + usf_xx->usc, + upd_rx_info->ready_region); + if (rc) + return rc; + } + + /* Get free regions */ + rc = wait_event_timeout( + usf_xx->wait, + !q6usm_is_write_buf_full( + usf_xx->usc, + &(upd_rx_info->free_region)) || + (usf_xx->usf_state == USF_IDLE_STATE), + USF_TIMEOUT_JIFFIES); + + if (!rc) { + rc = -ETIME; + pr_err("%s:timeout. 
wait for write buf not full\n", + __func__); + } else { + if (usf_xx->usf_state != + USF_WORK_STATE) { + pr_err("%s: RX: state[%d]\n", + __func__, + usf_xx->usf_state); + rc = -EINTR; + } + } + + return rc; +} /* __usf_set_rx_update */ + +static int usf_set_rx_update(struct usf_xx_type *usf_xx, unsigned long arg) +{ + struct us_rx_update_info_type upd_rx_info; + + int rc = copy_from_user(&upd_rx_info, + (struct us_rx_update_info_type __user *) arg, + sizeof(upd_rx_info)); + + if (rc) { + pr_err("%s: copy upd_rx_info from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + rc = __usf_set_rx_update(usf_xx, &upd_rx_info); + if (rc < 0) { + pr_err("%s: set rx update failed; rc=%d\n", + __func__, rc); + return rc; + } + + rc = copy_to_user((void __user *)arg, + &upd_rx_info, + sizeof(upd_rx_info)); + if (rc) { + pr_err("%s: copy rx_info to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_set_rx_update */ + +static void usf_release_input(struct usf_type *usf) +{ + uint16_t ind = 0; + + usf_unregister_conflicting_events( + usf->conflicting_event_types); + usf->conflicting_event_types = 0; + for (ind = 0; ind < USF_MAX_EVENT_IND; ++ind) { + if (usf->input_ifs[ind] == NULL) + continue; + input_unregister_device(usf->input_ifs[ind]); + usf->input_ifs[ind] = NULL; + pr_debug("%s input_unregister_device[%s]\n", + __func__, + s_usf_input_devs[ind].input_dev_name); + } +} /* usf_release_input */ + +static int usf_stop_tx(struct usf_type *usf) +{ + struct usf_xx_type *usf_xx = &usf->usf_tx; + + usf_release_input(usf); + usf_disable(usf_xx); + + return 0; +} /* usf_stop_tx */ + +static int __usf_get_version(struct us_version_info_type *version_info) +{ + int rc = 0; + + if (version_info->buf_size < sizeof(DRV_VERSION)) { + pr_err("%s: buf_size (%d) < version string size (%zu)\n", + __func__, version_info->buf_size, sizeof(DRV_VERSION)); + return -EINVAL; + } + + rc = copy_to_user((void __user *)(version_info->pbuf), + DRV_VERSION, + sizeof(DRV_VERSION)); + if (rc) { + pr_err("%s: copy to version_info.pbuf; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* __usf_get_version */ + +static int usf_get_version(unsigned long arg) +{ + struct us_version_info_type version_info; + + int rc = copy_from_user(&version_info, + (struct us_version_info_type __user *) arg, + sizeof(version_info)); + + if (rc) { + pr_err("%s: copy version_info from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + rc = __usf_get_version(&version_info); + if (rc < 0) { + pr_err("%s: get version failed; rc=%d\n", + __func__, rc); + return rc; + } + + rc = copy_to_user((void __user *)arg, + &version_info, + sizeof(version_info)); + if (rc) { + pr_err("%s: copy version_info to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_get_version */ + +static int __usf_set_stream_param(struct usf_xx_type *usf_xx, + struct us_stream_param_type *set_stream_param, + int dir) +{ + struct us_client *usc = usf_xx->usc; + struct us_port_data *port; + int rc = 0; + + if (usc == NULL) { + pr_err("%s: usc is null\n", + __func__); + return -EFAULT; + } + + port = &usc->port[dir]; + if (port == NULL) { + pr_err("%s: port is null\n", + __func__); + return -EFAULT; + } + + if (port->param_buf == NULL) { + pr_err("%s: parameter buffer is null\n", + __func__); + return -EFAULT; + } + + if (set_stream_param->buf_size > port->param_buf_size) { + pr_err("%s: buf_size (%d) > maximum buf size (%d)\n", + __func__, set_stream_param->buf_size, + port->param_buf_size); + return -EINVAL; 
+ } + + if (set_stream_param->buf_size == 0) { + pr_err("%s: buf_size is 0\n", __func__); + return -EINVAL; + } + + rc = copy_from_user(port->param_buf, + (uint8_t __user *) set_stream_param->pbuf, + set_stream_param->buf_size); + if (rc) { + pr_err("%s: copy param buf from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + rc = q6usm_set_us_stream_param(dir, usc, set_stream_param->module_id, + set_stream_param->param_id, + set_stream_param->buf_size); + if (rc) { + pr_err("%s: q6usm_set_us_stream_param failed; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + return rc; +} + +static int usf_set_stream_param(struct usf_xx_type *usf_xx, + unsigned long arg, int dir) +{ + struct us_stream_param_type set_stream_param; + int rc = 0; + + rc = copy_from_user(&set_stream_param, + (struct us_stream_param_type __user *) arg, + sizeof(set_stream_param)); + + if (rc) { + pr_err("%s: copy set_stream_param from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + return __usf_set_stream_param(usf_xx, &set_stream_param, dir); +} /* usf_set_stream_param */ + +static int __usf_get_stream_param(struct usf_xx_type *usf_xx, + struct us_stream_param_type *get_stream_param, + int dir) +{ + struct us_client *usc = usf_xx->usc; + struct us_port_data *port; + int rc = 0; + + if (usc == NULL) { + pr_err("%s: us_client is null\n", + __func__); + return -EFAULT; + } + + port = &usc->port[dir]; + + if (port->param_buf == NULL) { + pr_err("%s: parameter buffer is null\n", + __func__); + return -EFAULT; + } + + if (get_stream_param->buf_size > port->param_buf_size) { + pr_err("%s: buf_size (%d) > maximum buf size (%d)\n", + __func__, get_stream_param->buf_size, + port->param_buf_size); + return -EINVAL; + } + + if (get_stream_param->buf_size == 0) { + pr_err("%s: buf_size is 0\n", __func__); + return -EINVAL; + } + + rc = q6usm_get_us_stream_param(dir, usc, get_stream_param->module_id, + get_stream_param->param_id, + get_stream_param->buf_size); + if (rc) { + pr_err("%s: q6usm_get_us_stream_param failed; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + rc = copy_to_user((uint8_t __user *) get_stream_param->pbuf, + port->param_buf, + get_stream_param->buf_size); + if (rc) { + pr_err("%s: copy param buf to user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + return rc; +} + +static int usf_get_stream_param(struct usf_xx_type *usf_xx, + unsigned long arg, int dir) +{ + struct us_stream_param_type get_stream_param; + int rc = 0; + + rc = copy_from_user(&get_stream_param, + (struct us_stream_param_type __user *) arg, + sizeof(get_stream_param)); + + if (rc) { + pr_err("%s: copy get_stream_param from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + return __usf_get_stream_param(usf_xx, &get_stream_param, dir); +} /* usf_get_stream_param */ + +static long __usf_ioctl(struct usf_type *usf, + unsigned int cmd, + unsigned long arg) +{ + + int rc = 0; + struct usf_xx_type *usf_xx = NULL; + + switch (cmd) { + case US_START_TX: { + usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_CONFIGURED_STATE) + rc = usf_start_tx(usf_xx); + else { + pr_err("%s: start_tx: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + break; + } + + case US_START_RX: { + usf_xx = &usf->usf_rx; + if (usf_xx->usf_state == USF_CONFIGURED_STATE) + rc = usf_start_rx(usf_xx); + else { + pr_err("%s: start_rx: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + break; + } + + case US_SET_TX_INFO: { + usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_OPENED_STATE) + rc 
= usf_set_tx_info(usf, arg); + else { + pr_err("%s: set_tx_info: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + + break; + } /* US_SET_TX_INFO */ + + case US_SET_RX_INFO: { + usf_xx = &usf->usf_rx; + if (usf_xx->usf_state == USF_OPENED_STATE) + rc = usf_set_rx_info(usf, arg); + else { + pr_err("%s: set_rx_info: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + + break; + } /* US_SET_RX_INFO */ + + case US_GET_TX_UPDATE: { + struct usf_xx_type *usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_WORK_STATE) + rc = usf_get_tx_update(usf, arg); + else { + pr_err("%s: get_tx_update: wrong state[%d]\n", __func__, + usf_xx->usf_state); + rc = -EBADFD; + } + break; + } /* US_GET_TX_UPDATE */ + + case US_SET_RX_UPDATE: { + struct usf_xx_type *usf_xx = &usf->usf_rx; + if (usf_xx->usf_state == USF_WORK_STATE) + rc = usf_set_rx_update(usf_xx, arg); + else { + pr_err("%s: set_rx_update: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + rc = -EBADFD; + } + break; + } /* US_SET_RX_UPDATE */ + + case US_STOP_TX: { + usf_xx = &usf->usf_tx; + if ((usf_xx->usf_state == USF_WORK_STATE) + || (usf_xx->usf_state == USF_ADSP_RESTART_STATE)) + rc = usf_stop_tx(usf); + else { + pr_err("%s: stop_tx: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + break; + } /* US_STOP_TX */ + + case US_STOP_RX: { + usf_xx = &usf->usf_rx; + if ((usf_xx->usf_state == USF_WORK_STATE) + || (usf_xx->usf_state == USF_ADSP_RESTART_STATE)) + usf_disable(usf_xx); + else { + pr_err("%s: stop_rx: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + break; + } /* US_STOP_RX */ + + case US_SET_DETECTION: { + struct usf_xx_type *usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_WORK_STATE) + rc = usf_set_us_detection(usf, arg); + else { + pr_err("%s: set us detection: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + rc = -EBADFD; + } + break; + } /* US_SET_DETECTION */ + + case US_GET_VERSION: { + rc = usf_get_version(arg); + break; + } /* US_GET_VERSION */ + + case US_SET_TX_STREAM_PARAM: { + rc = usf_set_stream_param(&usf->usf_tx, arg, OUT); + break; + } /* US_SET_TX_STREAM_PARAM */ + + case US_GET_TX_STREAM_PARAM: { + rc = usf_get_stream_param(&usf->usf_tx, arg, OUT); + break; + } /* US_GET_TX_STREAM_PARAM */ + + case US_SET_RX_STREAM_PARAM: { + rc = usf_set_stream_param(&usf->usf_rx, arg, IN); + break; + } /* US_SET_RX_STREAM_PARAM */ + + case US_GET_RX_STREAM_PARAM: { + rc = usf_get_stream_param(&usf->usf_rx, arg, IN); + break; + } /* US_GET_RX_STREAM_PARAM */ + + default: + pr_err("%s: unsupported IOCTL command [%d]\n", + __func__, + cmd); + rc = -ENOTTY; + break; + } + + if (rc && + ((cmd == US_SET_TX_INFO) || + (cmd == US_SET_RX_INFO))) + release_xx(usf_xx); + + return rc; +} /* __usf_ioctl */ + +static long usf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct usf_type *usf = file->private_data; + int rc = 0; + + mutex_lock(&usf->mutex); + rc = __usf_ioctl(usf, cmd, arg); + mutex_unlock(&usf->mutex); + + return rc; +} /* usf_ioctl */ + +#ifdef CONFIG_COMPAT + +#define US_SET_TX_INFO32 _IOW(USF_IOCTL_MAGIC, 0, \ + struct us_tx_info_type32) +#define US_GET_TX_UPDATE32 _IOWR(USF_IOCTL_MAGIC, 2, \ + struct us_tx_update_info_type32) +#define US_SET_RX_INFO32 _IOW(USF_IOCTL_MAGIC, 3, \ + struct us_rx_info_type32) +#define US_SET_RX_UPDATE32 _IOWR(USF_IOCTL_MAGIC, 4, \ + struct us_rx_update_info_type32) +#define US_SET_DETECTION32 _IOWR(USF_IOCTL_MAGIC, 8, \ + struct us_detect_info_type32) 
+#define US_GET_VERSION32 _IOWR(USF_IOCTL_MAGIC, 9, \
+ struct us_version_info_type32)
+#define US_SET_TX_STREAM_PARAM32 _IOW(USF_IOCTL_MAGIC, 10, \
+ struct us_stream_param_type32)
+#define US_GET_TX_STREAM_PARAM32 _IOWR(USF_IOCTL_MAGIC, 11, \
+ struct us_stream_param_type32)
+#define US_SET_RX_STREAM_PARAM32 _IOW(USF_IOCTL_MAGIC, 12, \
+ struct us_stream_param_type32)
+#define US_GET_RX_STREAM_PARAM32 _IOWR(USF_IOCTL_MAGIC, 13, \
+ struct us_stream_param_type32)
+
+/* Info structure common for TX and RX */
+struct us_xx_info_type32 {
+/* Input: general info */
+/* Name of the client - event calculator, ptr to char */
+ const compat_uptr_t client_name;
+/* Selected device identification, accepted in the kernel's CAD */
+ uint32_t dev_id;
+/* 0 - point_epos type; (e.g. 1 - gr_mmrd) */
+ uint32_t stream_format;
+/* Required sample rate in Hz */
+ uint32_t sample_rate;
+/* Size of a buffer (bytes) for US data transfer between the module and USF */
+ uint32_t buf_size;
+/* Number of the buffers for the US data transfer */
+ uint16_t buf_num;
+/* Number of the microphones (TX) or speakers(RX) */
+ uint16_t port_cnt;
+/* Microphones(TX) or speakers(RX) indexes in their enumeration */
+ uint8_t port_id[USF_MAX_PORT_NUM];
+/* Bits per sample 16 or 32 */
+ uint16_t bits_per_sample;
+/* Input: Transparent info for encoder in the LPASS */
+/* Parameters data size in bytes */
+ uint16_t params_data_size;
+/* Pointer to the parameters, ptr to uint8_t */
+ compat_uptr_t params_data;
+/* Max size of buffer for get and set parameter */
+ uint32_t max_get_set_param_buf_size;
+};
+
+struct us_tx_info_type32 {
+/* Common info. This struct includes ptr and therefore the 32 version */
+ struct us_xx_info_type32 us_xx_info;
+/* Info specific for TX. This struct doesn't include long or ptr
+ and therefore no 32 version */
+ struct us_input_info_type input_info;
+};
+
+struct us_tx_update_info_type32 {
+/* Input general: */
+/* Number of calculated events */
+ uint16_t event_counter;
+/* Calculated events or NULL, ptr to struct usf_event_type */
+ compat_uptr_t event;
+/* Pointer (read index) to the end of available region */
+/* in the shared US data memory */
+ uint32_t free_region;
+/* Time (sec) to wait for data or special values: */
+/* USF_NO_WAIT_TIMEOUT, USF_INFINITIVE_TIMEOUT, USF_DEFAULT_TIMEOUT */
+ uint32_t timeout;
+/* Events (from conflicting devs) to be disabled/enabled */
+ uint16_t event_filters;
+
+/* Input transparent data: */
+/* Parameters size */
+ uint16_t params_data_size;
+/* Pointer to the parameters, ptr to uint8_t */
+ compat_uptr_t params_data;
+/* Output parameters: */
+/* Pointer (write index) to the end of ready US data region */
+/* in the shared memory */
+ uint32_t ready_region;
+};
+
+struct us_rx_info_type32 {
+ /* Common info */
+ struct us_xx_info_type32 us_xx_info;
+ /* Info specific for RX */
+};
+
+struct us_rx_update_info_type32 {
+/* Input general: */
+/* Pointer (write index) to the end of ready US data region */
+/* in the shared memory */
+ uint32_t ready_region;
+/* Input transparent data: */
+/* Parameters size */
+ uint16_t params_data_size;
+/* Pointer to the parameters, ptr to uint8_t */
+ compat_uptr_t params_data;
+/* Output parameters: */
+/* Pointer (read index) to the end of available region */
+/* in the shared US data memory */
+ uint32_t free_region;
+};
+
+struct us_detect_info_type32 {
+/* US detection place (HW|FW) */
+/* NA in the Active and OFF states */
+ enum us_detect_place_enum us_detector;
+/* US detection mode */
+ enum us_detect_mode_enum
us_detect_mode; +/* US data dropped during this time (msec) */ + uint32_t skip_time; +/* Transparent data size */ + uint16_t params_data_size; +/* Pointer to the transparent data, ptr to uint8_t */ + compat_uptr_t params_data; +/* Time (sec) to wait for US presence event */ + uint32_t detect_timeout; +/* Out parameter: US presence */ + bool is_us; +}; + +struct us_version_info_type32 { +/* Size of memory for the version string */ + uint16_t buf_size; +/* Pointer to the memory for the version string, ptr to char */ + compat_uptr_t pbuf; +}; + +struct us_stream_param_type32 { +/* Id of module */ + uint32_t module_id; +/* Id of parameter */ + uint32_t param_id; +/* Size of memory of the parameter buffer */ + uint32_t buf_size; +/* Pointer to the memory of the parameter buffer */ + compat_uptr_t pbuf; +}; + +static void usf_compat_xx_info_type(struct us_xx_info_type32 *us_xx_info32, + struct us_xx_info_type *us_xx_info) +{ + int i = 0; + us_xx_info->client_name = compat_ptr(us_xx_info32->client_name); + us_xx_info->dev_id = us_xx_info32->dev_id; + us_xx_info->stream_format = us_xx_info32->stream_format; + us_xx_info->sample_rate = us_xx_info32->sample_rate; + us_xx_info->buf_size = us_xx_info32->buf_size; + us_xx_info->buf_num = us_xx_info32->buf_num; + us_xx_info->port_cnt = us_xx_info32->port_cnt; + for (i = 0; i < USF_MAX_PORT_NUM; i++) + us_xx_info->port_id[i] = us_xx_info32->port_id[i]; + us_xx_info->bits_per_sample = us_xx_info32->bits_per_sample; + us_xx_info->params_data_size = us_xx_info32->params_data_size; + us_xx_info->params_data = compat_ptr(us_xx_info32->params_data); + us_xx_info->max_get_set_param_buf_size = + us_xx_info32->max_get_set_param_buf_size; +} + +static int usf_set_tx_info32(struct usf_type *usf, unsigned long arg) +{ + struct us_tx_info_type32 config_tx32; + struct us_tx_info_type config_tx; + + int rc = copy_from_user(&config_tx32, + (struct us_tx_info_type32 __user *) arg, + sizeof(config_tx32)); + + if (rc) { + pr_err("%s: copy config_tx from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + memset(&config_tx, 0, sizeof(config_tx)); + usf_compat_xx_info_type(&(config_tx32.us_xx_info), + &(config_tx.us_xx_info)); + config_tx.input_info = config_tx32.input_info; + + return __usf_set_tx_info(usf, &config_tx); +} /* usf_set_tx_info 32*/ + +static int usf_set_rx_info32(struct usf_type *usf, unsigned long arg) +{ + struct us_rx_info_type32 config_rx32; + struct us_rx_info_type config_rx; + + int rc = copy_from_user(&config_rx32, + (struct us_rx_info_type32 __user *) arg, + sizeof(config_rx32)); + + if (rc) { + pr_err("%s: copy config_rx from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + memset(&config_rx, 0, sizeof(config_rx)); + usf_compat_xx_info_type(&(config_rx32.us_xx_info), + &(config_rx.us_xx_info)); + + return __usf_set_rx_info(usf, &config_rx); +} /* usf_set_rx_info32 */ + +static int usf_get_tx_update32(struct usf_type *usf, unsigned long arg) +{ + struct us_tx_update_info_type32 upd_tx_info32; + struct us_tx_update_info_type upd_tx_info; + + int rc = copy_from_user(&upd_tx_info32, + (struct us_tx_update_info_type32 __user *) arg, + sizeof(upd_tx_info32)); + + if (rc) { + pr_err("%s: copy upd_tx_info32 from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + memset(&upd_tx_info, 0, sizeof(upd_tx_info)); + upd_tx_info.event_counter = upd_tx_info32.event_counter; + upd_tx_info.event = compat_ptr(upd_tx_info32.event); + upd_tx_info.free_region = upd_tx_info32.free_region; + upd_tx_info.timeout = upd_tx_info32.timeout; + 
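+ /*
+  * The 32-bit and native structures differ in size and padding, so the
+  * scalar members are copied across one by one, while compat_ptr()
+  * widens the 32-bit user pointers (event, params_data) into native
+  * void __user * values. A condensed sketch of the pattern shared by
+  * the usf_*32() handlers in this file (pseudo-fields for illustration):
+  *
+  *	native.field = compat.field;            scalars
+  *	native.ptr   = compat_ptr(compat.ptr);  user pointers
+  *	rc = __usf_<op>(usf, &native);          shared implementation
+  *	compat.out   = native.out;              copy back outputs only
+  */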
upd_tx_info.event_filters = upd_tx_info32.event_filters; + upd_tx_info.params_data_size = upd_tx_info32.params_data_size; + upd_tx_info.params_data = compat_ptr(upd_tx_info32.params_data); + upd_tx_info.ready_region = upd_tx_info32.ready_region; + + rc = __usf_get_tx_update(usf, &upd_tx_info); + if (rc < 0) { + pr_err("%s: get tx update failed; rc=%d\n", + __func__, rc); + return rc; + } + + /* Update only the fields that were changed */ + upd_tx_info32.ready_region = upd_tx_info.ready_region; + + rc = copy_to_user((void __user *)arg, &upd_tx_info32, + sizeof(upd_tx_info32)); + if (rc) { + pr_err("%s: copy upd_tx_info32 to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_get_tx_update */ + +static int usf_set_rx_update32(struct usf_xx_type *usf_xx, unsigned long arg) +{ + struct us_rx_update_info_type32 upd_rx_info32; + struct us_rx_update_info_type upd_rx_info; + + int rc = copy_from_user(&upd_rx_info32, + (struct us_rx_update_info_type32 __user *) arg, + sizeof(upd_rx_info32)); + + if (rc) { + pr_err("%s: copy upd_rx_info32 from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + memset(&upd_rx_info, 0, sizeof(upd_rx_info)); + upd_rx_info.ready_region = upd_rx_info32.ready_region; + upd_rx_info.params_data_size = upd_rx_info32.params_data_size; + upd_rx_info.params_data = compat_ptr(upd_rx_info32.params_data); + upd_rx_info.free_region = upd_rx_info32.free_region; + + rc = __usf_set_rx_update(usf_xx, &upd_rx_info); + if (rc < 0) { + pr_err("%s: set rx update failed; rc=%d\n", + __func__, rc); + return rc; + } + + /* Update only the fields that were changed */ + upd_rx_info32.free_region = upd_rx_info.free_region; + + rc = copy_to_user((void __user *)arg, + &upd_rx_info32, + sizeof(upd_rx_info32)); + if (rc) { + pr_err("%s: copy rx_info32 to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_set_rx_update32 */ + +static int usf_set_us_detection32(struct usf_type *usf, unsigned long arg) +{ + struct us_detect_info_type32 detect_info32; + struct us_detect_info_type detect_info; + + int rc = copy_from_user(&detect_info32, + (struct us_detect_info_type32 __user *) arg, + sizeof(detect_info32)); + + if (rc) { + pr_err("%s: copy detect_info32 from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + if (detect_info32.params_data_size > USF_MAX_USER_BUF_SIZE) { + pr_err("%s: user buffer size exceeds maximum\n", + __func__); + return -EFAULT; + } + + memset(&detect_info, 0, sizeof(detect_info)); + detect_info.us_detector = detect_info32.us_detector; + detect_info.us_detect_mode = detect_info32.us_detect_mode; + detect_info.skip_time = detect_info32.skip_time; + detect_info.params_data_size = detect_info32.params_data_size; + detect_info.params_data = compat_ptr(detect_info32.params_data); + detect_info.detect_timeout = detect_info32.detect_timeout; + detect_info.is_us = detect_info32.is_us; + + rc = __usf_set_us_detection(usf, &detect_info); + if (rc < 0) { + pr_err("%s: set us detection failed; rc=%d\n", + __func__, rc); + return rc; + } + + /* Update only the fields that were changed */ + detect_info32.is_us = detect_info.is_us; + + rc = copy_to_user((void __user *)arg, + &detect_info32, + sizeof(detect_info32)); + if (rc) { + pr_err("%s: copy detect_info32 to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_set_us_detection32 */ + +static int usf_get_version32(unsigned long arg) +{ + struct us_version_info_type32 version_info32; + struct us_version_info_type version_info; + + int rc = 
copy_from_user(&version_info32, + (struct us_version_info_type32 __user *) arg, + sizeof(version_info32)); + + if (rc) { + pr_err("%s: copy version_info32 from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + memset(&version_info, 0, sizeof(version_info)); + version_info.buf_size = version_info32.buf_size; + version_info.pbuf = compat_ptr(version_info32.pbuf); + + rc = __usf_get_version(&version_info); + if (rc < 0) { + pr_err("%s: get version failed; rc=%d\n", + __func__, rc); + return rc; + } + + /* None of the fields were changed */ + + rc = copy_to_user((void __user *)arg, + &version_info32, + sizeof(version_info32)); + if (rc) { + pr_err("%s: copy version_info32 to user; rc=%d\n", + __func__, rc); + rc = -EFAULT; + } + + return rc; +} /* usf_get_version32 */ + +static int usf_set_stream_param32(struct usf_xx_type *usf_xx, + unsigned long arg, int dir) +{ + struct us_stream_param_type32 set_stream_param32; + struct us_stream_param_type set_stream_param; + int rc = 0; + + rc = copy_from_user(&set_stream_param32, + (struct us_stream_param_type32 __user *) arg, + sizeof(set_stream_param32)); + + if (rc) { + pr_err("%s: copy set_stream_param from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + memset(&set_stream_param, 0, sizeof(set_stream_param)); + set_stream_param.module_id = set_stream_param32.module_id; + set_stream_param.param_id = set_stream_param32.param_id; + set_stream_param.buf_size = set_stream_param32.buf_size; + set_stream_param.pbuf = compat_ptr(set_stream_param32.pbuf); + + return __usf_set_stream_param(usf_xx, &set_stream_param, dir); +} /* usf_set_stream_param32 */ + +static int usf_get_stream_param32(struct usf_xx_type *usf_xx, + unsigned long arg, int dir) +{ + struct us_stream_param_type32 get_stream_param32; + struct us_stream_param_type get_stream_param; + int rc = 0; + + rc = copy_from_user(&get_stream_param32, + (struct us_stream_param_type32 __user *) arg, + sizeof(get_stream_param32)); + + if (rc) { + pr_err("%s: copy get_stream_param from user; rc=%d\n", + __func__, rc); + return -EFAULT; + } + + memset(&get_stream_param, 0, sizeof(get_stream_param)); + get_stream_param.module_id = get_stream_param32.module_id; + get_stream_param.param_id = get_stream_param32.param_id; + get_stream_param.buf_size = get_stream_param32.buf_size; + get_stream_param.pbuf = compat_ptr(get_stream_param32.pbuf); + + return __usf_get_stream_param(usf_xx, &get_stream_param, dir); +} /* usf_get_stream_param32 */ + +static long __usf_compat_ioctl(struct usf_type *usf, + unsigned int cmd, + unsigned long arg) +{ + int rc = 0; + struct usf_xx_type *usf_xx = NULL; + + switch (cmd) { + case US_START_TX: + case US_START_RX: + case US_STOP_TX: + case US_STOP_RX: { + return __usf_ioctl(usf, cmd, arg); + } + + case US_SET_TX_INFO32: { + usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_OPENED_STATE) + rc = usf_set_tx_info32(usf, arg); + else { + pr_err("%s: set_tx_info32: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + + break; + } /* US_SET_TX_INFO32 */ + + case US_SET_RX_INFO32: { + usf_xx = &usf->usf_rx; + if (usf_xx->usf_state == USF_OPENED_STATE) + rc = usf_set_rx_info32(usf, arg); + else { + pr_err("%s: set_rx_info32: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + return -EBADFD; + } + + break; + } /* US_SET_RX_INFO32 */ + + case US_GET_TX_UPDATE32: { + struct usf_xx_type *usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_WORK_STATE) + rc = usf_get_tx_update32(usf, arg); + else { + pr_err("%s: get_tx_update32: wrong 
state[%d]\n", + __func__, + usf_xx->usf_state); + rc = -EBADFD; + } + break; + } /* US_GET_TX_UPDATE32 */ + + case US_SET_RX_UPDATE32: { + struct usf_xx_type *usf_xx = &usf->usf_rx; + if (usf_xx->usf_state == USF_WORK_STATE) + rc = usf_set_rx_update32(usf_xx, arg); + else { + pr_err("%s: set_rx_update: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + rc = -EBADFD; + } + break; + } /* US_SET_RX_UPDATE32 */ + + case US_SET_DETECTION32: { + struct usf_xx_type *usf_xx = &usf->usf_tx; + if (usf_xx->usf_state == USF_WORK_STATE) + rc = usf_set_us_detection32(usf, arg); + else { + pr_err("%s: set us detection: wrong state[%d]\n", + __func__, + usf_xx->usf_state); + rc = -EBADFD; + } + break; + } /* US_SET_DETECTION32 */ + + case US_GET_VERSION32: { + rc = usf_get_version32(arg); + break; + } /* US_GET_VERSION32 */ + + case US_SET_TX_STREAM_PARAM32: { + rc = usf_set_stream_param32(&usf->usf_tx, arg, OUT); + break; + } /* US_SET_TX_STREAM_PARAM32 */ + + case US_GET_TX_STREAM_PARAM32: { + rc = usf_get_stream_param32(&usf->usf_tx, arg, OUT); + break; + } /* US_GET_TX_STREAM_PARAM32 */ + + case US_SET_RX_STREAM_PARAM32: { + rc = usf_set_stream_param32(&usf->usf_rx, arg, IN); + break; + } /* US_SET_RX_STREAM_PARAM32 */ + + case US_GET_RX_STREAM_PARAM32: { + rc = usf_get_stream_param32(&usf->usf_rx, arg, IN); + break; + } /* US_GET_RX_STREAM_PARAM32 */ + + default: + pr_err("%s: unsupported IOCTL command [%d]\n", + __func__, + cmd); + rc = -ENOTTY; + break; + } + + if (rc && + ((cmd == US_SET_TX_INFO) || + (cmd == US_SET_RX_INFO))) + release_xx(usf_xx); + + return rc; +} /* __usf_compat_ioctl */ + +static long usf_compat_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg) +{ + struct usf_type *usf = file->private_data; + int rc = 0; + + mutex_lock(&usf->mutex); + rc = __usf_compat_ioctl(usf, cmd, arg); + mutex_unlock(&usf->mutex); + + return rc; +} /* usf_compat_ioctl */ +#endif /* CONFIG_COMPAT */ + +static int usf_mmap(struct file *file, struct vm_area_struct *vms) +{ + struct usf_type *usf = file->private_data; + int dir = OUT; + struct usf_xx_type *usf_xx = &usf->usf_tx; + int rc = 0; + + mutex_lock(&usf->mutex); + if (vms->vm_flags & USF_VM_WRITE) { /* RX buf mapping */ + dir = IN; + usf_xx = &usf->usf_rx; + } + rc = q6usm_get_virtual_address(dir, usf_xx->usc, vms); + mutex_unlock(&usf->mutex); + + return rc; +} + +static uint16_t add_opened_dev(int minor) +{ + uint16_t ind = 0; + + for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) { + if (minor == atomic_cmpxchg(&s_opened_devs[ind], 0, minor)) { + pr_err("%s: device %d is already opened\n", + __func__, minor); + return USF_UNDEF_DEV_ID; + } else { + pr_debug("%s: device %d is added; ind=%d\n", + __func__, minor, ind); + return ind; + } + } + + pr_err("%s: there is no place for device %d\n", + __func__, minor); + return USF_UNDEF_DEV_ID; +} + +static int usf_open(struct inode *inode, struct file *file) +{ + struct usf_type *usf = NULL; + uint16_t dev_ind = 0; + int minor = MINOR(inode->i_rdev); + + dev_ind = add_opened_dev(minor); + if (dev_ind == USF_UNDEF_DEV_ID) + return -EBUSY; + + usf = kzalloc(sizeof(struct usf_type), GFP_KERNEL); + if (usf == NULL) { + pr_err("%s:usf allocation failed\n", __func__); + return -ENOMEM; + } + wakeup_source_init(&usf_wakeup_source, "usf"); + + file->private_data = usf; + usf->dev_ind = dev_ind; + + usf->usf_tx.usf_state = USF_OPENED_STATE; + usf->usf_rx.usf_state = USF_OPENED_STATE; + + usf->usf_tx.us_detect_type = USF_US_DETECT_UNDEF; + usf->usf_rx.us_detect_type = USF_US_DETECT_UNDEF; + + 
mutex_init(&usf->mutex); + + pr_debug("%s:usf in open\n", __func__); + return 0; +} + +static int usf_release(struct inode *inode, struct file *file) +{ + struct usf_type *usf = file->private_data; + + pr_debug("%s: release entry\n", __func__); + + mutex_lock(&usf->mutex); + usf_release_input(usf); + + usf_disable(&usf->usf_tx); + usf_disable(&usf->usf_rx); + + atomic_set(&s_opened_devs[usf->dev_ind], 0); + + wakeup_source_trash(&usf_wakeup_source); + mutex_unlock(&usf->mutex); + mutex_destroy(&usf->mutex); + kfree(usf); + pr_debug("%s: release exit\n", __func__); + return 0; +} + +extern long usf_compat_ioctl(struct file *file, + unsigned int cmd, + unsigned long arg); + +static const struct file_operations usf_fops = { + .owner = THIS_MODULE, + .open = usf_open, + .release = usf_release, + .unlocked_ioctl = usf_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = usf_compat_ioctl, +#endif /* CONFIG_COMPAT */ + .mmap = usf_mmap, +}; + +static struct miscdevice usf_misc[MAX_DEVS_NUMBER] = { + { + .minor = MISC_DYNAMIC_MINOR, + .name = "usf1", + .fops = &usf_fops, + }, +}; + +static int __init usf_init(void) +{ + int rc = 0; + uint16_t ind = 0; + + pr_debug("%s: USF SW version %s.\n", __func__, DRV_VERSION); + pr_debug("%s: Max %d devs registration\n", __func__, MAX_DEVS_NUMBER); + + for (ind = 0; ind < MAX_DEVS_NUMBER; ++ind) { + rc = misc_register(&usf_misc[ind]); + if (rc) { + pr_err("%s: misc_register() failed ind=%d; rc = %d\n", + __func__, ind, rc); + break; + } + } + + return rc; +} + +device_initcall(usf_init); + +MODULE_DESCRIPTION("Ultrasound framework driver"); diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c b/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c new file mode 100644 index 000000000000..a4d63f0c0d1a --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.c @@ -0,0 +1,424 @@ +/* Copyright (c) 2012-2013, 2016 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/input/mt.h> +#include <linux/syscalls.h> +#include "usfcdev.h" + +#define UNDEF_ID 0xffffffff +#define SLOT_CMD_ID 0 +#define MAX_RETRIES 10 + +enum usdev_event_status { + USFCDEV_EVENT_ENABLED, + USFCDEV_EVENT_DISABLING, + USFCDEV_EVENT_DISABLED, +}; + +struct usfcdev_event { + bool (*match_cb)(uint16_t, struct input_dev *dev); + bool registered_event; + bool interleaved; + enum usdev_event_status event_status; +}; +static struct usfcdev_event s_usfcdev_events[MAX_EVENT_TYPE_NUM]; + +struct usfcdev_input_command { + unsigned int type; + unsigned int code; + unsigned int value; +}; + +static long s_usf_pid; + +static bool usfcdev_filter(struct input_handle *handle, + unsigned int type, unsigned int code, int value); +static bool usfcdev_match(struct input_handler *handler, + struct input_dev *dev); +static int usfcdev_connect(struct input_handler *handler, + struct input_dev *dev, + const struct input_device_id *id); +static void usfcdev_disconnect(struct input_handle *handle); + +static const struct input_device_id usfc_tsc_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT | + INPUT_DEVICE_ID_MATCH_ABSBIT, + .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, + /* assumption: ABS_X & ABS_Y are in the same long */ + .absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) | + BIT_MASK(ABS_Y) }, + }, + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT | + INPUT_DEVICE_ID_MATCH_ABSBIT, + .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) }, + .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, + /* assumption: MT_.._X & MT_.._Y are in the same long */ + .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = + BIT_MASK(ABS_MT_POSITION_X) | + BIT_MASK(ABS_MT_POSITION_Y) }, + }, + { } /* Terminating entry */ +}; + +MODULE_DEVICE_TABLE(input, usfc_tsc_ids); + +static struct input_handler s_usfc_handlers[MAX_EVENT_TYPE_NUM] = { + { /* TSC handler */ + .filter = usfcdev_filter, + .match = usfcdev_match, + .connect = usfcdev_connect, + .disconnect = usfcdev_disconnect, + /* .minor can be used as index in the container, */ + /* because .fops isn't supported */ + .minor = TSC_EVENT_TYPE_IND, + .name = "usfc_tsc_handler", + .id_table = usfc_tsc_ids, + }, +}; + +/* + * For each event type, there are a number conflicting devices (handles) + * The first registered device (primary) is real TSC device; it's mandatory + * Optionally, later registered devices are simulated ones. 
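+ * (secondary handles are allocated in usfcdev_connect() and freed in
+ * usfcdev_disconnect()).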
+ * They are dynamically managed + * The primary device's handles are stored in the below static array + */ +static struct input_handle s_usfc_primary_handles[MAX_EVENT_TYPE_NUM] = { + { /* TSC handle */ + .handler = &s_usfc_handlers[TSC_EVENT_TYPE_IND], + .name = "usfc_tsc_handle", + }, +}; + +static struct usfcdev_input_command initial_clear_cmds[] = { + {EV_ABS, ABS_PRESSURE, 0}, + {EV_KEY, BTN_TOUCH, 0}, +}; + +static struct usfcdev_input_command slot_clear_cmds[] = { + {EV_ABS, ABS_MT_SLOT, 0}, + {EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID}, +}; + +static struct usfcdev_input_command no_filter_cmds[] = { + {EV_ABS, ABS_MT_SLOT, 0}, + {EV_ABS, ABS_MT_TRACKING_ID, UNDEF_ID}, + {EV_SYN, SYN_REPORT, 0}, +}; + +static bool usfcdev_match(struct input_handler *handler, struct input_dev *dev) +{ + bool rc = false; + int ind = handler->minor; + + pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind); + + if (s_usfcdev_events[ind].registered_event && + s_usfcdev_events[ind].match_cb) { + rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev); + pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc); + } + return rc; +} + +static int usfcdev_connect(struct input_handler *handler, struct input_dev *dev, + const struct input_device_id *id) +{ + int ret = 0; + uint16_t ind = handler->minor; + struct input_handle *usfc_handle = NULL; + + if (s_usfc_primary_handles[ind].dev == NULL) { + pr_debug("%s: primary device; ind=%d\n", + __func__, + ind); + usfc_handle = &s_usfc_primary_handles[ind]; + } else { + pr_debug("%s: secondary device; ind=%d\n", + __func__, + ind); + usfc_handle = kzalloc(sizeof(struct input_handle), + GFP_KERNEL); + if (!usfc_handle) { + pr_err("%s: memory allocation failed; ind=%d\n", + __func__, + ind); + return -ENOMEM; + } + usfc_handle->handler = &s_usfc_handlers[ind]; + usfc_handle->name = s_usfc_primary_handles[ind].name; + } + usfc_handle->dev = dev; + ret = input_register_handle(usfc_handle); + pr_debug("%s: name=[%s]; ind=%d; dev=0x%pK\n", + __func__, + dev->name, + ind, + usfc_handle->dev); + if (ret) + pr_err("%s: input_register_handle[%d] failed: ret=%d\n", + __func__, + ind, + ret); + else { + ret = input_open_device(usfc_handle); + if (ret) { + pr_err("%s: input_open_device[%d] failed: ret=%d\n", + __func__, + ind, + ret); + input_unregister_handle(usfc_handle); + } else + pr_debug("%s: device[%d] is opened\n", + __func__, + ind); + } + + return ret; +} + +static void usfcdev_disconnect(struct input_handle *handle) +{ + int ind = handle->handler->minor; + + input_close_device(handle); + input_unregister_handle(handle); + pr_debug("%s: handle[%d], name=[%s] is disconnected\n", + __func__, + ind, + handle->dev->name); + if (s_usfc_primary_handles[ind].dev == handle->dev) + s_usfc_primary_handles[ind].dev = NULL; + else + kfree(handle); +} + +static bool usfcdev_filter(struct input_handle *handle, + unsigned int type, unsigned int code, int value) +{ + uint16_t i = 0; + uint16_t ind = (uint16_t)handle->handler->minor; + bool rc = (s_usfcdev_events[ind].event_status != USFCDEV_EVENT_ENABLED); + + if (s_usf_pid == sys_getpid()) { + /* Pass events from usfcdev driver */ + rc = false; + pr_debug("%s: event_type=%d; type=%d; code=%d; val=%d", + __func__, + ind, + type, + code, + value); + } else if (s_usfcdev_events[ind].event_status == + USFCDEV_EVENT_DISABLING) { + uint32_t u_value = value; + s_usfcdev_events[ind].interleaved = true; + /* Pass events for freeing slots from TSC driver */ + for (i = 0; i < ARRAY_SIZE(no_filter_cmds); ++i) { + if 
((no_filter_cmds[i].type == type) && + (no_filter_cmds[i].code == code) && + (no_filter_cmds[i].value <= u_value)) { + rc = false; + pr_debug("%s: no_filter_cmds[%d]; %d", + __func__, + i, + no_filter_cmds[i].value); + break; + } + } + } + + return rc; +} + +bool usfcdev_register( + uint16_t event_type_ind, + bool (*match_cb)(uint16_t, struct input_dev *dev)) +{ + int ret = 0; + bool rc = false; + + if ((event_type_ind >= MAX_EVENT_TYPE_NUM) || !match_cb) { + pr_err("%s: wrong input: event_type_ind=%d; match_cb=0x%pK\n", + __func__, + event_type_ind, + match_cb); + return false; + } + + if (s_usfcdev_events[event_type_ind].registered_event) { + pr_info("%s: handler[%d] was already registered\n", + __func__, + event_type_ind); + return true; + } + + s_usfcdev_events[event_type_ind].registered_event = true; + s_usfcdev_events[event_type_ind].match_cb = match_cb; + s_usfcdev_events[event_type_ind].event_status = USFCDEV_EVENT_ENABLED; + ret = input_register_handler(&s_usfc_handlers[event_type_ind]); + if (!ret) { + rc = true; + pr_debug("%s: handler[%d] was registered\n", + __func__, + event_type_ind); + } else { + s_usfcdev_events[event_type_ind].registered_event = false; + s_usfcdev_events[event_type_ind].match_cb = NULL; + pr_err("%s: handler[%d] registration failed: ret=%d\n", + __func__, + event_type_ind, + ret); + } + + return rc; +} + +void usfcdev_unregister(uint16_t event_type_ind) +{ + if (event_type_ind >= MAX_EVENT_TYPE_NUM) { + pr_err("%s: wrong input: event_type_ind=%d\n", + __func__, + event_type_ind); + return; + } + if (s_usfcdev_events[event_type_ind].registered_event) { + input_unregister_handler(&s_usfc_handlers[event_type_ind]); + pr_debug("%s: handler[%d] was unregistered\n", + __func__, + event_type_ind); + s_usfcdev_events[event_type_ind].registered_event = false; + s_usfcdev_events[event_type_ind].match_cb = NULL; + s_usfcdev_events[event_type_ind].event_status = + USFCDEV_EVENT_ENABLED; + + } +} + +static inline void usfcdev_send_cmd( + struct input_dev *dev, + struct usfcdev_input_command cmd) +{ + input_event(dev, cmd.type, cmd.code, cmd.value); +} + +static void usfcdev_clean_dev(uint16_t event_type_ind) +{ + struct input_dev *dev = NULL; + int i; + int j; + int retries = 0; + + if (event_type_ind >= MAX_EVENT_TYPE_NUM) { + pr_err("%s: wrong input: event_type_ind=%d\n", + __func__, + event_type_ind); + return; + } + /* Only primary device must exist */ + dev = s_usfc_primary_handles[event_type_ind].dev; + if (dev == NULL) { + pr_err("%s: NULL primary device\n", + __func__); + return; + } + + for (i = 0; i < ARRAY_SIZE(initial_clear_cmds); i++) + usfcdev_send_cmd(dev, initial_clear_cmds[i]); + input_sync(dev); + + /* Send commands to free all slots */ + for (i = 0; i < dev->mt->num_slots; i++) { + s_usfcdev_events[event_type_ind].interleaved = false; + if (input_mt_get_value(&dev->mt->slots[i], + ABS_MT_TRACKING_ID) < 0) { + pr_debug("%s: skipping slot %d", + __func__, i); + continue; + } + slot_clear_cmds[SLOT_CMD_ID].value = i; + for (j = 0; j < ARRAY_SIZE(slot_clear_cmds); j++) + usfcdev_send_cmd(dev, slot_clear_cmds[j]); + + if (s_usfcdev_events[event_type_ind].interleaved) { + pr_debug("%s: interleaved(%d): slot(%d)", + __func__, i, dev->mt->slot); + if (retries++ < MAX_RETRIES) { + --i; + continue; + } + pr_warn("%s: index(%d) reached max retires", + __func__, i); + } + + retries = 0; + input_sync(dev); + } +} + +bool usfcdev_set_filter(uint16_t event_type_ind, bool filter) +{ + bool rc = true; + + if (event_type_ind >= MAX_EVENT_TYPE_NUM) { + pr_err("%s: 
wrong input: event_type_ind=%d\n", + __func__, + event_type_ind); + return false; + } + + if (s_usfcdev_events[event_type_ind].registered_event) { + + pr_debug("%s: event_type[%d]; filter=%d\n", + __func__, + event_type_ind, + filter + ); + if (filter) { + s_usfcdev_events[event_type_ind].event_status = + USFCDEV_EVENT_DISABLING; + s_usf_pid = sys_getpid(); + usfcdev_clean_dev(event_type_ind); + s_usfcdev_events[event_type_ind].event_status = + USFCDEV_EVENT_DISABLED; + } else + s_usfcdev_events[event_type_ind].event_status = + USFCDEV_EVENT_ENABLED; + } else { + pr_err("%s: event_type[%d] isn't registered\n", + __func__, + event_type_ind); + rc = false; + } + + return rc; +} + +static int __init usfcdev_init(void) +{ + return 0; +} + +device_initcall(usfcdev_init); + +MODULE_DESCRIPTION("Handle of events from devices, conflicting with USF"); diff --git a/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h b/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h new file mode 100644 index 000000000000..03b62c5ec83c --- /dev/null +++ b/drivers/misc/qcom/qdsp6v2/ultrasound/usfcdev.h @@ -0,0 +1,28 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef __USFCDEV_H__ +#define __USFCDEV_H__ + +#include <linux/input.h> + +/* TSC event type index in the containers of the handlers & handles */ +#define TSC_EVENT_TYPE_IND 0 +/* Number of supported event types to be filtered */ +#define MAX_EVENT_TYPE_NUM 1 + +bool usfcdev_register( + uint16_t event_type_ind, + bool (*match_cb)(uint16_t, struct input_dev *dev)); +void usfcdev_unregister(uint16_t event_type_ind); +bool usfcdev_set_filter(uint16_t event_type_ind, bool filter); +#endif /* __USFCDEV_H__ */ diff --git a/drivers/misc/qpnp-misc.c b/drivers/misc/qpnp-misc.c new file mode 100644 index 000000000000..c1570e32f749 --- /dev/null +++ b/drivers/misc/qpnp-misc.c @@ -0,0 +1,350 @@ +/* Copyright (c) 2013-2014,2016-2017, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ +#define pr_fmt(fmt) "%s: " fmt, __func__ + +#include <linux/module.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/regmap.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/qpnp-misc.h> + +#define QPNP_MISC_DEV_NAME "qcom,qpnp-misc" + +#define REG_DIG_MAJOR_REV 0x01 +#define REG_SUBTYPE 0x05 +#define REG_PWM_SEL 0x49 +#define REG_GP_DRIVER_EN 0x4C + +#define PWM_SEL_MAX 0x03 +#define GP_DRIVER_EN_BIT BIT(0) + +static DEFINE_MUTEX(qpnp_misc_dev_list_mutex); +static LIST_HEAD(qpnp_misc_dev_list); + +struct qpnp_misc_version { + u8 subtype; + u8 dig_major_rev; +}; + +/** + * struct qpnp_misc_dev - holds controller device specific information + * @list: Doubly-linked list parameter linking to other + * qpnp_misc devices. + * @mutex: Mutex lock that is used to ensure mutual + * exclusion between probing and accessing misc + * driver information + * @dev: Device pointer to the misc device + * @regmap: Regmap pointer to the misc device + * @version: struct that holds the subtype and dig_major_rev + * of the chip. + */ +struct qpnp_misc_dev { + struct list_head list; + struct mutex mutex; + struct device *dev; + struct regmap *regmap; + struct qpnp_misc_version version; + + u32 base; + u8 pwm_sel; + bool enable_gp_driver; +}; + +static struct of_device_id qpnp_misc_match_table[] = { + { .compatible = QPNP_MISC_DEV_NAME }, + {} +}; + +enum qpnp_misc_version_name { + INVALID, + PM8941, + PM8226, + PMA8084, + PMDCALIFORNIUM, +}; + +static struct qpnp_misc_version irq_support_version[] = { + {0x00, 0x00}, /* INVALID */ + {0x01, 0x02}, /* PM8941 */ + {0x07, 0x00}, /* PM8226 */ + {0x09, 0x00}, /* PMA8084 */ + {0x16, 0x00}, /* PMDCALIFORNIUM */ +}; + +static int qpnp_write_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 val) +{ + int rc; + + rc = regmap_write(mdev->regmap, mdev->base + addr, val); + if (rc) + pr_err("regmap write failed rc=%d\n", rc); + + return rc; +} + +static int qpnp_read_byte(struct qpnp_misc_dev *mdev, u16 addr, u8 *val) +{ + unsigned int temp; + int rc; + + rc = regmap_read(mdev->regmap, mdev->base + addr, &temp); + if (rc) { + pr_err("regmap read failed rc=%d\n", rc); + return rc; + } + + *val = (u8)temp; + return rc; +} + +static int get_qpnp_misc_version_name(struct qpnp_misc_dev *dev) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(irq_support_version); i++) + if (dev->version.subtype == irq_support_version[i].subtype && + dev->version.dig_major_rev >= + irq_support_version[i].dig_major_rev) + return i; + + return INVALID; +} + +static bool __misc_irqs_available(struct qpnp_misc_dev *dev) +{ + int version_name = get_qpnp_misc_version_name(dev); + + if (version_name == INVALID) + return 0; + return 1; +} + +int qpnp_misc_read_reg(struct device_node *node, u16 addr, u8 *val) +{ + struct qpnp_misc_dev *mdev = NULL; + struct qpnp_misc_dev *mdev_found = NULL; + int rc; + u8 temp; + + if (IS_ERR_OR_NULL(node)) { + pr_err("Invalid device node pointer\n"); + return -EINVAL; + } + + mutex_lock(&qpnp_misc_dev_list_mutex); + list_for_each_entry(mdev, &qpnp_misc_dev_list, list) { + if (mdev->dev->of_node == node) { + mdev_found = mdev; + break; + } + } + mutex_unlock(&qpnp_misc_dev_list_mutex); + + if (!mdev_found) { + /* + * No MISC device was found. This API should only + * be called by drivers which have specified the + * misc phandle in their device tree node. 
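+ * Returning -EPROBE_DEFER lets the consumer retry after the misc
+ * device has probed and added itself to qpnp_misc_dev_list.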
+ */ + pr_err("no probed misc device found\n"); + return -EPROBE_DEFER; + } + + rc = qpnp_read_byte(mdev, addr, &temp); + if (rc < 0) { + dev_err(mdev->dev, "Failed to read addr %x, rc=%d\n", addr, rc); + return rc; + } + + *val = temp; + return 0; +} + +int qpnp_misc_irqs_available(struct device *consumer_dev) +{ + struct device_node *misc_node = NULL; + struct qpnp_misc_dev *mdev = NULL; + struct qpnp_misc_dev *mdev_found = NULL; + + if (IS_ERR_OR_NULL(consumer_dev)) { + pr_err("Invalid consumer device pointer\n"); + return -EINVAL; + } + + misc_node = of_parse_phandle(consumer_dev->of_node, "qcom,misc-ref", 0); + if (!misc_node) { + pr_debug("Could not find qcom,misc-ref property in %s\n", + consumer_dev->of_node->full_name); + return 0; + } + + mutex_lock(&qpnp_misc_dev_list_mutex); + list_for_each_entry(mdev, &qpnp_misc_dev_list, list) { + if (mdev->dev->of_node == misc_node) { + mdev_found = mdev; + break; + } + } + mutex_unlock(&qpnp_misc_dev_list_mutex); + + if (!mdev_found) { + /* No MISC device was found. This API should only + * be called by drivers which have specified the + * misc phandle in their device tree node */ + pr_err("no probed misc device found\n"); + return -EPROBE_DEFER; + } + + return __misc_irqs_available(mdev_found); +} + +static int qpnp_misc_dt_init(struct qpnp_misc_dev *mdev) +{ + struct device_node *node = mdev->dev->of_node; + u32 val; + int rc; + + rc = of_property_read_u32(node, "reg", &mdev->base); + if (rc < 0 || !mdev->base) { + dev_err(mdev->dev, "Base address not defined or invalid\n"); + return -EINVAL; + } + + if (!of_property_read_u32(node, "qcom,pwm-sel", &val)) { + if (val > PWM_SEL_MAX) { + dev_err(mdev->dev, "Invalid value for pwm-sel\n"); + return -EINVAL; + } + mdev->pwm_sel = (u8)val; + } + mdev->enable_gp_driver = of_property_read_bool(node, + "qcom,enable-gp-driver"); + + WARN((mdev->pwm_sel > 0 && !mdev->enable_gp_driver), + "Setting PWM source without enabling gp driver\n"); + WARN((mdev->pwm_sel == 0 && mdev->enable_gp_driver), + "Enabling gp driver without setting PWM source\n"); + + return 0; +} + +static int qpnp_misc_config(struct qpnp_misc_dev *mdev) +{ + int rc, version_name; + + version_name = get_qpnp_misc_version_name(mdev); + + switch (version_name) { + case PMDCALIFORNIUM: + if (mdev->pwm_sel > 0 && mdev->enable_gp_driver) { + rc = qpnp_write_byte(mdev, REG_PWM_SEL, mdev->pwm_sel); + if (rc < 0) { + dev_err(mdev->dev, + "Failed to write PWM_SEL reg\n"); + return rc; + } + + rc = qpnp_write_byte(mdev, REG_GP_DRIVER_EN, + GP_DRIVER_EN_BIT); + if (rc < 0) { + dev_err(mdev->dev, + "Failed to write GP_DRIVER_EN reg\n"); + return rc; + } + } + break; + default: + break; + } + + return 0; +} + +static int qpnp_misc_probe(struct platform_device *pdev) +{ + struct qpnp_misc_dev *mdev = ERR_PTR(-EINVAL); + int rc; + + mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_KERNEL); + if (!mdev) + return -ENOMEM; + + mdev->dev = &pdev->dev; + mdev->regmap = dev_get_regmap(mdev->dev->parent, NULL); + if (!mdev->regmap) { + dev_err(mdev->dev, "Parent regmap is unavailable\n"); + return -ENXIO; + } + + rc = qpnp_misc_dt_init(mdev); + if (rc < 0) { + dev_err(mdev->dev, + "Error reading device tree properties, rc=%d\n", rc); + return rc; + } + + + rc = qpnp_read_byte(mdev, REG_SUBTYPE, &mdev->version.subtype); + if (rc < 0) { + dev_err(mdev->dev, "Failed to read subtype, rc=%d\n", rc); + return rc; + } + + rc = qpnp_read_byte(mdev, REG_DIG_MAJOR_REV, + &mdev->version.dig_major_rev); + if (rc < 0) { + dev_err(mdev->dev, "Failed to read 
dig_major_rev, rc=%d\n", rc); + return rc; + } + + mutex_lock(&qpnp_misc_dev_list_mutex); + list_add_tail(&mdev->list, &qpnp_misc_dev_list); + mutex_unlock(&qpnp_misc_dev_list_mutex); + + rc = qpnp_misc_config(mdev); + if (rc < 0) { + dev_err(mdev->dev, + "Error configuring module registers, rc=%d\n", rc); + return rc; + } + + dev_info(mdev->dev, "probe successful\n"); + return 0; +} + +static struct platform_driver qpnp_misc_driver = { + .probe = qpnp_misc_probe, + .driver = { + .name = QPNP_MISC_DEV_NAME, + .owner = THIS_MODULE, + .of_match_table = qpnp_misc_match_table, + }, +}; + +static int __init qpnp_misc_init(void) +{ + return platform_driver_register(&qpnp_misc_driver); +} + +static void __exit qpnp_misc_exit(void) +{ + return platform_driver_unregister(&qpnp_misc_driver); +} + +subsys_initcall(qpnp_misc_init); +module_exit(qpnp_misc_exit); + +MODULE_DESCRIPTION(QPNP_MISC_DEV_NAME); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" QPNP_MISC_DEV_NAME); diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c new file mode 100644 index 000000000000..e85b2b8972c9 --- /dev/null +++ b/drivers/misc/qseecom.c @@ -0,0 +1,8972 @@ +/*Qualcomm Secure Execution Environment Communicator (QSEECOM) driver + * + * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/fs.h> +#include <linux/platform_device.h> +#include <linux/debugfs.h> +#include <linux/cdev.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/io.h> +#include <linux/msm_ion.h> +#include <linux/types.h> +#include <linux/clk.h> +#include <linux/qseecom.h> +#include <linux/elf.h> +#include <linux/firmware.h> +#include <linux/freezer.h> +#include <linux/scatterlist.h> +#include <linux/regulator/consumer.h> +#include <linux/dma-mapping.h> +#include <soc/qcom/subsystem_restart.h> +#include <soc/qcom/scm.h> +#include <soc/qcom/socinfo.h> +#include <linux/msm-bus.h> +#include <linux/msm-bus-board.h> +#include <soc/qcom/qseecomi.h> +#include <asm/cacheflush.h> +#include "qseecom_legacy.h" +#include "qseecom_kernel.h" +#include <crypto/ice.h> +#include <linux/delay.h> + +#include <linux/compat.h> +#include "compat_qseecom.h" + +#define QSEECOM_DEV "qseecom" +#define QSEOS_VERSION_14 0x14 +#define QSEEE_VERSION_00 0x400000 +#define QSEE_VERSION_01 0x401000 +#define QSEE_VERSION_02 0x402000 +#define QSEE_VERSION_03 0x403000 +#define QSEE_VERSION_04 0x404000 +#define QSEE_VERSION_05 0x405000 +#define QSEE_VERSION_20 0x800000 +#define QSEE_VERSION_40 0x1000000 /* TZ.BF.4.0 */ + +#define QSEE_CE_CLK_100MHZ 100000000 +#define CE_CLK_DIV 1000000 + +#define QSEECOM_MAX_SG_ENTRY 512 +#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT \ + (QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT) + +#define QSEECOM_INVALID_KEY_ID 0xff + +/* Save partition image hash for authentication check */ +#define SCM_SAVE_PARTITION_HASH_ID 0x01 + +/* Check if enterprise security is 
activate */ +#define SCM_IS_ACTIVATED_ID 0x02 + +/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */ +#define SCM_MDTP_CIPHER_DIP 0x01 + +/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */ +#define MAX_DIP 0x20000 + +#define RPMB_SERVICE 0x2000 +#define SSD_SERVICE 0x3000 + +#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT 2000 +#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT 2000 +#define TWO 2 +#define QSEECOM_UFS_ICE_CE_NUM 10 +#define QSEECOM_SDCC_ICE_CE_NUM 20 +#define QSEECOM_ICE_FDE_KEY_INDEX 0 + +#define PHY_ADDR_4G (1ULL<<32) + +#define QSEECOM_STATE_NOT_READY 0 +#define QSEECOM_STATE_SUSPEND 1 +#define QSEECOM_STATE_READY 2 +#define QSEECOM_ICE_FDE_KEY_SIZE_MASK 2 + +/* + * default ce info unit to 0 for + * services which + * support only single instance. + * Most of services are in this category. + */ +#define DEFAULT_CE_INFO_UNIT 0 +#define DEFAULT_NUM_CE_INFO_UNIT 1 + +enum qseecom_clk_definitions { + CLK_DFAB = 0, + CLK_SFPB, +}; + +enum qseecom_ice_key_size_type { + QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE = + (0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK), + QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE = + (1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK), + QSEE_ICE_FDE_KEY_SIZE_UNDEFINED = + (0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK), +}; + +enum qseecom_client_handle_type { + QSEECOM_CLIENT_APP = 1, + QSEECOM_LISTENER_SERVICE, + QSEECOM_SECURE_SERVICE, + QSEECOM_GENERIC, + QSEECOM_UNAVAILABLE_CLIENT_APP, +}; + +enum qseecom_ce_hw_instance { + CLK_QSEE = 0, + CLK_CE_DRV, + CLK_INVALID, +}; + +static struct class *driver_class; +static dev_t qseecom_device_no; + +static DEFINE_MUTEX(qsee_bw_mutex); +static DEFINE_MUTEX(app_access_lock); +static DEFINE_MUTEX(clk_access_lock); + +struct sglist_info { + uint32_t indexAndFlags; + uint32_t sizeOrCount; +}; + +/* + * The 31th bit indicates only one or multiple physical address inside + * the request buffer. If it is set, the index locates a single physical addr + * inside the request buffer, and `sizeOrCount` is the size of the memory being + * shared at that physical address. + * Otherwise, the index locates an array of {start, len} pairs (a + * "scatter/gather list"), and `sizeOrCount` gives the number of entries in + * that array. + * + * The 30th bit indicates 64 or 32bit address; when it is set, physical addr + * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values. + * + * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer. 
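+ * For example, SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x10) in the macro below
+ * encodes a single 64-bit physical address at request-buffer offset 0x10,
+ * yielding 0xC0000010.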
+ */ +#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \ + ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff))) + +#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD) + +#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/ + +#define MAKE_WHITELIST_VERSION(major, minor, patch) \ + (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF)) + +struct qseecom_registered_listener_list { + struct list_head list; + struct qseecom_register_listener_req svc; + void *user_virt_sb_base; + u8 *sb_virt; + phys_addr_t sb_phys; + size_t sb_length; + struct ion_handle *ihandle; /* Retrieve phy addr */ + wait_queue_head_t rcv_req_wq; + int rcv_req_flag; + int send_resp_flag; + bool listener_in_use; + /* wq for thread blocked on this listener*/ + wait_queue_head_t listener_block_app_wq; + struct sglist_info sglistinfo_ptr[MAX_ION_FD]; + uint32_t sglist_cnt; +}; + +struct qseecom_registered_app_list { + struct list_head list; + u32 app_id; + u32 ref_cnt; + char app_name[MAX_APP_NAME_SIZE]; + u32 app_arch; + bool app_blocked; + u32 blocked_on_listener_id; +}; + +struct qseecom_registered_kclient_list { + struct list_head list; + struct qseecom_handle *handle; +}; + +struct qseecom_ce_info_use { + unsigned char handle[MAX_CE_INFO_HANDLE_SIZE]; + unsigned int unit_num; + unsigned int num_ce_pipe_entries; + struct qseecom_ce_pipe_entry *ce_pipe_entry; + bool alloc; + uint32_t type; +}; + +struct ce_hw_usage_info { + uint32_t qsee_ce_hw_instance; + uint32_t num_fde; + struct qseecom_ce_info_use *fde; + uint32_t num_pfe; + struct qseecom_ce_info_use *pfe; +}; + +struct qseecom_clk { + enum qseecom_ce_hw_instance instance; + struct clk *ce_core_clk; + struct clk *ce_clk; + struct clk *ce_core_src_clk; + struct clk *ce_bus_clk; + uint32_t clk_access_cnt; +}; + +struct qseecom_control { + struct ion_client *ion_clnt; /* Ion client */ + struct list_head registered_listener_list_head; + spinlock_t registered_listener_list_lock; + + struct list_head registered_app_list_head; + spinlock_t registered_app_list_lock; + + struct list_head registered_kclient_list_head; + spinlock_t registered_kclient_list_lock; + + wait_queue_head_t send_resp_wq; + int send_resp_flag; + + uint32_t qseos_version; + uint32_t qsee_version; + struct device *pdev; + bool whitelist_support; + bool commonlib_loaded; + bool commonlib64_loaded; + struct ce_hw_usage_info ce_info; + + int qsee_bw_count; + int qsee_sfpb_bw_count; + + uint32_t qsee_perf_client; + struct qseecom_clk qsee; + struct qseecom_clk ce_drv; + + bool support_bus_scaling; + bool support_fde; + bool support_pfe; + bool fde_key_size; + uint32_t cumulative_mode; + enum qseecom_bandwidth_request_mode current_mode; + struct timer_list bw_scale_down_timer; + struct work_struct bw_inactive_req_ws; + struct cdev cdev; + bool timer_running; + bool no_clock_support; + unsigned int ce_opp_freq_hz; + bool appsbl_qseecom_support; + uint32_t qsee_reentrancy_support; + + uint32_t app_block_ref_cnt; + wait_queue_head_t app_block_wq; + atomic_t qseecom_state; + int is_apps_region_protected; + bool smcinvoke_support; +}; + +struct qseecom_sec_buf_fd_info { + bool is_sec_buf_fd; + size_t size; + void *vbase; + dma_addr_t pbase; +}; + +struct qseecom_param_memref { + uint32_t buffer; + uint32_t size; +}; + +struct qseecom_client_handle { + u32 app_id; + u8 *sb_virt; + phys_addr_t sb_phys; + unsigned long user_virt_sb_base; + size_t sb_length; + struct ion_handle *ihandle; /* Retrieve phy addr */ + char app_name[MAX_APP_NAME_SIZE]; + u32 app_arch; + struct 
qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD]; +}; + +struct qseecom_listener_handle { + u32 id; +}; + +static struct qseecom_control qseecom; + +struct qseecom_dev_handle { + enum qseecom_client_handle_type type; + union { + struct qseecom_client_handle client; + struct qseecom_listener_handle listener; + }; + bool released; + int abort; + wait_queue_head_t abort_wq; + atomic_t ioctl_count; + bool perf_enabled; + bool fast_load_enabled; + enum qseecom_bandwidth_request_mode mode; + struct sglist_info sglistinfo_ptr[MAX_ION_FD]; + uint32_t sglist_cnt; + bool use_legacy_cmd; +}; + +struct qseecom_key_id_usage_desc { + uint8_t desc[QSEECOM_KEY_ID_SIZE]; +}; + +struct qseecom_crypto_info { + unsigned int unit_num; + unsigned int ce; + unsigned int pipe_pair; +}; + +static struct qseecom_key_id_usage_desc key_id_array[] = { + { + .desc = "Undefined Usage Index", + }, + + { + .desc = "Full Disk Encryption", + }, + + { + .desc = "Per File Encryption", + }, + + { + .desc = "UFS ICE Full Disk Encryption", + }, + + { + .desc = "SDCC ICE Full Disk Encryption", + }, +}; + +/* Function proto types */ +static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t); +static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t); +static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce); +static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce); +static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce); +static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data, + char *cmnlib_name); +static int qseecom_enable_ice_setup(int usage); +static int qseecom_disable_ice_setup(int usage); +static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id); +static int qseecom_get_ce_info(struct qseecom_dev_handle *data, + void __user *argp); +static int qseecom_free_ce_info(struct qseecom_dev_handle *data, + void __user *argp); +static int qseecom_query_ce_info(struct qseecom_dev_handle *data, + void __user *argp); + +static int get_qseecom_keymaster_status(char *str) +{ + get_option(&str, &qseecom.is_apps_region_protected); + return 1; +} +__setup("androidboot.keymaster=", get_qseecom_keymaster_status); + +static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id, + const void *req_buf, void *resp_buf) +{ + int ret = 0; + uint32_t smc_id = 0; + uint32_t qseos_cmd_id = 0; + struct scm_desc desc = {0}; + struct qseecom_command_scm_resp *scm_resp = NULL; + + if (!req_buf || !resp_buf) { + pr_err("Invalid buffer pointer\n"); + return -EINVAL; + } + qseos_cmd_id = *(uint32_t *)req_buf; + scm_resp = (struct qseecom_command_scm_resp *)resp_buf; + + switch (svc_id) { + case 6: { + if (tz_cmd_id == 3) { + smc_id = TZ_INFO_GET_FEATURE_VERSION_ID; + desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID; + desc.args[0] = *(uint32_t *)req_buf; + } else { + pr_err("Unsupported svc_id %d, tz_cmd_id %d\n", + svc_id, tz_cmd_id); + return -EINVAL; + } + ret = scm_call2(smc_id, &desc); + break; + } + case SCM_SVC_ES: { + switch (tz_cmd_id) { + case SCM_SAVE_PARTITION_HASH_ID: { + u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH); + struct qseecom_save_partition_hash_req *p_hash_req = + (struct qseecom_save_partition_hash_req *) + req_buf; + char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL); + if (!tzbuf) { + pr_err("error allocating data\n"); + return -ENOMEM; + } + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, p_hash_req->digest, + SHA256_DIGEST_LENGTH); + dmac_flush_range(tzbuf, tzbuf + tzbuflen); + smc_id = TZ_ES_SAVE_PARTITION_HASH_ID; + desc.arginfo = 
TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID; + desc.args[0] = p_hash_req->partition_id; + desc.args[1] = virt_to_phys(tzbuf); + desc.args[2] = SHA256_DIGEST_LENGTH; + ret = scm_call2(smc_id, &desc); + kzfree(tzbuf); + break; + } + default: { + pr_err("tz_cmd_id %d is not supported by scm_call2\n", + tz_cmd_id); + ret = -EINVAL; + break; + } + } /* end of switch (tz_cmd_id) */ + break; + } /* end of case SCM_SVC_ES */ + case SCM_SVC_TZSCHEDULER: { + switch (qseos_cmd_id) { + case QSEOS_APP_START_COMMAND: { + struct qseecom_load_app_ireq *req; + struct qseecom_load_app_64bit_ireq *req_64bit; + smc_id = TZ_OS_APP_START_ID; + desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_load_app_ireq *)req_buf; + desc.args[0] = req->mdt_len; + desc.args[1] = req->img_len; + desc.args[2] = req->phy_addr; + } else { + req_64bit = + (struct qseecom_load_app_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->mdt_len; + desc.args[1] = req_64bit->img_len; + desc.args[2] = req_64bit->phy_addr; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_APP_SHUTDOWN_COMMAND: { + struct qseecom_unload_app_ireq *req; + req = (struct qseecom_unload_app_ireq *)req_buf; + smc_id = TZ_OS_APP_SHUTDOWN_ID; + desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID; + desc.args[0] = req->app_id; + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_APP_LOOKUP_COMMAND: { + struct qseecom_check_app_ireq *req; + u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name)); + char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL); + if (!tzbuf) { + pr_err("Allocate %d bytes buffer failed\n", + tzbuflen); + return -ENOMEM; + } + req = (struct qseecom_check_app_ireq *)req_buf; + pr_debug("Lookup app_name = %s\n", req->app_name); + strlcpy(tzbuf, req->app_name, sizeof(req->app_name)); + dmac_flush_range(tzbuf, tzbuf + tzbuflen); + smc_id = TZ_OS_APP_LOOKUP_ID; + desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID; + desc.args[0] = virt_to_phys(tzbuf); + desc.args[1] = strlen(req->app_name); + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + kzfree(tzbuf); + break; + } + case QSEOS_APP_REGION_NOTIFICATION: { + struct qsee_apps_region_info_ireq *req; + struct qsee_apps_region_info_64bit_ireq *req_64bit; + smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID; + desc.arginfo = + TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qsee_apps_region_info_ireq *) + req_buf; + desc.args[0] = req->addr; + desc.args[1] = req->size; + } else { + req_64bit = + (struct qsee_apps_region_info_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->addr; + desc.args[1] = req_64bit->size; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_LOAD_SERV_IMAGE_COMMAND: { + struct qseecom_load_lib_image_ireq *req; + struct qseecom_load_lib_image_64bit_ireq *req_64bit; + smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID; + desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_load_lib_image_ireq *) + req_buf; + desc.args[0] = req->mdt_len; + desc.args[1] = req->img_len; + desc.args[2] = req->phy_addr; + } else { + req_64bit = + (struct qseecom_load_lib_image_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->mdt_len; + desc.args[1] = req_64bit->img_len; + desc.args[2] = req_64bit->phy_addr; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = 
scm_call2(smc_id, &desc); + break; + } + case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: { + smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID; + desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_REGISTER_LISTENER: { + struct qseecom_register_listener_ireq *req; + struct qseecom_register_listener_64bit_ireq *req_64bit; + desc.arginfo = + TZ_OS_REGISTER_LISTENER_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_register_listener_ireq *) + req_buf; + desc.args[0] = req->listener_id; + desc.args[1] = req->sb_ptr; + desc.args[2] = req->sb_len; + } else { + req_64bit = + (struct qseecom_register_listener_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->listener_id; + desc.args[1] = req_64bit->sb_ptr; + desc.args[2] = req_64bit->sb_len; + } + qseecom.smcinvoke_support = true; + smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + if (ret) { + qseecom.smcinvoke_support = false; + smc_id = TZ_OS_REGISTER_LISTENER_ID; + __qseecom_reentrancy_check_if_no_app_blocked( + smc_id); + ret = scm_call2(smc_id, &desc); + } + break; + } + case QSEOS_DEREGISTER_LISTENER: { + struct qseecom_unregister_listener_ireq *req; + req = (struct qseecom_unregister_listener_ireq *) + req_buf; + smc_id = TZ_OS_DEREGISTER_LISTENER_ID; + desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID; + desc.args[0] = req->listener_id; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_LISTENER_DATA_RSP_COMMAND: { + struct qseecom_client_listener_data_irsp *req; + req = (struct qseecom_client_listener_data_irsp *) + req_buf; + smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID; + desc.arginfo = + TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID; + desc.args[0] = req->listener_id; + desc.args[1] = req->status; + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: { + struct qseecom_client_listener_data_irsp *req; + struct qseecom_client_listener_data_64bit_irsp *req_64; + + smc_id = + TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID; + desc.arginfo = + TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = + (struct qseecom_client_listener_data_irsp *) + req_buf; + desc.args[0] = req->listener_id; + desc.args[1] = req->status; + desc.args[2] = req->sglistinfo_ptr; + desc.args[3] = req->sglistinfo_len; + } else { + req_64 = + (struct qseecom_client_listener_data_64bit_irsp *) + req_buf; + desc.args[0] = req_64->listener_id; + desc.args[1] = req_64->status; + desc.args[2] = req_64->sglistinfo_ptr; + desc.args[3] = req_64->sglistinfo_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: { + struct qseecom_load_app_ireq *req; + struct qseecom_load_app_64bit_ireq *req_64bit; + smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID; + desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_load_app_ireq *)req_buf; + desc.args[0] = req->mdt_len; + desc.args[1] = req->img_len; + desc.args[2] = req->phy_addr; + } else { + req_64bit = + (struct qseecom_load_app_64bit_ireq *)req_buf; + desc.args[0] = req_64bit->mdt_len; + desc.args[1] = req_64bit->img_len; + desc.args[2] = req_64bit->phy_addr; + } + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, 
&desc); + break; + } + case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: { + smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID; + desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + + case QSEOS_CLIENT_SEND_DATA_COMMAND: { + struct qseecom_client_send_data_ireq *req; + struct qseecom_client_send_data_64bit_ireq *req_64bit; + smc_id = TZ_APP_QSAPP_SEND_DATA_ID; + desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_client_send_data_ireq *) + req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->rsp_ptr; + desc.args[4] = req->rsp_len; + } else { + req_64bit = + (struct qseecom_client_send_data_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->rsp_ptr; + desc.args[4] = req_64bit->rsp_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: { + struct qseecom_client_send_data_ireq *req; + struct qseecom_client_send_data_64bit_ireq *req_64bit; + + smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID; + desc.arginfo = + TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_client_send_data_ireq *) + req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->rsp_ptr; + desc.args[4] = req->rsp_len; + desc.args[5] = req->sglistinfo_ptr; + desc.args[6] = req->sglistinfo_len; + } else { + req_64bit = + (struct qseecom_client_send_data_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->rsp_ptr; + desc.args[4] = req_64bit->rsp_len; + desc.args[5] = req_64bit->sglistinfo_ptr; + desc.args[6] = req_64bit->sglistinfo_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_RPMB_PROVISION_KEY_COMMAND: { + struct qseecom_client_send_service_ireq *req; + req = (struct qseecom_client_send_service_ireq *) + req_buf; + smc_id = TZ_OS_RPMB_PROVISION_KEY_ID; + desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID; + desc.args[0] = req->key_type; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_RPMB_ERASE_COMMAND: { + smc_id = TZ_OS_RPMB_ERASE_ID; + desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: { + smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID; + desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_GENERATE_KEY: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_generate_ireq) - + sizeof(uint32_t)); + char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL); + if (!tzbuf) + return -ENOMEM; + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), + (sizeof(struct qseecom_key_generate_ireq) - + sizeof(uint32_t))); + dmac_flush_range(tzbuf, tzbuf + tzbuflen); + smc_id = TZ_OS_KS_GEN_KEY_ID; + desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID; + desc.args[0] = virt_to_phys(tzbuf); + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); 
+ ret = scm_call2(smc_id, &desc); + kzfree(tzbuf); + break; + } + case QSEOS_DELETE_KEY: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_delete_ireq) - + sizeof(uint32_t)); + char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL); + if (!tzbuf) { + pr_err("Allocate %d bytes buffer failed\n", + tzbuflen); + return -ENOMEM; + } + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), + (sizeof(struct qseecom_key_delete_ireq) - + sizeof(uint32_t))); + dmac_flush_range(tzbuf, tzbuf + tzbuflen); + smc_id = TZ_OS_KS_DEL_KEY_ID; + desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID; + desc.args[0] = virt_to_phys(tzbuf); + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + kzfree(tzbuf); + break; + } + case QSEOS_SET_KEY: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_select_ireq) - + sizeof(uint32_t)); + char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL); + if (!tzbuf) { + pr_err("Allocate %d bytes buffer failed\n", + tzbuflen); + return -ENOMEM; + } + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), + (sizeof(struct qseecom_key_select_ireq) - + sizeof(uint32_t))); + dmac_flush_range(tzbuf, tzbuf + tzbuflen); + smc_id = TZ_OS_KS_SET_PIPE_KEY_ID; + desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID; + desc.args[0] = virt_to_phys(tzbuf); + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + kzfree(tzbuf); + break; + } + case QSEOS_UPDATE_KEY_USERINFO: { + u32 tzbuflen = PAGE_ALIGN(sizeof + (struct qseecom_key_userinfo_update_ireq) - + sizeof(uint32_t)); + char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL); + if (!tzbuf) { + pr_err("Allocate %d bytes buffer failed\n", + tzbuflen); + return -ENOMEM; + } + memset(tzbuf, 0, tzbuflen); + memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof + (struct qseecom_key_userinfo_update_ireq) - + sizeof(uint32_t))); + dmac_flush_range(tzbuf, tzbuf + tzbuflen); + smc_id = TZ_OS_KS_UPDATE_KEY_ID; + desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID; + desc.args[0] = virt_to_phys(tzbuf); + desc.args[1] = tzbuflen; + __qseecom_reentrancy_check_if_no_app_blocked(smc_id); + ret = scm_call2(smc_id, &desc); + kzfree(tzbuf); + break; + } + case QSEOS_TEE_OPEN_SESSION: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID; + desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_TEE_OPEN_SESSION_WHITELIST: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID; + desc.arginfo = + TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + desc.args[5] = req->sglistinfo_ptr; 
+ desc.args[6] = req->sglistinfo_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + desc.args[5] = req_64bit->sglistinfo_ptr; + desc.args[6] = req_64bit->sglistinfo_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_TEE_INVOKE_COMMAND: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID; + desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + + smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID; + desc.arginfo = + TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + desc.args[5] = req->sglistinfo_ptr; + desc.args[6] = req->sglistinfo_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + desc.args[5] = req_64bit->sglistinfo_ptr; + desc.args[6] = req_64bit->sglistinfo_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_TEE_CLOSE_SESSION: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID; + desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_TEE_REQUEST_CANCELLATION: { + struct qseecom_qteec_ireq *req; + struct qseecom_qteec_64bit_ireq *req_64bit; + smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID; + desc.arginfo = + TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID; + if (qseecom.qsee_version < QSEE_VERSION_40) { + req = (struct qseecom_qteec_ireq *)req_buf; + desc.args[0] = req->app_id; + desc.args[1] = req->req_ptr; + desc.args[2] = req->req_len; + desc.args[3] = req->resp_ptr; + desc.args[4] = req->resp_len; + } else { + req_64bit = (struct qseecom_qteec_64bit_ireq *) + req_buf; + desc.args[0] = 
req_64bit->app_id; + desc.args[1] = req_64bit->req_ptr; + desc.args[2] = req_64bit->req_len; + desc.args[3] = req_64bit->resp_ptr; + desc.args[4] = req_64bit->resp_len; + } + ret = scm_call2(smc_id, &desc); + break; + } + case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: { + struct qseecom_continue_blocked_request_ireq *req = + (struct qseecom_continue_blocked_request_ireq *) + req_buf; + if (qseecom.smcinvoke_support) + smc_id = + TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID; + else + smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID; + desc.arginfo = + TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID; + desc.args[0] = req->app_or_session_id; + ret = scm_call2(smc_id, &desc); + break; + } + default: { + pr_err("qseos_cmd_id 0x%d is not supported by armv8 scm_call2.\n", + qseos_cmd_id); + ret = -EINVAL; + break; + } + } /*end of switch (qsee_cmd_id) */ + break; + } /*end of case SCM_SVC_TZSCHEDULER*/ + default: { + pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n", + svc_id); + ret = -EINVAL; + break; + } + } /*end of switch svc_id */ + scm_resp->result = desc.ret[0]; + scm_resp->resp_type = desc.ret[1]; + scm_resp->data = desc.ret[2]; + pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n", + svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo); + pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n", + scm_resp->result, scm_resp->resp_type, scm_resp->data); + return ret; +} + + +static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf, + size_t cmd_len, void *resp_buf, size_t resp_len) +{ + if (!is_scm_armv8()) + return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len, + resp_buf, resp_len); + else + return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf); +} + +static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data, + struct qseecom_register_listener_req *svc) +{ + struct qseecom_registered_listener_list *ptr; + int unique = 1; + unsigned long flags; + + spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); + list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) { + if (ptr->svc.listener_id == svc->listener_id) { + pr_err("Service id: %u is already registered\n", + ptr->svc.listener_id); + unique = 0; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + return unique; +} + +static struct qseecom_registered_listener_list *__qseecom_find_svc( + int32_t listener_id) +{ + struct qseecom_registered_listener_list *entry = NULL; + unsigned long flags; + + spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); + list_for_each_entry(entry, &qseecom.registered_listener_list_head, list) + { + if (entry->svc.listener_id == listener_id) + break; + } + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + + if ((entry != NULL) && (entry->svc.listener_id != listener_id)) { + pr_err("Service id: %u is not found\n", listener_id); + return NULL; + } + + return entry; +} + +static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc, + struct qseecom_dev_handle *handle, + struct qseecom_register_listener_req *listener) +{ + int ret = 0; + struct qseecom_register_listener_ireq req; + struct qseecom_register_listener_64bit_ireq req_64bit; + struct qseecom_command_scm_resp resp; + ion_phys_addr_t pa; + void *cmd_buf = NULL; + size_t cmd_len; + + /* Get the handle of the shared fd */ + svc->ihandle = ion_import_dma_buf(qseecom.ion_clnt, + listener->ifd_data_fd); + if 
(IS_ERR_OR_NULL(svc->ihandle)) { + pr_err("Ion client could not retrieve the handle\n"); + return -ENOMEM; + } + + /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length); + if (ret) { + pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n", + ret); + return ret; + } + /* Populate the structure for sending scm call to load image */ + svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle); + if (IS_ERR_OR_NULL(svc->sb_virt)) { + pr_err("ION memory mapping for listener shared buffer failed\n"); + return -ENOMEM; + } + svc->sb_phys = (phys_addr_t)pa; + + if (qseecom.qsee_version < QSEE_VERSION_40) { + req.qsee_cmd_id = QSEOS_REGISTER_LISTENER; + req.listener_id = svc->svc.listener_id; + req.sb_len = svc->sb_length; + req.sb_ptr = (uint32_t)svc->sb_phys; + cmd_buf = (void *)&req; + cmd_len = sizeof(struct qseecom_register_listener_ireq); + } else { + req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER; + req_64bit.listener_id = svc->svc.listener_id; + req_64bit.sb_len = svc->sb_length; + req_64bit.sb_ptr = (uint64_t)svc->sb_phys; + cmd_buf = (void *)&req_64bit; + cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq); + } + + resp.result = QSEOS_RESULT_INCOMPLETE; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("qseecom_scm_call failed with err: %d\n", ret); + return -EINVAL; + } + + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Error SB registration req: resp.result = %d\n", + resp.result); + return -EPERM; + } + return 0; +} + +static int qseecom_register_listener(struct qseecom_dev_handle *data, + void __user *argp) +{ + int ret = 0; + unsigned long flags; + struct qseecom_register_listener_req rcvd_lstnr; + struct qseecom_registered_listener_list *new_entry; + + ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base, + rcvd_lstnr.sb_size)) + return -EFAULT; + + data->listener.id = 0; + if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) { + pr_err("Service is not unique and is already registered\n"); + data->released = true; + return -EBUSY; + } + + new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL); + if (!new_entry) { + pr_err("kmalloc failed\n"); + return -ENOMEM; + } + memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr)); + new_entry->rcv_req_flag = 0; + + new_entry->svc.listener_id = rcvd_lstnr.listener_id; + new_entry->sb_length = rcvd_lstnr.sb_size; + new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base; + if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) { + pr_err("qseecom_set_sb_memoryfailed\n"); + kzfree(new_entry); + return -ENOMEM; + } + + data->listener.id = rcvd_lstnr.listener_id; + init_waitqueue_head(&new_entry->rcv_req_wq); + init_waitqueue_head(&new_entry->listener_block_app_wq); + new_entry->send_resp_flag = 0; + new_entry->listener_in_use = false; + spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); + list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head); + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + + return ret; +} + +static int qseecom_unregister_listener(struct qseecom_dev_handle *data) +{ + int ret = 0; + unsigned long flags; + uint32_t unmap_mem = 0; + struct qseecom_register_listener_ireq req; + struct qseecom_registered_listener_list *ptr_svc = NULL; + struct qseecom_command_scm_resp 
resp; + struct ion_handle *ihandle = NULL; /* Retrieve phy addr */ + + req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER; + req.listener_id = data->listener.id; + resp.result = QSEOS_RESULT_INCOMPLETE; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(req), &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (lstnr id=%d)\n", + ret, data->listener.id); + return ret; + } + + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Failed resp.result=%d,(lstnr id=%d)\n", + resp.result, data->listener.id); + return -EPERM; + } + + data->abort = 1; + spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); + list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head, + list) { + if (ptr_svc->svc.listener_id == data->listener.id) { + wake_up_all(&ptr_svc->rcv_req_wq); + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + + while (atomic_read(&data->ioctl_count) > 1) { + if (wait_event_freezable(data->abort_wq, + atomic_read(&data->ioctl_count) <= 1)) { + pr_err("Interrupted from abort\n"); + ret = -ERESTARTSYS; + return ret; + } + } + + spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags); + list_for_each_entry(ptr_svc, + &qseecom.registered_listener_list_head, + list) + { + if (ptr_svc->svc.listener_id == data->listener.id) { + if (ptr_svc->sb_virt) { + unmap_mem = 1; + ihandle = ptr_svc->ihandle; + } + list_del(&ptr_svc->list); + kzfree(ptr_svc); + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags); + + /* Unmap the memory */ + if (unmap_mem) { + if (!IS_ERR_OR_NULL(ihandle)) { + ion_unmap_kernel(qseecom.ion_clnt, ihandle); + ion_free(qseecom.ion_clnt, ihandle); + } + } + data->released = true; + return ret; +} + +static int __qseecom_set_msm_bus_request(uint32_t mode) +{ + int ret = 0; + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + if (qclk->ce_core_src_clk != NULL) { + if (mode == INACTIVE) { + __qseecom_disable_clk(CLK_QSEE); + } else { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + pr_err("CLK enabling failed (%d) MODE (%d)\n", + ret, mode); + } + } + + if ((!ret) && (qseecom.current_mode != mode)) { + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, mode); + if (ret) { + pr_err("Bandwidth req failed(%d) MODE (%d)\n", + ret, mode); + if (qclk->ce_core_src_clk != NULL) { + if (mode == INACTIVE) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + pr_err("CLK enable failed\n"); + } else + __qseecom_disable_clk(CLK_QSEE); + } + } + qseecom.current_mode = mode; + } + return ret; +} + +static void qseecom_bw_inactive_req_work(struct work_struct *work) +{ + mutex_lock(&app_access_lock); + mutex_lock(&qsee_bw_mutex); + if (qseecom.timer_running) + __qseecom_set_msm_bus_request(INACTIVE); + pr_debug("current_mode = %d, cumulative_mode = %d\n", + qseecom.current_mode, qseecom.cumulative_mode); + qseecom.timer_running = false; + mutex_unlock(&qsee_bw_mutex); + mutex_unlock(&app_access_lock); + return; +} + +static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data) +{ + schedule_work(&qseecom.bw_inactive_req_ws); + return; +} + +static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce) +{ + struct qseecom_clk *qclk; + int ret = 0; + mutex_lock(&clk_access_lock); + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + else + qclk = &qseecom.ce_drv; + + if (qclk->clk_access_cnt > 2) { + pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt); + ret = -EINVAL; + goto err_dec_ref_cnt; + } + if 
(qclk->clk_access_cnt == 2) + qclk->clk_access_cnt--; + +err_dec_ref_cnt: + mutex_unlock(&clk_access_lock); + return ret; +} + + +static int qseecom_scale_bus_bandwidth_timer(uint32_t mode) +{ + int32_t ret = 0; + int32_t request_mode = INACTIVE; + + mutex_lock(&qsee_bw_mutex); + if (mode == 0) { + if (qseecom.cumulative_mode > MEDIUM) + request_mode = HIGH; + else + request_mode = qseecom.cumulative_mode; + } else { + request_mode = mode; + } + + ret = __qseecom_set_msm_bus_request(request_mode); + if (ret) { + pr_err("set msm bus request failed (%d),request_mode (%d)\n", + ret, request_mode); + goto err_scale_timer; + } + + if (qseecom.timer_running) { + ret = __qseecom_decrease_clk_ref_count(CLK_QSEE); + if (ret) { + pr_err("Failed to decrease clk ref count.\n"); + goto err_scale_timer; + } + del_timer_sync(&(qseecom.bw_scale_down_timer)); + qseecom.timer_running = false; + } +err_scale_timer: + mutex_unlock(&qsee_bw_mutex); + return ret; +} + + +static int qseecom_unregister_bus_bandwidth_needs( + struct qseecom_dev_handle *data) +{ + int32_t ret = 0; + + qseecom.cumulative_mode -= data->mode; + data->mode = INACTIVE; + + return ret; +} + +static int __qseecom_register_bus_bandwidth_needs( + struct qseecom_dev_handle *data, uint32_t request_mode) +{ + int32_t ret = 0; + + if (data->mode == INACTIVE) { + qseecom.cumulative_mode += request_mode; + data->mode = request_mode; + } else { + if (data->mode != request_mode) { + qseecom.cumulative_mode -= data->mode; + qseecom.cumulative_mode += request_mode; + data->mode = request_mode; + } + } + return ret; +} + +static int qseecom_perf_enable(struct qseecom_dev_handle *data) +{ + int ret = 0; + ret = qsee_vote_for_clock(data, CLK_DFAB); + if (ret) { + pr_err("Failed to vote for DFAB clock with err %d\n", ret); + goto perf_enable_exit; + } + ret = qsee_vote_for_clock(data, CLK_SFPB); + if (ret) { + qsee_disable_clock_vote(data, CLK_DFAB); + pr_err("Failed to vote for SFPB clock with err %d\n", ret); + goto perf_enable_exit; + } + +perf_enable_exit: + return ret; +} + +static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data, + void __user *argp) +{ + int32_t ret = 0; + int32_t req_mode; + + if (qseecom.no_clock_support) + return 0; + + ret = copy_from_user(&req_mode, argp, sizeof(req_mode)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + if (req_mode > HIGH) { + pr_err("Invalid bandwidth mode (%d)\n", req_mode); + return -EINVAL; + } + + /* + * Register bus bandwidth needs if bus scaling feature is enabled; + * otherwise, qseecom enable/disable clocks for the client directly. 
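 * Each client carries a single bandwidth vote (data->mode); the votes are
 * summed into qseecom.cumulative_mode by __qseecom_register_bus_bandwidth_needs()
 * and removed again by qseecom_unregister_bus_bandwidth_needs(), and
 * qseecom_scale_bus_bandwidth_timer() serves a request of 0 with that
 * aggregate demand, capped at HIGH.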
+ */ + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, req_mode); + mutex_unlock(&qsee_bw_mutex); + } else { + pr_debug("Bus scaling feature is NOT enabled\n"); + pr_debug("request bandwidth mode %d for the client\n", + req_mode); + if (req_mode != INACTIVE) { + ret = qseecom_perf_enable(data); + if (ret) + pr_err("Failed to vote for clock with err %d\n", + ret); + } else { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } + } + return ret; +} + +static void __qseecom_add_bw_scale_down_timer(uint32_t duration) +{ + if (qseecom.no_clock_support) + return; + + mutex_lock(&qsee_bw_mutex); + qseecom.bw_scale_down_timer.expires = jiffies + + msecs_to_jiffies(duration); + mod_timer(&(qseecom.bw_scale_down_timer), + qseecom.bw_scale_down_timer.expires); + qseecom.timer_running = true; + mutex_unlock(&qsee_bw_mutex); +} + +static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data) +{ + if (!qseecom.support_bus_scaling) + qsee_disable_clock_vote(data, CLK_SFPB); + else + __qseecom_add_bw_scale_down_timer( + QSEECOM_LOAD_APP_CRYPTO_TIMEOUT); + return; +} + +static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data) +{ + int ret = 0; + if (qseecom.support_bus_scaling) { + ret = qseecom_scale_bus_bandwidth_timer(MEDIUM); + if (ret) + pr_err("Failed to set bw MEDIUM.\n"); + } else { + ret = qsee_vote_for_clock(data, CLK_SFPB); + if (ret) + pr_err("Fail vote for clk SFPB ret %d\n", ret); + } + return ret; +} + +static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data, + void __user *argp) +{ + ion_phys_addr_t pa; + int32_t ret; + struct qseecom_set_sb_mem_param_req req; + size_t len; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&req, (void __user *)argp, sizeof(req))) + return -EFAULT; + + if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) || + (req.sb_len == 0)) { + pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n", + req.ifd_data_fd, req.sb_len, req.virt_sb_base); + return -EFAULT; + } + if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base, + req.sb_len)) + return -EFAULT; + + /* Get the handle of the shared fd */ + data->client.ihandle = ion_import_dma_buf(qseecom.ion_clnt, + req.ifd_data_fd); + if (IS_ERR_OR_NULL(data->client.ihandle)) { + pr_err("Ion client could not retrieve the handle\n"); + return -ENOMEM; + } + /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len); + if (ret) { + + pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n", + ret); + return ret; + } + + if (len < req.sb_len) { + pr_err("Requested length (0x%x) is > allocated (0x%zu)\n", + req.sb_len, len); + return -EINVAL; + } + /* Populate the structure for sending scm call to load image */ + data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, + data->client.ihandle); + if (IS_ERR_OR_NULL(data->client.sb_virt)) { + pr_err("ION memory mapping for client shared buf failed\n"); + return -ENOMEM; + } + data->client.sb_phys = (phys_addr_t)pa; + data->client.sb_length = req.sb_len; + data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base; + return 0; +} + +static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data) +{ + int ret; + ret = (qseecom.send_resp_flag != 0); + return ret || data->abort; +} + +static int __qseecom_reentrancy_listener_has_sent_rsp( + struct qseecom_dev_handle *data, + struct 
qseecom_registered_listener_list *ptr_svc) +{ + int ret; + + ret = (ptr_svc->send_resp_flag != 0); + return ret || data->abort; +} + +static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data, + struct qseecom_command_scm_resp *resp, + struct qseecom_client_listener_data_irsp *send_data_rsp, + struct qseecom_registered_listener_list *ptr_svc, + uint32_t lstnr) { + int ret = 0; + + send_data_rsp->status = QSEOS_RESULT_FAILURE; + qseecom.send_resp_flag = 0; + send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND; + send_data_rsp->listener_id = lstnr; + if (ptr_svc) + pr_warn("listener_id:%x, lstnr: %x\n", + ptr_svc->svc.listener_id, lstnr); + if (ptr_svc && ptr_svc->ihandle) { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle, + ptr_svc->sb_virt, ptr_svc->sb_length, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + } + + if (lstnr == RPMB_SERVICE) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + } + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp, + sizeof(send_data_rsp), resp, sizeof(*resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + if (lstnr == RPMB_SERVICE) + __qseecom_disable_clk(CLK_QSEE); + return ret; + } + if ((resp->result != QSEOS_RESULT_SUCCESS) && + (resp->result != QSEOS_RESULT_INCOMPLETE)) { + pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n", + resp->result, data->client.app_id, lstnr); + ret = -EINVAL; + } + if (lstnr == RPMB_SERVICE) + __qseecom_disable_clk(CLK_QSEE); + return ret; +} + +static void __qseecom_clean_listener_sglistinfo( + struct qseecom_registered_listener_list *ptr_svc) +{ + if (ptr_svc->sglist_cnt) { + memset(ptr_svc->sglistinfo_ptr, 0, + SGLISTINFO_TABLE_SIZE); + ptr_svc->sglist_cnt = 0; + } +} + +static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data, + struct qseecom_command_scm_resp *resp) +{ + int ret = 0; + int rc = 0; + uint32_t lstnr; + unsigned long flags; + struct qseecom_client_listener_data_irsp send_data_rsp; + struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit; + struct qseecom_registered_listener_list *ptr_svc = NULL; + sigset_t new_sigset; + sigset_t old_sigset; + uint32_t status; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = NULL; + + while (resp->result == QSEOS_RESULT_INCOMPLETE) { + lstnr = resp->data; + /* + * Wake up blocking lsitener service with the lstnr id + */ + spin_lock_irqsave(&qseecom.registered_listener_list_lock, + flags); + list_for_each_entry(ptr_svc, + &qseecom.registered_listener_list_head, list) { + if (ptr_svc->svc.listener_id == lstnr) { + ptr_svc->listener_in_use = true; + ptr_svc->rcv_req_flag = 1; + wake_up_interruptible(&ptr_svc->rcv_req_wq); + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, + flags); + + if (ptr_svc == NULL) { + pr_err("Listener Svc %d does not exist\n", lstnr); + __qseecom_qseos_fail_return_resp_tz(data, resp, + &send_data_rsp, ptr_svc, lstnr); + return -EINVAL; + } + + if (!ptr_svc->ihandle) { + pr_err("Client handle is not initialized\n"); + __qseecom_qseos_fail_return_resp_tz(data, resp, + &send_data_rsp, ptr_svc, lstnr); + return -EINVAL; + } + + if (ptr_svc->svc.listener_id != lstnr) { + pr_warn("Service requested does not exist\n"); + __qseecom_qseos_fail_return_resp_tz(data, resp, + &send_data_rsp, NULL, lstnr); + return -ERESTARTSYS; + } + pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n"); 
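Editor's illustrative sketch (not part of the patch): when QSEE answers QSEOS_RESULT_INCOMPLETE, resp->data carries the id of the listener it is waiting on. The code above wakes that listener, blocks signals, waits for its response flag, then returns a listener-data response to QSEE and re-reads the result, repeating until it is no longer incomplete. The control flow can be sketched in isolation as follows; every *_sketch name is a hypothetical stand-in for the driver's wait-queue and SCM plumbing.

#include <stdint.h>

enum qseos_result_sketch { RES_SUCCESS, RES_INCOMPLETE, RES_FAILURE };

struct scm_resp_sketch {
	enum qseos_result_sketch result;
	uint32_t data;			/* listener id when result is INCOMPLETE */
};

static void wake_listener_sketch(uint32_t lstnr) { (void)lstnr; }		/* wake_up_interruptible(rcv_req_wq) */
static int wait_listener_resp_sketch(uint32_t lstnr) { (void)lstnr; return 0; }	/* wait_event_freezable(send_resp_wq) */
static void send_listener_data_rsp_sketch(uint32_t lstnr, int status,
					  struct scm_resp_sketch *resp)
{
	/* QSEOS_LISTENER_DATA_RSP_COMMAND in the driver; QSEE sets the next result */
	(void)lstnr; (void)status;
	resp->result = RES_SUCCESS;
}

static int process_incomplete_sketch(struct scm_resp_sketch *resp)
{
	while (resp->result == RES_INCOMPLETE) {
		uint32_t lstnr = resp->data;	/* TZ names the listener it needs */
		int status;

		wake_listener_sketch(lstnr);
		status = wait_listener_resp_sketch(lstnr) ?
				RES_FAILURE : RES_SUCCESS;
		send_listener_data_rsp_sketch(lstnr, status, resp);
	}
	return resp->result == RES_SUCCESS ? 0 : -1;
}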
+ + /* initialize the new signal mask with all signals*/ + sigfillset(&new_sigset); + /* block all signals */ + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + + do { + /* + * When reentrancy is not supported, check global + * send_resp_flag; otherwise, check this listener's + * send_resp_flag. + */ + if (!qseecom.qsee_reentrancy_support && + !wait_event_freezable(qseecom.send_resp_wq, + __qseecom_listener_has_sent_rsp(data))) { + break; + } + + if (qseecom.qsee_reentrancy_support && + !wait_event_freezable(qseecom.send_resp_wq, + __qseecom_reentrancy_listener_has_sent_rsp( + data, ptr_svc))) { + break; + } + } while (1); + + /* restore signal mask */ + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + if (data->abort) { + pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d", + data->client.app_id, lstnr, ret); + rc = -ENODEV; + status = QSEOS_RESULT_FAILURE; + } else { + status = QSEOS_RESULT_SUCCESS; + } + + qseecom.send_resp_flag = 0; + ptr_svc->send_resp_flag = 0; + table = ptr_svc->sglistinfo_ptr; + if (qseecom.qsee_version < QSEE_VERSION_40) { + send_data_rsp.listener_id = lstnr; + send_data_rsp.status = status; + send_data_rsp.sglistinfo_ptr = + (uint32_t)virt_to_phys(table); + send_data_rsp.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&send_data_rsp; + cmd_len = sizeof(send_data_rsp); + } else { + send_data_rsp_64bit.listener_id = lstnr; + send_data_rsp_64bit.status = status; + send_data_rsp_64bit.sglistinfo_ptr = + virt_to_phys(table); + send_data_rsp_64bit.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&send_data_rsp_64bit; + cmd_len = sizeof(send_data_rsp_64bit); + } + if (qseecom.whitelist_support == false) + *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND; + else + *(uint32_t *)cmd_buf = + QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST; + if (ptr_svc) { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ptr_svc->ihandle, + ptr_svc->sb_virt, ptr_svc->sb_length, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + } + + if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + } + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, resp, sizeof(*resp)); + ptr_svc->listener_in_use = false; + __qseecom_clean_listener_sglistinfo(ptr_svc); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) + __qseecom_disable_clk(CLK_QSEE); + return ret; + } + if ((resp->result != QSEOS_RESULT_SUCCESS) && + (resp->result != QSEOS_RESULT_INCOMPLETE)) { + pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n", + resp->result, data->client.app_id, lstnr); + ret = -EINVAL; + } + if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) + __qseecom_disable_clk(CLK_QSEE); + + } + if (rc) + return rc; + + return ret; +} + +static int __qseecom_process_reentrancy_blocked_on_listener( + struct qseecom_command_scm_resp *resp, + struct qseecom_registered_app_list *ptr_app, + struct qseecom_dev_handle *data) +{ + struct qseecom_registered_listener_list *list_ptr; + int ret = 0; + struct qseecom_continue_blocked_request_ireq ireq; + struct qseecom_command_scm_resp continue_resp; + unsigned int session_id; + sigset_t new_sigset; + sigset_t old_sigset; + unsigned long flags; + bool found_app = 
false; + + if (!resp || !data) { + pr_err("invalid resp or data pointer\n"); + ret = -EINVAL; + goto exit; + } + + /* find app_id & img_name from list */ + if (!ptr_app) { + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, + data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", + data->client.app_id, + (char *)data->client.app_name); + ret = -ENOENT; + goto exit; + } + } + + do { + session_id = resp->resp_type; + list_ptr = __qseecom_find_svc(resp->data); + if (!list_ptr) { + pr_err("Invalid listener ID %d\n", resp->data); + ret = -ENODATA; + goto exit; + } + ptr_app->blocked_on_listener_id = resp->data; + + pr_warn("Lsntr %d in_use %d, block session(%d) app(%d)\n", + resp->data, list_ptr->listener_in_use, + session_id, data->client.app_id); + + /* sleep until listener is available */ + sigfillset(&new_sigset); + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + + do { + qseecom.app_block_ref_cnt++; + ptr_app->app_blocked = true; + mutex_unlock(&app_access_lock); + wait_event_freezable( + list_ptr->listener_block_app_wq, + !list_ptr->listener_in_use); + mutex_lock(&app_access_lock); + ptr_app->app_blocked = false; + qseecom.app_block_ref_cnt--; + } while (list_ptr->listener_in_use); + + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + + ptr_app->blocked_on_listener_id = 0; + pr_warn("Lsntr %d is available, unblock session(%d) app(%d)\n", + resp->data, session_id, data->client.app_id); + + /* notify TZ that listener is available */ + ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND; + + if (qseecom.smcinvoke_support) + ireq.app_or_session_id = session_id; + else + ireq.app_or_session_id = data->client.app_id; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + &ireq, sizeof(ireq), + &continue_resp, sizeof(continue_resp)); + if (ret && qseecom.smcinvoke_support) { + /* retry with legacy cmd */ + qseecom.smcinvoke_support = false; + ireq.app_or_session_id = data->client.app_id; + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + &ireq, sizeof(ireq), + &continue_resp, sizeof(continue_resp)); + qseecom.smcinvoke_support = true; + if (ret) { + pr_err("unblock app %d or session %d fail\n", + data->client.app_id, session_id); + goto exit; + } + } + resp->result = continue_resp.result; + resp->resp_type = continue_resp.resp_type; + resp->data = continue_resp.data; + pr_debug("unblock resp = %d\n", resp->result); + } while (resp->result == QSEOS_RESULT_BLOCKED_ON_LISTENER); + + if (resp->result != QSEOS_RESULT_INCOMPLETE) { + pr_err("Unexpected unblock resp %d\n", resp->result); + ret = -EINVAL; + } +exit: + return ret; +} + +static int __qseecom_reentrancy_process_incomplete_cmd( + struct qseecom_dev_handle *data, + struct qseecom_command_scm_resp *resp) +{ + int ret = 0; + int rc = 0; + uint32_t lstnr; + unsigned long flags; + struct qseecom_client_listener_data_irsp send_data_rsp; + struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit; + struct qseecom_registered_listener_list *ptr_svc = NULL; + sigset_t new_sigset; + sigset_t old_sigset; + uint32_t status; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = NULL; + + while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) { + lstnr = resp->data; + /* + * Wake up blocking lsitener 
service with the lstnr id + */ + spin_lock_irqsave(&qseecom.registered_listener_list_lock, + flags); + list_for_each_entry(ptr_svc, + &qseecom.registered_listener_list_head, list) { + if (ptr_svc->svc.listener_id == lstnr) { + ptr_svc->listener_in_use = true; + ptr_svc->rcv_req_flag = 1; + wake_up_interruptible(&ptr_svc->rcv_req_wq); + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, + flags); + + if (ptr_svc == NULL) { + pr_err("Listener Svc %d does not exist\n", lstnr); + return -EINVAL; + } + + if (!ptr_svc->ihandle) { + pr_err("Client handle is not initialized\n"); + return -EINVAL; + } + + if (ptr_svc->svc.listener_id != lstnr) { + pr_warn("Service requested does not exist\n"); + return -ERESTARTSYS; + } + pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n"); + + /* initialize the new signal mask with all signals*/ + sigfillset(&new_sigset); + + /* block all signals */ + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + + /* unlock mutex btw waking listener and sleep-wait */ + mutex_unlock(&app_access_lock); + do { + if (!wait_event_freezable(qseecom.send_resp_wq, + __qseecom_reentrancy_listener_has_sent_rsp( + data, ptr_svc))) { + break; + } + } while (1); + /* lock mutex again after resp sent */ + mutex_lock(&app_access_lock); + ptr_svc->send_resp_flag = 0; + qseecom.send_resp_flag = 0; + + /* restore signal mask */ + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + if (data->abort) { + pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d", + data->client.app_id, lstnr, ret); + rc = -ENODEV; + status = QSEOS_RESULT_FAILURE; + } else { + status = QSEOS_RESULT_SUCCESS; + } + table = ptr_svc->sglistinfo_ptr; + if (qseecom.qsee_version < QSEE_VERSION_40) { + send_data_rsp.listener_id = lstnr; + send_data_rsp.status = status; + send_data_rsp.sglistinfo_ptr = + (uint32_t)virt_to_phys(table); + send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&send_data_rsp; + cmd_len = sizeof(send_data_rsp); + } else { + send_data_rsp_64bit.listener_id = lstnr; + send_data_rsp_64bit.status = status; + send_data_rsp_64bit.sglistinfo_ptr = + virt_to_phys(table); + send_data_rsp_64bit.sglistinfo_len = + SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&send_data_rsp_64bit; + cmd_len = sizeof(send_data_rsp_64bit); + } + if (qseecom.whitelist_support == false) + *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND; + else + *(uint32_t *)cmd_buf = + QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST; + if (ptr_svc) { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ptr_svc->ihandle, + ptr_svc->sb_virt, ptr_svc->sb_length, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + } + if (lstnr == RPMB_SERVICE) { + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + } + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, resp, sizeof(*resp)); + ptr_svc->listener_in_use = false; + __qseecom_clean_listener_sglistinfo(ptr_svc); + wake_up_interruptible(&ptr_svc->listener_block_app_wq); + + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + + switch (resp->result) { + case QSEOS_RESULT_BLOCKED_ON_LISTENER: + pr_warn("send lsr %d rsp, but app %d block on lsr %d\n", + lstnr, data->client.app_id, resp->data); + if (lstnr == resp->data) { + pr_err("lstnr %d should not be 
blocked!\n", + lstnr); + ret = -EINVAL; + goto exit; + } + ret = __qseecom_process_reentrancy_blocked_on_listener( + resp, NULL, data); + if (ret) { + pr_err("failed to process App(%d) %s blocked on listener %d\n", + data->client.app_id, + data->client.app_name, resp->data); + goto exit; + } + case QSEOS_RESULT_SUCCESS: + case QSEOS_RESULT_INCOMPLETE: + break; + default: + pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n", + resp->result, data->client.app_id, lstnr); + ret = -EINVAL; + goto exit; + } +exit: + if (lstnr == RPMB_SERVICE) + __qseecom_disable_clk(CLK_QSEE); + + } + if (rc) + return rc; + + return ret; +} + +/* + * QSEE doesn't support OS level cmds reentrancy until RE phase-3, + * and QSEE OS level scm_call cmds will fail if there is any blocked TZ app. + * So, needs to first check if no app blocked before sending OS level scm call, + * then wait until all apps are unblocked. + */ +static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id) +{ + sigset_t new_sigset, old_sigset; + + if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 && + qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 && + IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) { + /* thread sleep until this app unblocked */ + while (qseecom.app_block_ref_cnt > 0) { + sigfillset(&new_sigset); + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + mutex_unlock(&app_access_lock); + do { + if (!wait_event_freezable(qseecom.app_block_wq, + (qseecom.app_block_ref_cnt == 0))) + break; + } while (1); + mutex_lock(&app_access_lock); + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + } + } +} + +/* + * scm_call of send data will fail if this TA is blocked or there are more + * than one TA requesting listener services; So, first check to see if need + * to wait. 
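 * The wait mirrors __qseecom_reentrancy_check_if_no_app_blocked() above:
 * with all signals blocked, app_access_lock is dropped, the thread sleeps
 * freezably on app_block_wq until this app is unblocked and at most one
 * blocked-app reference remains, and the lock is then re-taken before the
 * condition is re-checked.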
+ */ +static void __qseecom_reentrancy_check_if_this_app_blocked( + struct qseecom_registered_app_list *ptr_app) +{ + sigset_t new_sigset, old_sigset; + if (qseecom.qsee_reentrancy_support) { + while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) { + /* thread sleep until this app unblocked */ + sigfillset(&new_sigset); + sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset); + mutex_unlock(&app_access_lock); + do { + if (!wait_event_freezable(qseecom.app_block_wq, + (!ptr_app->app_blocked && + qseecom.app_block_ref_cnt <= 1))) + break; + } while (1); + mutex_lock(&app_access_lock); + sigprocmask(SIG_SETMASK, &old_sigset, NULL); + } + } +} + +static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req, + uint32_t *app_id) +{ + int32_t ret; + struct qseecom_command_scm_resp resp; + bool found_app = false; + struct qseecom_registered_app_list *entry = NULL; + unsigned long flags = 0; + + if (!app_id) { + pr_err("Null pointer to app_id\n"); + return -EINVAL; + } + *app_id = 0; + + /* check if app exists and has been registered locally */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list) { + if (!strcmp(entry->app_name, req.app_name)) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + if (found_app) { + pr_debug("Found app with id %d\n", entry->app_id); + *app_id = entry->app_id; + return 0; + } + + memset((void *)&resp, 0, sizeof(resp)); + + /* SCM_CALL to check if app_id for the mentioned app exists */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(struct qseecom_check_app_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to check if app is already loaded failed\n"); + return -EINVAL; + } + + if (resp.result == QSEOS_RESULT_FAILURE) + return 0; + + switch (resp.resp_type) { + /*qsee returned listener type response */ + case QSEOS_LISTENER_ID: + pr_err("resp type is of listener type instead of app"); + return -EINVAL; + case QSEOS_APP_ID: + *app_id = resp.data; + return 0; + default: + pr_err("invalid resp type (%d) from qsee", + resp.resp_type); + return -ENODEV; + } +} + +static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp) +{ + struct qseecom_registered_app_list *entry = NULL; + unsigned long flags = 0; + u32 app_id = 0; + struct ion_handle *ihandle; /* Ion handle */ + struct qseecom_load_img_req load_img_req; + int32_t ret = 0; + ion_phys_addr_t pa = 0; + size_t len; + struct qseecom_command_scm_resp resp; + struct qseecom_check_app_ireq req; + struct qseecom_load_app_ireq load_req; + struct qseecom_load_app_64bit_ireq load_req_64bit; + void *cmd_buf = NULL; + size_t cmd_len; + bool first_time = false; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&load_img_req, + (void __user *)argp, + sizeof(struct qseecom_load_img_req))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + /* Check and load cmnlib */ + if (qseecom.qsee_version > QSEEE_VERSION_00) { + if (!qseecom.commonlib_loaded && + load_img_req.app_arch == ELFCLASS32) { + ret = qseecom_load_commonlib_image(data, "cmnlib"); + if (ret) { + pr_err("failed to load cmnlib\n"); + return -EIO; + } + qseecom.commonlib_loaded = true; + pr_debug("cmnlib is loaded\n"); + } + + if (!qseecom.commonlib64_loaded && + load_img_req.app_arch == ELFCLASS64) { + ret = qseecom_load_commonlib_image(data, "cmnlib64"); + if (ret) { + pr_err("failed to load cmnlib64\n"); + return -EIO; + 
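Editor's illustrative sketch (not part of the patch): the two reentrancy guards defined above, __qseecom_reentrancy_check_if_no_app_blocked() and __qseecom_reentrancy_check_if_this_app_blocked(), share one pattern: drop app_access_lock, sleep until the blocking condition clears, re-take the lock, and re-check. A rough userspace analogue using a mutex and condition variable is sketched below; the pthread objects are hypothetical stand-ins for app_access_lock and the app_block_wq wait queue, and the kernel code additionally blocks signals and uses wait_event_freezable() rather than a condition variable.

#include <pthread.h>

static pthread_mutex_t app_access_lock_sk = PTHREAD_MUTEX_INITIALIZER;	/* app_access_lock */
static pthread_mutex_t block_state_lock_sk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  app_block_wq_sk = PTHREAD_COND_INITIALIZER;	/* app_block_wq */
static int app_block_ref_cnt_sk;					/* app_block_ref_cnt */

/* Called with app_access_lock_sk held, as in the driver. */
static void wait_until_no_app_blocked_sketch(void)
{
	for (;;) {
		pthread_mutex_lock(&block_state_lock_sk);
		if (app_block_ref_cnt_sk == 0) {
			pthread_mutex_unlock(&block_state_lock_sk);
			return;		/* nothing blocked: keep the big lock */
		}
		/* drop the big lock while sleeping, as the driver does */
		pthread_mutex_unlock(&app_access_lock_sk);
		while (app_block_ref_cnt_sk > 0)
			pthread_cond_wait(&app_block_wq_sk, &block_state_lock_sk);
		pthread_mutex_unlock(&block_state_lock_sk);
		/* re-take the big lock, then loop to re-check the condition */
		pthread_mutex_lock(&app_access_lock_sk);
	}
}

/* The unblocking side: decrement under the lock and wake all waiters. */
static void app_unblocked_sketch(void)
{
	pthread_mutex_lock(&block_state_lock_sk);
	if (app_block_ref_cnt_sk > 0)
		app_block_ref_cnt_sk--;
	pthread_cond_broadcast(&app_block_wq_sk);
	pthread_mutex_unlock(&block_state_lock_sk);
}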
} + qseecom.commonlib64_loaded = true; + pr_debug("cmnlib64 is loaded\n"); + } + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) + return ret; + } + + /* Vote for the SFPB clock */ + ret = __qseecom_enable_clk_scale_up(data); + if (ret) + goto enable_clk_err; + + req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0'; + strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE); + + ret = __qseecom_check_app_exists(req, &app_id); + if (ret < 0) + goto loadapp_err; + + if (app_id) { + pr_debug("App id %d (%s) already exists\n", app_id, + (char *)(req.app_name)); + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list){ + if (entry->app_id == app_id) { + entry->ref_cnt++; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + ret = 0; + } else { + first_time = true; + pr_warn("App (%s) does'nt exist, loading apps for first time\n", + (char *)(load_img_req.img_name)); + /* Get the handle of the shared fd */ + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + load_img_req.ifd_data_fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client could not retrieve the handle\n"); + ret = -ENOMEM; + goto loadapp_err; + } + + /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len); + if (ret) { + pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n", + ret); + goto loadapp_err; + } + if (load_img_req.mdt_len > len || load_img_req.img_len > len) { + pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n", + len, load_img_req.mdt_len, + load_img_req.img_len); + ret = -EINVAL; + goto loadapp_err; + } + /* Populate the structure for sending scm call to load image */ + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req.mdt_len = load_img_req.mdt_len; + load_req.img_len = load_img_req.img_len; + strlcpy(load_req.app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + load_req.phy_addr = (uint32_t)pa; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_app_ireq); + } else { + load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req_64bit.mdt_len = load_img_req.mdt_len; + load_req_64bit.img_len = load_img_req.img_len; + strlcpy(load_req_64bit.app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + load_req_64bit.phy_addr = (uint64_t)pa; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_app_64bit_ireq); + } + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto loadapp_err; + } + + /* SCM_CALL to load the app and get the app_id back */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, + cmd_len, &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load app failed\n"); + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + ret = -EINVAL; + goto loadapp_err; + } + + if (resp.result == QSEOS_RESULT_FAILURE) { + pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n"); + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + ret = -EFAULT; + goto loadapp_err; + } + + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed 
err: %d\n", + ret); + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + ret = -EFAULT; + goto loadapp_err; + } + } + + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("scm_call failed resp.result unknown, %d\n", + resp.result); + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + ret = -EFAULT; + goto loadapp_err; + } + + app_id = resp.data; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + pr_err("kmalloc failed\n"); + ret = -ENOMEM; + goto loadapp_err; + } + entry->app_id = app_id; + entry->ref_cnt = 1; + entry->app_arch = load_img_req.app_arch; + /* + * keymaster app may be first loaded as "keymaste" by qseecomd, + * and then used as "keymaster" on some targets. To avoid app + * name checking error, register "keymaster" into app_list and + * thread private data. + */ + if (!strcmp(load_img_req.img_name, "keymaste")) + strlcpy(entry->app_name, "keymaster", + MAX_APP_NAME_SIZE); + else + strlcpy(entry->app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + entry->app_blocked = false; + entry->blocked_on_listener_id = 0; + + /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_add_tail(&entry->list, &qseecom.registered_app_list_head); + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + + pr_warn("App with id %d (%s) now loaded\n", app_id, + (char *)(load_img_req.img_name)); + } + data->client.app_id = app_id; + data->client.app_arch = load_img_req.app_arch; + if (!strcmp(load_img_req.img_name, "keymaste")) + strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE); + else + strlcpy(data->client.app_name, load_img_req.img_name, + MAX_APP_NAME_SIZE); + load_img_req.app_id = app_id; + if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + if (first_time == true) { + spin_lock_irqsave( + &qseecom.registered_app_list_lock, flags); + list_del(&entry->list); + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + kzfree(entry); + } + } + +loadapp_err: + __qseecom_disable_clk_scale_down(data); +enable_clk_err: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + return ret; +} + +static int __qseecom_cleanup_app(struct qseecom_dev_handle *data) +{ + int ret = 1; /* Set unload app */ + wake_up_all(&qseecom.send_resp_wq); + if (qseecom.qsee_reentrancy_support) + mutex_unlock(&app_access_lock); + while (atomic_read(&data->ioctl_count) > 1) { + if (wait_event_freezable(data->abort_wq, + atomic_read(&data->ioctl_count) <= 1)) { + pr_err("Interrupted from abort\n"); + ret = -ERESTARTSYS; + break; + } + } + if (qseecom.qsee_reentrancy_support) + mutex_lock(&app_access_lock); + return ret; +} + +static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data) +{ + int ret = 0; + if (!IS_ERR_OR_NULL(data->client.ihandle)) { + ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle); + ion_free(qseecom.ion_clnt, data->client.ihandle); + data->client.ihandle = NULL; + } + return ret; +} + +static int qseecom_unload_app(struct qseecom_dev_handle *data, + bool app_crash) +{ + unsigned long flags; + unsigned long flags1; + int ret = 0; + struct qseecom_command_scm_resp resp; + struct qseecom_registered_app_list *ptr_app = NULL; + bool unload = false; + bool found_app = false; + bool found_dead_app = false; + + if 
(!data) { + pr_err("Invalid/uninitialized device handle\n"); + return -EINVAL; + } + + if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) { + pr_debug("Do not unload keymaster app from tz\n"); + goto unload_exit; + } + + __qseecom_cleanup_app(data); + __qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID); + + if (data->client.app_id > 0) { + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if (ptr_app->app_id == data->client.app_id) { + if (!strcmp((void *)ptr_app->app_name, + (void *)data->client.app_name)) { + found_app = true; + if (ptr_app->app_blocked) + app_crash = false; + if (app_crash || ptr_app->ref_cnt == 1) + unload = true; + break; + } else { + found_dead_app = true; + break; + } + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + if (found_app == false && found_dead_app == false) { + pr_err("Cannot find app with id = %d (%s)\n", + data->client.app_id, + (char *)data->client.app_name); + ret = -EINVAL; + goto unload_exit; + } + } + + if (found_dead_app) + pr_warn("cleanup app_id %d(%s)\n", data->client.app_id, + (char *)data->client.app_name); + + if (unload) { + struct qseecom_unload_app_ireq req; + /* Populate the structure for sending scm call to load image */ + req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND; + req.app_id = data->client.app_id; + + /* SCM_CALL to unload the app */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(struct qseecom_unload_app_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to unload app (id = %d) failed\n", + req.app_id); + ret = -EFAULT; + goto unload_exit; + } else { + pr_warn("App id %d now unloaded\n", req.app_id); + } + if (resp.result == QSEOS_RESULT_FAILURE) { + pr_err("app (%d) unload_failed!!\n", + data->client.app_id); + ret = -EFAULT; + goto unload_exit; + } + if (resp.result == QSEOS_RESULT_SUCCESS) + pr_debug("App (%d) is unloaded!!\n", + data->client.app_id); + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd fail err: %d\n", + ret); + goto unload_exit; + } + } + } + + if (found_app) { + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1); + if (app_crash) { + ptr_app->ref_cnt = 0; + pr_debug("app_crash: ref_count = 0\n"); + } else { + if (ptr_app->ref_cnt == 1) { + ptr_app->ref_cnt = 0; + pr_debug("ref_count set to 0\n"); + } else { + ptr_app->ref_cnt--; + pr_debug("Can't unload app(%d) inuse\n", + ptr_app->app_id); + } + } + if (unload) { + list_del(&ptr_app->list); + kzfree(ptr_app); + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags1); + } +unload_exit: + qseecom_unmap_ion_allocated_memory(data); + data->released = true; + return ret; +} + +static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data, + unsigned long virt) +{ + return data->client.sb_phys + (virt - data->client.user_virt_sb_base); +} + +static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data, + unsigned long virt) +{ + return (uintptr_t)data->client.sb_virt + + (virt - data->client.user_virt_sb_base); +} + +int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr, + struct qseecom_send_svc_cmd_req *req_ptr, + struct qseecom_client_send_service_ireq *send_svc_ireq_ptr) +{ + int ret = 0; + void *req_buf = NULL; + + if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) { + pr_err("Error with pointer: req_ptr = %pK, 
send_svc_ptr = %pK\n", + req_ptr, send_svc_ireq_ptr); + return -EINVAL; + } + + /* Clients need to ensure req_buf is at base offset of shared buffer */ + if ((uintptr_t)req_ptr->cmd_req_buf != + data_ptr->client.user_virt_sb_base) { + pr_err("cmd buf not pointing to base offset of shared buffer\n"); + return -EINVAL; + } + + if (data_ptr->client.sb_length < + sizeof(struct qseecom_rpmb_provision_key)) { + pr_err("shared buffer is too small to hold key type\n"); + return -EINVAL; + } + req_buf = data_ptr->client.sb_virt; + + send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id; + send_svc_ireq_ptr->key_type = + ((struct qseecom_rpmb_provision_key *)req_buf)->key_type; + send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len; + send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->resp_buf)); + send_svc_ireq_ptr->rsp_len = req_ptr->resp_len; + + return ret; +} + +int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr, + struct qseecom_send_svc_cmd_req *req_ptr, + struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr) +{ + int ret = 0; + uint32_t reqd_len_sb_in = 0; + + if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) { + pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n", + req_ptr, send_svc_ireq_ptr); + return -EINVAL; + } + + reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len; + if (reqd_len_sb_in > data_ptr->client.sb_length) { + pr_err("Not enough memory to fit cmd_buf and resp_buf. "); + pr_err("Required: %u, Available: %zu\n", + reqd_len_sb_in, data_ptr->client.sb_length); + return -ENOMEM; + } + + send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id; + send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len; + send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->resp_buf)); + send_svc_ireq_ptr->rsp_len = req_ptr->resp_len; + + send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data_ptr, (uintptr_t)req_ptr->cmd_req_buf)); + + + return ret; +} + +static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data, + struct qseecom_send_svc_cmd_req *req) +{ + if (!req || !req->resp_buf || !req->cmd_req_buf) { + pr_err("req or cmd buffer or response buffer is null\n"); + return -EINVAL; + } + + if (!data || !data->client.ihandle) { + pr_err("Client or client handle is not initialized\n"); + return -EINVAL; + } + + if (data->client.sb_virt == NULL) { + pr_err("sb_virt null\n"); + return -EINVAL; + } + + if (data->client.user_virt_sb_base == 0) { + pr_err("user_virt_sb_base is null\n"); + return -EINVAL; + } + + if (data->client.sb_length == 0) { + pr_err("sb_length is 0\n"); + return -EINVAL; + } + + if (((uintptr_t)req->cmd_req_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->cmd_req_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } + if (((uintptr_t)req->resp_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->resp_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + if ((req->cmd_req_len == 0) || (req->resp_len == 0) || + (req->cmd_req_len > data->client.sb_length) || + (req->resp_len > data->client.sb_length)) { + pr_err("cmd buf length or response buf length not valid\n"); + return -EINVAL; + } + if (req->cmd_req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len\n"); + 
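Editor's illustrative sketch (not part of the patch): the validation above boils down to two ideas. Every user-supplied (buffer, length) pair must lie entirely inside the client's registered shared buffer, and every addition used in the comparisons must be guarded against wrap-around before it is evaluated. The condensed, self-contained checks below capture that shape; the helper names are hypothetical and are not the driver's exact functions.

#include <stdint.h>
#include <stddef.h>
#include <limits.h>

static int buf_within_shared_buffer_sketch(uintptr_t buf, uint32_t len,
					   uintptr_t sb_base, size_t sb_len)
{
	/* reject wrap-around before any sums are compared */
	if (buf > UINTPTR_MAX - len || sb_base > UINTPTR_MAX - sb_len)
		return 0;
	if (len == 0 || len > sb_len)
		return 0;
	if (buf < sb_base || buf >= sb_base + sb_len)
		return 0;
	return buf + len <= sb_base + sb_len;
}

static int req_lens_fit_sketch(uint32_t cmd_req_len, uint32_t resp_len,
			       size_t sb_len)
{
	if (cmd_req_len > UINT_MAX - resp_len)	/* req_len + rsp_len overflow */
		return 0;
	return (size_t)cmd_req_len + resp_len <= sb_len;
}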
return -EINVAL; + } + + if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) { + pr_debug("Not enough memory to fit cmd_buf.\n"); + pr_debug("resp_buf. Required: %u, Available: %zu\n", + (req->cmd_req_len + req->resp_len), + data->client.sb_length); + return -ENOMEM; + } + if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) { + pr_err("Integer overflow in req_len & cmd_req_buf\n"); + return -EINVAL; + } + if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) { + pr_err("Integer overflow in resp_len & resp_buf\n"); + return -EINVAL; + } + if (data->client.user_virt_sb_base > + (ULONG_MAX - data->client.sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length)) || + (((uintptr_t)req->resp_buf + req->resp_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buf or resp buf is out of shared buffer region\n"); + return -EINVAL; + } + return 0; +} + +static int qseecom_send_service_cmd(struct qseecom_dev_handle *data, + void __user *argp) +{ + int ret = 0; + struct qseecom_client_send_service_ireq send_svc_ireq; + struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq; + struct qseecom_command_scm_resp resp; + struct qseecom_send_svc_cmd_req req; + void *send_req_ptr; + size_t req_buf_size; + + /*struct qseecom_command_scm_resp resp;*/ + + if (copy_from_user(&req, + (void __user *)argp, + sizeof(req))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + if (__validate_send_service_cmd_inputs(data, &req)) + return -EINVAL; + + data->type = QSEECOM_SECURE_SERVICE; + + switch (req.cmd_id) { + case QSEOS_RPMB_PROVISION_KEY_COMMAND: + case QSEOS_RPMB_ERASE_COMMAND: + case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: + send_req_ptr = &send_svc_ireq; + req_buf_size = sizeof(send_svc_ireq); + if (__qseecom_process_rpmb_svc_cmd(data, &req, + send_req_ptr)) + return -EINVAL; + break; + case QSEOS_FSM_LTEOTA_REQ_CMD: + case QSEOS_FSM_LTEOTA_REQ_RSP_CMD: + case QSEOS_FSM_IKE_REQ_CMD: + case QSEOS_FSM_IKE_REQ_RSP_CMD: + case QSEOS_FSM_OEM_FUSE_WRITE_ROW: + case QSEOS_FSM_OEM_FUSE_READ_ROW: + case QSEOS_FSM_ENCFS_REQ_CMD: + case QSEOS_FSM_ENCFS_REQ_RSP_CMD: + send_req_ptr = &send_fsm_key_svc_ireq; + req_buf_size = sizeof(send_fsm_key_svc_ireq); + if (__qseecom_process_fsm_key_svc_cmd(data, &req, + send_req_ptr)) + return -EINVAL; + break; + default: + pr_err("Unsupported cmd_id %d\n", req.cmd_id); + return -EINVAL; + } + + if (qseecom.support_bus_scaling) { + ret = qseecom_scale_bus_bandwidth_timer(HIGH); + if (ret) { + pr_err("Fail to set bw HIGH\n"); + return ret; + } + } else { + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clocks with err %d\n", ret); + goto exit; + } + } + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, data->client.sb_length, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit; + } + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + (const void *)send_req_ptr, + req_buf_size, &resp, sizeof(resp)); + if (ret) { + pr_err("qseecom_scm_call failed with err: %d\n", ret); + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } else { + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + } + goto exit; + } + ret = 
msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, data->client.sb_length, + ION_IOC_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit; + } + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + pr_debug("qseos_result_incomplete\n"); + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd fail with result: %d\n", + resp.result); + } + if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) { + pr_warn("RPMB key status is 0x%x\n", resp.result); + if (put_user(resp.result, + (uint32_t __user *)req.resp_buf)) { + ret = -EINVAL; + goto exit; + } + ret = 0; + } + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm call failed with resp.result: %d\n", resp.result); + ret = -EINVAL; + break; + default: + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + break; + } + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } else { + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + } + +exit: + return ret; +} + +static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data, + struct qseecom_send_cmd_req *req) + +{ + if (!data || !data->client.ihandle) { + pr_err("Client or client handle is not initialized\n"); + return -EINVAL; + } + if (((req->resp_buf == NULL) && (req->resp_len != 0)) || + (req->cmd_req_buf == NULL)) { + pr_err("cmd buffer or response buffer is null\n"); + return -EINVAL; + } + if (((uintptr_t)req->cmd_req_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->cmd_req_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } + if (((uintptr_t)req->resp_buf < + data->client.user_virt_sb_base) || + ((uintptr_t)req->resp_buf >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + if ((req->cmd_req_len == 0) || + (req->cmd_req_len > data->client.sb_length) || + (req->resp_len > data->client.sb_length)) { + pr_err("cmd buf length or response buf length not valid\n"); + return -EINVAL; + } + if (req->cmd_req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len\n"); + return -EINVAL; + } + + if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) { + pr_debug("Not enough memory to fit cmd_buf.\n"); + pr_debug("resp_buf. 
Required: %u, Available: %zu\n", + (req->cmd_req_len + req->resp_len), + data->client.sb_length); + return -ENOMEM; + } + if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) { + pr_err("Integer overflow in req_len & cmd_req_buf\n"); + return -EINVAL; + } + if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) { + pr_err("Integer overflow in resp_len & resp_buf\n"); + return -EINVAL; + } + if (data->client.user_virt_sb_base > + (ULONG_MAX - data->client.sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length)) || + (((uintptr_t)req->resp_buf + req->resp_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buf or resp buf is out of shared buffer region\n"); + return -EINVAL; + } + return 0; +} + +int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp, + struct qseecom_registered_app_list *ptr_app, + struct qseecom_dev_handle *data) +{ + int ret = 0; + + switch (resp->result) { + case QSEOS_RESULT_BLOCKED_ON_LISTENER: + pr_warn("App(%d) %s is blocked on listener %d\n", + data->client.app_id, data->client.app_name, + resp->data); + ret = __qseecom_process_reentrancy_blocked_on_listener( + resp, ptr_app, data); + if (ret) { + pr_err("failed to process App(%d) %s is blocked on listener %d\n", + data->client.app_id, data->client.app_name, resp->data); + return ret; + } + + case QSEOS_RESULT_INCOMPLETE: + qseecom.app_block_ref_cnt++; + ptr_app->app_blocked = true; + ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp); + ptr_app->app_blocked = false; + qseecom.app_block_ref_cnt--; + wake_up_interruptible(&qseecom.app_block_wq); + if (ret) + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + return ret; + case QSEOS_RESULT_SUCCESS: + return ret; + default: + pr_err("Response result %d not supported\n", + resp->result); + return -EINVAL; + } +} + +static int __qseecom_send_cmd(struct qseecom_dev_handle *data, + struct qseecom_send_cmd_req *req) +{ + int ret = 0; + int ret2 = 0; + u32 reqd_len_sb_in = 0; + struct qseecom_client_send_data_ireq send_data_req = {0}; + struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0}; + struct qseecom_command_scm_resp resp; + unsigned long flags; + struct qseecom_registered_app_list *ptr_app; + bool found_app = false; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = data->sglistinfo_ptr; + + reqd_len_sb_in = req->cmd_req_len + req->resp_len; + /* find app_id & img_name from list */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", data->client.app_id, + (char *)data->client.app_name); + return -ENOENT; + } + + if (qseecom.qsee_version < QSEE_VERSION_40) { + send_data_req.app_id = data->client.app_id; + send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data, (uintptr_t)req->cmd_req_buf)); + send_data_req.req_len = req->cmd_req_len; + send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys( + data, (uintptr_t)req->resp_buf)); + send_data_req.rsp_len = req->resp_len; + 
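Editor's illustrative sketch (not part of the patch): the req_ptr/rsp_ptr values above are produced by translating the client's userspace pointers into physical addresses. Because __validate_send_cmd_inputs() has already confirmed that both buffers sit inside the registered shared buffer, the translation is a plain offset from the buffer's bases, mirroring __qseecom_uvirt_to_kphys() and __qseecom_uvirt_to_kvirt(); the struct below is a simplified stand-in for the fields kept in data->client.

#include <stdint.h>

struct shared_buf_sketch {
	uintptr_t user_virt_base;	/* client's mapped base (user_virt_sb_base) */
	uint64_t  phys_base;		/* physical base of the same pages (sb_phys) */
	char     *kernel_virt_base;	/* kernel mapping of the buffer (sb_virt)    */
};

static uint64_t uvirt_to_kphys_sketch(const struct shared_buf_sketch *sb,
				      uintptr_t uvirt)
{
	return sb->phys_base + (uvirt - sb->user_virt_base);
}

static void *uvirt_to_kvirt_sketch(const struct shared_buf_sketch *sb,
				   uintptr_t uvirt)
{
	return sb->kernel_virt_base + (uvirt - sb->user_virt_base);
}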
send_data_req.sglistinfo_ptr = + (uint32_t)virt_to_phys(table); + send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&send_data_req; + cmd_len = sizeof(struct qseecom_client_send_data_ireq); + } else { + send_data_req_64bit.app_id = data->client.app_id; + send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data, + (uintptr_t)req->cmd_req_buf); + send_data_req_64bit.req_len = req->cmd_req_len; + send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data, + (uintptr_t)req->resp_buf); + send_data_req_64bit.rsp_len = req->resp_len; + /* check if 32bit app's phys_addr region is under 4GB.*/ + if ((data->client.app_arch == ELFCLASS32) && + ((send_data_req_64bit.req_ptr >= + PHY_ADDR_4G - send_data_req_64bit.req_len) || + (send_data_req_64bit.rsp_ptr >= + PHY_ADDR_4G - send_data_req_64bit.rsp_len))){ + pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n", + data->client.app_name, + send_data_req_64bit.req_ptr, + send_data_req_64bit.req_len, + send_data_req_64bit.rsp_ptr, + send_data_req_64bit.rsp_len); + return -EFAULT; + } + send_data_req_64bit.sglistinfo_ptr = + (uint64_t)virt_to_phys(table); + send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&send_data_req_64bit; + cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq); + } + + if (qseecom.whitelist_support == false || data->use_legacy_cmd == true) + *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND; + else + *(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST; + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, + reqd_len_sb_in, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + __qseecom_reentrancy_check_if_this_app_blocked(ptr_app); + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + + if (qseecom.qsee_reentrancy_support) { + ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + if (ret) + goto exit; + } else { + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + goto exit; + } + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + goto exit; + } + } + } +exit: + ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, data->client.sb_length, + ION_IOC_INV_CACHES); + if (ret2) { + pr_err("cache operation failed %d\n", ret2); + return ret2; + } + return ret; +} + +static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp) +{ + int ret = 0; + struct qseecom_send_cmd_req req; + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (__validate_send_cmd_inputs(data, &req)) + return -EINVAL; + + ret = __qseecom_send_cmd(data, &req); + + if (ret) + return ret; + + return ret; +} + +int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req, + struct qseecom_send_modfd_listener_resp *lstnr_resp, + struct qseecom_dev_handle *data, int i) { + + if ((data->type != 
QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + if ((req->cmd_req_len < sizeof(uint32_t)) || + (req->ifd_data[i].cmd_buf_offset > + req->cmd_req_len - sizeof(uint32_t))) { + pr_err("Invalid offset (req len) 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + if ((lstnr_resp->resp_len < sizeof(uint32_t)) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + lstnr_resp->resp_len - sizeof(uint32_t))) { + pr_err("Invalid offset (lstnr resp len) 0x%x\n", + lstnr_resp->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + return 0; +} + +static int __qseecom_update_cmd_buf(void *msg, bool cleanup, + struct qseecom_dev_handle *data) +{ + struct ion_handle *ihandle; + char *field; + int ret = 0; + int i = 0; + uint32_t len = 0; + struct scatterlist *sg; + struct qseecom_send_modfd_cmd_req *req = NULL; + struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL; + struct qseecom_registered_listener_list *this_lstnr = NULL; + uint32_t offset; + struct sg_table *sg_ptr; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (data->type != QSEECOM_CLIENT_APP)) + return -EFAULT; + + if (msg == NULL) { + pr_err("Invalid address\n"); + return -EINVAL; + } + if (data->type == QSEECOM_LISTENER_SERVICE) { + lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg; + this_lstnr = __qseecom_find_svc(data->listener.id); + if (IS_ERR_OR_NULL(this_lstnr)) { + pr_err("Invalid listener ID\n"); + return -ENOMEM; + } + } else { + req = (struct qseecom_send_modfd_cmd_req *)msg; + } + + for (i = 0; i < MAX_ION_FD; i++) { + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + req->ifd_data[i].fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client can't retrieve the handle\n"); + return -ENOMEM; + } + field = (char *) req->cmd_req_buf + + req->ifd_data[i].cmd_buf_offset; + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + lstnr_resp->ifd_data[i].fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client can't retrieve the handle\n"); + return -ENOMEM; + } + field = lstnr_resp->resp_buf_ptr + + lstnr_resp->ifd_data[i].cmd_buf_offset; + } else { + continue; + } + /* Populate the cmd data structure with the phys_addr */ + sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle); + if (IS_ERR_OR_NULL(sg_ptr)) { + pr_err("IOn client could not retrieve sg table\n"); + goto err; + } + if (sg_ptr->nents == 0) { + pr_err("Num of scattered entries is 0\n"); + goto err; + } + if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) { + pr_err("Num of scattered entries"); + pr_err(" (%d) is greater than max supported %d\n", + sg_ptr->nents, QSEECOM_MAX_SG_ENTRY); + goto err; + } + sg = sg_ptr->sgl; + if (sg_ptr->nents == 1) { + uint32_t *update; + if (__boundary_checks_offset(req, lstnr_resp, data, i)) + goto err; + if ((data->type == QSEECOM_CLIENT_APP && + (data->client.app_arch == ELFCLASS32 || + data->client.app_arch == ELFCLASS64)) || + (data->type == QSEECOM_LISTENER_SERVICE)) { + /* + * Check if sg list phy add region is under 4GB + */ + if ((qseecom.qsee_version >= QSEE_VERSION_40) && + (!cleanup) && + ((uint64_t)sg_dma_address(sg_ptr->sgl) + >= PHY_ADDR_4G - sg->length)) { + pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n", + data->client.app_name, + &(sg_dma_address(sg_ptr->sgl)), + sg->length); + goto err; + } + update = (uint32_t *) 
field; + *update = cleanup ? 0 : + (uint32_t)sg_dma_address(sg_ptr->sgl); + } else { + pr_err("QSEE app arch %u is not supported\n", + data->client.app_arch); + goto err; + } + len += (uint32_t)sg->length; + } else { + struct qseecom_sg_entry *update; + int j = 0; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + + if ((req->cmd_req_len < + SG_ENTRY_SZ * sg_ptr->nents) || + (req->ifd_data[i].cmd_buf_offset > + (req->cmd_req_len - + SG_ENTRY_SZ * sg_ptr->nents))) { + pr_err("Invalid offset = 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + goto err; + } + + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + + if ((lstnr_resp->resp_len < + SG_ENTRY_SZ * sg_ptr->nents) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + (lstnr_resp->resp_len - + SG_ENTRY_SZ * sg_ptr->nents))) { + goto err; + } + } + if ((data->type == QSEECOM_CLIENT_APP && + (data->client.app_arch == ELFCLASS32 || + data->client.app_arch == ELFCLASS64)) || + (data->type == QSEECOM_LISTENER_SERVICE)) { + update = (struct qseecom_sg_entry *)field; + for (j = 0; j < sg_ptr->nents; j++) { + /* + * Check if sg list PA is under 4GB + */ + if ((qseecom.qsee_version >= + QSEE_VERSION_40) && + (!cleanup) && + ((uint64_t)(sg_dma_address(sg)) + >= PHY_ADDR_4G - sg->length)) { + pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n", + data->client.app_name, + &(sg_dma_address(sg)), + sg->length); + goto err; + } + update->phys_addr = cleanup ? 0 : + (uint32_t)sg_dma_address(sg); + update->len = cleanup ? 0 : sg->length; + update++; + len += sg->length; + sg = sg_next(sg); + } + } else { + pr_err("QSEE app arch %u is not supported\n", + data->client.app_arch); + goto err; + } + } + + if (cleanup) { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ihandle, NULL, len, + ION_IOC_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + } else { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ihandle, NULL, len, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + if (data->type == QSEECOM_CLIENT_APP) { + offset = req->ifd_data[i].cmd_buf_offset; + data->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 0, offset); + data->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? + sg->length : sg_ptr->nents; + data->sglist_cnt = i + 1; + } else { + offset = (lstnr_resp->ifd_data[i].cmd_buf_offset + + (uintptr_t)lstnr_resp->resp_buf_ptr - + (uintptr_t)this_lstnr->sb_virt); + this_lstnr->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 0, offset); + this_lstnr->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? 
+ sg->length : sg_ptr->nents; + this_lstnr->sglist_cnt = i + 1; + } + } + /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + } + return ret; +err: + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + return -ENOMEM; +} + +static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data, + char *field, uint32_t fd_idx, struct sg_table *sg_ptr) +{ + struct scatterlist *sg = sg_ptr->sgl; + struct qseecom_sg_entry_64bit *sg_entry; + struct qseecom_sg_list_buf_hdr_64bit *buf_hdr; + void *buf; + uint i; + size_t size; + dma_addr_t coh_pmem; + + if (fd_idx >= MAX_ION_FD) { + pr_err("fd_idx [%d] is invalid\n", fd_idx); + return -ENOMEM; + } + buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field; + memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT); + /* Allocate a contiguous kernel buffer */ + size = sg_ptr->nents * SG_ENTRY_SZ_64BIT; + size = (size + PAGE_SIZE) & PAGE_MASK; + buf = dma_alloc_coherent(qseecom.pdev, + size, &coh_pmem, GFP_KERNEL); + if (buf == NULL) { + pr_err("failed to alloc memory for sg buf\n"); + return -ENOMEM; + } + /* update qseecom_sg_list_buf_hdr_64bit */ + buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2; + buf_hdr->new_buf_phys_addr = coh_pmem; + buf_hdr->nents_total = sg_ptr->nents; + /* save the left sg entries into new allocated buf */ + sg_entry = (struct qseecom_sg_entry_64bit *)buf; + for (i = 0; i < sg_ptr->nents; i++) { + sg_entry->phys_addr = (uint64_t)sg_dma_address(sg); + sg_entry->len = sg->length; + sg_entry++; + sg = sg_next(sg); + } + + data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true; + data->client.sec_buf_fd[fd_idx].vbase = buf; + data->client.sec_buf_fd[fd_idx].pbase = coh_pmem; + data->client.sec_buf_fd[fd_idx].size = size; + + return 0; +} + +static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup, + struct qseecom_dev_handle *data) +{ + struct ion_handle *ihandle; + char *field; + int ret = 0; + int i = 0; + uint32_t len = 0; + struct scatterlist *sg; + struct qseecom_send_modfd_cmd_req *req = NULL; + struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL; + struct qseecom_registered_listener_list *this_lstnr = NULL; + uint32_t offset; + struct sg_table *sg_ptr; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (data->type != QSEECOM_CLIENT_APP)) + return -EFAULT; + + if (msg == NULL) { + pr_err("Invalid address\n"); + return -EINVAL; + } + if (data->type == QSEECOM_LISTENER_SERVICE) { + lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg; + this_lstnr = __qseecom_find_svc(data->listener.id); + if (IS_ERR_OR_NULL(this_lstnr)) { + pr_err("Invalid listener ID\n"); + return -ENOMEM; + } + } else { + req = (struct qseecom_send_modfd_cmd_req *)msg; + } + + for (i = 0; i < MAX_ION_FD; i++) { + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + req->ifd_data[i].fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client can't retrieve the handle\n"); + return -ENOMEM; + } + field = (char *) req->cmd_req_buf + + req->ifd_data[i].cmd_buf_offset; + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + lstnr_resp->ifd_data[i].fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client can't retrieve the handle\n"); + return -ENOMEM; + } + field = lstnr_resp->resp_buf_ptr + + lstnr_resp->ifd_data[i].cmd_buf_offset; + } else { + continue; + } + /* Populate the cmd data 
structure with the phys_addr */ + sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle); + if (IS_ERR_OR_NULL(sg_ptr)) { + pr_err("IOn client could not retrieve sg table\n"); + goto err; + } + if (sg_ptr->nents == 0) { + pr_err("Num of scattered entries is 0\n"); + goto err; + } + if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) { + pr_warn("Num of scattered entries"); + pr_warn(" (%d) is greater than %d\n", + sg_ptr->nents, QSEECOM_MAX_SG_ENTRY); + if (cleanup) { + if (data->client.sec_buf_fd[i].is_sec_buf_fd && + data->client.sec_buf_fd[i].vbase) + dma_free_coherent(qseecom.pdev, + data->client.sec_buf_fd[i].size, + data->client.sec_buf_fd[i].vbase, + data->client.sec_buf_fd[i].pbase); + } else { + ret = __qseecom_allocate_sg_list_buffer(data, + field, i, sg_ptr); + if (ret) { + pr_err("Failed to allocate sg list buffer\n"); + goto err; + } + } + len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT; + sg = sg_ptr->sgl; + goto cleanup; + } + sg = sg_ptr->sgl; + if (sg_ptr->nents == 1) { + uint64_t *update_64bit; + if (__boundary_checks_offset(req, lstnr_resp, data, i)) + goto err; + /* 64bit app uses 64bit address */ + update_64bit = (uint64_t *) field; + *update_64bit = cleanup ? 0 : + (uint64_t)sg_dma_address(sg_ptr->sgl); + len += (uint32_t)sg->length; + } else { + struct qseecom_sg_entry_64bit *update_64bit; + int j = 0; + + if ((data->type != QSEECOM_LISTENER_SERVICE) && + (req->ifd_data[i].fd > 0)) { + + if ((req->cmd_req_len < + SG_ENTRY_SZ_64BIT * sg_ptr->nents) || + (req->ifd_data[i].cmd_buf_offset > + (req->cmd_req_len - + SG_ENTRY_SZ_64BIT * sg_ptr->nents))) { + pr_err("Invalid offset = 0x%x\n", + req->ifd_data[i].cmd_buf_offset); + goto err; + } + + } else if ((data->type == QSEECOM_LISTENER_SERVICE) && + (lstnr_resp->ifd_data[i].fd > 0)) { + + if ((lstnr_resp->resp_len < + SG_ENTRY_SZ_64BIT * sg_ptr->nents) || + (lstnr_resp->ifd_data[i].cmd_buf_offset > + (lstnr_resp->resp_len - + SG_ENTRY_SZ_64BIT * sg_ptr->nents))) { + goto err; + } + } + /* 64bit app uses 64bit address */ + update_64bit = (struct qseecom_sg_entry_64bit *)field; + for (j = 0; j < sg_ptr->nents; j++) { + update_64bit->phys_addr = cleanup ? 0 : + (uint64_t)sg_dma_address(sg); + update_64bit->len = cleanup ? 0 : + (uint32_t)sg->length; + update_64bit++; + len += sg->length; + sg = sg_next(sg); + } + } +cleanup: + if (cleanup) { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ihandle, NULL, len, + ION_IOC_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + } else { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ihandle, NULL, len, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + if (data->type == QSEECOM_CLIENT_APP) { + offset = req->ifd_data[i].cmd_buf_offset; + data->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 1, offset); + data->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? + sg->length : sg_ptr->nents; + data->sglist_cnt = i + 1; + } else { + offset = (lstnr_resp->ifd_data[i].cmd_buf_offset + + (uintptr_t)lstnr_resp->resp_buf_ptr - + (uintptr_t)this_lstnr->sb_virt); + this_lstnr->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 1, offset); + this_lstnr->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? 
+ sg->length : sg_ptr->nents; + this_lstnr->sglist_cnt = i + 1; + } + } + /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + } + return ret; +err: + for (i = 0; i < MAX_ION_FD; i++) + if (data->client.sec_buf_fd[i].is_sec_buf_fd && + data->client.sec_buf_fd[i].vbase) + dma_free_coherent(qseecom.pdev, + data->client.sec_buf_fd[i].size, + data->client.sec_buf_fd[i].vbase, + data->client.sec_buf_fd[i].pbase); + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + return -ENOMEM; +} + +static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data, + void __user *argp, + bool is_64bit_addr) +{ + int ret = 0; + int i; + struct qseecom_send_modfd_cmd_req req; + struct qseecom_send_cmd_req send_cmd_req; + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + send_cmd_req.cmd_req_buf = req.cmd_req_buf; + send_cmd_req.cmd_req_len = req.cmd_req_len; + send_cmd_req.resp_buf = req.resp_buf; + send_cmd_req.resp_len = req.resp_len; + + if (__validate_send_cmd_inputs(data, &send_cmd_req)) + return -EINVAL; + + /* validate offsets */ + for (i = 0; i < MAX_ION_FD; i++) { + if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) { + pr_err("Invalid offset %d = 0x%x\n", + i, req.ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.cmd_req_buf); + req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.resp_buf); + + if (!is_64bit_addr) { + ret = __qseecom_update_cmd_buf(&req, false, data); + if (ret) + return ret; + ret = __qseecom_send_cmd(data, &send_cmd_req); + if (ret) + return ret; + ret = __qseecom_update_cmd_buf(&req, true, data); + if (ret) + return ret; + } else { + ret = __qseecom_update_cmd_buf_64(&req, false, data); + if (ret) + return ret; + ret = __qseecom_send_cmd(data, &send_cmd_req); + if (ret) + return ret; + ret = __qseecom_update_cmd_buf_64(&req, true, data); + if (ret) + return ret; + } + + return ret; +} + +static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_cmd(data, argp, false); +} + +static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_cmd(data, argp, true); +} + + + +static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data, + struct qseecom_registered_listener_list *svc) +{ + int ret; + ret = (svc->rcv_req_flag != 0); + return ret || data->abort; +} + +static int qseecom_receive_req(struct qseecom_dev_handle *data) +{ + int ret = 0; + struct qseecom_registered_listener_list *this_lstnr; + + this_lstnr = __qseecom_find_svc(data->listener.id); + if (!this_lstnr) { + pr_err("Invalid listener ID\n"); + return -ENODATA; + } + + while (1) { + if (wait_event_freezable(this_lstnr->rcv_req_wq, + __qseecom_listener_has_rcvd_req(data, + this_lstnr))) { + pr_debug("Interrupted: exiting Listener Service = %d\n", + (uint32_t)data->listener.id); + /* woken up for different reason */ + return -ERESTARTSYS; + } + + if (data->abort) { + pr_err("Aborting Listener Service = %d\n", + (uint32_t)data->listener.id); + return -ENODEV; + } + this_lstnr->rcv_req_flag = 0; + break; + } + return ret; +} + +static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry) +{ + unsigned char app_arch = 0; + struct elf32_hdr *ehdr; + struct elf64_hdr *ehdr64; + + app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS); + + 
switch (app_arch) { + case ELFCLASS32: { + ehdr = (struct elf32_hdr *)fw_entry->data; + if (fw_entry->size < sizeof(*ehdr)) { + pr_err("%s: Not big enough to be an elf32 header\n", + qseecom.pdev->init_name); + return false; + } + if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { + pr_err("%s: Not an elf32 header\n", + qseecom.pdev->init_name); + return false; + } + if (ehdr->e_phnum == 0) { + pr_err("%s: No loadable segments\n", + qseecom.pdev->init_name); + return false; + } + if (sizeof(struct elf32_phdr) * ehdr->e_phnum + + sizeof(struct elf32_hdr) > fw_entry->size) { + pr_err("%s: Program headers not within mdt\n", + qseecom.pdev->init_name); + return false; + } + break; + } + case ELFCLASS64: { + ehdr64 = (struct elf64_hdr *)fw_entry->data; + if (fw_entry->size < sizeof(*ehdr64)) { + pr_err("%s: Not big enough to be an elf64 header\n", + qseecom.pdev->init_name); + return false; + } + if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) { + pr_err("%s: Not an elf64 header\n", + qseecom.pdev->init_name); + return false; + } + if (ehdr64->e_phnum == 0) { + pr_err("%s: No loadable segments\n", + qseecom.pdev->init_name); + return false; + } + if (sizeof(struct elf64_phdr) * ehdr64->e_phnum + + sizeof(struct elf64_hdr) > fw_entry->size) { + pr_err("%s: Program headers not within mdt\n", + qseecom.pdev->init_name); + return false; + } + break; + } + default: { + pr_err("QSEE app arch %u is not supported\n", app_arch); + return false; + } + } + return true; +} + +static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size, + uint32_t *app_arch) +{ + int ret = -1; + int i = 0, rc = 0; + const struct firmware *fw_entry = NULL; + char fw_name[MAX_APP_NAME_SIZE]; + struct elf32_hdr *ehdr; + struct elf64_hdr *ehdr64; + int num_images = 0; + + snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname); + rc = request_firmware(&fw_entry, fw_name, qseecom.pdev); + if (rc) { + pr_err("error with request_firmware\n"); + ret = -EIO; + goto err; + } + if (!__qseecom_is_fw_image_valid(fw_entry)) { + ret = -EIO; + goto err; + } + *app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS); + *fw_size = fw_entry->size; + if (*app_arch == ELFCLASS32) { + ehdr = (struct elf32_hdr *)fw_entry->data; + num_images = ehdr->e_phnum; + } else if (*app_arch == ELFCLASS64) { + ehdr64 = (struct elf64_hdr *)fw_entry->data; + num_images = ehdr64->e_phnum; + } else { + pr_err("QSEE %s app, arch %u is not supported\n", + appname, *app_arch); + ret = -EIO; + goto err; + } + pr_debug("QSEE %s app, arch %u\n", appname, *app_arch); + release_firmware(fw_entry); + fw_entry = NULL; + for (i = 0; i < num_images; i++) { + memset(fw_name, 0, sizeof(fw_name)); + snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i); + ret = request_firmware(&fw_entry, fw_name, qseecom.pdev); + if (ret) + goto err; + if (*fw_size > U32_MAX - fw_entry->size) { + pr_err("QSEE %s app file size overflow\n", appname); + ret = -EINVAL; + goto err; + } + *fw_size += fw_entry->size; + release_firmware(fw_entry); + fw_entry = NULL; + } + + return ret; +err: + if (fw_entry) + release_firmware(fw_entry); + *fw_size = 0; + return ret; +} + +static int __qseecom_get_fw_data(const char *appname, u8 *img_data, + uint32_t fw_size, + struct qseecom_load_app_ireq *load_req) +{ + int ret = -1; + int i = 0, rc = 0; + const struct firmware *fw_entry = NULL; + char fw_name[MAX_APP_NAME_SIZE]; + u8 *img_data_ptr = img_data; + struct elf32_hdr *ehdr; + struct elf64_hdr *ehdr64; + int num_images = 0; + unsigned char app_arch = 0; + + snprintf(fw_name, sizeof(fw_name), 
"%s.mdt", appname); + rc = request_firmware(&fw_entry, fw_name, qseecom.pdev); + if (rc) { + ret = -EIO; + goto err; + } + + load_req->img_len = fw_entry->size; + if (load_req->img_len > fw_size) { + pr_err("app %s size %zu is larger than buf size %u\n", + appname, fw_entry->size, fw_size); + ret = -EINVAL; + goto err; + } + memcpy(img_data_ptr, fw_entry->data, fw_entry->size); + img_data_ptr = img_data_ptr + fw_entry->size; + load_req->mdt_len = fw_entry->size; /*Get MDT LEN*/ + + app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS); + if (app_arch == ELFCLASS32) { + ehdr = (struct elf32_hdr *)fw_entry->data; + num_images = ehdr->e_phnum; + } else if (app_arch == ELFCLASS64) { + ehdr64 = (struct elf64_hdr *)fw_entry->data; + num_images = ehdr64->e_phnum; + } else { + pr_err("QSEE %s app, arch %u is not supported\n", + appname, app_arch); + ret = -EIO; + goto err; + } + release_firmware(fw_entry); + fw_entry = NULL; + for (i = 0; i < num_images; i++) { + snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i); + ret = request_firmware(&fw_entry, fw_name, qseecom.pdev); + if (ret) { + pr_err("Failed to locate blob %s\n", fw_name); + goto err; + } + if ((fw_entry->size > U32_MAX - load_req->img_len) || + (fw_entry->size + load_req->img_len > fw_size)) { + pr_err("Invalid file size for %s\n", fw_name); + ret = -EINVAL; + goto err; + } + memcpy(img_data_ptr, fw_entry->data, fw_entry->size); + img_data_ptr = img_data_ptr + fw_entry->size; + load_req->img_len += fw_entry->size; + release_firmware(fw_entry); + fw_entry = NULL; + } + return ret; +err: + release_firmware(fw_entry); + return ret; +} + +static int __qseecom_allocate_img_data(struct ion_handle **pihandle, + u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr) +{ + size_t len = 0; + int ret = 0; + ion_phys_addr_t pa; + struct ion_handle *ihandle = NULL; + u8 *img_data = NULL; + + ihandle = ion_alloc(qseecom.ion_clnt, fw_size, + SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0); + + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("ION alloc failed\n"); + return -ENOMEM; + } + img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt, + ihandle); + + if (IS_ERR_OR_NULL(img_data)) { + pr_err("ION memory mapping for image loading failed\n"); + ret = -ENOMEM; + goto exit_ion_free; + } + /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len); + if (ret) { + pr_err("physical memory retrieval failure\n"); + ret = -EIO; + goto exit_ion_unmap_kernel; + } + + *pihandle = ihandle; + *data = img_data; + *paddr = pa; + return ret; + +exit_ion_unmap_kernel: + ion_unmap_kernel(qseecom.ion_clnt, ihandle); +exit_ion_free: + ion_free(qseecom.ion_clnt, ihandle); + ihandle = NULL; + return ret; +} + +static void __qseecom_free_img_data(struct ion_handle **ihandle) +{ + ion_unmap_kernel(qseecom.ion_clnt, *ihandle); + ion_free(qseecom.ion_clnt, *ihandle); + *ihandle = NULL; +} + +static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname, + uint32_t *app_id) +{ + int ret = -1; + uint32_t fw_size = 0; + struct qseecom_load_app_ireq load_req = {0, 0, 0, 0}; + struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0}; + struct qseecom_command_scm_resp resp; + u8 *img_data = NULL; + ion_phys_addr_t pa = 0; + struct ion_handle *ihandle = NULL; + void *cmd_buf = NULL; + size_t cmd_len; + uint32_t app_arch = 0; + + if (!data || !appname || !app_id) { + pr_err("Null pointer to data or appname or appid\n"); + return -EINVAL; + } + *app_id = 0; + if (__qseecom_get_fw_size(appname, &fw_size, &app_arch)) + return 
-EIO; + data->client.app_arch = app_arch; + + /* Check and load cmnlib */ + if (qseecom.qsee_version > QSEEE_VERSION_00) { + if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) { + ret = qseecom_load_commonlib_image(data, "cmnlib"); + if (ret) { + pr_err("failed to load cmnlib\n"); + return -EIO; + } + qseecom.commonlib_loaded = true; + pr_debug("cmnlib is loaded\n"); + } + + if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) { + ret = qseecom_load_commonlib_image(data, "cmnlib64"); + if (ret) { + pr_err("failed to load cmnlib64\n"); + return -EIO; + } + qseecom.commonlib64_loaded = true; + pr_debug("cmnlib64 is loaded\n"); + } + } + + ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa); + if (ret) + return ret; + + ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + + /* Populate the load_req parameters */ + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req.mdt_len = load_req.mdt_len; + load_req.img_len = load_req.img_len; + strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE); + load_req.phy_addr = (uint32_t)pa; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_app_ireq); + } else { + load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND; + load_req_64bit.mdt_len = load_req.mdt_len; + load_req_64bit.img_len = load_req.img_len; + strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE); + load_req_64bit.phy_addr = (uint64_t)pa; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_app_64bit_ireq); + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + } + + ret = __qseecom_enable_clk_scale_up(data); + if (ret) { + ret = -EIO; + goto exit_unregister_bus_bw_need; + } + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, + img_data, fw_size, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit_disable_clk_vote; + } + + /* SCM_CALL to load the image */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load failed : ret %d\n", ret); + ret = -EIO; + goto exit_disable_clk_vote; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + *app_id = resp.data; + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) + pr_err("process_incomplete_cmd FAILED\n"); + else + *app_id = resp.data; + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm call failed with response QSEOS_RESULT FAILURE\n"); + break; + default: + pr_err("scm call return unknown response %d\n", resp.result); + ret = -EINVAL; + break; + } + +exit_disable_clk_vote: + __qseecom_disable_clk_scale_down(data); + +exit_unregister_bus_bw_need: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + +exit_free_img_data: + __qseecom_free_img_data(&ihandle); + return ret; +} + +static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data, + char *cmnlib_name) +{ + int ret = 0; + uint32_t fw_size = 0; + struct qseecom_load_app_ireq load_req = {0, 0, 0, 0}; + struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0}; + struct qseecom_command_scm_resp resp; + u8 *img_data = NULL; + 
ion_phys_addr_t pa = 0; + void *cmd_buf = NULL; + size_t cmd_len; + uint32_t app_arch = 0; + struct ion_handle *cmnlib_ion_handle = NULL; + + if (!cmnlib_name) { + pr_err("cmnlib_name is NULL\n"); + return -EINVAL; + } + if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) { + pr_err("The cmnlib_name (%s) with length %zu is not valid\n", + cmnlib_name, strlen(cmnlib_name)); + return -EINVAL; + } + + if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch)) + return -EIO; + + ret = __qseecom_allocate_img_data(&cmnlib_ion_handle, + &img_data, fw_size, &pa); + if (ret) + return -EIO; + + ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.phy_addr = (uint32_t)pa; + load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_lib_image_ireq); + } else { + load_req_64bit.phy_addr = (uint64_t)pa; + load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND; + load_req_64bit.img_len = load_req.img_len; + load_req_64bit.mdt_len = load_req.mdt_len; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq); + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) { + ret = -EIO; + goto exit_free_img_data; + } + } + + /* Vote for the SFPB clock */ + ret = __qseecom_enable_clk_scale_up(data); + if (ret) { + ret = -EIO; + goto exit_unregister_bus_bw_need; + } + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, cmnlib_ion_handle, + img_data, fw_size, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit_disable_clk_vote; + } + + /* SCM_CALL to load the image */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load failed : ret %d\n", ret); + ret = -EIO; + goto exit_disable_clk_vote; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm call failed w/response result%d\n", resp.result); + ret = -EINVAL; + goto exit_disable_clk_vote; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", ret); + goto exit_disable_clk_vote; + } + break; + default: + pr_err("scm call return unknown response %d\n", resp.result); + ret = -EINVAL; + goto exit_disable_clk_vote; + } + +exit_disable_clk_vote: + __qseecom_disable_clk_scale_down(data); + +exit_unregister_bus_bw_need: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + +exit_free_img_data: + __qseecom_free_img_data(&cmnlib_ion_handle); + return ret; +} + +static int qseecom_unload_commonlib_image(void) +{ + int ret = -EINVAL; + struct qseecom_unload_lib_image_ireq unload_req = {0}; + struct qseecom_command_scm_resp resp; + + /* Populate the remaining parameters */ + unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND; + + /* SCM_CALL to load the image */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req, + sizeof(struct qseecom_unload_lib_image_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to unload lib failed : ret %d\n", ret); + ret = -EIO; + } else { + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + 
case QSEOS_RESULT_FAILURE: + pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n"); + break; + default: + pr_err("scm call return unknown response %d\n", + resp.result); + ret = -EINVAL; + break; + } + } + + return ret; +} + +int qseecom_start_app(struct qseecom_handle **handle, + char *app_name, uint32_t size) +{ + int32_t ret = 0; + unsigned long flags = 0; + struct qseecom_dev_handle *data = NULL; + struct qseecom_check_app_ireq app_ireq; + struct qseecom_registered_app_list *entry = NULL; + struct qseecom_registered_kclient_list *kclient_entry = NULL; + bool found_app = false; + size_t len; + ion_phys_addr_t pa; + uint32_t fw_size, app_arch; + uint32_t app_id = 0; + + if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) { + pr_err("Not allowed to be called in %d state\n", + atomic_read(&qseecom.qseecom_state)); + return -EPERM; + } + if (!app_name) { + pr_err("failed to get the app name\n"); + return -EINVAL; + } + + if (strnlen(app_name, MAX_APP_NAME_SIZE) == MAX_APP_NAME_SIZE) { + pr_err("The app_name (%s) with length %zu is not valid\n", + app_name, strnlen(app_name, MAX_APP_NAME_SIZE)); + return -EINVAL; + } + + *handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL); + if (!(*handle)) { + pr_err("failed to allocate memory for kernel client handle\n"); + return -ENOMEM; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + pr_err("kmalloc failed\n"); + if (ret == 0) { + kfree(*handle); + *handle = NULL; + } + return -ENOMEM; + } + data->abort = 0; + data->type = QSEECOM_CLIENT_APP; + data->released = false; + data->client.sb_length = size; + data->client.user_virt_sb_base = 0; + data->client.ihandle = NULL; + + init_waitqueue_head(&data->abort_wq); + + data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096, + ION_HEAP(ION_QSECOM_HEAP_ID), 0); + if (IS_ERR_OR_NULL(data->client.ihandle)) { + pr_err("Ion client could not retrieve the handle\n"); + kfree(data); + kfree(*handle); + *handle = NULL; + return -EINVAL; + } + mutex_lock(&app_access_lock); + + app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE); + ret = __qseecom_check_app_exists(app_ireq, &app_id); + if (ret) + goto err; + + strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE); + if (app_id) { + pr_warn("App id %d for [%s] app exists\n", app_id, + (char *)app_ireq.app_name); + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list){ + if (entry->app_id == app_id) { + entry->ref_cnt++; + found_app = true; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + if (!found_app) + pr_warn("App_id %d [%s] was loaded but not registered\n", + ret, (char *)app_ireq.app_name); + } else { + /* load the app and get the app_id */ + pr_debug("%s: Loading app for the first time'\n", + qseecom.pdev->init_name); + ret = __qseecom_load_fw(data, app_name, &app_id); + if (ret < 0) + goto err; + } + data->client.app_id = app_id; + if (!found_app) { + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + pr_err("kmalloc for app entry failed\n"); + ret = -ENOMEM; + goto err; + } + entry->app_id = app_id; + entry->ref_cnt = 1; + strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE); + if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) { + ret = -EIO; + kfree(entry); + goto err; + } + entry->app_arch = app_arch; + entry->app_blocked = false; + entry->blocked_on_listener_id = 0; + 
spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_add_tail(&entry->list, &qseecom.registered_app_list_head); + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, + flags); + } + + /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len); + if (ret) { + pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n", + ret); + goto err; + } + + /* Populate the structure for sending scm call to load image */ + data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, + data->client.ihandle); + if (IS_ERR_OR_NULL(data->client.sb_virt)) { + pr_err("ION memory mapping for client shared buf failed\n"); + ret = -ENOMEM; + goto err; + } + data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt; + data->client.sb_phys = (phys_addr_t)pa; + (*handle)->dev = (void *)data; + (*handle)->sbuf = (unsigned char *)data->client.sb_virt; + (*handle)->sbuf_len = data->client.sb_length; + + kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL); + if (!kclient_entry) { + pr_err("kmalloc failed\n"); + ret = -ENOMEM; + goto err; + } + kclient_entry->handle = *handle; + + spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); + list_add_tail(&kclient_entry->list, + &qseecom.registered_kclient_list_head); + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); + + mutex_unlock(&app_access_lock); + return 0; + +err: + kfree(data); + kfree(*handle); + *handle = NULL; + mutex_unlock(&app_access_lock); + return ret; +} +EXPORT_SYMBOL(qseecom_start_app); + +int qseecom_shutdown_app(struct qseecom_handle **handle) +{ + int ret = -EINVAL; + struct qseecom_dev_handle *data; + + struct qseecom_registered_kclient_list *kclient = NULL; + unsigned long flags = 0; + bool found_handle = false; + + if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) { + pr_err("Not allowed to be called in %d state\n", + atomic_read(&qseecom.qseecom_state)); + return -EPERM; + } + + if ((handle == NULL) || (*handle == NULL)) { + pr_err("Handle is not initialized\n"); + return -EINVAL; + } + data = (struct qseecom_dev_handle *) ((*handle)->dev); + mutex_lock(&app_access_lock); + + spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); + list_for_each_entry(kclient, &qseecom.registered_kclient_list_head, + list) { + if (kclient->handle == (*handle)) { + list_del(&kclient->list); + found_handle = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); + if (!found_handle) + pr_err("Unable to find the handle, exiting\n"); + else + ret = qseecom_unload_app(data, false); + + mutex_unlock(&app_access_lock); + if (ret == 0) { + kzfree(data); + kzfree(*handle); + kzfree(kclient); + *handle = NULL; + } + + return ret; +} +EXPORT_SYMBOL(qseecom_shutdown_app); + +int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, + uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len) +{ + int ret = 0; + struct qseecom_send_cmd_req req = {0, 0, 0, 0}; + struct qseecom_dev_handle *data; + bool perf_enabled = false; + + if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) { + pr_err("Not allowed to be called in %d state\n", + atomic_read(&qseecom.qseecom_state)); + return -EPERM; + } + + if (handle == NULL) { + pr_err("Handle is not initialized\n"); + return -EINVAL; + } + data = handle->dev; + + req.cmd_req_len = sbuf_len; + req.resp_len = rbuf_len; + req.cmd_req_buf = send_buf; + req.resp_buf = resp_buf; + + if (__validate_send_cmd_inputs(data, &req)) + return 
-EINVAL; + + mutex_lock(&app_access_lock); + if (qseecom.support_bus_scaling) { + ret = qseecom_scale_bus_bandwidth_timer(INACTIVE); + if (ret) { + pr_err("Failed to set bw.\n"); + mutex_unlock(&app_access_lock); + return ret; + } + } + /* + * On targets where crypto clock is handled by HLOS, + * if clk_access_cnt is zero and perf_enabled is false, + * then the crypto clock was not enabled before sending cmd + * to tz, qseecom will enable the clock to avoid service failure. + */ + if (!qseecom.no_clock_support && + !qseecom.qsee.clk_access_cnt && !data->perf_enabled) { + pr_debug("ce clock is not enabled!\n"); + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clock with err %d\n", + ret); + mutex_unlock(&app_access_lock); + return -EINVAL; + } + perf_enabled = true; + } + if (!strcmp(data->client.app_name, "securemm")) + data->use_legacy_cmd = true; + + ret = __qseecom_send_cmd(data, &req); + data->use_legacy_cmd = false; + if (qseecom.support_bus_scaling) + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + + if (perf_enabled) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } + + mutex_unlock(&app_access_lock); + + if (ret) + return ret; + + pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n", + req.resp_len, req.resp_buf); + return ret; +} +EXPORT_SYMBOL(qseecom_send_command); + +int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high) +{ + int ret = 0; + if ((handle == NULL) || (handle->dev == NULL)) { + pr_err("No valid kernel client\n"); + return -EINVAL; + } + if (high) { + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs(handle->dev, + HIGH); + mutex_unlock(&qsee_bw_mutex); + } else { + ret = qseecom_perf_enable(handle->dev); + if (ret) + pr_err("Failed to vote for clock with err %d\n", + ret); + } + } else { + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(handle->dev, CLK_DFAB); + qsee_disable_clock_vote(handle->dev, CLK_SFPB); + } else { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(handle->dev); + mutex_unlock(&qsee_bw_mutex); + } + } + return ret; +} +EXPORT_SYMBOL(qseecom_set_bandwidth); + +int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc) +{ + struct qseecom_registered_app_list dummy_app_entry = { {0} }; + struct qseecom_dev_handle dummy_private_data = {0}; + struct qseecom_command_scm_resp resp; + int ret = 0; + + if (!desc) { + pr_err("desc is NULL\n"); + return -EINVAL; + } + + resp.result = desc->ret[0]; /*req_cmd*/ + resp.resp_type = desc->ret[1]; /*incomplete:unused;blocked:session_id*/ + resp.data = desc->ret[2]; /*listener_id*/ + + dummy_private_data.client.app_id = desc->ret[1]; + dummy_app_entry.app_id = desc->ret[1]; + + mutex_lock(&app_access_lock); + if (qseecom.qsee_reentrancy_support) + ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry, + &dummy_private_data); + else + ret = __qseecom_process_incomplete_cmd(&dummy_private_data, + &resp); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n", + (int)desc->ret[0], (int)desc->ret[2], + (int)desc->ret[1], ret); + desc->ret[0] = resp.result; + desc->ret[1] = resp.resp_type; + desc->ret[2] = resp.data; + return ret; +} +EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke); + +static int qseecom_send_resp(void) +{ + qseecom.send_resp_flag = 1; + wake_up_interruptible(&qseecom.send_resp_wq); + return 0; +} + +static int 
qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data) +{ + struct qseecom_registered_listener_list *this_lstnr = NULL; + + pr_debug("lstnr %d send resp, wakeup\n", data->listener.id); + this_lstnr = __qseecom_find_svc(data->listener.id); + if (this_lstnr == NULL) + return -EINVAL; + qseecom.send_resp_flag = 1; + this_lstnr->send_resp_flag = 1; + wake_up_interruptible(&qseecom.send_resp_wq); + return 0; +} + +static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data, + struct qseecom_send_modfd_listener_resp *resp, + struct qseecom_registered_listener_list *this_lstnr) +{ + int i; + + if (!data || !resp || !this_lstnr) { + pr_err("listener handle or resp msg is null\n"); + return -EINVAL; + } + + if (resp->resp_buf_ptr == NULL) { + pr_err("resp buffer is null\n"); + return -EINVAL; + } + /* validate resp buf length */ + if ((resp->resp_len == 0) || + (resp->resp_len > this_lstnr->sb_length)) { + pr_err("resp buf length %d not valid\n", resp->resp_len); + return -EINVAL; + } + + if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) { + pr_err("Integer overflow in resp_len & resp_buf\n"); + return -EINVAL; + } + if ((uintptr_t)this_lstnr->user_virt_sb_base > + (ULONG_MAX - this_lstnr->sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + /* validate resp buf */ + if (((uintptr_t)resp->resp_buf_ptr < + (uintptr_t)this_lstnr->user_virt_sb_base) || + ((uintptr_t)resp->resp_buf_ptr >= + ((uintptr_t)this_lstnr->user_virt_sb_base + + this_lstnr->sb_length)) || + (((uintptr_t)resp->resp_buf_ptr + resp->resp_len) > + ((uintptr_t)this_lstnr->user_virt_sb_base + + this_lstnr->sb_length))) { + pr_err("resp buf is out of shared buffer region\n"); + return -EINVAL; + } + + /* validate offsets */ + for (i = 0; i < MAX_ION_FD; i++) { + if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) { + pr_err("Invalid offset %d = 0x%x\n", + i, resp->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + } + + return 0; +} + +static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data, + void __user *argp, bool is_64bit_addr) +{ + struct qseecom_send_modfd_listener_resp resp; + struct qseecom_registered_listener_list *this_lstnr = NULL; + + if (copy_from_user(&resp, argp, sizeof(resp))) { + pr_err("copy_from_user failed"); + return -EINVAL; + } + + this_lstnr = __qseecom_find_svc(data->listener.id); + if (this_lstnr == NULL) + return -EINVAL; + + if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr)) + return -EINVAL; + + resp.resp_buf_ptr = this_lstnr->sb_virt + + (uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base); + + if (!is_64bit_addr) + __qseecom_update_cmd_buf(&resp, false, data); + else + __qseecom_update_cmd_buf_64(&resp, false, data); + qseecom.send_resp_flag = 1; + this_lstnr->send_resp_flag = 1; + wake_up_interruptible(&qseecom.send_resp_wq); + return 0; +} + +static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_resp(data, argp, false); +} + +static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data, + void __user *argp) +{ + return __qseecom_send_modfd_resp(data, argp, true); +} + +static int qseecom_get_qseos_version(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qseos_version_req req; + + if (copy_from_user(&req, argp, sizeof(req))) { + pr_err("copy_from_user failed"); + return -EINVAL; + } + req.qseos_version = qseecom.qseos_version; + if (copy_to_user(argp, &req, sizeof(req))) { + 
pr_err("copy_to_user failed"); + return -EINVAL; + } + return 0; +} + +static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce) +{ + int rc = 0; + struct qseecom_clk *qclk = NULL; + + if (qseecom.no_clock_support) + return 0; + + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + if (ce == CLK_CE_DRV) + qclk = &qseecom.ce_drv; + + if (qclk == NULL) { + pr_err("CLK type not supported\n"); + return -EINVAL; + } + mutex_lock(&clk_access_lock); + + if (qclk->clk_access_cnt == ULONG_MAX) { + pr_err("clk_access_cnt beyond limitation\n"); + goto err; + } + if (qclk->clk_access_cnt > 0) { + qclk->clk_access_cnt++; + mutex_unlock(&clk_access_lock); + return rc; + } + + /* Enable CE core clk */ + if (qclk->ce_core_clk != NULL) { + rc = clk_prepare_enable(qclk->ce_core_clk); + if (rc) { + pr_err("Unable to enable/prepare CE core clk\n"); + goto err; + } + } + /* Enable CE clk */ + if (qclk->ce_clk != NULL) { + rc = clk_prepare_enable(qclk->ce_clk); + if (rc) { + pr_err("Unable to enable/prepare CE iface clk\n"); + goto ce_clk_err; + } + } + /* Enable AXI clk */ + if (qclk->ce_bus_clk != NULL) { + rc = clk_prepare_enable(qclk->ce_bus_clk); + if (rc) { + pr_err("Unable to enable/prepare CE bus clk\n"); + goto ce_bus_clk_err; + } + } + qclk->clk_access_cnt++; + mutex_unlock(&clk_access_lock); + return 0; + +ce_bus_clk_err: + if (qclk->ce_clk != NULL) + clk_disable_unprepare(qclk->ce_clk); +ce_clk_err: + if (qclk->ce_core_clk != NULL) + clk_disable_unprepare(qclk->ce_core_clk); +err: + mutex_unlock(&clk_access_lock); + return -EIO; +} + +static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce) +{ + struct qseecom_clk *qclk; + + if (qseecom.no_clock_support) + return; + + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + else + qclk = &qseecom.ce_drv; + + mutex_lock(&clk_access_lock); + + if (qclk->clk_access_cnt == 0) { + mutex_unlock(&clk_access_lock); + return; + } + + if (qclk->clk_access_cnt == 1) { + if (qclk->ce_clk != NULL) + clk_disable_unprepare(qclk->ce_clk); + if (qclk->ce_core_clk != NULL) + clk_disable_unprepare(qclk->ce_core_clk); + if (qclk->ce_bus_clk != NULL) + clk_disable_unprepare(qclk->ce_bus_clk); + } + qclk->clk_access_cnt--; + mutex_unlock(&clk_access_lock); +} + +static int qsee_vote_for_clock(struct qseecom_dev_handle *data, + int32_t clk_type) +{ + int ret = 0; + struct qseecom_clk *qclk; + + if (qseecom.no_clock_support) + return 0; + + qclk = &qseecom.qsee; + if (!qseecom.qsee_perf_client) + return ret; + + switch (clk_type) { + case CLK_DFAB: + mutex_lock(&qsee_bw_mutex); + if (!qseecom.qsee_bw_count) { + if (qseecom.qsee_sfpb_bw_count > 0) + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 3); + else { + if (qclk->ce_core_src_clk != NULL) + ret = __qseecom_enable_clk(CLK_QSEE); + if (!ret) { + ret = + msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 1); + if ((ret) && + (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + } + if (ret) + pr_err("DFAB Bandwidth req failed (%d)\n", + ret); + else { + qseecom.qsee_bw_count++; + data->perf_enabled = true; + } + } else { + qseecom.qsee_bw_count++; + data->perf_enabled = true; + } + mutex_unlock(&qsee_bw_mutex); + break; + case CLK_SFPB: + mutex_lock(&qsee_bw_mutex); + if (!qseecom.qsee_sfpb_bw_count) { + if (qseecom.qsee_bw_count > 0) + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 3); + else { + if (qclk->ce_core_src_clk != NULL) + ret = __qseecom_enable_clk(CLK_QSEE); + if (!ret) { + ret = + msm_bus_scale_client_update_request( + 
qseecom.qsee_perf_client, 2); + if ((ret) && + (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + } + + if (ret) + pr_err("SFPB Bandwidth req failed (%d)\n", + ret); + else { + qseecom.qsee_sfpb_bw_count++; + data->fast_load_enabled = true; + } + } else { + qseecom.qsee_sfpb_bw_count++; + data->fast_load_enabled = true; + } + mutex_unlock(&qsee_bw_mutex); + break; + default: + pr_err("Clock type not defined\n"); + break; + } + return ret; +} + +static void qsee_disable_clock_vote(struct qseecom_dev_handle *data, + int32_t clk_type) +{ + int32_t ret = 0; + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + + if (qseecom.no_clock_support) + return; + if (!qseecom.qsee_perf_client) + return; + + switch (clk_type) { + case CLK_DFAB: + mutex_lock(&qsee_bw_mutex); + if (qseecom.qsee_bw_count == 0) { + pr_err("Client error.Extra call to disable DFAB clk\n"); + mutex_unlock(&qsee_bw_mutex); + return; + } + + if (qseecom.qsee_bw_count == 1) { + if (qseecom.qsee_sfpb_bw_count > 0) + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 2); + else { + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 0); + if ((!ret) && (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + if (ret) + pr_err("SFPB Bandwidth req fail (%d)\n", + ret); + else { + qseecom.qsee_bw_count--; + data->perf_enabled = false; + } + } else { + qseecom.qsee_bw_count--; + data->perf_enabled = false; + } + mutex_unlock(&qsee_bw_mutex); + break; + case CLK_SFPB: + mutex_lock(&qsee_bw_mutex); + if (qseecom.qsee_sfpb_bw_count == 0) { + pr_err("Client error.Extra call to disable SFPB clk\n"); + mutex_unlock(&qsee_bw_mutex); + return; + } + if (qseecom.qsee_sfpb_bw_count == 1) { + if (qseecom.qsee_bw_count > 0) + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 1); + else { + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, 0); + if ((!ret) && (qclk->ce_core_src_clk != NULL)) + __qseecom_disable_clk(CLK_QSEE); + } + if (ret) + pr_err("SFPB Bandwidth req fail (%d)\n", + ret); + else { + qseecom.qsee_sfpb_bw_count--; + data->fast_load_enabled = false; + } + } else { + qseecom.qsee_sfpb_bw_count--; + data->fast_load_enabled = false; + } + mutex_unlock(&qsee_bw_mutex); + break; + default: + pr_err("Clock type not defined\n"); + break; + } + +} + +static int qseecom_load_external_elf(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct ion_handle *ihandle; /* Ion handle */ + struct qseecom_load_img_req load_img_req; + int uret = 0; + int ret; + ion_phys_addr_t pa = 0; + size_t len; + struct qseecom_load_app_ireq load_req; + struct qseecom_load_app_64bit_ireq load_req_64bit; + struct qseecom_command_scm_resp resp; + void *cmd_buf = NULL; + size_t cmd_len; + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&load_img_req, + (void __user *)argp, + sizeof(struct qseecom_load_img_req))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + /* Get the handle of the shared fd */ + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + load_img_req.ifd_data_fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client could not retrieve the handle\n"); + return -ENOMEM; + } + + /* Get the physical address of the ION BUF */ + ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len); + if (ret) { + pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n", + ret); + return ret; + } + if (load_img_req.mdt_len > len || load_img_req.img_len > len) { + pr_err("ion len %zu is smaller 
than mdt_len %u or img_len %u\n", + len, load_img_req.mdt_len, + load_img_req.img_len); + return ret; + } + /* Populate the structure for sending scm call to load image */ + if (qseecom.qsee_version < QSEE_VERSION_40) { + load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND; + load_req.mdt_len = load_img_req.mdt_len; + load_req.img_len = load_img_req.img_len; + load_req.phy_addr = (uint32_t)pa; + cmd_buf = (void *)&load_req; + cmd_len = sizeof(struct qseecom_load_app_ireq); + } else { + load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND; + load_req_64bit.mdt_len = load_img_req.mdt_len; + load_req_64bit.img_len = load_img_req.img_len; + load_req_64bit.phy_addr = (uint64_t)pa; + cmd_buf = (void *)&load_req_64bit; + cmd_len = sizeof(struct qseecom_load_app_64bit_ireq); + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM); + mutex_unlock(&qsee_bw_mutex); + if (ret) { + ret = -EIO; + goto exit_cpu_restore; + } + } + + /* Vote for the SFPB clock */ + ret = __qseecom_enable_clk_scale_up(data); + if (ret) { + ret = -EIO; + goto exit_register_bus_bandwidth_needs; + } + ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto exit_disable_clock; + } + /* SCM_CALL to load the external elf */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to load failed : ret %d\n", + ret); + ret = -EFAULT; + goto exit_disable_clock; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + pr_err("%s: qseos result incomplete\n", __func__); + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) + pr_err("process_incomplete_cmd failed: err: %d\n", ret); + break; + case QSEOS_RESULT_FAILURE: + pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n"); + ret = -EFAULT; + break; + default: + pr_err("scm_call response result %d not supported\n", + resp.result); + ret = -EFAULT; + break; + } + +exit_disable_clock: + __qseecom_disable_clk_scale_down(data); + +exit_register_bus_bandwidth_needs: + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + uret = qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + if (uret) + pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n", + uret, ret); + } + +exit_cpu_restore: + /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + return ret; +} + +static int qseecom_unload_external_elf(struct qseecom_dev_handle *data) +{ + int ret = 0; + struct qseecom_command_scm_resp resp; + struct qseecom_unload_app_ireq req; + + /* unavailable client app */ + data->type = QSEECOM_UNAVAILABLE_CLIENT_APP; + + /* Populate the structure for sending scm call to unload image */ + req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND; + + /* SCM_CALL to unload the external elf */ + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req, + sizeof(struct qseecom_unload_app_ireq), + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call to unload failed : ret %d\n", + ret); + ret = -EFAULT; + goto qseecom_unload_external_elf_scm_err; + } + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) + pr_err("process_incomplete_cmd fail err: %d\n", + ret); + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("scm_call to unload image failed 
resp.result =%d\n", + resp.result); + ret = -EFAULT; + } + } + +qseecom_unload_external_elf_scm_err: + + return ret; +} + +static int qseecom_query_app_loaded(struct qseecom_dev_handle *data, + void __user *argp) +{ + + int32_t ret; + struct qseecom_qseos_app_load_query query_req; + struct qseecom_check_app_ireq req; + struct qseecom_registered_app_list *entry = NULL; + unsigned long flags = 0; + uint32_t app_arch = 0, app_id = 0; + bool found_app = false; + + /* Copy the relevant information needed for loading the image */ + if (copy_from_user(&query_req, + (void __user *)argp, + sizeof(struct qseecom_qseos_app_load_query))) { + pr_err("copy_from_user failed\n"); + return -EFAULT; + } + + req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND; + query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0'; + strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE); + + ret = __qseecom_check_app_exists(req, &app_id); + if (ret) { + pr_err(" scm call to check if app is loaded failed"); + return ret; /* scm call failed */ + } + if (app_id) { + pr_debug("App id %d (%s) already exists\n", app_id, + (char *)(req.app_name)); + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(entry, + &qseecom.registered_app_list_head, list){ + if (entry->app_id == app_id) { + app_arch = entry->app_arch; + entry->ref_cnt++; + found_app = true; + break; + } + } + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + data->client.app_id = app_id; + query_req.app_id = app_id; + if (app_arch) { + data->client.app_arch = app_arch; + query_req.app_arch = app_arch; + } else { + data->client.app_arch = 0; + query_req.app_arch = 0; + } + strlcpy(data->client.app_name, query_req.app_name, + MAX_APP_NAME_SIZE); + /* + * If app was loaded by appsbl before and was not registered, + * regiser this app now. 
+ */ + if (!found_app) { + pr_debug("Register app %d [%s] which was loaded before\n", + ret, (char *)query_req.app_name); + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + pr_err("kmalloc for app entry failed\n"); + return -ENOMEM; + } + entry->app_id = app_id; + entry->ref_cnt = 1; + entry->app_arch = data->client.app_arch; + strlcpy(entry->app_name, data->client.app_name, + MAX_APP_NAME_SIZE); + entry->app_blocked = false; + entry->blocked_on_listener_id = 0; + spin_lock_irqsave(&qseecom.registered_app_list_lock, + flags); + list_add_tail(&entry->list, + &qseecom.registered_app_list_head); + spin_unlock_irqrestore( + &qseecom.registered_app_list_lock, flags); + } + if (copy_to_user(argp, &query_req, sizeof(query_req))) { + pr_err("copy_to_user failed\n"); + return -EFAULT; + } + return -EEXIST; /* app already loaded */ + } else { + return 0; /* app not loaded */ + } +} + +static int __qseecom_get_ce_pipe_info( + enum qseecom_key_management_usage_type usage, + uint32_t *pipe, uint32_t **ce_hw, uint32_t unit) +{ + int ret = -EINVAL; + int i, j; + struct qseecom_ce_info_use *p = NULL; + int total = 0; + struct qseecom_ce_pipe_entry *pcepipe; + + switch (usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", usage); + return -EINVAL; + } + + for (j = 0; j < total; j++) { + if (p->unit_num == unit) { + pcepipe = p->ce_pipe_entry; + for (i = 0; i < p->num_ce_pipe_entries; i++) { + (*ce_hw)[i] = pcepipe->ce_num; + *pipe = pcepipe->ce_pipe_pair; + pcepipe++; + } + ret = 0; + break; + } + p++; + } + return ret; +} + +static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_generate_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_generate_ireq), + &resp, sizeof(resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) { + pr_debug("Key ID exists.\n"); + ret = 0; + } else { + pr_err("scm call to generate key failed : %d\n", ret); + ret = -EFAULT; + } + goto generate_key_exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_FAIL_KEY_ID_EXISTS: + pr_debug("Key ID exists.\n"); + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) { + pr_debug("Key ID exists.\n"); + ret = 0; + } else { + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + } + } + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("gen key scm call failed resp.result %d\n", resp.result); + ret = -EINVAL; + break; + } +generate_key_exit: + __qseecom_disable_clk(CLK_QSEE); + return ret; +} + 
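+/*
+ * The key-management helpers below (__qseecom_delete_saved_key(),
+ * __qseecom_set_clear_ce_key() and __qseecom_update_current_key_user_info())
+ * follow the same shape as __qseecom_generate_and_save_key() above:
+ * validate the usage type, vote for the QSEE clock (plus CE_DRV for
+ * set/clear when it is a separate instance), issue the SCM call, map the
+ * QSEOS_RESULT_* code to a return value, and drop the clock vote on exit.
+ * Where they apply, QSEOS_RESULT_FAIL_MAX_ATTEMPT becomes -ERANGE and
+ * QSEOS_RESULT_FAIL_PENDING_OPERATION is passed back unchanged so the
+ * caller can retry, as qseecom_create_key() does further down:
+ *
+ *	do {
+ *		ret = __qseecom_set_clear_ce_key(data, create_key_req.usage,
+ *						&set_key_ireq);
+ *		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+ *			msleep(50);
+ *	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+ */
+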
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_delete_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_delete_ireq), + &resp, sizeof(struct qseecom_command_scm_resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } else { + pr_err("scm call to delete key failed : %d\n", ret); + ret = -EFAULT; + } + goto del_key_exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } + } + break; + case QSEOS_RESULT_FAIL_MAX_ATTEMPT: + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("Delete key scm call failed resp.result %d\n", + resp.result); + ret = -EINVAL; + break; + } +del_key_exit: + __qseecom_disable_clk(CLK_QSEE); + return ret; +} + +static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_select_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + if (qseecom.qsee.instance != qseecom.ce_drv.instance) { + ret = __qseecom_enable_clk(CLK_CE_DRV); + if (ret) + return ret; + } + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_select_ireq), + &resp, sizeof(struct qseecom_command_scm_resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } else if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } else { + pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n", + ret); + ret = -EFAULT; + } + goto set_key_exit; + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + if (resp.result == + QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } + if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) { + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + } + } + break; + case QSEOS_RESULT_FAIL_MAX_ATTEMPT: + pr_debug("Max attempts to input password reached.\n"); + ret = -ERANGE; + break; + case QSEOS_RESULT_FAIL_PENDING_OPERATION: + pr_debug("Set Key operation under processing...\n"); + ret = 
QSEOS_RESULT_FAIL_PENDING_OPERATION; + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("Set key scm call failed resp.result %d\n", resp.result); + ret = -EINVAL; + break; + } +set_key_exit: + __qseecom_disable_clk(CLK_QSEE); + if (qseecom.qsee.instance != qseecom.ce_drv.instance) + __qseecom_disable_clk(CLK_CE_DRV); + return ret; +} + +static int __qseecom_update_current_key_user_info( + struct qseecom_dev_handle *data, + enum qseecom_key_management_usage_type usage, + struct qseecom_key_userinfo_update_ireq *ireq) +{ + struct qseecom_command_scm_resp resp; + int ret; + + if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", usage); + return -EFAULT; + } + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + return ret; + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + ireq, sizeof(struct qseecom_key_userinfo_update_ireq), + &resp, sizeof(struct qseecom_command_scm_resp)); + if (ret) { + if (ret == -EINVAL && + resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } else { + pr_err("scm call to update key userinfo failed: %d\n", + ret); + __qseecom_disable_clk(CLK_QSEE); + return -EFAULT; + } + } + + switch (resp.result) { + case QSEOS_RESULT_SUCCESS: + break; + case QSEOS_RESULT_INCOMPLETE: + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (resp.result == + QSEOS_RESULT_FAIL_PENDING_OPERATION) { + pr_debug("Set Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + } + if (ret) + pr_err("process_incomplete_cmd FAILED, resp.result %d\n", + resp.result); + break; + case QSEOS_RESULT_FAIL_PENDING_OPERATION: + pr_debug("Update Key operation under processing...\n"); + ret = QSEOS_RESULT_FAIL_PENDING_OPERATION; + break; + case QSEOS_RESULT_FAILURE: + default: + pr_err("Set key scm call failed resp.result %d\n", resp.result); + ret = -EINVAL; + break; + } + + __qseecom_disable_clk(CLK_QSEE); + return ret; +} + + +static int qseecom_enable_ice_setup(int usage) +{ + int ret = 0; + + if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("ufs", true); + else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("sdcc", true); + + return ret; +} + +static int qseecom_disable_ice_setup(int usage) +{ + int ret = 0; + + if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("ufs", false); + else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) + ret = qcom_ice_setup_ice_hw("sdcc", false); + + return ret; +} + +static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage) +{ + struct qseecom_ce_info_use *pce_info_use, *p; + int total = 0; + int i; + + switch (usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + break; + default: + pr_err("unsupported usage %d\n", usage); + return -EINVAL; + } + + pce_info_use = NULL; + + for (i = 0; i < total; i++) { + if (p->unit_num == unit) { + pce_info_use = p; + break; + } + p++; + } + if (!pce_info_use) { + pr_err("can not find %d\n", unit); + return -EINVAL; + } + return pce_info_use->num_ce_pipe_entries; +} + +static int qseecom_create_key(struct qseecom_dev_handle *data, + void __user 
*argp) +{ + int i; + uint32_t *ce_hw = NULL; + uint32_t pipe = 0; + int ret = 0; + uint32_t flags = 0; + struct qseecom_create_key_req create_key_req; + struct qseecom_key_generate_ireq generate_key_ireq; + struct qseecom_key_select_ireq set_key_ireq; + uint32_t entries = 0; + + ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + create_key_req.usage >= QSEOS_KM_USAGE_MAX) { + pr_err("unsupported usage %d\n", create_key_req.usage); + ret = -EFAULT; + return ret; + } + entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT, + create_key_req.usage); + if (entries <= 0) { + pr_err("no ce instance for usage %d instance %d\n", + DEFAULT_CE_INFO_UNIT, create_key_req.usage); + ret = -EINVAL; + return ret; + } + + ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL); + if (!ce_hw) { + ret = -ENOMEM; + return ret; + } + ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw, + DEFAULT_CE_INFO_UNIT); + if (ret) { + pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret); + ret = -EINVAL; + goto free_buf; + } + + if (qseecom.fde_key_size) + flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE; + else + flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE; + + generate_key_ireq.flags = flags; + generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY; + memset((void *)generate_key_ireq.key_id, + 0, QSEECOM_KEY_ID_SIZE); + memset((void *)generate_key_ireq.hash32, + 0, QSEECOM_HASH_SIZE); + memcpy((void *)generate_key_ireq.key_id, + (void *)key_id_array[create_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memcpy((void *)generate_key_ireq.hash32, + (void *)create_key_req.hash32, + QSEECOM_HASH_SIZE); + + ret = __qseecom_generate_and_save_key(data, + create_key_req.usage, &generate_key_ireq); + if (ret) { + pr_err("Failed to generate key on storage: %d\n", ret); + goto free_buf; + } + + for (i = 0; i < entries; i++) { + set_key_ireq.qsee_command_id = QSEOS_SET_KEY; + if (create_key_req.usage == + QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) { + set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM; + set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + + } else if (create_key_req.usage == + QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) { + set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM; + set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + + } else { + set_key_ireq.ce = ce_hw[i]; + set_key_ireq.pipe = pipe; + } + set_key_ireq.flags = flags; + + /* set both PIPE_ENC and PIPE_ENC_XTS*/ + set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS; + memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE); + memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE); + memcpy((void *)set_key_ireq.key_id, + (void *)key_id_array[create_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memcpy((void *)set_key_ireq.hash32, + (void *)create_key_req.hash32, + QSEECOM_HASH_SIZE); + + /* It will return false if it is GPCE based crypto instance or + ICE is setup properly */ + if (qseecom_enable_ice_setup(create_key_req.usage)) + goto free_buf; + + do { + ret = __qseecom_set_clear_ce_key(data, + create_key_req.usage, + &set_key_ireq); + /* + * wait a little before calling scm again to let other + * processes run + */ + if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION) + msleep(50); + + } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION); + + qseecom_disable_ice_setup(create_key_req.usage); + + if (ret) { + pr_err("Failed to create key: pipe %d, ce %d: %d\n", + pipe, ce_hw[i], ret); + goto free_buf; + } else { + 
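+ /*
+ * Key programmed for this pipe/CE entry. ICE-based FDE
+ * (QSEOS_KM_USAGE_UFS/SDCC_ICE_DISK_ENCRYPTION) uses the single
+ * QSEECOM_ICE_FDE_KEY_INDEX slot, so one successful SET_KEY is
+ * enough and the remaining entries are skipped below.
+ */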
pr_err("Set the key successfully\n"); + if ((create_key_req.usage == + QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) || + (create_key_req.usage == + QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)) + goto free_buf; + } + } + +free_buf: + kzfree(ce_hw); + return ret; +} + +static int qseecom_wipe_key(struct qseecom_dev_handle *data, + void __user *argp) +{ + uint32_t *ce_hw = NULL; + uint32_t pipe = 0; + int ret = 0; + uint32_t flags = 0; + int i, j; + struct qseecom_wipe_key_req wipe_key_req; + struct qseecom_key_delete_ireq delete_key_ireq; + struct qseecom_key_select_ireq clear_key_ireq; + uint32_t entries = 0; + + ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) { + pr_err("unsupported usage %d\n", wipe_key_req.usage); + ret = -EFAULT; + return ret; + } + + entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT, + wipe_key_req.usage); + if (entries <= 0) { + pr_err("no ce instance for usage %d instance %d\n", + DEFAULT_CE_INFO_UNIT, wipe_key_req.usage); + ret = -EINVAL; + return ret; + } + + ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL); + if (!ce_hw) { + ret = -ENOMEM; + return ret; + } + + ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw, + DEFAULT_CE_INFO_UNIT); + if (ret) { + pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret); + ret = -EINVAL; + goto free_buf; + } + + if (wipe_key_req.wipe_key_flag) { + delete_key_ireq.flags = flags; + delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY; + memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE); + memcpy((void *)delete_key_ireq.key_id, + (void *)key_id_array[wipe_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE); + + ret = __qseecom_delete_saved_key(data, wipe_key_req.usage, + &delete_key_ireq); + if (ret) { + pr_err("Failed to delete key from ssd storage: %d\n", + ret); + ret = -EFAULT; + goto free_buf; + } + } + + for (j = 0; j < entries; j++) { + clear_key_ireq.qsee_command_id = QSEOS_SET_KEY; + if (wipe_key_req.usage == + QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) { + clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM; + clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + } else if (wipe_key_req.usage == + QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) { + clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM; + clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX; + } else { + clear_key_ireq.ce = ce_hw[j]; + clear_key_ireq.pipe = pipe; + } + clear_key_ireq.flags = flags; + clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS; + for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++) + clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID; + memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE); + + /* It will return false if it is GPCE based crypto instance or + ICE is setup properly */ + if (qseecom_enable_ice_setup(wipe_key_req.usage)) + goto free_buf; + + ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage, + &clear_key_ireq); + + qseecom_disable_ice_setup(wipe_key_req.usage); + + if (ret) { + pr_err("Failed to wipe key: pipe %d, ce %d: %d\n", + pipe, ce_hw[j], ret); + ret = -EFAULT; + goto free_buf; + } + } + +free_buf: + kzfree(ce_hw); + return ret; +} + +static int qseecom_update_key_user_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + int ret = 0; + uint32_t flags = 0; + struct qseecom_update_key_userinfo_req update_key_req; + struct qseecom_key_userinfo_update_ireq 
ireq; + + ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION || + update_key_req.usage >= QSEOS_KM_USAGE_MAX) { + pr_err("Error:: unsupported usage %d\n", update_key_req.usage); + return -EFAULT; + } + + ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO; + + if (qseecom.fde_key_size) + flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE; + else + flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE; + + ireq.flags = flags; + memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE); + memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE); + memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE); + memcpy((void *)ireq.key_id, + (void *)key_id_array[update_key_req.usage].desc, + QSEECOM_KEY_ID_SIZE); + memcpy((void *)ireq.current_hash32, + (void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE); + memcpy((void *)ireq.new_hash32, + (void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE); + + do { + ret = __qseecom_update_current_key_user_info(data, + update_key_req.usage, + &ireq); + /* + * wait a little before calling scm again to let other + * processes run + */ + if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION) + msleep(50); + + } while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION); + if (ret) { + pr_err("Failed to update key info: %d\n", ret); + return ret; + } + return ret; + +} +static int qseecom_is_es_activated(void __user *argp) +{ + struct qseecom_is_es_activated_req req; + struct qseecom_command_scm_resp resp; + int ret; + + if (qseecom.qsee_version < QSEE_VERSION_04) { + pr_err("invalid qsee version\n"); + return -ENODEV; + } + + if (argp == NULL) { + pr_err("arg is null\n"); + return -EINVAL; + } + + ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID, + &req, sizeof(req), &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call failed\n"); + return ret; + } + + req.is_activated = resp.result; + ret = copy_to_user(argp, &req, sizeof(req)); + if (ret) { + pr_err("copy_to_user failed\n"); + return ret; + } + + return 0; +} + +static int qseecom_save_partition_hash(void __user *argp) +{ + struct qseecom_save_partition_hash_req req; + struct qseecom_command_scm_resp resp; + int ret; + + memset(&resp, 0x00, sizeof(resp)); + + if (qseecom.qsee_version < QSEE_VERSION_04) { + pr_err("invalid qsee version\n"); + return -ENODEV; + } + + if (argp == NULL) { + pr_err("arg is null\n"); + return -EINVAL; + } + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID, + (void *)&req, sizeof(req), (void *)&resp, sizeof(resp)); + if (ret) { + pr_err("qseecom_scm_call failed\n"); + return ret; + } + + return 0; +} + +static int qseecom_mdtp_cipher_dip(void __user *argp) +{ + struct qseecom_mdtp_cipher_dip_req req; + u32 tzbuflenin, tzbuflenout; + char *tzbufin = NULL, *tzbufout = NULL; + struct scm_desc desc = {0}; + int ret; + + do { + /* Copy the parameters from userspace */ + if (argp == NULL) { + pr_err("arg is null\n"); + ret = -EINVAL; + break; + } + + ret = copy_from_user(&req, argp, sizeof(req)); + if (ret) { + pr_err("copy_from_user failed, ret= %d\n", ret); + break; + } + + if (req.in_buf == NULL || req.out_buf == NULL || + req.in_buf_size == 0 || req.in_buf_size > MAX_DIP || + req.out_buf_size == 0 || req.out_buf_size > MAX_DIP || + req.direction > 1) { + pr_err("invalid parameters\n"); + ret = -EINVAL; + break; + } + + /* Copy the input buffer from 
userspace to kernel space */ + tzbuflenin = PAGE_ALIGN(req.in_buf_size); + tzbufin = kzalloc(tzbuflenin, GFP_KERNEL); + if (!tzbufin) { + pr_err("error allocating in buffer\n"); + ret = -ENOMEM; + break; + } + + ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size); + if (ret) { + pr_err("copy_from_user failed, ret=%d\n", ret); + break; + } + + dmac_flush_range(tzbufin, tzbufin + tzbuflenin); + + /* Prepare the output buffer in kernel space */ + tzbuflenout = PAGE_ALIGN(req.out_buf_size); + tzbufout = kzalloc(tzbuflenout, GFP_KERNEL); + if (!tzbufout) { + pr_err("error allocating out buffer\n"); + ret = -ENOMEM; + break; + } + + dmac_flush_range(tzbufout, tzbufout + tzbuflenout); + + /* Send the command to TZ */ + desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID; + desc.args[0] = virt_to_phys(tzbufin); + desc.args[1] = req.in_buf_size; + desc.args[2] = virt_to_phys(tzbufout); + desc.args[3] = req.out_buf_size; + desc.args[4] = req.direction; + + ret = __qseecom_enable_clk(CLK_QSEE); + if (ret) + break; + + ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc); + + __qseecom_disable_clk(CLK_QSEE); + + if (ret) { + pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n", + ret); + break; + } + + /* Copy the output buffer from kernel space to userspace */ + dmac_flush_range(tzbufout, tzbufout + tzbuflenout); + ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size); + if (ret) { + pr_err("copy_to_user failed, ret=%d\n", ret); + break; + } + } while (0); + + kzfree(tzbufin); + kzfree(tzbufout); + + return ret; +} + +static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data, + struct qseecom_qteec_req *req) +{ + if (!data || !data->client.ihandle) { + pr_err("Client or client handle is not initialized\n"); + return -EINVAL; + } + + if (data->type != QSEECOM_CLIENT_APP) + return -EFAULT; + + if (req->req_len > UINT_MAX - req->resp_len) { + pr_err("Integer overflow detected in req_len & rsp_len\n"); + return -EINVAL; + } + + if (req->req_len + req->resp_len > data->client.sb_length) { + pr_debug("Not enough memory to fit cmd_buf.\n"); + pr_debug("resp_buf. 
Required: %u, Available: %zu\n", + (req->req_len + req->resp_len), data->client.sb_length); + return -ENOMEM; + } + + if (req->req_ptr == NULL || req->resp_ptr == NULL) { + pr_err("cmd buffer or response buffer is null\n"); + return -EINVAL; + } + if (((uintptr_t)req->req_ptr < + data->client.user_virt_sb_base) || + ((uintptr_t)req->req_ptr >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("cmd buffer address not within shared bufffer\n"); + return -EINVAL; + } + + if (((uintptr_t)req->resp_ptr < + data->client.user_virt_sb_base) || + ((uintptr_t)req->resp_ptr >= + (data->client.user_virt_sb_base + data->client.sb_length))) { + pr_err("response buffer address not within shared bufffer\n"); + return -EINVAL; + } + + if ((req->req_len == 0) || (req->resp_len == 0)) { + pr_err("cmd buf lengtgh/response buf length not valid\n"); + return -EINVAL; + } + + if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) { + pr_err("Integer overflow in req_len & req_ptr\n"); + return -EINVAL; + } + + if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) { + pr_err("Integer overflow in resp_len & resp_ptr\n"); + return -EINVAL; + } + + if (data->client.user_virt_sb_base > + (ULONG_MAX - data->client.sb_length)) { + pr_err("Integer overflow in user_virt_sb_base & sb_length\n"); + return -EINVAL; + } + if ((((uintptr_t)req->req_ptr + req->req_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length)) || + (((uintptr_t)req->resp_ptr + req->resp_len) > + ((uintptr_t)data->client.user_virt_sb_base + + data->client.sb_length))) { + pr_err("cmd buf or resp buf is out of shared buffer region\n"); + return -EINVAL; + } + return 0; +} + +static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data, + uint32_t fd_idx, struct sg_table *sg_ptr) +{ + struct scatterlist *sg = sg_ptr->sgl; + struct qseecom_sg_entry *sg_entry; + void *buf; + uint i; + size_t size; + dma_addr_t coh_pmem; + + if (fd_idx >= MAX_ION_FD) { + pr_err("fd_idx [%d] is invalid\n", fd_idx); + return -ENOMEM; + } + /* + * Allocate a buffer, populate it with number of entry plus + * each sg entry's phy addr and lenth; then return the + * phy_addr of the buffer. 
+ */ + size = sizeof(uint32_t) + + sizeof(struct qseecom_sg_entry) * sg_ptr->nents; + size = (size + PAGE_SIZE) & PAGE_MASK; + buf = dma_alloc_coherent(qseecom.pdev, + size, &coh_pmem, GFP_KERNEL); + if (buf == NULL) { + pr_err("failed to alloc memory for sg buf\n"); + return -ENOMEM; + } + *(uint32_t *)buf = sg_ptr->nents; + sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t)); + for (i = 0; i < sg_ptr->nents; i++) { + sg_entry->phys_addr = (uint32_t)sg_dma_address(sg); + sg_entry->len = sg->length; + sg_entry++; + sg = sg_next(sg); + } + data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true; + data->client.sec_buf_fd[fd_idx].vbase = buf; + data->client.sec_buf_fd[fd_idx].pbase = coh_pmem; + data->client.sec_buf_fd[fd_idx].size = size; + return 0; +} + +static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req, + struct qseecom_dev_handle *data, bool cleanup) +{ + struct ion_handle *ihandle; + int ret = 0; + int i = 0; + uint32_t *update; + struct sg_table *sg_ptr = NULL; + struct scatterlist *sg; + struct qseecom_param_memref *memref; + + if (req == NULL) { + pr_err("Invalid address\n"); + return -EINVAL; + } + for (i = 0; i < MAX_ION_FD; i++) { + if (req->ifd_data[i].fd > 0) { + ihandle = ion_import_dma_buf(qseecom.ion_clnt, + req->ifd_data[i].fd); + if (IS_ERR_OR_NULL(ihandle)) { + pr_err("Ion client can't retrieve the handle\n"); + return -ENOMEM; + } + if ((req->req_len < sizeof(uint32_t)) || + (req->ifd_data[i].cmd_buf_offset > + req->req_len - sizeof(uint32_t))) { + pr_err("Invalid offset/req len 0x%x/0x%x\n", + req->req_len, + req->ifd_data[i].cmd_buf_offset); + return -EINVAL; + } + update = (uint32_t *)((char *) req->req_ptr + + req->ifd_data[i].cmd_buf_offset); + if (!update) { + pr_err("update pointer is NULL\n"); + return -EINVAL; + } + } else { + continue; + } + /* Populate the cmd data structure with the phys_addr */ + sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle); + if (IS_ERR_OR_NULL(sg_ptr)) { + pr_err("IOn client could not retrieve sg table\n"); + goto err; + } + sg = sg_ptr->sgl; + if (sg == NULL) { + pr_err("sg is NULL\n"); + goto err; + } + if ((sg_ptr->nents == 0) || (sg->length == 0)) { + pr_err("Num of scat entr (%d)or length(%d) invalid\n", + sg_ptr->nents, sg->length); + goto err; + } + /* clean up buf for pre-allocated fd */ + if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd && + (*update)) { + if (data->client.sec_buf_fd[i].vbase) + dma_free_coherent(qseecom.pdev, + data->client.sec_buf_fd[i].size, + data->client.sec_buf_fd[i].vbase, + data->client.sec_buf_fd[i].pbase); + memset((void *)update, 0, + sizeof(struct qseecom_param_memref)); + memset(&(data->client.sec_buf_fd[i]), 0, + sizeof(struct qseecom_sec_buf_fd_info)); + goto clean; + } + + if (*update == 0) { + /* update buf for pre-allocated fd from secure heap*/ + ret = __qseecom_qteec_handle_pre_alc_fd(data, i, + sg_ptr); + if (ret) { + pr_err("Failed to handle buf for fd[%d]\n", i); + goto err; + } + memref = (struct qseecom_param_memref *)update; + memref->buffer = + (uint32_t)(data->client.sec_buf_fd[i].pbase); + memref->size = + (uint32_t)(data->client.sec_buf_fd[i].size); + } else { + /* update buf for fd from non-secure qseecom heap */ + if (sg_ptr->nents != 1) { + pr_err("Num of scat entr (%d) invalid\n", + sg_ptr->nents); + goto err; + } + if (cleanup) + *update = 0; + else + *update = (uint32_t)sg_dma_address(sg_ptr->sgl); + } +clean: + if (cleanup) { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ihandle, NULL, sg->length, + ION_IOC_INV_CACHES); + if 
(ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + } else { + ret = msm_ion_do_cache_op(qseecom.ion_clnt, + ihandle, NULL, sg->length, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + goto err; + } + data->sglistinfo_ptr[i].indexAndFlags = + SGLISTINFO_SET_INDEX_FLAG( + (sg_ptr->nents == 1), 0, + req->ifd_data[i].cmd_buf_offset); + data->sglistinfo_ptr[i].sizeOrCount = + (sg_ptr->nents == 1) ? + sg->length : sg_ptr->nents; + data->sglist_cnt = i + 1; + } + /* Deallocate the handle */ + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + } + return ret; +err: + if (!IS_ERR_OR_NULL(ihandle)) + ion_free(qseecom.ion_clnt, ihandle); + return -ENOMEM; +} + +static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data, + struct qseecom_qteec_req *req, uint32_t cmd_id) +{ + struct qseecom_command_scm_resp resp; + struct qseecom_qteec_ireq ireq; + struct qseecom_qteec_64bit_ireq ireq_64bit; + struct qseecom_registered_app_list *ptr_app; + bool found_app = false; + unsigned long flags; + int ret = 0; + int ret2 = 0; + uint32_t reqd_len_sb_in = 0; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = data->sglistinfo_ptr; + void *req_ptr = NULL; + void *resp_ptr = NULL; + + ret = __qseecom_qteec_validate_msg(data, req); + if (ret) + return ret; + + req_ptr = req->req_ptr; + resp_ptr = req->resp_ptr; + + /* find app_id & img_name from list */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", data->client.app_id, + (char *)data->client.app_name); + return -ENOENT; + } + + req->req_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req->req_ptr); + req->resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req->resp_ptr); + + if ((cmd_id == QSEOS_TEE_OPEN_SESSION) || + (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) { + ret = __qseecom_update_qteec_req_buf( + (struct qseecom_qteec_modfd_req *)req, data, false); + if (ret) + return ret; + } + + if (qseecom.qsee_version < QSEE_VERSION_40) { + ireq.app_id = data->client.app_id; + ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq.req_len = req->req_len; + ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq.resp_len = req->resp_len; + ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table); + ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&ireq; + cmd_len = sizeof(struct qseecom_qteec_ireq); + } else { + ireq_64bit.app_id = data->client.app_id; + ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq_64bit.req_len = req->req_len; + ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq_64bit.resp_len = req->resp_len; + if ((data->client.app_arch == ELFCLASS32) && + ((ireq_64bit.req_ptr >= + PHY_ADDR_4G - ireq_64bit.req_len) || + (ireq_64bit.resp_ptr >= + PHY_ADDR_4G - ireq_64bit.resp_len))){ + pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n", + data->client.app_name, data->client.app_id); + 
pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n", + ireq_64bit.req_ptr, ireq_64bit.req_len, + ireq_64bit.resp_ptr, ireq_64bit.resp_len); + return -EFAULT; + } + ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table); + ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + cmd_buf = (void *)&ireq_64bit; + cmd_len = sizeof(struct qseecom_qteec_64bit_ireq); + } + if (qseecom.whitelist_support == true + && cmd_id == QSEOS_TEE_OPEN_SESSION) + *(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST; + else + *(uint32_t *)cmd_buf = cmd_id; + + reqd_len_sb_in = req->req_len + req->resp_len; + ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, + reqd_len_sb_in, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + __qseecom_reentrancy_check_if_this_app_blocked(ptr_app); + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + goto exit; + } + + if (qseecom.qsee_reentrancy_support) { + ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + if (ret) + goto exit; + } else { + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + goto exit; + } + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + goto exit; + } + } + } +exit: + ret2 = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, data->client.sb_length, + ION_IOC_INV_CACHES); + if (ret2) { + pr_err("cache operation failed %d\n", ret); + return ret2; + } + + if ((cmd_id == QSEOS_TEE_OPEN_SESSION) || + (cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) { + ret2 = __qseecom_update_qteec_req_buf( + (struct qseecom_qteec_modfd_req *)req, data, true); + if (ret2) + return ret2; + } + return ret; +} + +static int qseecom_qteec_open_session(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_modfd_req req; + int ret = 0; + + ret = copy_from_user(&req, argp, + sizeof(struct qseecom_qteec_modfd_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req, + QSEOS_TEE_OPEN_SESSION); + + return ret; +} + +static int qseecom_qteec_close_session(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_req req; + int ret = 0; + + ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION); + return ret; +} + +static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_modfd_req req; + struct qseecom_command_scm_resp resp; + struct qseecom_qteec_ireq ireq; + struct qseecom_qteec_64bit_ireq ireq_64bit; + struct qseecom_registered_app_list *ptr_app; + bool found_app = false; + unsigned long flags; + int ret = 0; + int i = 0; + uint32_t reqd_len_sb_in = 0; + void *cmd_buf = NULL; + size_t cmd_len; + struct sglist_info *table = data->sglistinfo_ptr; + void *req_ptr = NULL; + void *resp_ptr = NULL; + + ret = copy_from_user(&req, argp, + sizeof(struct qseecom_qteec_modfd_req)); 
+ if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_validate_msg(data, + (struct qseecom_qteec_req *)(&req)); + if (ret) + return ret; + req_ptr = req.req_ptr; + resp_ptr = req.resp_ptr; + + /* find app_id & img_name from list */ + spin_lock_irqsave(&qseecom.registered_app_list_lock, flags); + list_for_each_entry(ptr_app, &qseecom.registered_app_list_head, + list) { + if ((ptr_app->app_id == data->client.app_id) && + (!strcmp(ptr_app->app_name, data->client.app_name))) { + found_app = true; + break; + } + } + spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags); + if (!found_app) { + pr_err("app_id %d (%s) is not found\n", data->client.app_id, + (char *)data->client.app_name); + return -ENOENT; + } + + /* validate offsets */ + for (i = 0; i < MAX_ION_FD; i++) { + if (req.ifd_data[i].fd) { + if (req.ifd_data[i].cmd_buf_offset >= req.req_len) + return -EINVAL; + } + } + req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.req_ptr); + req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data, + (uintptr_t)req.resp_ptr); + ret = __qseecom_update_qteec_req_buf(&req, data, false); + if (ret) + return ret; + + if (qseecom.qsee_version < QSEE_VERSION_40) { + ireq.app_id = data->client.app_id; + ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq.req_len = req.req_len; + ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq.resp_len = req.resp_len; + cmd_buf = (void *)&ireq; + cmd_len = sizeof(struct qseecom_qteec_ireq); + ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table); + ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + } else { + ireq_64bit.app_id = data->client.app_id; + ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)req_ptr); + ireq_64bit.req_len = req.req_len; + ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data, + (uintptr_t)resp_ptr); + ireq_64bit.resp_len = req.resp_len; + cmd_buf = (void *)&ireq_64bit; + cmd_len = sizeof(struct qseecom_qteec_64bit_ireq); + ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table); + ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE; + dmac_flush_range((void *)table, + (void *)table + SGLISTINFO_TABLE_SIZE); + } + reqd_len_sb_in = req.req_len + req.resp_len; + if (qseecom.whitelist_support == true) + *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST; + else + *(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND; + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, + reqd_len_sb_in, + ION_IOC_CLEAN_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + + __qseecom_reentrancy_check_if_this_app_blocked(ptr_app); + + ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + if (ret) { + pr_err("scm_call() failed with err: %d (app_id = %d)\n", + ret, data->client.app_id); + return ret; + } + + if (qseecom.qsee_reentrancy_support) { + ret = __qseecom_process_reentrancy(&resp, ptr_app, data); + } else { + if (resp.result == QSEOS_RESULT_INCOMPLETE) { + ret = __qseecom_process_incomplete_cmd(data, &resp); + if (ret) { + pr_err("process_incomplete_cmd failed err: %d\n", + ret); + return ret; + } + } else { + if (resp.result != QSEOS_RESULT_SUCCESS) { + pr_err("Response result %d not supported\n", + resp.result); + ret = -EINVAL; + } + } + } + ret = __qseecom_update_qteec_req_buf(&req, data, true); + if (ret) 
+ return ret; + + ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle, + data->client.sb_virt, data->client.sb_length, + ION_IOC_INV_CACHES); + if (ret) { + pr_err("cache operation failed %d\n", ret); + return ret; + } + return 0; +} + +static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_qteec_modfd_req req; + int ret = 0; + + ret = copy_from_user(&req, argp, + sizeof(struct qseecom_qteec_modfd_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req, + QSEOS_TEE_REQUEST_CANCELLATION); + + return ret; +} + +static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data) +{ + if (data->sglist_cnt) { + memset(data->sglistinfo_ptr, 0, + SGLISTINFO_TABLE_SIZE); + data->sglist_cnt = 0; + } +} + + +static int __qseecom_bus_scaling_enable(struct qseecom_dev_handle *data, + bool *perf_enabled) +{ + int ret = 0; + + if (qseecom.support_bus_scaling) { + if (!data->mode) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs( + data, HIGH); + mutex_unlock(&qsee_bw_mutex); + } + ret = qseecom_scale_bus_bandwidth_timer(INACTIVE); + if (ret) { + pr_err("Failed to set bw\n"); + ret = -EINVAL; + goto exit; + } + } + /* + * On targets where crypto clock is handled by HLOS, + * if clk_access_cnt is zero and perf_enabled is false, + * then the crypto clock was not enabled before sending cmd + * to tz, qseecom will enable the clock to avoid service failure. + */ + if (!qseecom.no_clock_support && + !qseecom.qsee.clk_access_cnt && !data->perf_enabled) { + pr_debug("ce clock is not enabled\n"); + ret = qseecom_perf_enable(data); + if (ret) { + pr_err("Failed to vote for clock with err %d\n", + ret); + ret = -EINVAL; + goto exit; + } + *perf_enabled = true; + } +exit: + return ret; +} + +static void __qseecom_bus_scaling_disable(struct qseecom_dev_handle *data, + bool perf_enabled) +{ + if (qseecom.support_bus_scaling) + __qseecom_add_bw_scale_down_timer( + QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + if (perf_enabled) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } +} + +long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg) +{ + int ret = 0; + struct qseecom_dev_handle *data = file->private_data; + void __user *argp = (void __user *) arg; + bool perf_enabled = false; + + if (!data) { + pr_err("Invalid/uninitialized device handle\n"); + return -EINVAL; + } + + if (data->abort) { + pr_err("Aborting qseecom driver\n"); + return -ENODEV; + } + + switch (cmd) { + case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("reg lstnr req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + pr_debug("ioctl register_listener_req()\n"); + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + data->type = QSEECOM_LISTENER_SERVICE; + ret = qseecom_register_listener(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed qseecom_register_listener: %d\n", ret); + break; + } + case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + pr_debug("ioctl unregister_listener_req()\n"); + mutex_lock(&app_access_lock); + 
atomic_inc(&data->ioctl_count); + ret = qseecom_unregister_listener(data); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed qseecom_unregister_listener: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SEND_CMD_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("send cmd req: invalid handle (%d) app_id(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + ret = __qseecom_bus_scaling_enable(data, &perf_enabled); + if (ret) { + mutex_unlock(&app_access_lock); + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_send_cmd(data, argp); + __qseecom_bus_scaling_disable(data, perf_enabled); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed qseecom_send_cmd: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ: + case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + ret = __qseecom_bus_scaling_enable(data, &perf_enabled); + if (ret) { + mutex_unlock(&app_access_lock); + break; + } + atomic_inc(&data->ioctl_count); + if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ) + ret = qseecom_send_modfd_cmd(data, argp); + else + ret = qseecom_send_modfd_cmd_64(data, argp); + __qseecom_bus_scaling_disable(data, perf_enabled); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed qseecom_send_cmd: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_IOCTL_RECEIVE_REQ: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("receive req: invalid handle (%d), lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_receive_req(data); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + if (ret && (ret != -ERESTARTSYS)) + pr_err("failed qseecom_receive_req: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SEND_RESP_REQ: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("send resp req: invalid handle (%d), lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + if (!qseecom.qsee_reentrancy_support) + ret = qseecom_send_resp(); + else + ret = qseecom_reentrancy_send_resp(data); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + if (ret) + pr_err("failed qseecom_send_resp: %d\n", ret); + break; + } + case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: { + if ((data->type != QSEECOM_CLIENT_APP) && + (data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_SECURE_SERVICE)) { + pr_err("set mem param req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data); + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_set_client_mem_param(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed Qqseecom_set_mem_param request: %d\n", + ret); + break; + } + case 
QSEECOM_IOCTL_LOAD_APP_REQ: { + if ((data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("load app req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_CLIENT_APP; + pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data); + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_load_app(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed load_app request: %d\n", ret); + break; + } + case QSEECOM_IOCTL_UNLOAD_APP_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("unload app req:invalid handle(%d) app_id(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data); + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_unload_app(data, false); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed unload_app request: %d\n", ret); + break; + } + case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: { + atomic_inc(&data->ioctl_count); + ret = qseecom_get_qseos_version(data, argp); + if (ret) + pr_err("qseecom_get_qseos_version: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_PERF_ENABLE_REQ:{ + if ((data->type != QSEECOM_GENERIC) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("perf enable req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if ((data->type == QSEECOM_CLIENT_APP) && + (data->client.app_id == 0)) { + pr_err("perf enable req:invalid handle(%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + __qseecom_register_bus_bandwidth_needs(data, HIGH); + mutex_unlock(&qsee_bw_mutex); + } else { + ret = qseecom_perf_enable(data); + if (ret) + pr_err("Fail to vote for clocks %d\n", ret); + } + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_PERF_DISABLE_REQ:{ + if ((data->type != QSEECOM_SECURE_SERVICE) && + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("perf disable req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if ((data->type == QSEECOM_CLIENT_APP) && + (data->client.app_id == 0)) { + pr_err("perf disable: invalid handle (%d)app_id(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + if (!qseecom.support_bus_scaling) { + qsee_disable_clock_vote(data, CLK_DFAB); + qsee_disable_clock_vote(data, CLK_SFPB); + } else { + mutex_lock(&qsee_bw_mutex); + qseecom_unregister_bus_bandwidth_needs(data); + mutex_unlock(&qsee_bw_mutex); + } + atomic_dec(&data->ioctl_count); + break; + } + + case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: { + /* If crypto clock is not handled by HLOS, return directly. 
*/ + if (qseecom.no_clock_support) { + pr_debug("crypto clock is not handled by HLOS\n"); + break; + } + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("set bus scale: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_scale_bus_bandwidth(data, argp); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("load ext elf req: invalid client handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_UNAVAILABLE_CLIENT_APP; + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_load_external_elf(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed load_external_elf request: %d\n", ret); + break; + } + case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: { + if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) { + pr_err("unload ext elf req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_unload_external_elf(data); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed unload_app request: %d\n", ret); + break; + } + case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: { + data->type = QSEECOM_CLIENT_APP; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data); + ret = qseecom_query_app_loaded(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("send cmd svc req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->type = QSEECOM_SECURE_SERVICE; + if (qseecom.qsee_version < QSEE_VERSION_03) { + pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_send_service_cmd(data, argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_CREATE_KEY_REQ: { + if (!(qseecom.support_pfe || qseecom.support_fde)) + pr_err("Features requiring key init not supported\n"); + if (data->type != QSEECOM_GENERIC) { + pr_err("create key req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_05) { + pr_err("Create Key feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_create_key(data, argp); + if (ret) + pr_err("failed to create encryption key: %d\n", ret); + + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_WIPE_KEY_REQ: { + if (!(qseecom.support_pfe || qseecom.support_fde)) + pr_err("Features requiring key init not supported\n"); + if (data->type != QSEECOM_GENERIC) { + pr_err("wipe key req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_05) { + pr_err("Wipe Key feature unsupported in qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + data->released = true; + mutex_lock(&app_access_lock); + 
atomic_inc(&data->ioctl_count); + ret = qseecom_wipe_key(data, argp); + if (ret) + pr_err("failed to wipe encryption key: %d\n", ret); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: { + if (!(qseecom.support_pfe || qseecom.support_fde)) + pr_err("Features requiring key init not supported\n"); + if (data->type != QSEECOM_GENERIC) { + pr_err("update key req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_05) { + pr_err("Update Key feature unsupported in qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_update_key_user_info(data, argp); + if (ret) + pr_err("failed to update key user info: %d\n", ret); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("save part hash req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_save_partition_hash(argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("ES activated req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_is_es_activated(argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: { + if (data->type != QSEECOM_GENERIC) { + pr_err("MDTP cipher DIP req: invalid handle (%d)\n", + data->type); + ret = -EINVAL; + break; + } + data->released = true; + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_mdtp_cipher_dip(argp); + atomic_dec(&data->ioctl_count); + mutex_unlock(&app_access_lock); + break; + } + case QSEECOM_IOCTL_SEND_MODFD_RESP: + case QSEECOM_IOCTL_SEND_MODFD_RESP_64: { + if ((data->listener.id == 0) || + (data->type != QSEECOM_LISTENER_SERVICE)) { + pr_err("receive req: invalid handle (%d), lid(%d)\n", + data->type, data->listener.id); + ret = -EINVAL; + break; + } + atomic_inc(&data->ioctl_count); + if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP) + ret = qseecom_send_modfd_resp(data, argp); + else + ret = qseecom_send_modfd_resp_64(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + if (ret) + pr_err("failed qseecom_send_mod_resp: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Open session: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + ret = __qseecom_bus_scaling_enable(data, &perf_enabled); + if (ret) { + mutex_unlock(&app_access_lock); + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_open_session(data, argp); + __qseecom_bus_scaling_disable(data, perf_enabled); + atomic_dec(&data->ioctl_count); + 
wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed open_session_cmd: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Close session: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_close_session(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed close_session_cmd: %d\n", ret); + break; + } + case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + ret = __qseecom_bus_scaling_enable(data, &perf_enabled); + if (ret) { + mutex_unlock(&app_access_lock); + break; + } + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_invoke_modfd_cmd(data, argp); + __qseecom_bus_scaling_disable(data, perf_enabled); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed Invoke cmd: %d\n", ret); + __qseecom_clean_data_sglistinfo(data); + break; + } + case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: { + if ((data->client.app_id == 0) || + (data->type != QSEECOM_CLIENT_APP)) { + pr_err("Cancel req: invalid handle (%d) appid(%d)\n", + data->type, data->client.app_id); + ret = -EINVAL; + break; + } + if (qseecom.qsee_version < QSEE_VERSION_40) { + pr_err("GP feature unsupported: qsee ver %u\n", + qseecom.qsee_version); + return -EINVAL; + } + /* Only one client allowed here at a time */ + mutex_lock(&app_access_lock); + atomic_inc(&data->ioctl_count); + ret = qseecom_qteec_request_cancellation(data, argp); + atomic_dec(&data->ioctl_count); + wake_up_all(&data->abort_wq); + mutex_unlock(&app_access_lock); + if (ret) + pr_err("failed request_cancellation: %d\n", ret); + break; + } + case QSEECOM_IOCTL_GET_CE_PIPE_INFO: { + atomic_inc(&data->ioctl_count); + ret = qseecom_get_ce_info(data, argp); + if (ret) + pr_err("failed get fde ce pipe info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: { + atomic_inc(&data->ioctl_count); + ret = qseecom_free_ce_info(data, argp); + if (ret) + pr_err("failed get fde ce pipe info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: { + atomic_inc(&data->ioctl_count); + ret = qseecom_query_ce_info(data, argp); + if (ret) + pr_err("failed get fde ce pipe info: %d\n", ret); + atomic_dec(&data->ioctl_count); + break; + } + default: + pr_err("Invalid IOCTL: 0x%x\n", cmd); + return -EINVAL; + } + return ret; +} + +static int qseecom_open(struct inode *inode, struct file *file) +{ + int ret = 0; + struct qseecom_dev_handle *data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) { + 
pr_err("kmalloc failed\n"); + return -ENOMEM; + } + file->private_data = data; + data->abort = 0; + data->type = QSEECOM_GENERIC; + data->released = false; + memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE); + data->mode = INACTIVE; + init_waitqueue_head(&data->abort_wq); + atomic_set(&data->ioctl_count, 0); + return ret; +} + +static int qseecom_release(struct inode *inode, struct file *file) +{ + struct qseecom_dev_handle *data = file->private_data; + int ret = 0; + + if (data->released == false) { + pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n", + data->type, data->mode, data); + switch (data->type) { + case QSEECOM_LISTENER_SERVICE: + mutex_lock(&app_access_lock); + ret = qseecom_unregister_listener(data); + mutex_unlock(&app_access_lock); + break; + case QSEECOM_CLIENT_APP: + mutex_lock(&app_access_lock); + ret = qseecom_unload_app(data, true); + mutex_unlock(&app_access_lock); + break; + case QSEECOM_SECURE_SERVICE: + case QSEECOM_GENERIC: + ret = qseecom_unmap_ion_allocated_memory(data); + if (ret) + pr_err("Ion Unmap failed\n"); + break; + case QSEECOM_UNAVAILABLE_CLIENT_APP: + break; + default: + pr_err("Unsupported clnt_handle_type %d", + data->type); + break; + } + } + + if (qseecom.support_bus_scaling) { + mutex_lock(&qsee_bw_mutex); + if (data->mode != INACTIVE) { + qseecom_unregister_bus_bandwidth_needs(data); + if (qseecom.cumulative_mode == INACTIVE) { + ret = __qseecom_set_msm_bus_request(INACTIVE); + if (ret) + pr_err("Fail to scale down bus\n"); + } + } + mutex_unlock(&qsee_bw_mutex); + } else { + if (data->fast_load_enabled == true) + qsee_disable_clock_vote(data, CLK_SFPB); + if (data->perf_enabled == true) + qsee_disable_clock_vote(data, CLK_DFAB); + } + kfree(data); + + return ret; +} + +static const struct file_operations qseecom_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = qseecom_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = compat_qseecom_ioctl, +#endif + .open = qseecom_open, + .release = qseecom_release +}; + +static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce) +{ + int rc = 0; + struct device *pdev; + struct qseecom_clk *qclk; + char *core_clk_src = NULL; + char *core_clk = NULL; + char *iface_clk = NULL; + char *bus_clk = NULL; + + switch (ce) { + case CLK_QSEE: { + core_clk_src = "core_clk_src"; + core_clk = "core_clk"; + iface_clk = "iface_clk"; + bus_clk = "bus_clk"; + qclk = &qseecom.qsee; + qclk->instance = CLK_QSEE; + break; + }; + case CLK_CE_DRV: { + core_clk_src = "ce_drv_core_clk_src"; + core_clk = "ce_drv_core_clk"; + iface_clk = "ce_drv_iface_clk"; + bus_clk = "ce_drv_bus_clk"; + qclk = &qseecom.ce_drv; + qclk->instance = CLK_CE_DRV; + break; + }; + default: + pr_err("Invalid ce hw instance: %d!\n", ce); + return -EIO; + } + + if (qseecom.no_clock_support) { + qclk->ce_core_clk = NULL; + qclk->ce_clk = NULL; + qclk->ce_bus_clk = NULL; + qclk->ce_core_src_clk = NULL; + return 0; + } + + pdev = qseecom.pdev; + + /* Get CE3 src core clk. 
*/ + qclk->ce_core_src_clk = clk_get(pdev, core_clk_src); + if (!IS_ERR(qclk->ce_core_src_clk)) { + rc = clk_set_rate(qclk->ce_core_src_clk, + qseecom.ce_opp_freq_hz); + if (rc) { + clk_put(qclk->ce_core_src_clk); + qclk->ce_core_src_clk = NULL; + pr_err("Unable to set the core src clk @%uMhz.\n", + qseecom.ce_opp_freq_hz/CE_CLK_DIV); + return -EIO; + } + } else { + pr_warn("Unable to get CE core src clk, set to NULL\n"); + qclk->ce_core_src_clk = NULL; + } + + /* Get CE core clk */ + qclk->ce_core_clk = clk_get(pdev, core_clk); + if (IS_ERR(qclk->ce_core_clk)) { + rc = PTR_ERR(qclk->ce_core_clk); + pr_err("Unable to get CE core clk\n"); + if (qclk->ce_core_src_clk != NULL) + clk_put(qclk->ce_core_src_clk); + return -EIO; + } + + /* Get CE Interface clk */ + qclk->ce_clk = clk_get(pdev, iface_clk); + if (IS_ERR(qclk->ce_clk)) { + rc = PTR_ERR(qclk->ce_clk); + pr_err("Unable to get CE interface clk\n"); + if (qclk->ce_core_src_clk != NULL) + clk_put(qclk->ce_core_src_clk); + clk_put(qclk->ce_core_clk); + return -EIO; + } + + /* Get CE AXI clk */ + qclk->ce_bus_clk = clk_get(pdev, bus_clk); + if (IS_ERR(qclk->ce_bus_clk)) { + rc = PTR_ERR(qclk->ce_bus_clk); + pr_err("Unable to get CE BUS interface clk\n"); + if (qclk->ce_core_src_clk != NULL) + clk_put(qclk->ce_core_src_clk); + clk_put(qclk->ce_core_clk); + clk_put(qclk->ce_clk); + return -EIO; + } + + return rc; +} + +static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce) +{ + struct qseecom_clk *qclk; + + if (ce == CLK_QSEE) + qclk = &qseecom.qsee; + else + qclk = &qseecom.ce_drv; + + if (qclk->ce_clk != NULL) { + clk_put(qclk->ce_clk); + qclk->ce_clk = NULL; + } + if (qclk->ce_core_clk != NULL) { + clk_put(qclk->ce_core_clk); + qclk->ce_core_clk = NULL; + } + if (qclk->ce_bus_clk != NULL) { + clk_put(qclk->ce_bus_clk); + qclk->ce_bus_clk = NULL; + } + if (qclk->ce_core_src_clk != NULL) { + clk_put(qclk->ce_core_src_clk); + qclk->ce_core_src_clk = NULL; + } + qclk->instance = CLK_INVALID; +} + +static int qseecom_retrieve_ce_data(struct platform_device *pdev) +{ + int rc = 0; + uint32_t hlos_num_ce_hw_instances; + uint32_t disk_encrypt_pipe; + uint32_t file_encrypt_pipe; + uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0}; + int i; + const int *tbl; + int size; + int entry; + struct qseecom_crypto_info *pfde_tbl = NULL; + struct qseecom_crypto_info *p; + int tbl_size; + int j; + bool old_db = true; + struct qseecom_ce_info_use *pce_info_use; + uint32_t *unit_tbl = NULL; + int total_units = 0; + struct qseecom_ce_pipe_entry *pce_entry; + + qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL; + qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0; + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,qsee-ce-hw-instance", + &qseecom.ce_info.qsee_ce_hw_instance)) { + pr_err("Fail to get qsee ce hw instance information.\n"); + rc = -EINVAL; + goto out; + } else { + pr_debug("qsee-ce-hw-instance=0x%x\n", + qseecom.ce_info.qsee_ce_hw_instance); + } + + qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-fde"); + qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-pfe"); + + if (!qseecom.support_pfe && !qseecom.support_fde) { + pr_warn("Device does not support PFE/FDE"); + goto out; + } + + if (qseecom.support_fde) + tbl = of_get_property((&pdev->dev)->of_node, + "qcom,full-disk-encrypt-info", &size); + else + tbl = NULL; + if (tbl) { + old_db = false; + if (size % sizeof(struct qseecom_crypto_info)) { + pr_err("full-disk-encrypt-info tbl size(%d)\n", + size); 
+ rc = -EINVAL; + goto out; + } + tbl_size = size / sizeof + (struct qseecom_crypto_info); + + pfde_tbl = kzalloc(size, GFP_KERNEL); + unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL); + total_units = 0; + + if (!pfde_tbl || !unit_tbl) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + if (of_property_read_u32_array((&pdev->dev)->of_node, + "qcom,full-disk-encrypt-info", + (u32 *)pfde_tbl, size/sizeof(u32))) { + pr_err("failed to read full-disk-encrypt-info tbl\n"); + rc = -EINVAL; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + for (j = 0; j < total_units; j++) { + if (p->unit_num == *(unit_tbl + j)) + break; + } + if (j == total_units) { + *(unit_tbl + total_units) = p->unit_num; + total_units++; + } + } + + qseecom.ce_info.num_fde = total_units; + pce_info_use = qseecom.ce_info.fde = kcalloc( + total_units, sizeof(struct qseecom_ce_info_use), + GFP_KERNEL); + if (!pce_info_use) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + + for (j = 0; j < total_units; j++, pce_info_use++) { + pce_info_use->unit_num = *(unit_tbl + j); + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE; + pce_info_use->num_ce_pipe_entries = 0; + pce_info_use->ce_pipe_entry = NULL; + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) + pce_info_use->num_ce_pipe_entries++; + } + + entry = pce_info_use->num_ce_pipe_entries; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) { + pce_entry->ce_num = p->ce; + pce_entry->ce_pipe_pair = + p->pipe_pair; + pce_entry->valid = true; + pce_entry++; + } + } + } + kfree(unit_tbl); + unit_tbl = NULL; + kfree(pfde_tbl); + pfde_tbl = NULL; + } + + if (qseecom.support_pfe) + tbl = of_get_property((&pdev->dev)->of_node, + "qcom,per-file-encrypt-info", &size); + else + tbl = NULL; + if (tbl) { + old_db = false; + if (size % sizeof(struct qseecom_crypto_info)) { + pr_err("per-file-encrypt-info tbl size(%d)\n", + size); + rc = -EINVAL; + goto out; + } + tbl_size = size / sizeof + (struct qseecom_crypto_info); + + pfde_tbl = kzalloc(size, GFP_KERNEL); + unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL); + total_units = 0; + if (!pfde_tbl || !unit_tbl) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + if (of_property_read_u32_array((&pdev->dev)->of_node, + "qcom,per-file-encrypt-info", + (u32 *)pfde_tbl, size/sizeof(u32))) { + pr_err("failed to read per-file-encrypt-info tbl\n"); + rc = -EINVAL; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + for (j = 0; j < total_units; j++) { + if (p->unit_num == *(unit_tbl + j)) + break; + } + if (j == total_units) { + *(unit_tbl + total_units) = p->unit_num; + total_units++; + } + } + + qseecom.ce_info.num_pfe = total_units; + pce_info_use = qseecom.ce_info.pfe = kcalloc( + total_units, sizeof(struct qseecom_ce_info_use), + GFP_KERNEL); + if (!pce_info_use) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + + for (j = 0; j < total_units; j++, pce_info_use++) { + pce_info_use->unit_num = *(unit_tbl + j); + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE; + pce_info_use->num_ce_pipe_entries = 0; + pce_info_use->ce_pipe_entry = NULL; + for (i 
= 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) + pce_info_use->num_ce_pipe_entries++; + } + + entry = pce_info_use->num_ce_pipe_entries; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + + for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) { + if (p->unit_num == pce_info_use->unit_num) { + pce_entry->ce_num = p->ce; + pce_entry->ce_pipe_pair = + p->pipe_pair; + pce_entry->valid = true; + pce_entry++; + } + } + } + kfree(unit_tbl); + unit_tbl = NULL; + kfree(pfde_tbl); + pfde_tbl = NULL; + } + + if (!old_db) + goto out1; + + if (of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-multiple-ce-hw-instance")) { + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,hlos-num-ce-hw-instances", + &hlos_num_ce_hw_instances)) { + pr_err("Fail: get hlos number of ce hw instance\n"); + rc = -EINVAL; + goto out; + } + } else { + hlos_num_ce_hw_instances = 1; + } + + if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) { + pr_err("Fail: hlos number of ce hw instance exceeds %d\n", + MAX_CE_PIPE_PAIR_PER_UNIT); + rc = -EINVAL; + goto out; + } + + if (of_property_read_u32_array((&pdev->dev)->of_node, + "qcom,hlos-ce-hw-instance", hlos_ce_hw_instance, + hlos_num_ce_hw_instances)) { + pr_err("Fail: get hlos ce hw instance info\n"); + rc = -EINVAL; + goto out; + } + + if (qseecom.support_fde) { + pce_info_use = qseecom.ce_info.fde = + kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL); + if (!pce_info_use) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + /* by default for old db */ + qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT; + pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT; + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE; + pce_info_use->ce_pipe_entry = NULL; + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,disk-encrypt-pipe-pair", + &disk_encrypt_pipe)) { + pr_err("Fail to get FDE pipe information.\n"); + rc = -EINVAL; + goto out; + } else { + pr_debug("disk-encrypt-pipe-pair=0x%x", + disk_encrypt_pipe); + } + entry = pce_info_use->num_ce_pipe_entries = + hlos_num_ce_hw_instances; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + for (i = 0; i < entry; i++) { + pce_entry->ce_num = hlos_ce_hw_instance[i]; + pce_entry->ce_pipe_pair = disk_encrypt_pipe; + pce_entry->valid = 1; + pce_entry++; + } + } else { + pr_warn("Device does not support FDE"); + disk_encrypt_pipe = 0xff; + } + if (qseecom.support_pfe) { + pce_info_use = qseecom.ce_info.pfe = + kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL); + if (!pce_info_use) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + /* by default for old db */ + qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT; + pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT; + pce_info_use->alloc = false; + pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE; + pce_info_use->ce_pipe_entry = NULL; + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,file-encrypt-pipe-pair", + &file_encrypt_pipe)) { + pr_err("Fail to get PFE pipe information.\n"); + rc = -EINVAL; + goto out; + } else { + pr_debug("file-encrypt-pipe-pair=0x%x", + file_encrypt_pipe); + } + entry = pce_info_use->num_ce_pipe_entries = + 
hlos_num_ce_hw_instances; + pce_entry = pce_info_use->ce_pipe_entry = + kcalloc(entry, + sizeof(struct qseecom_ce_pipe_entry), + GFP_KERNEL); + if (pce_entry == NULL) { + pr_err("failed to alloc memory\n"); + rc = -ENOMEM; + goto out; + } + for (i = 0; i < entry; i++) { + pce_entry->ce_num = hlos_ce_hw_instance[i]; + pce_entry->ce_pipe_pair = file_encrypt_pipe; + pce_entry->valid = 1; + pce_entry++; + } + } else { + pr_warn("Device does not support PFE"); + file_encrypt_pipe = 0xff; + } + +out1: + qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance; + qseecom.ce_drv.instance = hlos_ce_hw_instance[0]; +out: + if (rc) { + if (qseecom.ce_info.fde) { + pce_info_use = qseecom.ce_info.fde; + for (i = 0; i < qseecom.ce_info.num_fde; i++) { + pce_entry = pce_info_use->ce_pipe_entry; + kfree(pce_entry); + pce_info_use++; + } + } + kfree(qseecom.ce_info.fde); + qseecom.ce_info.fde = NULL; + if (qseecom.ce_info.pfe) { + pce_info_use = qseecom.ce_info.pfe; + for (i = 0; i < qseecom.ce_info.num_pfe; i++) { + pce_entry = pce_info_use->ce_pipe_entry; + kfree(pce_entry); + pce_info_use++; + } + } + kfree(qseecom.ce_info.pfe); + qseecom.ce_info.pfe = NULL; + } + kfree(unit_tbl); + kfree(pfde_tbl); + return rc; +} + +static int qseecom_get_ce_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_ce_info_req req; + struct qseecom_ce_info_req *pinfo = &req; + int ret = 0; + int i; + unsigned int entries; + struct qseecom_ce_info_use *pce_info_use, *p; + int total = 0; + bool found = false; + struct qseecom_ce_pipe_entry *pce_entry; + + ret = copy_from_user(pinfo, argp, + sizeof(struct qseecom_ce_info_req)); + if (ret) { + pr_err("copy_from_user failed\n"); + return ret; + } + + switch (pinfo->usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", pinfo->usage); + return -EINVAL; + } + + pce_info_use = NULL; + for (i = 0; i < total; i++) { + if (!p->alloc) + pce_info_use = p; + else if (!memcmp(p->handle, pinfo->handle, + MAX_CE_INFO_HANDLE_SIZE)) { + pce_info_use = p; + found = true; + break; + } + p++; + } + + if (pce_info_use == NULL) + return -EBUSY; + + pinfo->unit_num = pce_info_use->unit_num; + if (!pce_info_use->alloc) { + pce_info_use->alloc = true; + memcpy(pce_info_use->handle, + pinfo->handle, MAX_CE_INFO_HANDLE_SIZE); + } + if (pce_info_use->num_ce_pipe_entries > + MAX_CE_PIPE_PAIR_PER_UNIT) + entries = MAX_CE_PIPE_PAIR_PER_UNIT; + else + entries = pce_info_use->num_ce_pipe_entries; + pinfo->num_ce_pipe_entries = entries; + pce_entry = pce_info_use->ce_pipe_entry; + for (i = 0; i < entries; i++, pce_entry++) + pinfo->ce_pipe_entry[i] = *pce_entry; + for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++) + pinfo->ce_pipe_entry[i].valid = 0; + + if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + } + return ret; +} + +static int qseecom_free_ce_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_ce_info_req req; + struct qseecom_ce_info_req *pinfo = &req; + int ret = 
0; + struct qseecom_ce_info_use *p; + int total = 0; + int i; + bool found = false; + + ret = copy_from_user(pinfo, argp, + sizeof(struct qseecom_ce_info_req)); + if (ret) + return ret; + + switch (pinfo->usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", pinfo->usage); + return -EINVAL; + } + + for (i = 0; i < total; i++) { + if (p->alloc && + !memcmp(p->handle, pinfo->handle, + MAX_CE_INFO_HANDLE_SIZE)) { + memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE); + p->alloc = false; + found = true; + break; + } + p++; + } + return ret; +} + +static int qseecom_query_ce_info(struct qseecom_dev_handle *data, + void __user *argp) +{ + struct qseecom_ce_info_req req; + struct qseecom_ce_info_req *pinfo = &req; + int ret = 0; + int i; + unsigned int entries; + struct qseecom_ce_info_use *pce_info_use, *p; + int total = 0; + bool found = false; + struct qseecom_ce_pipe_entry *pce_entry; + + ret = copy_from_user(pinfo, argp, + sizeof(struct qseecom_ce_info_req)); + if (ret) + return ret; + + switch (pinfo->usage) { + case QSEOS_KM_USAGE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION: + case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION: + if (qseecom.support_fde) { + p = qseecom.ce_info.fde; + total = qseecom.ce_info.num_fde; + } else { + pr_err("system does not support fde\n"); + return -EINVAL; + } + break; + case QSEOS_KM_USAGE_FILE_ENCRYPTION: + if (qseecom.support_pfe) { + p = qseecom.ce_info.pfe; + total = qseecom.ce_info.num_pfe; + } else { + pr_err("system does not support pfe\n"); + return -EINVAL; + } + break; + default: + pr_err("unsupported usage %d\n", pinfo->usage); + return -EINVAL; + } + + pce_info_use = NULL; + pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM; + pinfo->num_ce_pipe_entries = 0; + for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++) + pinfo->ce_pipe_entry[i].valid = 0; + + for (i = 0; i < total; i++) { + + if (p->alloc && !memcmp(p->handle, + pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) { + pce_info_use = p; + found = true; + break; + } + p++; + } + if (!pce_info_use) + goto out; + pinfo->unit_num = pce_info_use->unit_num; + if (pce_info_use->num_ce_pipe_entries > + MAX_CE_PIPE_PAIR_PER_UNIT) + entries = MAX_CE_PIPE_PAIR_PER_UNIT; + else + entries = pce_info_use->num_ce_pipe_entries; + pinfo->num_ce_pipe_entries = entries; + pce_entry = pce_info_use->ce_pipe_entry; + for (i = 0; i < entries; i++, pce_entry++) + pinfo->ce_pipe_entry[i] = *pce_entry; + for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++) + pinfo->ce_pipe_entry[i].valid = 0; +out: + if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) { + pr_err("copy_to_user failed\n"); + ret = -EFAULT; + } + return ret; +} + +/* + * Check whitelist feature, and if TZ feature version is < 1.0.0, + * then whitelist feature is not supported. 
+ */ +static int qseecom_check_whitelist_feature(void) +{ + u64 version = 0; + int ret = scm_get_feat_version(FEATURE_ID_WHITELIST, &version); + + return (ret == 0) && (version >= MAKE_WHITELIST_VERSION(1, 0, 0)); +} + +static int qseecom_probe(struct platform_device *pdev) +{ + int rc; + int i; + uint32_t feature = 10; + struct device *class_dev; + struct msm_bus_scale_pdata *qseecom_platform_support = NULL; + struct qseecom_command_scm_resp resp; + struct qseecom_ce_info_use *pce_info_use = NULL; + + qseecom.qsee_bw_count = 0; + qseecom.qsee_perf_client = 0; + qseecom.qsee_sfpb_bw_count = 0; + + qseecom.qsee.ce_core_clk = NULL; + qseecom.qsee.ce_clk = NULL; + qseecom.qsee.ce_core_src_clk = NULL; + qseecom.qsee.ce_bus_clk = NULL; + + qseecom.cumulative_mode = 0; + qseecom.current_mode = INACTIVE; + qseecom.support_bus_scaling = false; + qseecom.support_fde = false; + qseecom.support_pfe = false; + + qseecom.ce_drv.ce_core_clk = NULL; + qseecom.ce_drv.ce_clk = NULL; + qseecom.ce_drv.ce_core_src_clk = NULL; + qseecom.ce_drv.ce_bus_clk = NULL; + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY); + + qseecom.app_block_ref_cnt = 0; + init_waitqueue_head(&qseecom.app_block_wq); + qseecom.whitelist_support = true; + + rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV); + if (rc < 0) { + pr_err("alloc_chrdev_region failed %d\n", rc); + return rc; + } + + driver_class = class_create(THIS_MODULE, QSEECOM_DEV); + if (IS_ERR(driver_class)) { + rc = -ENOMEM; + pr_err("class_create failed %d\n", rc); + goto exit_unreg_chrdev_region; + } + + class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL, + QSEECOM_DEV); + if (IS_ERR(class_dev)) { + pr_err("class_device_create failed %d\n", rc); + rc = -ENOMEM; + goto exit_destroy_class; + } + + cdev_init(&qseecom.cdev, &qseecom_fops); + qseecom.cdev.owner = THIS_MODULE; + + rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1); + if (rc < 0) { + pr_err("cdev_add failed %d\n", rc); + goto exit_destroy_device; + } + + INIT_LIST_HEAD(&qseecom.registered_listener_list_head); + spin_lock_init(&qseecom.registered_listener_list_lock); + INIT_LIST_HEAD(&qseecom.registered_app_list_head); + spin_lock_init(&qseecom.registered_app_list_lock); + INIT_LIST_HEAD(&qseecom.registered_kclient_list_head); + spin_lock_init(&qseecom.registered_kclient_list_lock); + init_waitqueue_head(&qseecom.send_resp_wq); + qseecom.send_resp_flag = 0; + + qseecom.qsee_version = QSEEE_VERSION_00; + rc = qseecom_scm_call(6, 3, &feature, sizeof(feature), + &resp, sizeof(resp)); + pr_info("qseecom.qsee_version = 0x%x\n", resp.result); + if (rc) { + pr_err("Failed to get QSEE version info %d\n", rc); + goto exit_del_cdev; + } + qseecom.qsee_version = resp.result; + qseecom.qseos_version = QSEOS_VERSION_14; + qseecom.commonlib_loaded = false; + qseecom.commonlib64_loaded = false; + qseecom.pdev = class_dev; + /* Create ION msm client */ + qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel"); + if (IS_ERR_OR_NULL(qseecom.ion_clnt)) { + pr_err("Ion client cannot be created\n"); + + if (qseecom.ion_clnt != ERR_PTR(-EPROBE_DEFER)) + rc = -ENOMEM; + else + rc = -EPROBE_DEFER; + goto exit_del_cdev; + } + + /* register client for bus scaling */ + if (pdev->dev.of_node) { + qseecom.pdev->of_node = pdev->dev.of_node; + qseecom.support_bus_scaling = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,support-bus-scaling"); + rc = qseecom_retrieve_ce_data(pdev); + if (rc) + goto exit_destroy_ion_client; + qseecom.appsbl_qseecom_support = + 
of_property_read_bool((&pdev->dev)->of_node, + "qcom,appsbl-qseecom-support"); + pr_debug("qseecom.appsbl_qseecom_support = 0x%x", + qseecom.appsbl_qseecom_support); + + qseecom.commonlib64_loaded = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,commonlib64-loaded-by-uefi"); + pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x", + qseecom.commonlib64_loaded); + qseecom.fde_key_size = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,fde-key-size"); + qseecom.no_clock_support = + of_property_read_bool((&pdev->dev)->of_node, + "qcom,no-clock-support"); + if (!qseecom.no_clock_support) { + pr_info("qseecom clocks handled by other subsystem\n"); + } else { + pr_info("no-clock-support=0x%x", + qseecom.no_clock_support); + } + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,qsee-reentrancy-support", + &qseecom.qsee_reentrancy_support)) { + pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n"); + qseecom.qsee_reentrancy_support = 0; + } else { + pr_warn("qseecom.qsee_reentrancy_support = %d\n", + qseecom.qsee_reentrancy_support); + } + + /* + * The qseecom bus scaling flag can not be enabled when + * crypto clock is not handled by HLOS. + */ + if (qseecom.no_clock_support && qseecom.support_bus_scaling) { + pr_err("support_bus_scaling flag can not be enabled.\n"); + rc = -EINVAL; + goto exit_destroy_ion_client; + } + + if (of_property_read_u32((&pdev->dev)->of_node, + "qcom,ce-opp-freq", + &qseecom.ce_opp_freq_hz)) { + pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n"); + qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ; + } + rc = __qseecom_init_clk(CLK_QSEE); + if (rc) + goto exit_destroy_ion_client; + + if ((qseecom.qsee.instance != qseecom.ce_drv.instance) && + (qseecom.support_pfe || qseecom.support_fde)) { + rc = __qseecom_init_clk(CLK_CE_DRV); + if (rc) { + __qseecom_deinit_clk(CLK_QSEE); + goto exit_destroy_ion_client; + } + } else { + struct qseecom_clk *qclk; + + qclk = &qseecom.qsee; + qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk; + qseecom.ce_drv.ce_clk = qclk->ce_clk; + qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk; + qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk; + } + + qseecom_platform_support = (struct msm_bus_scale_pdata *) + msm_bus_cl_get_pdata(pdev); + if (qseecom.qsee_version >= (QSEE_VERSION_02) && + (!qseecom.is_apps_region_protected && + !qseecom.appsbl_qseecom_support)) { + struct resource *resource = NULL; + struct qsee_apps_region_info_ireq req; + struct qsee_apps_region_info_64bit_ireq req_64bit; + struct qseecom_command_scm_resp resp; + void *cmd_buf = NULL; + size_t cmd_len; + + resource = platform_get_resource_byname(pdev, + IORESOURCE_MEM, "secapp-region"); + if (resource) { + if (qseecom.qsee_version < QSEE_VERSION_40) { + req.qsee_cmd_id = + QSEOS_APP_REGION_NOTIFICATION; + req.addr = (uint32_t)resource->start; + req.size = resource_size(resource); + cmd_buf = (void *)&req; + cmd_len = sizeof(struct + qsee_apps_region_info_ireq); + pr_warn("secure app region addr=0x%x size=0x%x", + req.addr, req.size); + } else { + req_64bit.qsee_cmd_id = + QSEOS_APP_REGION_NOTIFICATION; + req_64bit.addr = resource->start; + req_64bit.size = resource_size( + resource); + cmd_buf = (void *)&req_64bit; + cmd_len = sizeof(struct + qsee_apps_region_info_64bit_ireq); + pr_warn("secure app region addr=0x%llx size=0x%x", + req_64bit.addr, req_64bit.size); + } + } else { + pr_err("Fail to get secure app region info\n"); + rc = -EINVAL; + goto exit_deinit_clock; + } + rc = __qseecom_enable_clk(CLK_QSEE); + 
if (rc) { + pr_err("CLK_QSEE enabling failed (%d)\n", rc); + rc = -EIO; + goto exit_deinit_clock; + } + rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, + cmd_buf, cmd_len, + &resp, sizeof(resp)); + __qseecom_disable_clk(CLK_QSEE); + if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) { + pr_err("send secapp reg fail %d resp.res %d\n", + rc, resp.result); + rc = -EINVAL; + goto exit_deinit_clock; + } + } + /* + * By default, appsbl only loads cmnlib. If OEM changes appsbl to + * load cmnlib64 too, while cmnlib64 img is not present in non_hlos.bin, + * Pls add "qseecom.commonlib64_loaded = true" here too. + */ + if (qseecom.is_apps_region_protected || + qseecom.appsbl_qseecom_support) + qseecom.commonlib_loaded = true; + } else { + qseecom_platform_support = (struct msm_bus_scale_pdata *) + pdev->dev.platform_data; + } + if (qseecom.support_bus_scaling) { + init_timer(&(qseecom.bw_scale_down_timer)); + INIT_WORK(&qseecom.bw_inactive_req_ws, + qseecom_bw_inactive_req_work); + qseecom.bw_scale_down_timer.function = + qseecom_scale_bus_bandwidth_timer_callback; + } + qseecom.timer_running = false; + qseecom.qsee_perf_client = msm_bus_scale_register_client( + qseecom_platform_support); + + qseecom.whitelist_support = qseecom_check_whitelist_feature(); + pr_warn("qseecom.whitelist_support = %d\n", + qseecom.whitelist_support); + + if (!qseecom.qsee_perf_client) + pr_err("Unable to register bus client\n"); + + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY); + return 0; + +exit_deinit_clock: + __qseecom_deinit_clk(CLK_QSEE); + if ((qseecom.qsee.instance != qseecom.ce_drv.instance) && + (qseecom.support_pfe || qseecom.support_fde)) + __qseecom_deinit_clk(CLK_CE_DRV); +exit_destroy_ion_client: + if (qseecom.ce_info.fde) { + pce_info_use = qseecom.ce_info.fde; + for (i = 0; i < qseecom.ce_info.num_fde; i++) { + kzfree(pce_info_use->ce_pipe_entry); + pce_info_use++; + } + kfree(qseecom.ce_info.fde); + } + if (qseecom.ce_info.pfe) { + pce_info_use = qseecom.ce_info.pfe; + for (i = 0; i < qseecom.ce_info.num_pfe; i++) { + kzfree(pce_info_use->ce_pipe_entry); + pce_info_use++; + } + kfree(qseecom.ce_info.pfe); + } + ion_client_destroy(qseecom.ion_clnt); +exit_del_cdev: + cdev_del(&qseecom.cdev); +exit_destroy_device: + device_destroy(driver_class, qseecom_device_no); +exit_destroy_class: + class_destroy(driver_class); +exit_unreg_chrdev_region: + unregister_chrdev_region(qseecom_device_no, 1); + return rc; +} + +static int qseecom_remove(struct platform_device *pdev) +{ + struct qseecom_registered_kclient_list *kclient = NULL; + struct qseecom_registered_kclient_list *kclient_tmp = NULL; + unsigned long flags = 0; + int ret = 0; + int i; + struct qseecom_ce_pipe_entry *pce_entry; + struct qseecom_ce_info_use *pce_info_use; + + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY); + spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags); + + list_for_each_entry_safe(kclient, kclient_tmp, + &qseecom.registered_kclient_list_head, list) { + + /* Break the loop if client handle is NULL */ + if (!kclient->handle) { + list_del(&kclient->list); + kzfree(kclient); + break; + } + + list_del(&kclient->list); + mutex_lock(&app_access_lock); + ret = qseecom_unload_app(kclient->handle->dev, false); + mutex_unlock(&app_access_lock); + if (!ret) { + kzfree(kclient->handle->dev); + kzfree(kclient->handle); + kzfree(kclient); + } + } + + spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags); + + if (qseecom.qseos_version > QSEEE_VERSION_00) + qseecom_unload_commonlib_image(); + + if 
(qseecom.qsee_perf_client) + msm_bus_scale_client_update_request(qseecom.qsee_perf_client, + 0); + if (pdev->dev.platform_data != NULL) + msm_bus_scale_unregister_client(qseecom.qsee_perf_client); + + if (qseecom.support_bus_scaling) { + cancel_work_sync(&qseecom.bw_inactive_req_ws); + del_timer_sync(&qseecom.bw_scale_down_timer); + } + + if (qseecom.ce_info.fde) { + pce_info_use = qseecom.ce_info.fde; + for (i = 0; i < qseecom.ce_info.num_fde; i++) { + pce_entry = pce_info_use->ce_pipe_entry; + kfree(pce_entry); + pce_info_use++; + } + } + kfree(qseecom.ce_info.fde); + if (qseecom.ce_info.pfe) { + pce_info_use = qseecom.ce_info.pfe; + for (i = 0; i < qseecom.ce_info.num_pfe; i++) { + pce_entry = pce_info_use->ce_pipe_entry; + kfree(pce_entry); + pce_info_use++; + } + } + kfree(qseecom.ce_info.pfe); + + /* register client for bus scaling */ + if (pdev->dev.of_node) { + __qseecom_deinit_clk(CLK_QSEE); + if ((qseecom.qsee.instance != qseecom.ce_drv.instance) && + (qseecom.support_pfe || qseecom.support_fde)) + __qseecom_deinit_clk(CLK_CE_DRV); + } + + ion_client_destroy(qseecom.ion_clnt); + + cdev_del(&qseecom.cdev); + + device_destroy(driver_class, qseecom_device_no); + + class_destroy(driver_class); + + unregister_chrdev_region(qseecom_device_no, 1); + + return ret; +} + +static int qseecom_suspend(struct platform_device *pdev, pm_message_t state) +{ + int ret = 0; + struct qseecom_clk *qclk; + qclk = &qseecom.qsee; + + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND); + if (qseecom.no_clock_support) + return 0; + + mutex_lock(&qsee_bw_mutex); + mutex_lock(&clk_access_lock); + + if (qseecom.current_mode != INACTIVE) { + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, INACTIVE); + if (ret) + pr_err("Fail to scale down bus\n"); + else + qseecom.current_mode = INACTIVE; + } + + if (qclk->clk_access_cnt) { + if (qclk->ce_clk != NULL) + clk_disable_unprepare(qclk->ce_clk); + if (qclk->ce_core_clk != NULL) + clk_disable_unprepare(qclk->ce_core_clk); + if (qclk->ce_bus_clk != NULL) + clk_disable_unprepare(qclk->ce_bus_clk); + } + + del_timer_sync(&(qseecom.bw_scale_down_timer)); + qseecom.timer_running = false; + + mutex_unlock(&clk_access_lock); + mutex_unlock(&qsee_bw_mutex); + cancel_work_sync(&qseecom.bw_inactive_req_ws); + + return 0; +} + +static int qseecom_resume(struct platform_device *pdev) +{ + int mode = 0; + int ret = 0; + struct qseecom_clk *qclk; + qclk = &qseecom.qsee; + + if (qseecom.no_clock_support) + goto exit; + + mutex_lock(&qsee_bw_mutex); + mutex_lock(&clk_access_lock); + if (qseecom.cumulative_mode >= HIGH) + mode = HIGH; + else + mode = qseecom.cumulative_mode; + + if (qseecom.cumulative_mode != INACTIVE) { + ret = msm_bus_scale_client_update_request( + qseecom.qsee_perf_client, mode); + if (ret) + pr_err("Fail to scale up bus to %d\n", mode); + else + qseecom.current_mode = mode; + } + + if (qclk->clk_access_cnt) { + if (qclk->ce_core_clk != NULL) { + ret = clk_prepare_enable(qclk->ce_core_clk); + if (ret) { + pr_err("Unable to enable/prep CE core clk\n"); + qclk->clk_access_cnt = 0; + goto err; + } + } + if (qclk->ce_clk != NULL) { + ret = clk_prepare_enable(qclk->ce_clk); + if (ret) { + pr_err("Unable to enable/prep CE iface clk\n"); + qclk->clk_access_cnt = 0; + goto ce_clk_err; + } + } + if (qclk->ce_bus_clk != NULL) { + ret = clk_prepare_enable(qclk->ce_bus_clk); + if (ret) { + pr_err("Unable to enable/prep CE bus clk\n"); + qclk->clk_access_cnt = 0; + goto ce_bus_clk_err; + } + } + } + + if (qclk->clk_access_cnt || 
qseecom.cumulative_mode) { + qseecom.bw_scale_down_timer.expires = jiffies + + msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT); + mod_timer(&(qseecom.bw_scale_down_timer), + qseecom.bw_scale_down_timer.expires); + qseecom.timer_running = true; + } + + mutex_unlock(&clk_access_lock); + mutex_unlock(&qsee_bw_mutex); + goto exit; + +ce_bus_clk_err: + if (qclk->ce_clk) + clk_disable_unprepare(qclk->ce_clk); +ce_clk_err: + if (qclk->ce_core_clk) + clk_disable_unprepare(qclk->ce_core_clk); +err: + mutex_unlock(&clk_access_lock); + mutex_unlock(&qsee_bw_mutex); + ret = -EIO; +exit: + atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY); + return ret; +} +static struct of_device_id qseecom_match[] = { + { + .compatible = "qcom,qseecom", + }, + {} +}; + +static struct platform_driver qseecom_plat_driver = { + .probe = qseecom_probe, + .remove = qseecom_remove, + .suspend = qseecom_suspend, + .resume = qseecom_resume, + .driver = { + .name = "qseecom", + .owner = THIS_MODULE, + .of_match_table = qseecom_match, + }, +}; + +static int qseecom_init(void) +{ + return platform_driver_register(&qseecom_plat_driver); +} + +static void qseecom_exit(void) +{ + platform_driver_unregister(&qseecom_plat_driver); +} + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Qualcomm Secure Execution Environment Communicator"); + +module_init(qseecom_init); +module_exit(qseecom_exit); diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h new file mode 100644 index 000000000000..40426b749f60 --- /dev/null +++ b/drivers/misc/qseecom_kernel.h @@ -0,0 +1,44 @@ +/* Copyright (c) 2012-2013, 2016-2017 The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QSEECOM_KERNEL_H_ +#define __QSEECOM_KERNEL_H_ + +#include <linux/types.h> +#include <soc/qcom/scm.h> + +#define QSEECOM_ALIGN_SIZE 0x40 +#define QSEECOM_ALIGN_MASK (QSEECOM_ALIGN_SIZE - 1) +#define QSEECOM_ALIGN(x) \ + ((x + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK)) + +/* + * struct qseecom_handle - + * Handle to the qseecom device for kernel clients + * @sbuf - shared buffer pointer + * @sbbuf_len - shared buffer size + */ +struct qseecom_handle { + void *dev; /* in/out */ + unsigned char *sbuf; /* in/out */ + uint32_t sbuf_len; /* in/out */ +}; + +int qseecom_start_app(struct qseecom_handle **handle, + char *app_name, uint32_t size); +int qseecom_shutdown_app(struct qseecom_handle **handle); +int qseecom_send_command(struct qseecom_handle *handle, void *send_buf, + uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len); +int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high); +int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc); + +#endif /* __QSEECOM_KERNEL_H_ */ diff --git a/drivers/misc/qseecom_legacy.h b/drivers/misc/qseecom_legacy.h new file mode 100644 index 000000000000..35d6e0611e9a --- /dev/null +++ b/drivers/misc/qseecom_legacy.h @@ -0,0 +1,79 @@ +/* Qualcomm Secure Execution Environment Communicator (QSEECOM) driver + * + * Copyright (c) 2012, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QSEECOM_LEGACY_H_ +#define __QSEECOM_LEGACY_H_ + +#include <linux/types.h> + +#define TZ_SCHED_CMD_ID_REGISTER_LISTENER 0x04 + +enum tz_sched_cmd_type { + TZ_SCHED_CMD_INVALID = 0, + TZ_SCHED_CMD_NEW, /* New TZ Scheduler Command */ + TZ_SCHED_CMD_PENDING, /* Pending cmd...sched will restore stack */ + TZ_SCHED_CMD_COMPLETE, /* TZ sched command is complete */ + TZ_SCHED_CMD_MAX = 0x7FFFFFFF +}; + +enum tz_sched_cmd_status { + TZ_SCHED_STATUS_INCOMPLETE = 0, + TZ_SCHED_STATUS_COMPLETE, + TZ_SCHED_STATUS_MAX = 0x7FFFFFFF +}; +/* Command structure for initializing shared buffers */ +__packed struct qse_pr_init_sb_req_s { + /* First 4 bytes should always be command id */ + uint32_t pr_cmd; + /* Pointer to the physical location of sb buffer */ + uint32_t sb_ptr; + /* length of shared buffer */ + uint32_t sb_len; + uint32_t listener_id; +}; + +__packed struct qse_pr_init_sb_rsp_s { + /* First 4 bytes should always be command id */ + uint32_t pr_cmd; + /* Return code, 0 for success, Approp error code otherwise */ + int32_t ret; +}; + +/* + * struct QSEECom_command - QSECom command buffer + * @cmd_type: value from enum tz_sched_cmd_type + * @sb_in_cmd_addr: points to physical location of command + * buffer + * @sb_in_cmd_len: length of command buffer + */ +__packed struct qseecom_command { + uint32_t cmd_type; + uint8_t *sb_in_cmd_addr; + uint32_t sb_in_cmd_len; +}; + +/* + * struct QSEECom_response - QSECom response buffer + * @cmd_status: value from enum tz_sched_cmd_status + * @sb_in_rsp_addr: points to physical location of response + * buffer + * @sb_in_rsp_len: length of command response + */ +__packed struct qseecom_response { + uint32_t cmd_status; + uint8_t *sb_in_rsp_addr; + uint32_t sb_in_rsp_len; +}; + +#endif /* __QSEECOM_LEGACY_H_ */ diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index cda32366cc7e..c52c8ccc90b7 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -345,13 +345,13 @@ static int uid_cputime_show(struct seq_file *m, void *v) uid_entry->active_utime = 0; } - read_lock(&tasklist_lock); + rcu_read_lock(); do_each_thread(temp, task) { uid = from_kuid_munged(user_ns, task_uid(task)); if (!uid_entry || uid_entry->uid != uid) uid_entry = find_or_register_uid(uid); if (!uid_entry) { - read_unlock(&tasklist_lock); + rcu_read_unlock(); rt_mutex_unlock(&uid_lock); pr_err("%s: failed to find the uid_entry for uid %d\n", __func__, uid); @@ -361,7 +361,7 @@ static int uid_cputime_show(struct seq_file *m, void *v) uid_entry->active_utime += utime; uid_entry->active_stime += stime; } while_each_thread(temp, task); - read_unlock(&tasklist_lock); + rcu_read_unlock(); hash_for_each(hash_table, bkt, uid_entry, hash) { cputime_t total_utime = uid_entry->utime + |
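The ioctl cases handled earlier in qseecom_ioctl() repeat one guard/teardown pattern: validate that the handle is the right type for the command, serialize on app_access_lock so only one client is in the SCM path at a time, bump ioctl_count so the release/abort paths can wait for in-flight requests, call the worker, then unwind in reverse order. A distilled sketch of that pattern follows; the wrapper name and the idea of factoring it into a helper are hypothetical, not part of this patch.

/* Hypothetical helper illustrating the per-ioctl pattern used in the cases above. */
static int qseecom_run_locked(struct qseecom_dev_handle *data,
			      void __user *argp,
			      int (*worker)(struct qseecom_dev_handle *,
					    void __user *))
{
	int ret;

	if (data->type != QSEECOM_CLIENT_APP || data->client.app_id == 0)
		return -EINVAL;		/* wrong kind of handle for this command */

	mutex_lock(&app_access_lock);	/* one client in the SCM path at a time */
	atomic_inc(&data->ioctl_count);	/* lets release()/abort wait for us */
	ret = worker(data, argp);	/* e.g. qseecom_qteec_close_session(data, argp) */
	atomic_dec(&data->ioctl_count);
	wake_up_all(&data->abort_wq);	/* unblock anyone aborting this handle */
	mutex_unlock(&app_access_lock);
	return ret;
}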
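qseecom_kernel.h above declares the in-kernel client API: qseecom_start_app(), qseecom_send_command(), qseecom_shutdown_app() and qseecom_set_bandwidth(), plus the QSEECOM_ALIGN() helper that rounds a length up to the 0x40-byte shared-buffer granularity (for example, QSEECOM_ALIGN(8) is 0x40). A minimal sketch of how a kernel client might drive this API is shown below; the app name "sampleapp", the request/response structs, and the "request at offset 0, response right after it" buffer layout are illustrative assumptions, not something this patch defines.

#include <linux/errno.h>
#include "qseecom_kernel.h"

/* Hypothetical request/response layout for an illustrative trusted app. */
struct sample_cmd_req { uint32_t cmd_id; uint32_t data; };
struct sample_cmd_rsp { int32_t status; };

static int sample_send_cmd(void)
{
	struct qseecom_handle *handle = NULL;
	struct sample_cmd_req *req;
	struct sample_cmd_rsp *rsp;
	uint32_t req_len = QSEECOM_ALIGN(sizeof(*req));	/* 8  -> 0x40 */
	uint32_t rsp_len = QSEECOM_ALIGN(sizeof(*rsp));	/* 4  -> 0x40 */
	int ret;

	/* Load the TA and get a shared buffer large enough for req + rsp. */
	ret = qseecom_start_app(&handle, "sampleapp", req_len + rsp_len);
	if (ret)
		return ret;

	/* Assumed convention: request at offset 0, response right after it. */
	req = (struct sample_cmd_req *)handle->sbuf;
	rsp = (struct sample_cmd_rsp *)(handle->sbuf + req_len);
	req->cmd_id = 0x1;
	req->data = 42;

	ret = qseecom_send_command(handle, req, req_len, rsp, rsp_len);
	if (!ret && rsp->status)
		ret = -EIO;

	qseecom_shutdown_app(&handle);
	return ret;
}

If the command is crypto-heavy, the call can additionally be bracketed with qseecom_set_bandwidth(handle, true) and qseecom_set_bandwidth(handle, false) to vote for higher bus bandwidth while it runs.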
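qseecom_legacy.h above keeps the pre-QSEOS shared-buffer command format. Purely as an illustration (the buffer address, length, and listener id passed in are made-up values, and the helper itself is not part of this patch), a TZ_SCHED_CMD_ID_REGISTER_LISTENER request would be populated like this:

/* Illustrative fill of the legacy shared-buffer registration request. */
static void sample_fill_legacy_req(struct qse_pr_init_sb_req_s *req,
				   uint32_t sb_phys, uint32_t sb_len,
				   uint32_t listener_id)
{
	req->pr_cmd      = TZ_SCHED_CMD_ID_REGISTER_LISTENER; /* first 4 bytes: command id */
	req->sb_ptr      = sb_phys;	/* physical address of the shared buffer */
	req->sb_len      = sb_len;	/* shared buffer size in bytes */
	req->listener_id = listener_id;	/* service id this listener handles */
}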
