Diffstat (limited to 'drivers')
-rw-r--r--  drivers/char/adsprpc.c | 12
-rw-r--r--  drivers/char/diag/diag_masks.c | 14
-rw-r--r--  drivers/char/diag/diag_memorydevice.c | 31
-rw-r--r--  drivers/char/diag/diag_mux.c | 7
-rw-r--r--  drivers/char/diag/diagchar_core.c | 24
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.c | 55
-rw-r--r--  drivers/char/diag/diagfwd_peripheral.h | 3
-rw-r--r--  drivers/char/rdbg.c | 18
-rw-r--r--  drivers/char/virtio_console.c | 2
-rw-r--r--  drivers/cpufreq/s3c2416-cpufreq.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 7
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 264
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 7
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c | 15
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h | 9
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 113
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c | 73
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 18
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder.c | 132
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys.h | 4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c | 54
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c | 111
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_cdm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_hw_mdss.h | 8
-rw-r--r--  drivers/gpu/drm/msm/sde_edid_parser.c | 33
-rw-r--r--  drivers/gpu/drm/msm/sde_edid_parser.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 1
-rw-r--r--  drivers/gpu/msm/kgsl.c | 8
-rw-r--r--  drivers/gpu/msm/kgsl_pwrscale.c | 10
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 9
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 36
-rw-r--r--  drivers/iommu/amd_iommu_v2.c | 2
-rw-r--r--  drivers/iommu/intel-iommu.c | 2
-rw-r--r--  drivers/iommu/iommu.c | 37
-rw-r--r--  drivers/media/platform/msm/ais/sensor/flash/msm_flash.c | 10
-rw-r--r--  drivers/media/platform/msm/ais/sensor/ois/msm_ois.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h | 1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 9
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 51
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c | 40
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c | 3
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 10
-rw-r--r--  drivers/misc/qseecom.c | 126
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 17
-rw-r--r--  drivers/mtd/bcm47xxpart.c | 42
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 13
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 7
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 24
-rw-r--r--  drivers/net/ethernet/sfc/falcon.c | 10
-rw-r--r--  drivers/net/virtio_net.c | 10
-rw-r--r--  drivers/net/vxlan.c | 2
-rw-r--r--  drivers/net/wireless/cnss2/main.c | 180
-rw-r--r--  drivers/net/wireless/cnss2/main.h | 4
-rw-r--r--  drivers/net/wireless/cnss2/pci.c | 28
-rw-r--r--  drivers/net/wireless/cnss2/qmi.c | 6
-rw-r--r--  drivers/net/wireless/cnss2/wlan_firmware_service_v01.c | 53
-rw-r--r--  drivers/net/wireless/cnss2/wlan_firmware_service_v01.h | 16
-rw-r--r--  drivers/net/xen-netfront.c | 2
-rw-r--r--  drivers/platform/msm/Kconfig | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_flt.c | 64
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c | 91
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_i.h | 30
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_intf.c | 12
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_nat.c | 70
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_rt.c | 87
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_utils.c | 86
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 36
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 9
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 85
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c | 89
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 28
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_intf.c | 18
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_nat.c | 74
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 121
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c | 4
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c | 12
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 1
-rw-r--r--  drivers/power/qcom/msm-core.c | 4
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c | 8
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 119
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r--  drivers/scsi/sd.c | 3
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/spi/spi-davinci.c | 4
-rw-r--r--  drivers/tty/serial/msm_serial_hs.c | 2
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 76
-rw-r--r--  drivers/usb/pd/policy_engine.c | 21
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 24
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c | 34
-rw-r--r--  drivers/video/fbdev/msm/msm_mdss_io_8974.c | 3
-rw-r--r--  drivers/watchdog/bcm_kona_wdt.c | 3
-rw-r--r--  drivers/xen/swiotlb-xen.c | 5
114 files changed, 2130 insertions(+), 1054 deletions(-)
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13016f32b344..a84172106e0f 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -212,6 +212,7 @@ struct fastrpc_channel_ctx {
struct device *dev;
struct fastrpc_session_ctx session[NUM_SESSIONS];
struct completion work;
+ struct completion workport;
struct notifier_block nb;
struct kref kref;
int channel;
@@ -1474,6 +1475,7 @@ static void fastrpc_init(struct fastrpc_apps *me)
me->channel = &gcinfo[0];
for (i = 0; i < NUM_CHANNELS; i++) {
init_completion(&me->channel[i].work);
+ init_completion(&me->channel[i].workport);
me->channel[i].sesscount = 0;
}
}
@@ -2135,7 +2137,7 @@ void fastrpc_glink_notify_state(void *handle, const void *priv, unsigned event)
switch (event) {
case GLINK_CONNECTED:
link->port_state = FASTRPC_LINK_CONNECTED;
- complete(&me->channel[cid].work);
+ complete(&me->channel[cid].workport);
break;
case GLINK_LOCAL_DISCONNECTED:
link->port_state = FASTRPC_LINK_DISCONNECTED;
@@ -2285,8 +2287,7 @@ static void fastrpc_glink_close(void *chan, int cid)
return;
link = &gfa.channel[cid].link;
- if (link->port_state == FASTRPC_LINK_CONNECTED ||
- link->port_state == FASTRPC_LINK_CONNECTING) {
+ if (link->port_state == FASTRPC_LINK_CONNECTED) {
link->port_state = FASTRPC_LINK_DISCONNECTING;
glink_close(chan);
}
@@ -2484,8 +2485,9 @@ static int fastrpc_channel_open(struct fastrpc_file *fl)
if (err)
goto bail;
- VERIFY(err, wait_for_completion_timeout(&me->channel[cid].work,
- RPC_TIMEOUT));
+ VERIFY(err,
+ wait_for_completion_timeout(&me->channel[cid].workport,
+ RPC_TIMEOUT));
if (err) {
me->channel[cid].chan = 0;
goto bail;
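
The adsprpc hunks above split the single per-channel completion into two, so a GLINK port-state event can no longer wake a waiter that was expecting an RPC channel event. A minimal, hypothetical sketch of that pattern (the struct and function names below are illustrative, not the driver's):

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct chan_state {
        struct completion work;      /* RPC response arrived */
        struct completion workport;  /* GLINK port reached CONNECTED */
    };

    static void chan_init(struct chan_state *c)
    {
        init_completion(&c->work);
        init_completion(&c->workport);
    }

    /* The link-state callback completes only the port completion... */
    static void on_port_connected(struct chan_state *c)
    {
        complete(&c->workport);
    }

    /* ...so channel open, which waits on workport, cannot be satisfied
     * early by an unrelated complete(&c->work) from the RPC path. */
    static int chan_wait_connected(struct chan_state *c, unsigned long timeout)
    {
        if (!wait_for_completion_timeout(&c->workport, timeout))
            return -ETIMEDOUT;
        return 0;
    }
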
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index 20e617ed0770..e37609abf7bf 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -796,7 +796,9 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -856,7 +858,9 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -950,7 +954,9 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -997,7 +1003,9 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
memcpy(dest_buf, &header, sizeof(header));
write_len += sizeof(header);
@@ -1251,7 +1259,9 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, req->equip_id);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -1302,7 +1312,9 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -1966,9 +1978,11 @@ void diag_send_updates_peripheral(uint8_t peripheral)
diag_send_feature_mask_update(peripheral);
if (driver->time_sync_enabled)
diag_send_time_sync_update(peripheral);
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
diag_send_event_mask_update(peripheral);
+ mutex_unlock(&driver->md_session_lock);
diag_send_real_time_update(peripheral,
driver->real_time_mode[DIAG_LOCAL_PROC]);
diag_send_peripheral_buffering_mode(
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 06b83f5230bf..a27f12883c8d 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -129,37 +129,6 @@ void diag_md_close_all()
diag_ws_reset(DIAG_WS_MUX);
}
-static int diag_md_get_peripheral(int ctxt)
-{
- int peripheral;
-
- if (driver->num_pd_session) {
- peripheral = GET_PD_CTXT(ctxt);
- switch (peripheral) {
- case UPD_WLAN:
- case UPD_AUDIO:
- case UPD_SENSORS:
- break;
- case DIAG_ID_MPSS:
- case DIAG_ID_LPASS:
- case DIAG_ID_CDSP:
- default:
- peripheral =
- GET_BUF_PERIPHERAL(ctxt);
- if (peripheral > NUM_PERIPHERALS)
- peripheral = -EINVAL;
- break;
- }
- } else {
- /* Account for Apps data as well */
- peripheral = GET_BUF_PERIPHERAL(ctxt);
- if (peripheral > NUM_PERIPHERALS)
- peripheral = -EINVAL;
- }
-
- return peripheral;
-}
-
int diag_md_write(int id, unsigned char *buf, int len, int ctx)
{
int i;
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index d6f6ea7af8ea..8cc803eef552 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -153,12 +153,15 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
upd = PERIPHERAL_CDSP;
break;
case UPD_WLAN:
- if (!driver->num_pd_session)
+ if (!driver->pd_logging_mode[0])
upd = PERIPHERAL_MODEM;
break;
case UPD_AUDIO:
+ if (!driver->pd_logging_mode[1])
+ upd = PERIPHERAL_LPASS;
+ break;
case UPD_SENSORS:
- if (!driver->num_pd_session)
+ if (!driver->pd_logging_mode[2])
upd = PERIPHERAL_LPASS;
break;
default:
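
With this change, a user-PD (UPD_*) buffer is routed according to that PD's own logging mode rather than the aggregate num_pd_session count. A hedged sketch of the mapping, reusing the UPD_*/PERIPHERAL_* constants and the pd_logging_mode[0..2] indices (WLAN, AUDIO, SENSORS) exactly as they appear in the hunk:

    /* Return the session owner for a user-PD: the PD itself when it has
     * its own logging session, otherwise its host peripheral. */
    static int upd_session_owner(int upd, const int pd_logging_mode[3])
    {
        switch (upd) {
        case UPD_WLAN:
            return pd_logging_mode[0] ? UPD_WLAN : PERIPHERAL_MODEM;
        case UPD_AUDIO:
            return pd_logging_mode[1] ? UPD_AUDIO : PERIPHERAL_LPASS;
        case UPD_SENSORS:
            return pd_logging_mode[2] ? UPD_SENSORS : PERIPHERAL_LPASS;
        default:
            return -EINVAL;
        }
    }
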
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 5ae3e4defd0d..afaedc99a4e7 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -456,6 +456,7 @@ static void diag_close_logging_process(const int pid)
{
int i, j;
int session_mask;
+ uint32_t p_mask;
struct diag_md_session_t *session_info = NULL;
struct diag_logging_mode_param_t params;
@@ -475,6 +476,9 @@ static void diag_close_logging_process(const int pid)
session_mask = session_info->peripheral_mask;
diag_md_session_close(session_info);
+ p_mask =
+ diag_translate_kernel_to_user_mask(session_mask);
+
for (i = 0; i < NUM_MD_SESSIONS; i++)
if (MD_PERIPHERAL_MASK(i) & session_mask)
diag_mux_close_peripheral(DIAG_LOCAL_PROC, i);
@@ -482,19 +486,17 @@ static void diag_close_logging_process(const int pid)
params.req_mode = USB_MODE;
params.mode_param = 0;
params.pd_mask = 0;
- params.peripheral_mask =
- diag_translate_kernel_to_user_mask(session_mask);
+ params.peripheral_mask = p_mask;
if (driver->num_pd_session > 0) {
- for (i = UPD_WLAN; ((i < NUM_MD_SESSIONS) &&
- (session_mask & MD_PERIPHERAL_MASK(i)));
- i++) {
- j = i - UPD_WLAN;
- driver->pd_session_clear[j] = 1;
- driver->pd_logging_mode[j] = 0;
- driver->num_pd_session -= 1;
- params.pd_mask =
- diag_translate_kernel_to_user_mask(session_mask);
+ for (i = UPD_WLAN; (i < NUM_MD_SESSIONS); i++) {
+ if (session_mask & MD_PERIPHERAL_MASK(i)) {
+ j = i - UPD_WLAN;
+ driver->pd_session_clear[j] = 1;
+ driver->pd_logging_mode[j] = 0;
+ driver->num_pd_session -= 1;
+ params.pd_mask = p_mask;
+ }
}
}
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index e209039bed5a..9ac1ad62ffe0 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -216,6 +216,45 @@ static int check_bufsize_for_encoding(struct diagfwd_buf_t *buf, uint32_t len)
return buf->len;
}
+int diag_md_get_peripheral(int ctxt)
+{
+ int peripheral;
+
+ if (driver->num_pd_session) {
+ peripheral = GET_PD_CTXT(ctxt);
+ switch (peripheral) {
+ case UPD_WLAN:
+ if (!driver->pd_logging_mode[0])
+ peripheral = PERIPHERAL_MODEM;
+ break;
+ case UPD_AUDIO:
+ if (!driver->pd_logging_mode[1])
+ peripheral = PERIPHERAL_LPASS;
+ break;
+ case UPD_SENSORS:
+ if (!driver->pd_logging_mode[2])
+ peripheral = PERIPHERAL_LPASS;
+ break;
+ case DIAG_ID_MPSS:
+ case DIAG_ID_LPASS:
+ case DIAG_ID_CDSP:
+ default:
+ peripheral =
+ GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ break;
+ }
+ } else {
+ /* Account for Apps data as well */
+ peripheral = GET_BUF_PERIPHERAL(ctxt);
+ if (peripheral > NUM_PERIPHERALS)
+ peripheral = -EINVAL;
+ }
+
+ return peripheral;
+}
+
static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
struct diagfwd_buf_t *buf, int len)
{
@@ -245,13 +284,15 @@ static void diagfwd_data_process_done(struct diagfwd_info *fwd_info,
mutex_lock(&driver->hdlc_disable_mutex);
mutex_lock(&fwd_info->data_mutex);
- peripheral = GET_PD_CTXT(buf->ctxt);
- if (peripheral == DIAG_ID_MPSS)
- peripheral = PERIPHERAL_MODEM;
- if (peripheral == DIAG_ID_LPASS)
- peripheral = PERIPHERAL_LPASS;
- if (peripheral == DIAG_ID_CDSP)
- peripheral = PERIPHERAL_CDSP;
+ peripheral = diag_md_get_peripheral(buf->ctxt);
+ if (peripheral < 0) {
+ pr_err("diag:%s:%d invalid peripheral = %d\n",
+ __func__, __LINE__, peripheral);
+ mutex_unlock(&fwd_info->data_mutex);
+ mutex_unlock(&driver->hdlc_disable_mutex);
+ diag_ws_release();
+ return;
+ }
session_info =
diag_md_session_get_peripheral(peripheral);
diff --git a/drivers/char/diag/diagfwd_peripheral.h b/drivers/char/diag/diagfwd_peripheral.h
index 037eeebdeb35..eda70dcfdcd9 100644
--- a/drivers/char/diag/diagfwd_peripheral.h
+++ b/drivers/char/diag/diagfwd_peripheral.h
@@ -105,6 +105,9 @@ void diagfwd_early_open(uint8_t peripheral);
void diagfwd_late_open(struct diagfwd_info *fwd_info);
void diagfwd_close(uint8_t peripheral, uint8_t type);
+
+int diag_md_get_peripheral(int ctxt);
+
int diagfwd_register(uint8_t transport, uint8_t peripheral, uint8_t type,
void *ctxt, struct diag_peripheral_ops *ops,
struct diagfwd_info **fwd_ctxt);
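
Because diag_md_get_peripheral() now validates the context and can return -EINVAL, callers are expected to check the result before using it as an index, along the lines of the diagfwd_data_process_done() hunk above (the snippet below simply restates that pattern):

    int peripheral;

    peripheral = diag_md_get_peripheral(buf->ctxt);
    if (peripheral < 0) {
        pr_err("diag: %s: invalid peripheral for ctxt %d\n",
               __func__, buf->ctxt);
        return peripheral;   /* propagate -EINVAL */
    }
    session_info = diag_md_session_get_peripheral(peripheral);
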
diff --git a/drivers/char/rdbg.c b/drivers/char/rdbg.c
index 0823ed78485e..8161d77ca194 100644
--- a/drivers/char/rdbg.c
+++ b/drivers/char/rdbg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
#include <linux/uaccess.h>
#include <linux/interrupt.h>
-#define SMP2P_NUM_PROCS 8
+#define SMP2P_NUM_PROCS 16
#define MAX_RETRIES 20
#define SM_VERSION 1
@@ -146,9 +146,17 @@ static struct processor_specific_info proc_info[SMP2P_NUM_PROCS] = {
{"rdbg_adsp", SMEM_LC_DEBUGGER, 16*1024}, /*ADSP*/
{0}, /*SMP2P_RESERVED_PROC_1*/
{"rdbg_wcnss", 0, 0}, /*WCNSS*/
- {0}, /*SMP2P_RESERVED_PROC_2*/
- {0}, /*SMP2P_POWER_PROC*/
- {0} /*SMP2P_REMOTE_MOCK_PROC*/
+ {"rdbg_cdsp", SMEM_LC_DEBUGGER, 16*1024}, /*CDSP*/
+ {NULL}, /*SMP2P_POWER_PROC*/
+ {NULL}, /*SMP2P_TZ_PROC*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL}, /*EMPTY*/
+ {NULL} /*SMP2P_REMOTE_MOCK_PROC*/
};
static int smq_blockmap_get(struct smq_block_map *block_map,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 31e8ae916ba0..be0b09a0fb44 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1864,7 +1864,7 @@ static void config_work_handler(struct work_struct *work)
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
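
The one-line virtio_console fix matters because container_of() recovers the parent structure purely from the named member; naming the wrong work_struct member silently produces a pointer offset by the distance between the two members. A self-contained illustration with hypothetical types (the macro below is a simplified version of the kernel's, which additionally type-checks ptr):

    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct { int pending; };

    struct ports_device {
        struct work_struct control_work;
        struct work_struct config_work;
    };

    /* Handler for config_work: it must name the member the work was queued
     * on; naming control_work here would yield a shifted, bogus pointer. */
    static struct ports_device *from_config_work(struct work_struct *w)
    {
        return container_of(w, struct ports_device, config_work);
    }
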
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index d6d425773fa4..5b2db3c6568f 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
rate = clk_get_rate(s3c_freq->hclk);
if (rate < 133 * 1000 * 1000) {
pr_err("cpufreq: HCLK not at 133MHz\n");
- clk_put(s3c_freq->hclk);
ret = -EINVAL;
goto err_armclk;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 25a3e2485cc2..2bc17a907ecf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -124,6 +124,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index b92139e9b9d8..b5c64edeb668 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,7 +113,11 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
- bool DisableP2A;
+ enum {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+ } config_mode;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 6c021165ca67..498a94069e6b 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
return ret;
}
+static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+{
+ struct device_node *np = dev->pdev->dev.of_node;
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, jregd0, jregd1;
+
+ /* Defaults */
+ ast->config_mode = ast_use_defaults;
+ *scu_rev = 0xffffffff;
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
+ scu_rev)) {
+ /* We do, disable P2A access */
+ ast->config_mode = ast_use_dt;
+ DRM_INFO("Using device-tree for configuration\n");
+ return;
+ }
+
+ /* Not all families have a P2A bridge */
+ if (dev->pdev->device != PCI_CHIP_AST2000)
+ return;
+
+ /*
+ * The BMC will set SCU 0x40 D[12] to 1 if the P2A bridge
+ * is disabled. We force using P2A if the VGA-only mode bit
+ * D[7] is set.
+ */
+ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+ /* Double check it's actually working */
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF) {
+ /* P2A works, grab silicon revision */
+ ast->config_mode = ast_use_p2a;
+
+ DRM_INFO("Using P2A bridge for configuration\n");
+
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ *scu_rev = ast_read32(ast, 0x1207c);
+ return;
+ }
+ }
+
+ /* We have a P2A bridge but it's disabled */
+ DRM_INFO("P2A bridge disabled, using default configuration\n");
+}
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = dev->dev_private;
- uint32_t data, jreg;
+ uint32_t jreg, scu_rev;
+
+ /*
+ * If VGA isn't enabled, we need to enable now or subsequent
+ * access to the scratch registers will fail. We also inform
+ * our caller that it needs to POST the chip
+ * (Assumption: VGA not enabled -> need to POST)
+ */
+ if (!ast_is_vga_enabled(dev)) {
+ ast_enable_vga(dev);
+ DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+ *need_post = true;
+ } else
+ *need_post = false;
+
+
+ /* Enable extended register access */
+ ast_enable_mmio(dev);
ast_open_key(ast);
+ /* Find out whether P2A works or whether to use device-tree */
+ ast_detect_config_mode(dev, &scu_rev);
+
+ /* Identify chipset */
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
@@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
- uint32_t data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
- data = ast_read32(ast, 0x1207c);
- switch (data & 0x0300) {
+ switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
DRM_INFO("AST 1100 detected\n");
@@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
}
}
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail. We also inform
- * our caller that it needs to POST the chip
- * (Assumption: VGA not enabled -> need to POST)
- */
- if (!ast_is_vga_enabled(dev)) {
- ast_enable_vga(dev);
- ast_enable_mmio(dev);
- DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
- *need_post = true;
- } else
- *need_post = false;
-
- /* Check P2A Access */
- ast->DisableP2A = true;
- data = ast_read32(ast, 0xf004);
- if (data != 0xFFFFFFFF)
- ast->DisableP2A = false;
-
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- if (ast->DisableP2A == false) {
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
- ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
- ast->support_wide_screen = true;
- }
+ if (ast->chip == AST2300 &&
+ (scu_rev & 0x300) == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 &&
+ (scu_rev & 0x300) == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
}
break;
}
@@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
+ struct device_node *np = dev->pdev->dev.of_node;
struct ast_private *ast = dev->dev_private;
- uint32_t data, data2;
- uint32_t denum, num, div, ref_pll;
+ uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
+ uint32_t denum, num, div, ref_pll, dsel;
- if (ast->DisableP2A)
- {
+ switch (ast->config_mode) {
+ case ast_use_dt:
+ /*
+ * If some properties are missing, use reasonable
+ * defaults for AST2400
+ */
+ if (of_property_read_u32(np, "aspeed,mcr-configuration",
+ &mcr_cfg))
+ mcr_cfg = 0x00000577;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
+ &mcr_scu_mpll))
+ mcr_scu_mpll = 0x000050C0;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
+ &mcr_scu_strap))
+ mcr_scu_strap = 0;
+ break;
+ case ast_use_p2a:
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+ mcr_scu_mpll = ast_read32(ast, 0x10120);
+ mcr_scu_strap = ast_read32(ast, 0x10170);
+ break;
+ case ast_use_defaults:
+ default:
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
ast->mclk = 396;
+ return 0;
}
- else
- {
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
- ast->dram_bus_width = 16;
- else
- ast->dram_bus_width = 32;
- if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
- case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (data & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- case 8:
- if (data & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
- break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
- break;
- }
- }
+ if (mcr_cfg & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
- case 3:
- div = 0x4;
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (mcr_cfg & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
break;
- case 2:
+ default:
case 1:
- div = 0x2;
+ ast->dram_type = AST_DRAM_1Gx16;
break;
- default:
- div = 0x1;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (mcr_cfg & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (mcr_cfg & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
break;
}
- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
+
+ if (mcr_scu_strap & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = mcr_scu_mpll & 0x1f;
+ num = (mcr_scu_mpll & 0x3fe0) >> 5;
+ dsel = (mcr_scu_mpll & 0xc000) >> 14;
+ switch (dsel) {
+ case 3:
+ div = 0x4;
+ break;
+ case 2:
+ case 1:
+ div = 0x2;
+ break;
+ default:
+ div = 0x1;
+ break;
+ }
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
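
Both the P2A and device-tree paths above feed the same MPLL decode. A small sketch of that bitfield extraction, checked against the AST2400 default value used in the device-tree fallback (0x000050C0):

    /* Decode the SCU MPLL parameter fields as in ast_get_dram_info(). */
    static void decode_mpll(unsigned int mcr_scu_mpll)
    {
        unsigned int denum = mcr_scu_mpll & 0x1f;            /* bits [4:0]   */
        unsigned int num   = (mcr_scu_mpll & 0x3fe0) >> 5;   /* bits [13:5]  */
        unsigned int dsel  = (mcr_scu_mpll & 0xc000) >> 14;  /* bits [15:14] */
        unsigned int div   = (dsel == 3) ? 4 : (dsel ? 2 : 1);

        /* For mcr_scu_mpll == 0x000050C0: denum = 0, num = 134,
         * dsel = 1, div = 2. */
        (void)denum; (void)num; (void)div;
    }
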
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 270e8fb2803f..c7c58becb25d 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
- if (ast->DisableP2A == false)
- {
+ if (ast->config_mode == ast_use_p2a) {
if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
ast_init_3rdtx(dev);
- }
- else
- {
+ } else {
if (ast->tx_chip_type != AST_TX_NONE)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
index b30e894ebc4d..1b295949122b 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -556,13 +556,13 @@ static const struct file_operations sde_hdmi_hdcp_state_fops = {
.read = _sde_hdmi_hdcp_state_read,
};
-static u64 _sde_hdmi_clip_valid_pclk(struct drm_display_mode *mode, u64 pclk_in)
+static u64 _sde_hdmi_clip_valid_pclk(struct hdmi *hdmi, u64 pclk_in)
{
u32 pclk_delta, pclk;
u64 pclk_clip = pclk_in;
/* as per standard, 0.5% of deviation is allowed */
- pclk = mode->clock * HDMI_KHZ_TO_HZ;
+ pclk = hdmi->pixclock;
pclk_delta = pclk * 5 / 1000;
if (pclk_in < (pclk - pclk_delta))
@@ -700,7 +700,6 @@ static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
{
struct hdmi *hdmi = display->ctrl.ctrl;
- struct drm_display_mode *current_mode = &display->mode;
u64 cur_pclk, dst_pclk;
u64 clip_pclk;
int rc = 0;
@@ -725,7 +724,7 @@ static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
dst_pclk = cur_pclk * (1000000000 + ppm);
do_div(dst_pclk, 1000000000);
- clip_pclk = _sde_hdmi_clip_valid_pclk(current_mode, dst_pclk);
+ clip_pclk = _sde_hdmi_clip_valid_pclk(hdmi, dst_pclk);
/* update pclk */
if (clip_pclk != cur_pclk) {
@@ -2126,9 +2125,13 @@ static int sde_hdmi_tx_check_capability(struct sde_hdmi *sde_hdmi)
}
}
- SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s>\n", __func__,
+ if (sde_hdmi->hdmi_tx_major_version >= HDMI_TX_VERSION_4)
+ sde_hdmi->dc_feature_supported = true;
+
+ SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s, Deep Color:%s>\n", __func__,
hdmi_disabled ? "OFF" : "ON",
- hdcp_disabled ? "OFF" : "ON");
+ hdcp_disabled ? "OFF" : "ON",
+ sde_hdmi->dc_feature_supported ? "ON" : "OFF");
if (hdmi_disabled) {
DEV_ERR("%s: HDMI disabled\n", __func__);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
index 6b9bcfec031b..743d34d05e57 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -33,6 +33,9 @@
#include "sde_hdmi_util.h"
#include "sde_hdcp.h"
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
#ifdef HDMI_DEBUG_ENABLE
#define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
#else
@@ -110,6 +113,8 @@ enum hdmi_tx_feature_type {
* @client_notify_pending: If there is client notification pending.
* @irq_domain: IRQ domain structure.
* @pll_update_enable: if it's allowed to update HDMI PLL ppm.
+ * @dc_enable: If deep color is enabled. Only DC_30 so far.
+ * @dc_feature_supported: If deep color feature is supported.
* @notifier: CEC notifider to convey physical address information.
* @root: Debug fs root entry.
*/
@@ -161,6 +166,8 @@ struct sde_hdmi {
struct irq_domain *irq_domain;
struct cec_notifier *notifier;
bool pll_update_enable;
+ bool dc_enable;
+ bool dc_feature_supported;
struct delayed_work hdcp_cb_work;
struct dss_io_data io[HDMI_TX_MAX_IO];
@@ -188,6 +195,8 @@ enum hdmi_tx_scdc_access_type {
#define HDMI_KHZ_TO_HZ 1000
#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2
+#define HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
/* Maximum pixel clock rates for hdmi tx */
#define HDMI_DEFAULT_MAX_PCLK_RATE 148500
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 24d2320683e4..a4c3c2e7ce46 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -345,7 +345,9 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
return 0;
}
- if (mode->clock > HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ) {
+ /* use actual clock instead of mode clock */
+ if (hdmi->pixclock >
+ HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ * HDMI_KHZ_TO_HZ) {
scrambler_on = true;
tmds_clock_ratio = 1;
} else {
@@ -402,6 +404,38 @@ static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
return rc;
}
+static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi)
+{
+ struct drm_connector *connector = hdmi->connector;
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ u32 hdmi_ctrl_reg, vbi_pkt_reg;
+
+ SDE_DEBUG("Deep Color: %s\n", display->dc_enable ? "On" : "Off");
+
+ if (display->dc_enable) {
+ hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+ /* GC CD override */
+ hdmi_ctrl_reg |= BIT(27);
+
+ /* enable deep color for RGB888/YUV444/YUV420 30 bits */
+ hdmi_ctrl_reg |= BIT(24);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+ /* Enable GC_CONT and GC_SEND in General Control Packet
+ * (GCP) register so that deep color data is
+ * transmitted to the sink on every frame, allowing
+ * the sink to decode the data correctly.
+ *
+ * GC_CONT: 0x1 - Send GCP on every frame
+ * GC_SEND: 0x1 - Enable GCP Transmission
+ */
+ vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+ vbi_pkt_reg |= BIT(5) | BIT(4);
+ hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+ }
+}
+
static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
@@ -543,6 +577,10 @@ static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
struct hdmi_avi_infoframe info;
drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+
+ if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ info.colorspace = HDMI_COLORSPACE_YUV420;
+
hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
@@ -660,31 +698,77 @@ static inline void _sde_hdmi_save_mode(struct hdmi *hdmi,
drm_mode_copy(&display->mode, mode);
}
+static u32 _sde_hdmi_choose_best_format(struct hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ /*
+ * choose priority:
+ * 1. DC + RGB
+ * 2. DC + YUV
+ * 3. RGB
+ * 4. YUV
+ */
+ int dc_format;
+ struct drm_connector *connector = hdmi->connector;
+
+ dc_format = sde_hdmi_sink_dc_support(connector, mode);
+ if (dc_format & MSM_MODE_FLAG_RGB444_DC_ENABLE)
+ return (MSM_MODE_FLAG_COLOR_FORMAT_RGB444
+ | MSM_MODE_FLAG_RGB444_DC_ENABLE);
+ else if (dc_format & MSM_MODE_FLAG_YUV420_DC_ENABLE)
+ return (MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420
+ | MSM_MODE_FLAG_YUV420_DC_ENABLE);
+ else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB)
+ return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+ else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV)
+ return MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420;
+
+ SDE_ERROR("Can't get available best display format\n");
+
+ return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+}
+
static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct drm_connector *connector = hdmi->connector;
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
int hstart, hend, vstart, vend;
uint32_t frame_ctrl;
+ u32 div = 0;
mode = adjusted_mode;
- hdmi->pixclock = mode->clock * 1000;
+ display->dc_enable = mode->private_flags &
+ (MSM_MODE_FLAG_RGB444_DC_ENABLE |
+ MSM_MODE_FLAG_YUV420_DC_ENABLE);
+ /* compute pixclock as per color format and bit depth */
+ hdmi->pixclock = sde_hdmi_calc_pixclk(
+ mode->clock * HDMI_KHZ_TO_HZ,
+ mode->private_flags,
+ display->dc_enable);
+ SDE_DEBUG("Actual PCLK: %lu, Mode PCLK: %d\n",
+ hdmi->pixclock, mode->clock);
+
+ if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ div = 1;
- hstart = mode->htotal - mode->hsync_start;
- hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+ hstart = (mode->htotal - mode->hsync_start) >> div;
+ hend = (mode->htotal - mode->hsync_start + mode->hdisplay) >> div;
vstart = mode->vtotal - mode->vsync_start - 1;
vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
- DRM_DEBUG(
+ SDE_DEBUG(
"htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
hdmi_write(hdmi, REG_HDMI_TOTAL,
- SDE_HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ SDE_HDMI_TOTAL_H_TOTAL((mode->htotal >> div) - 1) |
SDE_HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
@@ -734,6 +818,22 @@ static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
_sde_hdmi_save_mode(hdmi, mode);
_sde_hdmi_bridge_setup_scrambler(hdmi, mode);
+ _sde_hdmi_bridge_setup_deep_color(hdmi);
+}
+
+static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+ adjusted_mode->private_flags |=
+ _sde_hdmi_choose_best_format(hdmi, adjusted_mode);
+ SDE_DEBUG("Adjusted mode private flags: 0x%x\n",
+ adjusted_mode->private_flags);
+
+ return true;
}
static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
@@ -742,6 +842,7 @@ static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
.disable = _sde_hdmi_bridge_disable,
.post_disable = _sde_hdmi_bridge_post_disable,
.mode_set = _sde_hdmi_bridge_mode_set,
+ .mode_fixup = _sde_hdmi_bridge_mode_fixup,
};
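
In the mode_set hunk above, div = 1 halves every horizontal timing value for YCbCr 4:2:0, consistent with sde_hdmi_calc_pixclk() treating the 4:2:0 rate as half of the mode pixel rate. A worked example with hypothetical CEA 4K60 timings (not taken from the patch):

    /* htotal = 4400, hsync_start = 4016, hdisplay = 3840, div = 1 (YUV420)
     *   hstart  = (4400 - 4016)        >> 1 = 192
     *   hend    = (4400 - 4016 + 3840) >> 1 = 2112
     *   H_TOTAL = (4400 >> 1) - 1           = 2199
     */
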
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
index a7887d2c84b0..ba47b7702efd 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
@@ -825,3 +825,76 @@ int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display)
}
return rc;
}
+
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+ u32 out_format, bool dc_enable)
+{
+ u32 rate_ratio = HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+ if (out_format & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ rate_ratio = HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+ pixel_freq /= rate_ratio;
+
+ if (dc_enable)
+ pixel_freq += pixel_freq >> 2;
+
+ return pixel_freq;
+
+}
+
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+ unsigned long pclk)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ unsigned long max_pclk = display->max_pclk_khz * HDMI_KHZ_TO_HZ;
+
+ if (connector->max_tmds_char)
+ max_pclk = MIN(max_pclk,
+ connector->max_tmds_char * HDMI_MHZ_TO_HZ);
+ else if (connector->max_tmds_clock)
+ max_pclk = MIN(max_pclk,
+ connector->max_tmds_clock * HDMI_MHZ_TO_HZ);
+
+ SDE_DEBUG("MAX PCLK = %ld, PCLK = %ld\n", max_pclk, pclk);
+
+ return pclk < max_pclk;
+}
+
+static bool sde_hdmi_check_dc_clock(struct drm_connector *connector,
+ struct drm_display_mode *mode, u32 format)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ u32 tmds_clk_with_dc = sde_hdmi_calc_pixclk(
+ mode->clock * HDMI_KHZ_TO_HZ,
+ format,
+ true);
+
+ return (display->dc_feature_supported &&
+ sde_hdmi_validate_pixclk(connector, tmds_clk_with_dc));
+}
+
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int dc_format = 0;
+
+ if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) &&
+ (connector->display_info.edid_hdmi_dc_modes
+ & DRM_EDID_YCBCR420_DC_30))
+ if (sde_hdmi_check_dc_clock(connector, mode,
+ MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420))
+ dc_format |= MSM_MODE_FLAG_YUV420_DC_ENABLE;
+
+ if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) &&
+ (connector->display_info.edid_hdmi_dc_modes
+ & DRM_EDID_HDMI_DC_30))
+ if (sde_hdmi_check_dc_clock(connector, mode,
+ MSM_MODE_FLAG_COLOR_FORMAT_RGB444))
+ dc_format |= MSM_MODE_FLAG_RGB444_DC_ENABLE;
+
+ return dc_format;
+}
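
Following the arithmetic in sde_hdmi_calc_pixclk() above: YCbCr 4:2:0 halves the rate relative to the mode clock, and 30-bit deep color adds 25% (pixel_freq += pixel_freq >> 2). A worked example with an assumed 4K60 mode clock of 594,000 kHz:

    /* YUV420, no DC : 594000000 / 2                = 297000000 Hz
     * YUV420 + DC_30: 297000000 + (297000000 >> 2) = 371250000 Hz
     */
    unsigned long pclk = sde_hdmi_calc_pixclk(594000000UL,
                MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420, true);
    /* pclk == 371250000 */
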
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
index 8b69dd31f637..10effed54a14 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -164,4 +164,10 @@ int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display);
void sde_hdmi_ddc_config(void *hdmi_display);
int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display);
void sde_hdmi_dump_regs(void *hdmi_display);
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+ u32 out_format, bool dc_enable);
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+ unsigned long pclk);
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+ struct drm_display_mode *mode);
#endif /* _SDE_HDMI_UTIL_H_ */
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 2ab50919f514..ed0ba928f170 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -34,6 +34,24 @@
#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0)
/* Transition to new mode requires a wait-for-vblank before the modeset */
#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1)
+/*
+ * Some flags are set in the bridge and then consumed in the encoder, so
+ * carrying them in private_flags is convenient. DRM_MODE_FLAG_SUPPORTS_RGB/YUV
+ * indicate the color formats the sink supports, as read from the EDID, while
+ * the flags defined here record the best color format/bit depth chosen for
+ * the display, in priority order: RGB+RGB_DC, YUV+YUV_DC, RGB, YUV. RGB and
+ * YUV cannot be selected at the same time, and RGB_DC/YUV_DC are only set
+ * when the matching RGB/YUV format is also set.
+ */
+/* Enable RGB444 30 bit deep color */
+#define MSM_MODE_FLAG_RGB444_DC_ENABLE (1<<2)
+/* Enable YUV420 30 bit deep color */
+#define MSM_MODE_FLAG_YUV420_DC_ENABLE (1<<3)
+/* Choose RGB444 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444 (1<<4)
+/* Choose YUV420 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 (1<<5)
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 29444a83cf02..97c9f8baea6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -55,6 +55,33 @@
#define MAX_CHANNELS_PER_ENC 2
+/* rgb to yuv color space conversion matrix */
+static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = {
+ [SDE_CSC_RGB2YUV_601L] = {
+ {
+ TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
+ TO_S15D16(0xffb4), TO_S15D16(0xff6b), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0xff44), TO_S15D16(0xffdb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_601FR] = {
+ {
+ TO_S15D16(0x0099), TO_S15D16(0x012d), TO_S15D16(0x003a),
+ TO_S15D16(0xffaa), TO_S15D16(0xff56), TO_S15D16(0x0100),
+ TO_S15D16(0x0100), TO_S15D16(0xff2a), TO_S15D16(0xffd6),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0000, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+};
+
/**
* struct sde_encoder_virt - virtual encoder. Container of one or more physical
* encoders. Virtual encoder manages one "logical" display. Physical
@@ -1374,3 +1401,108 @@ enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
return INTF_MODE_NONE;
}
+
+/**
+ * sde_encoder_phys_setup_cdm - setup chroma down block
+ * @phys_enc: Pointer to physical encoder
+ * @output_type: HDMI/WB
+ * @format: Output format
+ * @roi: Output size
+ */
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+ const struct sde_format *format, u32 output_type,
+ struct sde_rect *roi)
+{
+ struct drm_encoder *encoder = phys_enc->parent;
+ struct sde_encoder_virt *sde_enc = NULL;
+ struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+ struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
+ int ret;
+ u32 csc_type = 0;
+
+ if (!encoder) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+ sde_enc = to_sde_encoder_virt(encoder);
+
+ if (!SDE_FORMAT_IS_YUV(format)) {
+ SDE_DEBUG_ENC(sde_enc, "[cdm_disable fmt:%x]\n",
+ format->base.pixel_format);
+
+ if (hw_cdm && hw_cdm->ops.disable)
+ hw_cdm->ops.disable(hw_cdm);
+
+ return;
+ }
+
+ memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
+
+ cdm_cfg->output_width = roi->w;
+ cdm_cfg->output_height = roi->h;
+ cdm_cfg->output_fmt = format;
+ cdm_cfg->output_type = output_type;
+ cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
+ CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+
+ /* enable 10 bit logic */
+ switch (cdm_cfg->output_fmt->chroma_sample) {
+ case SDE_CHROMA_RGB:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case SDE_CHROMA_H2V1:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ case SDE_CHROMA_420:
+ cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+ break;
+ case SDE_CHROMA_H1V2:
+ default:
+ SDE_ERROR("unsupported chroma sampling type\n");
+ cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+ cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+ break;
+ }
+
+ SDE_DEBUG_ENC(sde_enc, "[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+ cdm_cfg->output_width,
+ cdm_cfg->output_height,
+ cdm_cfg->output_fmt->base.pixel_format,
+ cdm_cfg->output_type,
+ cdm_cfg->output_bit_depth,
+ cdm_cfg->h_cdwn_type,
+ cdm_cfg->v_cdwn_type);
+
+ if (output_type == CDM_CDWN_OUTPUT_HDMI)
+ csc_type = SDE_CSC_RGB2YUV_601FR;
+ else if (output_type == CDM_CDWN_OUTPUT_WB)
+ csc_type = SDE_CSC_RGB2YUV_601L;
+
+ if (hw_cdm && hw_cdm->ops.setup_csc_data) {
+ ret = hw_cdm->ops.setup_csc_data(hw_cdm,
+ &sde_csc_10bit_convert[csc_type]);
+ if (ret < 0) {
+ SDE_ERROR("failed to setup CSC %d\n", ret);
+ return;
+ }
+ }
+
+ if (hw_cdm && hw_cdm->ops.setup_cdwn) {
+ ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ SDE_ERROR("failed to setup CDM %d\n", ret);
+ return;
+ }
+ }
+
+ if (hw_cdm && hw_cdm->ops.enable) {
+ ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+ if (ret < 0) {
+ SDE_ERROR("failed to enable CDM %d\n", ret);
+ return;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index 2205dd98a927..20f125155de3 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -349,8 +349,8 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
#endif
void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
- struct drm_framebuffer *fb, const struct sde_format *format,
- struct sde_rect *wb_roi);
+ const struct sde_format *format, u32 output_type,
+ struct sde_rect *roi);
/**
* sde_encoder_helper_trigger_start - control start helper function
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
index 0b6ee302e231..78a8b732b0de 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -242,17 +242,20 @@ static void sde_encoder_phys_vid_setup_timing_engine(
SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
- if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ if (phys_enc->split_role != ENC_ROLE_SOLO ||
+ (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) {
mode.hdisplay >>= 1;
mode.htotal >>= 1;
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
+ mode.hskew >>= 1;
SDE_DEBUG_VIDENC(vid_enc,
- "split_role %d, halve horizontal %d %d %d %d\n",
+ "split_role %d, halve horizontal %d %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
- mode.hsync_start, mode.hsync_end);
+ mode.hsync_start, mode.hsync_end,
+ mode.hskew);
}
drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
@@ -407,6 +410,9 @@ static void sde_encoder_phys_vid_mode_set(
return;
}
+ phys_enc->hw_ctl = NULL;
+ phys_enc->hw_cdm = NULL;
+
rm = &phys_enc->sde_kms->rm;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
phys_enc->cached_mode = *adj_mode;
@@ -427,6 +433,20 @@ static void sde_encoder_phys_vid_mode_set(
phys_enc->hw_ctl = NULL;
return;
}
+
+ /* CDM is optional */
+ sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+ for (i = 0; i <= instance; i++) {
+ sde_rm_get_hw(rm, &iter);
+ if (i == instance)
+ phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+ }
+
+ if (IS_ERR(phys_enc->hw_cdm)) {
+ SDE_ERROR("CDM required but not allocated: %ld\n",
+ PTR_ERR(phys_enc->hw_cdm));
+ phys_enc->hw_cdm = NULL;
+ }
}
static int sde_encoder_phys_vid_control_vblank_irq(
@@ -477,6 +497,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
struct sde_encoder_phys_vid *vid_enc;
struct sde_hw_intf *intf;
struct sde_hw_ctl *ctl;
+ struct sde_hw_cdm *hw_cdm = NULL;
+ struct drm_display_mode mode;
+ const struct sde_format *fmt = NULL;
u32 flush_mask = 0;
int ret;
@@ -485,7 +508,9 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
SDE_ERROR("invalid encoder/device\n");
return;
}
+ hw_cdm = phys_enc->hw_cdm;
priv = phys_enc->parent->dev->dev_private;
+ mode = phys_enc->cached_mode;
vid_enc = to_sde_encoder_phys_vid(phys_enc);
intf = vid_enc->hw_intf;
@@ -520,7 +545,21 @@ static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
goto end;
}
+ if (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ fmt = sde_get_sde_format(DRM_FORMAT_YUV420);
+
+ if (fmt) {
+ struct sde_rect hdmi_roi;
+
+ hdmi_roi.w = mode.hdisplay;
+ hdmi_roi.h = mode.vdisplay;
+ sde_encoder_phys_setup_cdm(phys_enc, fmt,
+ CDM_CDWN_OUTPUT_HDMI, &hdmi_roi);
+ }
+
ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ if (ctl->ops.get_bitmask_cdm && hw_cdm)
+ ctl->ops.get_bitmask_cdm(ctl, &flush_mask, hw_cdm->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
@@ -569,6 +608,8 @@ static void sde_encoder_phys_vid_get_hw_resources(
SDE_DEBUG_VIDENC(vid_enc, "\n");
hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+ hw_res->needs_cdm = true;
+ SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm);
}
static int sde_encoder_phys_vid_wait_for_vblank(
@@ -713,6 +754,11 @@ static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
atomic_read(&phys_enc->vblank_refcount));
+ if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+ SDE_DEBUG_DRIVER("[cdm_disable]\n");
+ phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+ }
+
phys_enc->enable_state = SDE_ENC_DISABLED;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index a48962a2384b..65b16419fcec 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -28,24 +28,6 @@
#define WBID(wb_enc) ((wb_enc) ? wb_enc->wb_dev->wb_idx : -1)
-#define TO_S15D16(_x_) ((_x_) << 7)
-
-/**
- * sde_rgb2yuv_601l - rgb to yuv color space conversion matrix
- *
- */
-static struct sde_csc_cfg sde_encoder_phys_wb_rgb2yuv_601l = {
- {
- TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
- TO_S15D16(0x1fb5), TO_S15D16(0x1f6c), TO_S15D16(0x00e1),
- TO_S15D16(0x00e1), TO_S15D16(0x1f45), TO_S15D16(0x1fdc)
- },
- { 0x00, 0x00, 0x00 },
- { 0x0040, 0x0200, 0x0200 },
- { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
- { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
-};
-
/**
* sde_encoder_phys_wb_is_master - report wb always as master encoder
*/
@@ -105,96 +87,6 @@ static void sde_encoder_phys_wb_set_traffic_shaper(
}
/**
- * sde_encoder_phys_setup_cdm - setup chroma down block
- * @phys_enc: Pointer to physical encoder
- * @fb: Pointer to output framebuffer
- * @format: Output format
- */
-void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
- struct drm_framebuffer *fb, const struct sde_format *format,
- struct sde_rect *wb_roi)
-{
- struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
- struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
- int ret;
-
- if (!SDE_FORMAT_IS_YUV(format)) {
- SDE_DEBUG("[cdm_disable fmt:%x]\n",
- format->base.pixel_format);
-
- if (hw_cdm && hw_cdm->ops.disable)
- hw_cdm->ops.disable(hw_cdm);
-
- return;
- }
-
- memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
-
- cdm_cfg->output_width = wb_roi->w;
- cdm_cfg->output_height = wb_roi->h;
- cdm_cfg->output_fmt = format;
- cdm_cfg->output_type = CDM_CDWN_OUTPUT_WB;
- cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
- CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
-
- /* enable 10 bit logic */
- switch (cdm_cfg->output_fmt->chroma_sample) {
- case SDE_CHROMA_RGB:
- cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- case SDE_CHROMA_H2V1:
- cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- case SDE_CHROMA_420:
- cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
- break;
- case SDE_CHROMA_H1V2:
- default:
- SDE_ERROR("unsupported chroma sampling type\n");
- cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
- cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
- break;
- }
-
- SDE_DEBUG("[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
- cdm_cfg->output_width,
- cdm_cfg->output_height,
- cdm_cfg->output_fmt->base.pixel_format,
- cdm_cfg->output_type,
- cdm_cfg->output_bit_depth,
- cdm_cfg->h_cdwn_type,
- cdm_cfg->v_cdwn_type);
-
- if (hw_cdm && hw_cdm->ops.setup_csc_data) {
- ret = hw_cdm->ops.setup_csc_data(hw_cdm,
- &sde_encoder_phys_wb_rgb2yuv_601l);
- if (ret < 0) {
- SDE_ERROR("failed to setup CSC %d\n", ret);
- return;
- }
- }
-
- if (hw_cdm && hw_cdm->ops.setup_cdwn) {
- ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
- if (ret < 0) {
- SDE_ERROR("failed to setup CDM %d\n", ret);
- return;
- }
- }
-
- if (hw_cdm && hw_cdm->ops.enable) {
- ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
- if (ret < 0) {
- SDE_ERROR("failed to enable CDM %d\n", ret);
- return;
- }
- }
-}
-
-/**
* sde_encoder_phys_wb_setup_fb - setup output framebuffer
* @phys_enc: Pointer to physical encoder
* @fb: Pointer to output framebuffer
@@ -520,7 +412,8 @@ static void sde_encoder_phys_wb_setup(
sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
- sde_encoder_phys_setup_cdm(phys_enc, fb, wb_enc->wb_fmt, wb_roi);
+ sde_encoder_phys_setup_cdm(phys_enc, wb_enc->wb_fmt,
+ CDM_CDWN_OUTPUT_WB, wb_roi);
sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index 188649d946d6..9ec81c227e60 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -227,7 +227,7 @@ int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
return -EINVAL;
if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
- if (fmt->chroma_sample != SDE_CHROMA_H1V2)
+ if (fmt->chroma_sample == SDE_CHROMA_H1V2)
return -EINVAL; /*unsupported format */
opmode = BIT(0);
opmode |= (fmt->chroma_sample << 1);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
index 2592fe26cb38..1edeff6a7aec 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -63,6 +63,8 @@ enum sde_format_flags {
(((X)->fetch_mode == SDE_FETCH_UBWC) && \
test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define TO_S15D16(_x_) ((_x_) << 7)
+
#define SDE_BLEND_FG_ALPHA_FG_CONST (0 << 0)
#define SDE_BLEND_FG_ALPHA_BG_CONST (1 << 0)
#define SDE_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
@@ -339,6 +341,12 @@ enum sde_3d_blend_mode {
BLEND_3D_MAX
};
+enum sde_csc_type {
+ SDE_CSC_RGB2YUV_601L,
+ SDE_CSC_RGB2YUV_601FR,
+ SDE_MAX_CSC
+};
+
/** struct sde_format - defines the format configuration which
* allows SDE HW to correctly fetch and decode the format
* @base: base msm_format struture containing fourcc code
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
index ca9229ede251..68246253bb70 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.c
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -229,10 +229,17 @@ u32 video_format)
{
u8 cea_mode = 0;
struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
/* Need to add Y420 support flag to the modes */
list_for_each_entry(mode, &connector->probed_modes, head) {
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /* Clear the RGB/YUV format flags before calling upstream API */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
cea_mode = drm_match_cea_mode(mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
if ((cea_mode != 0) && (cea_mode == video_format)) {
SDE_EDID_DEBUG("%s found match for %d ", __func__,
video_format);
@@ -246,7 +253,7 @@ struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
const u8 *db)
{
u32 offset = 0;
- u8 len = 0;
+ u8 cmdb_len = 0;
u8 svd_len = 0;
const u8 *svd = NULL;
u32 i = 0, j = 0;
@@ -262,10 +269,8 @@ const u8 *db)
return;
}
SDE_EDID_DEBUG("%s +\n", __func__);
- len = db[0] & 0x1f;
+ cmdb_len = db[0] & 0x1f;
- if (len < 7)
- return;
/* Byte 3 to L+1 contain SVDs */
offset += 2;
@@ -273,20 +278,24 @@ const u8 *db)
if (svd) {
/*moving to the next byte as vic info begins there*/
- ++svd;
svd_len = svd[0] & 0x1f;
+ ++svd;
}
for (i = 0; i < svd_len; i++, j++) {
- video_format = *svd & 0x7F;
- if (db[offset] & (1 << j))
+ video_format = *(svd + i) & 0x7F;
+ if (cmdb_len == 1) {
+ /* If cmdb_len is 1, it means all SVDs support YUV */
+ sde_edid_set_y420_support(connector, video_format);
+ } else if (db[offset] & (1 << j)) {
sde_edid_set_y420_support(connector, video_format);
- if (j & 0x80) {
- j = j/8;
- offset++;
- if (offset >= len)
- break;
+ if (j & 0x80) {
+ j = j/8;
+ offset++;
+ if (offset >= cmdb_len)
+ break;
+ }
}
}
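
Note on the Y420 capability-map handling above: a CEA Y420CMDB whose payload length is 1 carries no capability bitmap at all, which is why the new code treats every SVD as YCbCr 4:2:0 capable in that case; otherwise each bit of the map corresponds to one SVD. The standalone sketch below illustrates only that interpretation; the names (parse_y420_cmdb, set_y420) and the plain byte/bit indexing are illustrative, not the driver's offset/j bookkeeping.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for sde_edid_set_y420_support(). */
static void set_y420(uint8_t vic)
{
	printf("VIC %u supports YCbCr 4:2:0\n", vic);
}

/*
 * cmdb_len: CEA data block payload length (db[0] & 0x1f).
 * svd[]:    short video descriptors from the Video Data Block.
 * map[]:    capability bitmap, absent when cmdb_len == 1.
 */
static void parse_y420_cmdb(uint8_t cmdb_len, const uint8_t *svd,
			    uint8_t svd_len, const uint8_t *map)
{
	uint8_t i;

	for (i = 0; i < svd_len; i++) {
		uint8_t vic = svd[i] & 0x7f;

		if (cmdb_len == 1)
			set_y420(vic);			/* no bitmap: all SVDs */
		else if (map[i / 8] & (1 << (i % 8)))
			set_y420(vic);			/* bit i maps to SVD i */
	}
}

int main(void)
{
	const uint8_t svd[] = { 95, 94, 93, 98 };	/* example VICs */
	const uint8_t map[] = { 0x05 };			/* SVDs 0 and 2 */

	parse_y420_cmdb(3, svd, 4, map);		/* explicit bitmap */
	parse_y420_cmdb(1, svd, 4, NULL);		/* length 1: all SVDs */
	return 0;
}
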
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
index 1143dc2c7bec..59e3dceca33c 100644
--- a/drivers/gpu/drm/msm/sde_edid_parser.h
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -33,6 +33,8 @@
#define SDE_CEA_EXT 0x02
#define SDE_EXTENDED_TAG 0x07
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
enum extended_data_block_types {
VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2851ed..1f013d45c9e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
+ drm_ht_remove(&man->resources);
kfree(man);
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 990c9bca5127..afb489f10172 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2210,21 +2210,23 @@ static int kgsl_setup_dmabuf_useraddr(struct kgsl_device *device,
if (fd != 0)
dmabuf = dma_buf_get(fd - 1);
}
- up_read(&current->mm->mmap_sem);
- if (IS_ERR_OR_NULL(dmabuf))
+ if (IS_ERR_OR_NULL(dmabuf)) {
+ up_read(&current->mm->mmap_sem);
return dmabuf ? PTR_ERR(dmabuf) : -ENODEV;
+ }
ret = kgsl_setup_dma_buf(device, pagetable, entry, dmabuf);
if (ret) {
dma_buf_put(dmabuf);
+ up_read(&current->mm->mmap_sem);
return ret;
}
/* Setup the user addr/cache mode for cache operations */
entry->memdesc.useraddr = hostptr;
_setup_cache_mode(entry, vma);
-
+ up_read(&current->mm->mmap_sem);
return 0;
}
#else
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 24a1a42af74e..3e4cc4490792 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -522,7 +522,8 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
struct kgsl_device *device = dev_get_drvdata(dev);
struct kgsl_pwrctrl *pwr;
struct kgsl_pwrlevel *pwr_level;
- int level, i;
+ int level;
+ unsigned int i;
unsigned long cur_freq;
if (device == NULL)
@@ -551,10 +552,11 @@ int kgsl_devfreq_target(struct device *dev, unsigned long *freq, u32 flags)
if (*freq != cur_freq) {
level = pwr->max_pwrlevel;
/*
- * To avoid infinite loop issue type cast max_pwrlevel to
- * signed integer type
+ * Array index of pwrlevels[] should be within the permitted
+ * power levels, i.e., from max_pwrlevel to min_pwrlevel.
*/
- for (i = pwr->min_pwrlevel; i >= (int)pwr->max_pwrlevel; i--)
+ for (i = pwr->min_pwrlevel; (i >= pwr->max_pwrlevel
+ && i <= pwr->min_pwrlevel); i--)
if (*freq <= pwr->pwrlevels[i].gpu_freq) {
if (pwr->thermal_cycle == CYCLE_ACTIVE)
level = _thermal_adjust(pwr, i);
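
The two-sided loop condition above is what keeps the walk bounded now that i is unsigned. In the old mixed signed/unsigned comparison, an index reaching -1 was converted to a huge unsigned value and the loop never terminated; with an unsigned index, i-- at 0 wraps to UINT_MAX, which fails the i <= min_pwrlevel half of the test. A minimal standalone sketch of that termination behaviour, with made-up power levels rather than the driver's tables:

#include <stdio.h>

int main(void)
{
	/* Made-up levels: index 0 is max (fastest), index 3 is min. */
	unsigned long pwrlevels[] = { 600000000, 450000000, 300000000, 180000000 };
	unsigned int max_pwrlevel = 0, min_pwrlevel = 3;
	unsigned long freq = 700000000;		/* higher than every level */
	unsigned int i;

	/*
	 * "i >= max_pwrlevel" alone cannot stop the loop when max_pwrlevel
	 * is 0 and i is unsigned; it is the "i <= min_pwrlevel" half that
	 * fails once the decrement wraps, ending the walk cleanly even when
	 * no level satisfies the requested frequency.
	 */
	for (i = min_pwrlevel; i >= max_pwrlevel && i <= min_pwrlevel; i--)
		if (freq <= pwrlevels[i]) {
			printf("picked level %u (%lu Hz)\n", i, pwrlevels[i]);
			return 0;
		}

	printf("no level matched; loop exited after wrap, i=%u\n", i);
	return 0;
}
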
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 0b80633bae91..d4d655a10df1 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -364,6 +364,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
return ret;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 316d8b783d94..691c7bb3afac 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, 2016-2017 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -764,6 +764,16 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
pm_runtime_get_sync(drvdata->dev);
mutex_lock(&drvdata->mem_lock);
+
+ spin_lock_irqsave(&drvdata->spinlock, flags);
+ if (drvdata->reading) {
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
+ pm_runtime_put(drvdata->dev);
+ return -EBUSY;
+ }
+ spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR &&
drvdata->out_mode == TMC_ETR_OUT_MODE_MEM) {
/*
@@ -807,19 +817,8 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
coresight_cti_map_trigout(drvdata->cti_flush, 1, 0);
coresight_cti_map_trigin(drvdata->cti_reset, 2, 0);
}
- mutex_unlock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->reading) {
- spin_unlock_irqrestore(&drvdata->spinlock, flags);
-
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR
- && drvdata->out_mode == TMC_ETR_OUT_MODE_USB)
- usb_qdss_close(drvdata->usbch);
- pm_runtime_put(drvdata->dev);
-
- return -EBUSY;
- }
if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
tmc_etb_enable_hw(drvdata);
@@ -845,6 +844,7 @@ static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
*/
drvdata->sticky_enable = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC enabled\n");
return 0;
@@ -1140,6 +1140,7 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
unsigned long flags;
enum tmc_mode mode;
+ mutex_lock(&drvdata->mem_lock);
spin_lock_irqsave(&drvdata->spinlock, flags);
if (!drvdata->sticky_enable) {
dev_err(drvdata->dev, "enable tmc once before reading\n");
@@ -1172,11 +1173,13 @@ static int tmc_read_prepare(struct tmc_drvdata *drvdata)
out:
drvdata->reading = true;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
dev_info(drvdata->dev, "TMC read start\n");
return 0;
err:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
+ mutex_unlock(&drvdata->mem_lock);
return ret;
}
@@ -1353,7 +1356,11 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
{
struct tmc_drvdata *drvdata = container_of(file->private_data,
struct tmc_drvdata, miscdev);
- char *bufp = drvdata->buf + *ppos;
+ char *bufp;
+
+ mutex_lock(&drvdata->mem_lock);
+
+ bufp = drvdata->buf + *ppos;
if (*ppos + len > drvdata->size)
len = drvdata->size - *ppos;
@@ -1375,6 +1382,7 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
if (copy_to_user(data, bufp, len)) {
dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+ mutex_unlock(&drvdata->mem_lock);
return -EFAULT;
}
@@ -1382,6 +1390,8 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
__func__, len, (int)(drvdata->size - *ppos));
+
+ mutex_unlock(&drvdata->mem_lock);
return len;
}
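
The TMC changes above settle on a single lock order: mem_lock (a mutex, safe to hold across sleeping work such as buffer allocation or USB channel handling) is taken first and released last, while the spinlock only wraps the short sections that inspect or update driver state. A minimal userspace sketch of that pattern, using pthread primitives and placeholder state in place of the TMC registers (names are illustrative only):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

/* Placeholder state; names are illustrative, not the TMC driver's. */
struct fake_tmc {
	pthread_mutex_t mem_lock;	/* outer lock: may sleep while held   */
	pthread_spinlock_t lock;	/* inner lock: short critical sections */
	bool reading;
	bool enabled;
};

static int fake_tmc_enable(struct fake_tmc *d)
{
	pthread_mutex_lock(&d->mem_lock);

	/* Reject the request early if a read is in flight. */
	pthread_spin_lock(&d->lock);
	if (d->reading) {
		pthread_spin_unlock(&d->lock);
		pthread_mutex_unlock(&d->mem_lock);
		return -EBUSY;
	}
	pthread_spin_unlock(&d->lock);

	/* ... sleeping work (buffer allocation, USB setup) goes here ... */

	pthread_spin_lock(&d->lock);
	d->enabled = true;		/* short hardware-programming section */
	pthread_spin_unlock(&d->lock);

	pthread_mutex_unlock(&d->mem_lock);	/* outer lock released last */
	return 0;
}

int main(void)
{
	struct fake_tmc d = { .reading = false, .enabled = false };

	pthread_mutex_init(&d.mem_lock, NULL);
	pthread_spin_init(&d.lock, PTHREAD_PROCESS_PRIVATE);
	return fake_tmc_enable(&d) ? 1 : 0;
}
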
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 4831eb910fc7..22160e481794 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -699,9 +699,9 @@ out_clear_state:
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
out_free:
- mmput(mm);
free_pasid_state(pasid_state);
out:
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b92b8a724efb..f9711aceef54 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1137,7 +1137,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
- level_pfn = pfn & level_mask(level - 1);
+ level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2)
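
The one-character change above alters which boundary level_pfn snaps to. Assuming a level_mask() along the lines of the sketch below (clear the pfn bits decoded below the given level, 9 bits per level), masking with level - 1 only aligns to the child granularity, so the later "does [start_pfn, last_pfn] cover this whole table?" test is evaluated against the wrong base. A small, hypothetical illustration of the arithmetic:

#include <stdio.h>

#define LEVEL_STRIDE 9	/* 512 entries per table, as on VT-d */

/* Assumed shape: pfn bits decoded below the given level are cleared. */
static unsigned long level_mask(int level)
{
	return ~0UL << ((level - 1) * LEVEL_STRIDE);
}

int main(void)
{
	unsigned long pfn = 0x12345;	/* some pfn inside a level-3 region */

	/* Fixed: base pfn of the region covered by the level-3 entry. */
	unsigned long good = pfn & level_mask(3);
	/* Buggy: aligned only to the child (level-2 entry) granularity. */
	unsigned long bad  = pfn & level_mask(2);

	printf("pfn=%#lx fixed_base=%#lx buggy_base=%#lx\n", pfn, good, bad);
	/*
	 * fixed_base (0x0) is the true start of the 512*512-pfn region the
	 * level-3 entry maps; buggy_base (0x12200) sits higher inside it,
	 * so the subsequent "range covers the whole table" check is made
	 * against a shifted window and can free a table whose lower pfns
	 * are still mapped.
	 */
	return 0;
}
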
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33176a4aa6ef..92e6ae48caf8 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -394,36 +394,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
- if (ret) {
- kfree(device);
- return ret;
- }
+ if (ret)
+ goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
- kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
+ kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
-
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return ret;
+ goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -435,8 +429,10 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
- __iommu_attach_device(group->domain, dev);
+ ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
+ if (ret)
+ goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -447,6 +443,21 @@ rename:
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
+
+err_put_group:
+ mutex_lock(&group->mutex);
+ list_del(&device->list);
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+err_free_name:
+ kfree(device->name);
+err_remove_link:
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+ kfree(device);
+ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
diff --git a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
index 6af589e5c230..a2a89b92c9f1 100644
--- a/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/ais/sensor/flash/msm_flash.c
@@ -1022,13 +1022,13 @@ static long msm_flash_subdev_do_ioctl(
sd = vdev_to_v4l2_subdev(vdev);
u32 = (struct msm_flash_cfg_data_t32 *)arg;
- flash_data.cfg_type = u32->cfg_type;
- for (i = 0; i < MAX_LED_TRIGGERS; i++) {
- flash_data.flash_current[i] = u32->flash_current[i];
- flash_data.flash_duration[i] = u32->flash_duration[i];
- }
switch (cmd) {
case VIDIOC_MSM_FLASH_CFG32:
+ flash_data.cfg_type = u32->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data.flash_current[i] = u32->flash_current[i];
+ flash_data.flash_duration[i] = u32->flash_duration[i];
+ }
cmd = VIDIOC_MSM_FLASH_CFG;
switch (flash_data.cfg_type) {
case CFG_FLASH_OFF:
diff --git a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
index 28a5402a4359..236660dca3fb 100644
--- a/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
+++ b/drivers/media/platform/msm/ais/sensor/ois/msm_ois.c
@@ -781,11 +781,10 @@ static long msm_ois_subdev_do_ioctl(
u32 = (struct msm_ois_cfg_data32 *)arg;
parg = arg;
- ois_data.cfgtype = u32->cfgtype;
-
switch (cmd) {
case VIDIOC_MSM_OIS_CFG32:
cmd = VIDIOC_MSM_OIS_CFG;
+ ois_data.cfgtype = u32->cfgtype;
switch (u32->cfgtype) {
case CFG_OIS_CONTROL:
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 1f1435fe282e..b2d152bf4ef0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -786,6 +786,7 @@ struct vfe_device {
size_t num_norm_clk;
bool hvx_clk_state;
enum cam_ahb_clk_vote ahb_vote;
+ enum cam_ahb_clk_vote user_requested_ahb_vote;
struct cx_ipeak_client *vfe_cx_ipeak;
/* Sync variables*/
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 6515e3d6ecbc..24d1c6cba84d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -274,10 +274,12 @@ int msm_isp47_ahb_clk_cfg(struct vfe_device *vfe_dev,
enum cam_ahb_clk_vote src_clk_vote;
struct msm_isp_clk_rates clk_rates;
- if (ahb_cfg)
+ if (ahb_cfg) {
vote = msm_isp47_get_cam_clk_vote(ahb_cfg->vote);
- else
- vote = CAM_AHB_SVS_VOTE;
+ vfe_dev->user_requested_ahb_vote = vote;
+ } else {
+ vote = vfe_dev->user_requested_ahb_vote;
+ }
vfe_dev->hw_info->vfe_ops.platform_ops.get_clk_rates(vfe_dev,
&clk_rates);
@@ -327,6 +329,7 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
if (rc)
goto clk_enable_failed;
+ vfe_dev->user_requested_ahb_vote = CAM_AHB_SVS_VOTE;
rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
if (rc < 0) {
pr_err("%s: failed to vote for AHB\n", __func__);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index e6d36a083a36..63f5497e63b8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -776,40 +776,6 @@ void msm_isp_check_for_output_error(struct vfe_device *vfe_dev,
}
}
-static int msm_isp_check_sync_time(struct msm_vfe_src_info *src_info,
- struct msm_isp_timestamp *ts,
- struct master_slave_resource_info *ms_res)
-{
- int i;
- struct msm_vfe_src_info *master_src_info = NULL;
- uint32_t master_time = 0, current_time;
-
- if (!ms_res->src_sof_mask)
- return 0;
-
- for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
- if (ms_res->src_info[i] == NULL)
- continue;
- if (src_info == ms_res->src_info[i] ||
- ms_res->src_info[i]->active == 0)
- continue;
- if (ms_res->src_sof_mask &
- (1 << ms_res->src_info[i]->dual_hw_ms_info.index)) {
- master_src_info = ms_res->src_info[i];
- break;
- }
- }
- if (!master_src_info)
- return 0;
- master_time = master_src_info->
- dual_hw_ms_info.sof_info.mono_timestamp_ms;
- current_time = ts->buf_time.tv_sec * 1000 +
- ts->buf_time.tv_usec / 1000;
- if ((current_time - master_time) > ms_res->sof_delta_threshold)
- return 1;
- return 0;
-}
-
static void msm_isp_sync_dual_cam_frame_id(
struct vfe_device *vfe_dev,
struct master_slave_resource_info *ms_res,
@@ -824,24 +790,11 @@ static void msm_isp_sync_dual_cam_frame_id(
if (src_info->dual_hw_ms_info.sync_state ==
ms_res->dual_sync_mode) {
- if (msm_isp_check_sync_time(src_info, ts, ms_res) == 0) {
- (frame_src == VFE_PIX_0) ? src_info->frame_id +=
+ (frame_src == VFE_PIX_0) ? src_info->frame_id +=
vfe_dev->axi_data.src_info[frame_src].
sof_counter_step :
src_info->frame_id++;
- return;
- }
- ms_res->src_sof_mask = 0;
- ms_res->active_src_mask = 0;
- for (i = 0; i < MAX_VFE * VFE_SRC_MAX; i++) {
- if (ms_res->src_info[i] == NULL)
- continue;
- if (ms_res->src_info[i]->active == 0)
- continue;
- ms_res->src_info[i]->dual_hw_ms_info.
- sync_state =
- MSM_ISP_DUAL_CAM_ASYNC;
- }
+ return;
}
/* find highest frame id */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
index bc844bb87db5..b1bea12c2cc3 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/actuator/msm_actuator.c
@@ -1707,6 +1707,10 @@ static long msm_actuator_subdev_do_ioctl(
parg = &actuator_data;
break;
}
+ break;
+ case VIDIOC_MSM_ACTUATOR_CFG:
+ pr_err("%s: invalid cmd 0x%x received\n", __func__, cmd);
+ return -EINVAL;
}
rc = msm_actuator_subdev_ioctl(sd, cmd, parg);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index 7dda92510879..3cb6b55ccc8c 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -403,6 +403,10 @@ static int32_t msm_cci_calc_cmd_len(struct cci_device *cci_dev,
if (cmd->reg_addr + 1 ==
(cmd+1)->reg_addr) {
len += data_len;
+ if (len > cci_dev->payload_size) {
+ len = len - data_len;
+ break;
+ }
*pack += data_len;
} else
break;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
index cf7d1a8aa1f4..223ddf39dce8 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/flash/msm_flash.c
@@ -508,18 +508,42 @@ static int32_t msm_flash_init(
return 0;
}
-#ifdef CONFIG_COMPAT
static int32_t msm_flash_init_prepare(
struct msm_flash_ctrl_t *flash_ctrl,
struct msm_flash_cfg_data_t *flash_data)
{
+#ifdef CONFIG_COMPAT
+ struct msm_flash_cfg_data_t flash_data_k;
+ struct msm_flash_init_info_t flash_init_info;
+ int32_t i = 0;
+
+ if (!is_compat_task()) {
+ /* for the 64-bit use case, copy the data into local kernel memory */
+ flash_data_k.cfg_type = flash_data->cfg_type;
+ for (i = 0; i < MAX_LED_TRIGGERS; i++) {
+ flash_data_k.flash_current[i] =
+ flash_data->flash_current[i];
+ flash_data_k.flash_duration[i] =
+ flash_data->flash_duration[i];
+ }
+
+ flash_data_k.cfg.flash_init_info = &flash_init_info;
+ if (copy_from_user(&flash_init_info,
+ (void __user *)(flash_data->cfg.flash_init_info),
+ sizeof(struct msm_flash_init_info_t))) {
+ pr_err("%s copy_from_user failed %d\n",
+ __func__, __LINE__);
+ return -EFAULT;
+ }
+ return msm_flash_init(flash_ctrl, &flash_data_k);
+ }
+ /*
+ * For the 32-bit use case the userspace data has already been
+ * copied to local memory in msm_flash_subdev_do_ioctl(),
+ * so no copy_from_user() is needed here.
+ */
return msm_flash_init(flash_ctrl, flash_data);
-}
#else
-static int32_t msm_flash_init_prepare(
- struct msm_flash_ctrl_t *flash_ctrl,
- struct msm_flash_cfg_data_t *flash_data)
-{
struct msm_flash_cfg_data_t flash_data_k;
struct msm_flash_init_info_t flash_init_info;
int32_t i = 0;
@@ -534,15 +558,15 @@ static int32_t msm_flash_init_prepare(
flash_data_k.cfg.flash_init_info = &flash_init_info;
if (copy_from_user(&flash_init_info,
- (void *)(flash_data->cfg.flash_init_info),
+ (void __user *)(flash_data->cfg.flash_init_info),
sizeof(struct msm_flash_init_info_t))) {
pr_err("%s copy_from_user failed %d\n",
__func__, __LINE__);
return -EFAULT;
}
return msm_flash_init(flash_ctrl, &flash_data_k);
-}
#endif
+}
static int32_t msm_flash_prepare(
struct msm_flash_ctrl_t *flash_ctrl)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index acb697946e18..10f72a2155db 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -814,6 +814,9 @@ static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
if (bw > 0xFF)
bw = 0xFF;
+ else if (bw == 0)
+ bw = 1;
+
SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
BIT(31) | bw);
SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 7caf61cb6799..2b3070974df8 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -550,7 +550,7 @@ static int __map_and_update_binfo(struct msm_vidc_inst *inst,
binfo->handle[i] = map_buffer(inst, &b->m.planes[i],
get_hal_buffer_type(inst, b));
if (!binfo->handle[i])
- rc = -EINVAL;
+ return -EINVAL;
binfo->mapped[i] = true;
binfo->device_addr[i] = binfo->handle[i]->device_addr +
@@ -670,13 +670,13 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
rc = __map_and_update_binfo(inst, binfo, b, i);
if (rc)
- goto exit;
+ goto map_err;
/* We maintain one ref count for all planes*/
if (!i && is_dynamic_output_buffer_mode(b, inst)) {
rc = buf_ref_get(inst, binfo);
if (rc < 0)
- goto exit;
+ goto map_err;
}
dprintk(VIDC_DBG,
"%s: [MAP] binfo = %pK, handle[%d] = %pK, device_addr = %pa, fd = %d, offset = %d, mapped = %d\n",
@@ -690,10 +690,14 @@ int map_and_register_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b)
mutex_unlock(&inst->registeredbufs.lock);
return 0;
+map_err:
+ if (binfo->handle[0] && binfo->mapped[0])
+ msm_comm_smem_free(inst, binfo->handle[0]);
exit:
kfree(binfo);
return rc;
}
+
int unmap_and_deregister_buf(struct msm_vidc_inst *inst,
struct buffer_info *binfo)
{
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 7cdcd69cecf4..66f3c9f609f2 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -281,6 +281,7 @@ struct qseecom_control {
wait_queue_head_t app_block_wq;
atomic_t qseecom_state;
int is_apps_region_protected;
+ bool smcinvoke_support;
};
struct qseecom_sec_buf_fd_info {
@@ -578,10 +579,12 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
desc.args[1] = req_64bit->sb_ptr;
desc.args[2] = req_64bit->sb_len;
}
+ qseecom.smcinvoke_support = true;
smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
if (ret) {
+ qseecom.smcinvoke_support = false;
smc_id = TZ_OS_REGISTER_LISTENER_ID;
__qseecom_reentrancy_check_if_no_app_blocked(
smc_id);
@@ -1006,10 +1009,14 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
struct qseecom_continue_blocked_request_ireq *req =
(struct qseecom_continue_blocked_request_ireq *)
req_buf;
- smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+ if (qseecom.smcinvoke_support)
+ smc_id =
+ TZ_OS_CONTINUE_BLOCKED_REQUEST_SMCINVOKE_ID;
+ else
+ smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
desc.arginfo =
TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
- desc.args[0] = req->app_id;
+ desc.args[0] = req->app_or_session_id;
ret = scm_call2(smc_id, &desc);
break;
}
@@ -1839,7 +1846,7 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
return ret;
}
-int __qseecom_process_reentrancy_blocked_on_listener(
+static int __qseecom_process_blocked_on_listener_legacy(
struct qseecom_command_scm_resp *resp,
struct qseecom_registered_app_list *ptr_app,
struct qseecom_dev_handle *data)
@@ -1848,9 +1855,8 @@ int __qseecom_process_reentrancy_blocked_on_listener(
int ret = 0;
struct qseecom_continue_blocked_request_ireq ireq;
struct qseecom_command_scm_resp continue_resp;
- sigset_t new_sigset, old_sigset;
- unsigned long flags;
bool found_app = false;
+ unsigned long flags;
if (!resp || !data) {
pr_err("invalid resp or data pointer\n");
@@ -1890,32 +1896,30 @@ int __qseecom_process_reentrancy_blocked_on_listener(
pr_debug("lsntr %d in_use = %d\n",
resp->data, list_ptr->listener_in_use);
ptr_app->blocked_on_listener_id = resp->data;
+
/* sleep until listener is available */
- do {
- qseecom.app_block_ref_cnt++;
- ptr_app->app_blocked = true;
- sigfillset(&new_sigset);
- sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
- mutex_unlock(&app_access_lock);
- do {
- if (!wait_event_freezable(
- list_ptr->listener_block_app_wq,
- !list_ptr->listener_in_use)) {
- break;
- }
- } while (1);
- mutex_lock(&app_access_lock);
- sigprocmask(SIG_SETMASK, &old_sigset, NULL);
- ptr_app->app_blocked = false;
- qseecom.app_block_ref_cnt--;
- } while (list_ptr->listener_in_use == true);
+ qseecom.app_block_ref_cnt++;
+ ptr_app->app_blocked = true;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ pr_err("Interrupted: listener_id %d, app_id %d\n",
+ resp->data, ptr_app->app_id);
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ ptr_app->app_blocked = false;
+ qseecom.app_block_ref_cnt--;
+
ptr_app->blocked_on_listener_id = 0;
/* notify the blocked app that listener is available */
pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
resp->data, data->client.app_id,
data->client.app_name);
ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
- ireq.app_id = data->client.app_id;
+ ireq.app_or_session_id = data->client.app_id;
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
&ireq, sizeof(ireq),
&continue_resp, sizeof(continue_resp));
@@ -1934,6 +1938,73 @@ exit:
return ret;
}
+static int __qseecom_process_blocked_on_listener_smcinvoke(
+ struct qseecom_command_scm_resp *resp)
+{
+ struct qseecom_registered_listener_list *list_ptr;
+ int ret = 0;
+ struct qseecom_continue_blocked_request_ireq ireq;
+ struct qseecom_command_scm_resp continue_resp;
+ unsigned int session_id;
+
+ if (!resp) {
+ pr_err("invalid resp pointer\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+ session_id = resp->resp_type;
+ list_ptr = __qseecom_find_svc(resp->data);
+ if (!list_ptr) {
+ pr_err("Invalid listener ID\n");
+ ret = -ENODATA;
+ goto exit;
+ }
+ pr_debug("lsntr %d in_use = %d\n",
+ resp->data, list_ptr->listener_in_use);
+ /* sleep until listener is available */
+ qseecom.app_block_ref_cnt++;
+ mutex_unlock(&app_access_lock);
+ if (wait_event_freezable(
+ list_ptr->listener_block_app_wq,
+ !list_ptr->listener_in_use)) {
+ pr_err("Interrupted: listener_id %d, session_id %d\n",
+ resp->data, session_id);
+ ret = -ERESTARTSYS;
+ goto exit;
+ }
+ mutex_lock(&app_access_lock);
+ qseecom.app_block_ref_cnt--;
+
+ /* notify TZ that listener is available */
+ pr_warn("Lsntr %d is available, unblock session(%d) in TZ\n",
+ resp->data, session_id);
+ ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+ ireq.app_or_session_id = session_id;
+ ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+ &ireq, sizeof(ireq),
+ &continue_resp, sizeof(continue_resp));
+ if (ret) {
+ pr_err("scm_call for continue blocked req for session %d failed, ret %d\n",
+ session_id, ret);
+ goto exit;
+ }
+ resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+ return ret;
+}
+
+static int __qseecom_process_reentrancy_blocked_on_listener(
+ struct qseecom_command_scm_resp *resp,
+ struct qseecom_registered_app_list *ptr_app,
+ struct qseecom_dev_handle *data)
+{
+ if (!qseecom.smcinvoke_support)
+ return __qseecom_process_blocked_on_listener_legacy(
+ resp, ptr_app, data);
+ else
+ return __qseecom_process_blocked_on_listener_smcinvoke(
+ resp);
+}
static int __qseecom_reentrancy_process_incomplete_cmd(
struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
@@ -4699,18 +4770,15 @@ int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
}
resp.result = desc->ret[0]; /*req_cmd*/
- resp.resp_type = desc->ret[1]; /*app_id*/
+ resp.resp_type = desc->ret[1]; /* incomplete: unused; blocked: session_id */
resp.data = desc->ret[2]; /*listener_id*/
- dummy_private_data.client.app_id = desc->ret[1];
- dummy_app_entry.app_id = desc->ret[1];
-
mutex_lock(&app_access_lock);
ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
&dummy_private_data);
mutex_unlock(&app_access_lock);
if (ret)
- pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+ pr_err("Failed on cmd %d for lsnr %d session %d, ret = %d\n",
(int)desc->ret[0], (int)desc->ret[2],
(int)desc->ret[1], ret);
desc->ret[0] = resp.result;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 7aefeb037ef4..8dc93900b16d 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1139,6 +1139,7 @@ int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
bool drv_type_changed = false;
struct mmc_card *card = host->mmc->card;
int sts_retry;
+ u8 last_good_phase = 0;
/*
* Tuning is required for SDR104, HS200 and HS400 cards and
@@ -1224,6 +1225,22 @@ retry:
mmc_wait_for_req(mmc, &mrq);
if (card && (cmd.error || data.error)) {
+ /*
+ * Set the dll to last known good phase while sending
+ * status command to ensure that status command won't
+ * fail due to bad phase.
+ */
+ if (tuned_phase_cnt)
+ last_good_phase =
+ tuned_phases[tuned_phase_cnt-1];
+ else if (msm_host->saved_tuning_phase !=
+ INVALID_TUNING_PHASE)
+ last_good_phase = msm_host->saved_tuning_phase;
+
+ rc = msm_config_cm_dll_phase(host, last_good_phase);
+ if (rc)
+ goto kfree;
+
sts_cmd.opcode = MMC_SEND_STATUS;
sts_cmd.arg = card->rca << 16;
sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index 5abab8800891..9190057535e6 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -66,11 +66,13 @@ static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master,
{
uint32_t buf;
size_t bytes_read;
+ int err;
- if (mtd_read(master, offset, sizeof(buf), &bytes_read,
- (uint8_t *)&buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
goto out_default;
}
@@ -95,6 +97,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
int trx_part = -1;
int last_trx_part = -1;
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ int err;
/*
* Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
- /* Nothing more in higher memory */
- if (offset >= 0x2000000)
+ /* Nothing more in higher memory on BCM47XX (MIPS) */
+ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -252,10 +256,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read middle of the block */
- if (mtd_read(master, offset + 0x8000, 0x4,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -275,10 +280,11 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
offset = master->size - possible_nvram_sizes[i];
- if (mtd_read(master, offset, 0x4, &bytes_read,
- (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while reading at offset 0x%X!\n",
- offset);
+ err = mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+ offset, err);
continue;
}
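
The bcm47xxpart changes above all follow the same MTD idiom: mtd_read() can fail hard, but it can also report a corrected bitflip, in which case the returned data is still usable and parsing should carry on. A standalone sketch of that error-handling shape; fake_mtd_read() and is_bitflip() are stand-ins for the kernel API, shown only to make the control flow concrete:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the MTD API, for illustration only. */
static int fake_mtd_read(unsigned int offset, void *buf, size_t len)
{
	memset(buf, 0, len);
	/* Pretend the block at 0x10000 had a corrected bitflip. */
	return offset == 0x10000 ? -EUCLEAN : 0;
}

static int is_bitflip(int err)
{
	return err == -EUCLEAN;	/* corrected error: data is still good */
}

int main(void)
{
	unsigned int offset;
	unsigned char buf[4];

	for (offset = 0; offset <= 0x20000; offset += 0x10000) {
		int err = fake_mtd_read(offset, buf, sizeof(buf));

		/* Only hard errors skip this block; bitflips keep the data. */
		if (err && !is_bitflip(err)) {
			fprintf(stderr, "read error at 0x%X: %d\n", offset, err);
			continue;
		}
		printf("parsed block at 0x%X (err=%d)\n", offset, err);
	}
	return 0;
}
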
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 5e6238e0b2bd..75e6e7e6baed 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2732,8 +2732,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 865b7e0b133b..64034ff081a0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
ret = phy_if->phy_start(pdata);
if (ret)
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index b56c9c581359..70da30095b89 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -255,15 +255,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -469,6 +470,11 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
len -= ETH_FCS_LEN;
skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ bgmac_err(bgmac, "build_skb failed\n");
+ put_page(virt_to_head_page(buf));
+ break;
+ }
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
BGMAC_RX_BUF_OFFSET + len);
skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
@@ -1302,7 +1308,8 @@ static int bgmac_open(struct net_device *net_dev)
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1795c935ff02..7b8638ddb673 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1052,7 +1052,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
spin_unlock_bh(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 6a061f17a44f..4cd2a7d0124f 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2939,7 +2939,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
size, GFAR_RXB_TRUESIZE);
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f9e4988ea30e..2f9b12cf9ee5 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1602,8 +1602,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4e5782..07eabf72c480 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 71ec9cb08e06..15056f06754a 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2446,7 +2446,7 @@ static void mvneta_start_dev(struct mvneta_port *pp)
mvneta_port_enable(pp);
/* Enable polling on the port */
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_enable(&port->napi);
@@ -2472,7 +2472,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
phy_stop(pp->phy_dev);
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_disable(&port->napi);
@@ -2902,13 +2902,11 @@ err_cleanup_rxqs:
static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- int cpu;
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
unregister_cpu_notifier(&pp->cpu_notifier);
- for_each_present_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 603d1c3d3b2e..ff77b8b608bd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -542,8 +542,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -558,15 +559,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 1e611980cf99..f5c1f4acc57b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -153,8 +153,9 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
@@ -934,6 +935,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
*/
dev->state = MLX5_DEVICE_STATE_UP;
+ /* wait for firmware to accept initialization segments configurations
+ */
+ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ goto out;
+ }
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 1e61d4da72db..585e90f8341d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -221,18 +221,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
int ring_size;
int i;
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
- }
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
-
- /* Free aligned TX buffers */
- kfree(priv->tx_align[q]);
- priv->tx_align[q] = NULL;
-
if (priv->rx_ring[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++) {
struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
@@ -261,6 +249,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
+ /* Free RX skb ringbuffer */
+ if (priv->rx_skb[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++)
+ dev_kfree_skb(priv->rx_skb[q][i]);
+ }
+ kfree(priv->rx_skb[q]);
+ priv->rx_skb[q] = NULL;
+
+ /* Free aligned TX buffers */
+ kfree(priv->tx_align[q]);
+ priv->tx_align[q] = NULL;
+
/* Free TX skb ringbuffer.
* SKBs are freed by ravb_tx_free() call above.
*/
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index d790cb8d9db3..8e832ba8ab24 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2796,6 +2796,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
.mcdi_max_ver = -1,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -2897,4 +2902,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 7f7c87762bc6..8dfc75250583 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
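
The new minimum-alignment macro exists because the mergeable-receive path packs a buffer's true size into the low bits of its (aligned) address, so the alignment has to leave enough free bits to count size quanta for buffers approaching a page. A standalone sketch of that encode/decode idea; the helper names are illustrative, not the driver's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
/* Round up: roughly sqrt(PAGE_SIZE), so enough low bits stay free. */
#define MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
#define BUF_ALIGN (1UL << MIN_ALIGN_SHIFT)

/* Pack an aligned buffer address and its true size into one word. */
static uintptr_t buf_to_ctx(uintptr_t buf, unsigned long truesize)
{
	unsigned long quanta = truesize / BUF_ALIGN;

	assert((buf & (BUF_ALIGN - 1)) == 0);	/* address must be aligned */
	assert(quanta < BUF_ALIGN);		/* must fit in the free bits */
	return buf | quanta;
}

static uintptr_t ctx_to_buf(uintptr_t ctx)
{
	return ctx & ~(uintptr_t)(BUF_ALIGN - 1);
}

static unsigned long ctx_to_truesize(uintptr_t ctx)
{
	return (ctx & (BUF_ALIGN - 1)) * BUF_ALIGN;
}

int main(void)
{
	uintptr_t buf = 0x7f0000abc000UL;	/* already BUF_ALIGN-aligned */
	uintptr_t ctx = buf_to_ctx(buf, 1536);

	printf("align=%lu buf=%#lx truesize=%lu\n",
	       (unsigned long)BUF_ALIGN, (unsigned long)ctx_to_buf(ctx),
	       ctx_to_truesize(ctx));
	/*
	 * With PAGE_SHIFT = 12 the alignment is 64 bytes, leaving 6 free
	 * low bits in every aligned pointer -- enough to count 64-byte
	 * quanta for buffers close to a page, which a smaller (e.g. pure
	 * cache-line) alignment could not guarantee.
	 */
	return 0;
}
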
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 9a986ccd42e5..dab3bf6649e6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2240,7 +2240,7 @@ static void vxlan_cleanup(unsigned long arg)
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index 29bfe1f4d6ed..432960afe09a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -37,6 +37,7 @@
#define FW_READY_TIMEOUT 20000
#define FW_ASSERT_TIMEOUT 5000
#define CNSS_EVENT_PENDING 2989
+#define WAKE_MSI_NAME "WAKE"
static struct cnss_plat_data *plat_env;
@@ -537,6 +538,24 @@ int cnss_set_fw_log_mode(struct device *dev, u8 fw_log_mode)
}
EXPORT_SYMBOL(cnss_set_fw_log_mode);
+u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+ int ret, num_vectors;
+ u32 user_base_data, base_vector;
+
+ ret = cnss_get_user_msi_assignment(&pci_priv->pci_dev->dev,
+ WAKE_MSI_NAME, &num_vectors,
+ &user_base_data, &base_vector);
+
+ if (ret) {
+ cnss_pr_err("WAKE MSI is not valid\n");
+ return 0;
+ }
+
+ return user_base_data;
+}
+
static int cnss_fw_mem_ready_hdlr(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -573,7 +592,7 @@ static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
if (!plat_priv->driver_ops) {
- cnss_pr_err("driver_ops is NULL!");
+ cnss_pr_err("driver_ops is NULL\n");
ret = -EINVAL;
goto out;
}
@@ -587,7 +606,7 @@ static int cnss_driver_call_probe(struct cnss_plat_data *plat_priv)
goto out;
}
clear_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state);
- } else {
+ } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state)) {
ret = plat_priv->driver_ops->probe(pci_priv->pci_dev,
pci_priv->pci_device_id);
if (ret) {
@@ -605,6 +624,25 @@ out:
return ret;
}
+static int cnss_driver_call_remove(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_pci_data *pci_priv = plat_priv->bus_priv;
+
+ if (!plat_priv->driver_ops) {
+ cnss_pr_err("driver_ops is NULL\n");
+ return -EINVAL;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
+ } else if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
+ plat_priv->driver_ops->remove(pci_priv->pci_dev);
+ clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
+ }
+
+ return 0;
+}
+
static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -626,8 +664,11 @@ static int cnss_fw_ready_hdlr(struct cnss_plat_data *plat_priv)
} else if (test_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state)) {
ret = cnss_wlfw_wlan_mode_send_sync(plat_priv,
QMI_WLFW_CALIBRATION_V01);
- } else {
+ } else if (test_bit(CNSS_DRIVER_LOADING, &plat_priv->driver_state) ||
+ test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
ret = cnss_driver_call_probe(plat_priv);
+ } else {
+ complete(&plat_priv->power_up_complete);
}
if (ret)
@@ -668,6 +709,10 @@ static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
return "RECOVERY";
case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
return "FORCE_FW_ASSERT";
+ case CNSS_DRIVER_EVENT_POWER_UP:
+ return "POWER_UP";
+ case CNSS_DRIVER_EVENT_POWER_DOWN:
+ return "POWER_DOWN";
case CNSS_DRIVER_EVENT_MAX:
return "EVENT_MAX";
}
@@ -746,64 +791,57 @@ out:
int cnss_power_up(struct device *dev)
{
int ret = 0;
- void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ unsigned int timeout;
- if (!bus_priv || !plat_priv)
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
return -ENODEV;
-
- if (plat_priv->device_id != QCA6174_DEVICE_ID) {
- cnss_pr_dbg("Power up is not supported for device ID 0x%lx\n",
- plat_priv->device_id);
- return 0;
}
- ret = cnss_power_on_device(plat_priv);
- if (ret) {
- cnss_pr_err("Failed to power on device, err = %d\n", ret);
- goto err_power_on;
- }
+ cnss_pr_dbg("Powering up device\n");
- ret = cnss_resume_pci_link(bus_priv);
- if (ret) {
- cnss_pr_err("Failed to resume PCI link, err = %d\n", ret);
- goto err_resume_link;
+ ret = cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ true, NULL);
+ if (ret)
+ goto out;
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID)
+ goto out;
+
+ timeout = cnss_get_qmi_timeout();
+
+ reinit_completion(&plat_priv->power_up_complete);
+ ret = wait_for_completion_timeout(&plat_priv->power_up_complete,
+ msecs_to_jiffies(timeout) << 2);
+ if (!ret) {
+ cnss_pr_err("Timeout waiting for power up to complete\n");
+ ret = -EAGAIN;
+ goto out;
}
return 0;
-err_resume_link:
- cnss_power_off_device(plat_priv);
-err_power_on:
+
+out:
return ret;
}
EXPORT_SYMBOL(cnss_power_up);
int cnss_power_down(struct device *dev)
{
- int ret = 0;
- void *bus_priv = cnss_bus_dev_to_bus_priv(dev);
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
- if (!bus_priv || !plat_priv)
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
return -ENODEV;
-
- if (plat_priv->device_id != QCA6174_DEVICE_ID) {
- cnss_pr_dbg("Power down is not supported for device ID 0x%lx\n",
- plat_priv->device_id);
- return 0;
}
- cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
- cnss_pci_set_monitor_wake_intr(bus_priv, false);
- cnss_pci_set_auto_suspended(bus_priv, 0);
+ cnss_pr_dbg("Powering down device\n");
- ret = cnss_suspend_pci_link(bus_priv);
- if (ret)
- cnss_pr_err("Failed to suspend PCI link, err = %d\n", ret);
-
- cnss_power_off_device(plat_priv);
-
- return 0;
+ return cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
+ true, NULL);
}
EXPORT_SYMBOL(cnss_power_down);
@@ -1024,14 +1062,11 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
if (!plat_priv->driver_ops)
return -EINVAL;
- if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
- cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
- plat_priv->driver_ops->remove(pci_priv->pci_dev);
- cnss_pci_set_monitor_wake_intr(pci_priv, false);
- cnss_pci_set_auto_suspended(pci_priv, 0);
- } else {
- plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
- }
+ cnss_driver_call_remove(plat_priv);
+
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
ret = cnss_suspend_pci_link(pci_priv);
if (ret)
@@ -1039,10 +1074,7 @@ static int cnss_qca6174_shutdown(struct cnss_plat_data *plat_priv)
cnss_power_off_device(plat_priv);
- if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
- clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
- clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
- }
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
return ret;
}
@@ -1134,16 +1166,13 @@ static int cnss_qca6290_shutdown(struct cnss_plat_data *plat_priv)
if (!plat_priv->driver_ops)
return -EINVAL;
- if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
- cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
- plat_priv->driver_ops->remove(pci_priv->pci_dev);
- cnss_pci_set_monitor_wake_intr(pci_priv, false);
- cnss_pci_set_auto_suspended(pci_priv, 0);
- } else {
- plat_priv->driver_ops->shutdown(pci_priv->pci_dev);
- }
+ cnss_driver_call_remove(plat_priv);
skip_driver_remove:
+ cnss_request_bus_bandwidth(CNSS_BUS_WIDTH_NONE);
+ cnss_pci_set_monitor_wake_intr(pci_priv, false);
+ cnss_pci_set_auto_suspended(pci_priv, 0);
+
cnss_pci_stop_mhi(pci_priv);
ret = cnss_suspend_pci_link(pci_priv);
@@ -1154,11 +1183,7 @@ skip_driver_remove:
clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
-
- if (test_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state)) {
- clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
- clear_bit(CNSS_DRIVER_PROBED, &plat_priv->driver_state);
- }
+ clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
return ret;
}
@@ -1661,6 +1686,22 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv)
return 0;
}
+static int cnss_power_up_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ return cnss_powerup(&subsys_info->subsys_desc);
+}
+
+static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
+{
+ struct cnss_subsys_info *subsys_info = &plat_priv->subsys_info;
+
+ cnss_shutdown(&subsys_info->subsys_desc, false);
+
+ return 0;
+}
+
static void cnss_driver_event_work(struct work_struct *work)
{
struct cnss_plat_data *plat_priv =
@@ -1728,6 +1769,12 @@ static void cnss_driver_event_work(struct work_struct *work)
case CNSS_DRIVER_EVENT_FORCE_FW_ASSERT:
ret = cnss_force_fw_assert_hdlr(plat_priv);
break;
+ case CNSS_DRIVER_EVENT_POWER_UP:
+ ret = cnss_power_up_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_POWER_DOWN:
+ ret = cnss_power_down_hdlr(plat_priv);
+ break;
default:
cnss_pr_err("Invalid driver event type: %d",
event->type);
@@ -2226,6 +2273,8 @@ static int cnss_probe(struct platform_device *plat_dev)
cnss_pr_err("Failed to init platform device wakeup source, err = %d\n",
ret);
+ init_completion(&plat_priv->power_up_complete);
+
cnss_pr_info("Platform driver probed successfully.\n");
return 0;
@@ -2257,6 +2306,7 @@ static int cnss_remove(struct platform_device *plat_dev)
{
struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+ complete_all(&plat_priv->power_up_complete);
device_init_wakeup(&plat_dev->dev, false);
unregister_pm_notifier(&cnss_pm_notifier);
del_timer(&plat_priv->fw_boot_timer);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index e3a8d0cccd52..a2d9a02bde20 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -126,6 +126,8 @@ enum cnss_driver_event_type {
CNSS_DRIVER_EVENT_UNREGISTER_DRIVER,
CNSS_DRIVER_EVENT_RECOVERY,
CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ CNSS_DRIVER_EVENT_POWER_UP,
+ CNSS_DRIVER_EVENT_POWER_DOWN,
CNSS_DRIVER_EVENT_MAX,
};
@@ -201,6 +203,7 @@ struct cnss_plat_data {
struct dentry *root_dentry;
atomic_t pm_count;
struct timer_list fw_boot_timer;
+ struct completion power_up_complete;
};
void *cnss_bus_dev_to_bus_priv(struct device *dev);
@@ -217,5 +220,6 @@ void cnss_unregister_subsys(struct cnss_plat_data *plat_priv);
int cnss_register_ramdump(struct cnss_plat_data *plat_priv);
void cnss_unregister_ramdump(struct cnss_plat_data *plat_priv);
void cnss_set_pin_connect_status(struct cnss_plat_data *plat_priv);
+u32 cnss_get_wake_msi(struct cnss_plat_data *plat_priv);
#endif /* _CNSS_MAIN_H */
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index f914f4352392..a17b72ce03ba 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -143,6 +143,12 @@ int cnss_suspend_pci_link(struct cnss_pci_data *pci_priv)
if (ret)
goto out;
+ pci_disable_device(pci_priv->pci_dev);
+
+ ret = pci_set_power_state(pci_priv->pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n", ret);
+
ret = cnss_set_pci_link(pci_priv, PCI_LINK_DOWN);
if (ret)
goto out;
@@ -172,10 +178,18 @@ int cnss_resume_pci_link(struct cnss_pci_data *pci_priv)
pci_priv->pci_link_state = PCI_LINK_UP;
+ ret = pci_enable_device(pci_priv->pci_dev);
+ if (ret) {
+ cnss_pr_err("Failed to enable PCI device, err = %d\n", ret);
+ goto out;
+ }
+
ret = cnss_set_pci_config_space(pci_priv, RESTORE_PCI_CONFIG_SPACE);
if (ret)
goto out;
+ pci_set_master(pci_priv->pci_dev);
+
if (pci_priv->pci_link_down_ind)
pci_priv->pci_link_down_ind = false;
@@ -381,6 +395,12 @@ static int cnss_pci_suspend(struct device *dev)
cnss_set_pci_config_space(pci_priv,
SAVE_PCI_CONFIG_SPACE);
+ pci_disable_device(pci_dev);
+
+ ret = pci_set_power_state(pci_dev, PCI_D3hot);
+ if (ret)
+ cnss_pr_err("Failed to set D3Hot, err = %d\n",
+ ret);
}
}
@@ -407,10 +427,18 @@ static int cnss_pci_resume(struct device *dev)
driver_ops = plat_priv->driver_ops;
if (driver_ops && driver_ops->resume && !pci_priv->pci_link_down_ind) {
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ cnss_pr_err("Failed to enable PCI device, err = %d\n",
+ ret);
+
if (pci_priv->saved_state)
cnss_set_pci_config_space(pci_priv,
RESTORE_PCI_CONFIG_SPACE);
+
+ pci_set_master(pci_dev);
cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RESUME);
+
ret = driver_ops->resume(pci_dev);
}
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index db55d3350eb5..99163d51a497 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -140,6 +140,12 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv)
cnss_pr_dbg("daemon_support is %d\n", req.daemon_support);
+ req.wake_msi = cnss_get_wake_msi(plat_priv);
+ if (req.wake_msi) {
+ cnss_pr_dbg("WAKE MSI base data is %d\n", req.wake_msi);
+ req.wake_msi_valid = 1;
+ }
+
req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN;
req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01;
req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei;
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
index 84a4707e9cc3..7d6a771bc0d5 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c
@@ -1097,6 +1097,24 @@ struct elem_info wlfw_cal_report_req_msg_v01_ei[] = {
meta_data),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct wlfw_cal_report_req_msg_v01,
+ xo_cal_data),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.is_array = QMI_COMMON_TLV_TYPE,
@@ -1828,6 +1846,24 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = {
daemon_support),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct wlfw_host_cap_req_msg_v01,
+ wake_msi),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.is_array = QMI_COMMON_TLV_TYPE,
@@ -2166,3 +2202,20 @@ struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
.is_array = QMI_COMMON_TLV_TYPE,
},
};
+
+struct elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x01,
+ .offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
+ xo_cal_data),
+ },
+ {
+ .data_type = QMI_EOTI,
+ .is_array = NO_ARRAY,
+ .is_array = QMI_COMMON_TLV_TYPE,
+ },
+};
diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
index cb8225a7c3c3..a3081433cc2b 100644
--- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
+++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h
@@ -31,6 +31,7 @@
#define QMI_WLFW_M3_INFO_RESP_V01 0x003C
#define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029
#define QMI_WLFW_CAL_DOWNLOAD_RESP_V01 0x0027
+#define QMI_WLFW_XO_CAL_IND_V01 0x003D
#define QMI_WLFW_INI_RESP_V01 0x002F
#define QMI_WLFW_CAL_REPORT_RESP_V01 0x0026
#define QMI_WLFW_MAC_ADDR_RESP_V01 0x0033
@@ -339,9 +340,11 @@ extern struct elem_info wlfw_bdf_download_resp_msg_v01_ei[];
struct wlfw_cal_report_req_msg_v01 {
u32 meta_data_len;
enum wlfw_cal_temp_id_enum_v01 meta_data[QMI_WLFW_MAX_NUM_CAL_V01];
+ u8 xo_cal_data_valid;
+ u8 xo_cal_data;
};
-#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 24
+#define WLFW_CAL_REPORT_REQ_MSG_V01_MAX_MSG_LEN 28
extern struct elem_info wlfw_cal_report_req_msg_v01_ei[];
struct wlfw_cal_report_resp_msg_v01 {
@@ -532,9 +535,11 @@ extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[];
struct wlfw_host_cap_req_msg_v01 {
u8 daemon_support_valid;
u8 daemon_support;
+ u8 wake_msi_valid;
+ u32 wake_msi;
};
-#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 4
+#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11
extern struct elem_info wlfw_host_cap_req_msg_v01_ei[];
struct wlfw_host_cap_resp_msg_v01 {
@@ -642,4 +647,11 @@ struct wlfw_m3_info_resp_msg_v01 {
#define WLFW_M3_INFO_RESP_MSG_V01_MAX_MSG_LEN 7
extern struct elem_info wlfw_m3_info_resp_msg_v01_ei[];
+struct wlfw_xo_cal_ind_msg_v01 {
+ u8 xo_cal_data;
+};
+
+#define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4
+extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[];
+
#endif
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 888e9cfef51a..34a062ccb11d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 66bdc593f811..333d28e64087 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -194,7 +194,7 @@ config MSM_11AD
tristate "Platform driver for 11ad chip"
depends on PCI
depends on PCI_MSM
- default y
+ default m
---help---
This module adds required platform support for wireless adapter based on
Qualcomm Technologies, Inc. 11ad chip, integrated into MSM platform
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 0d17fa58a853..85fa9da50779 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -641,7 +641,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -688,7 +688,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -727,7 +727,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -767,7 +767,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -807,7 +807,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -847,7 +847,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -886,7 +886,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -926,7 +926,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -966,7 +966,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1104,7 +1104,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1149,7 +1149,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1194,7 +1194,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1232,7 +1232,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1372,7 +1372,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1411,7 +1411,7 @@ static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
index 80514f6c738e..72542bf6dd5d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -1017,25 +1017,25 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
if (rule->action != IPA_PASS_TO_EXCEPTION) {
if (!rule->eq_attrib_type) {
if (!rule->rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1048,7 +1048,7 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
}
INIT_LIST_HEAD(&entry->link);
entry->rule = *rule;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_FLT_COOKIE;
entry->rt_tbl = rt_tbl;
entry->tbl = tbl;
if (add_rear) {
@@ -1067,13 +1067,19 @@ static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
-
+ipa_insert_failed:
+ tbl->rule_cnt--;
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ list_del(&entry->link);
+ kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
error:
return -EPERM;
}
@@ -1085,12 +1091,12 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -1117,12 +1123,12 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
entry = ipa_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1132,25 +1138,25 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1174,7 +1180,7 @@ static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
struct ipa_flt_tbl *tbl;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
return -EINVAL;
}
@@ -1193,14 +1199,14 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
}
ipa_ep_idx = ipa2_get_ep_mapping(ep);
if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
- IPAERR("ep not valid ep=%d\n", ep);
+ IPAERR_RL("ep not valid ep=%d\n", ep);
return -EINVAL;
}
if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
@@ -1227,7 +1233,7 @@ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1245,7 +1251,7 @@ int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
rules->rules[i].at_rear,
&rules->rules[i].flt_rule_hdl);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1278,14 +1284,14 @@ int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1318,14 +1324,14 @@ int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1359,7 +1365,7 @@ int ipa2_commit_flt(enum ipa_ip_type ip)
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1395,7 +1401,7 @@ int ipa2_reset_flt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
index 10a49b1e75d8..51806cec1e4d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -547,7 +547,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
{
struct ipa_hdr_entry *hdr_entry;
struct ipa_hdr_proc_ctx_entry *entry;
- struct ipa_hdr_proc_ctx_offset_entry *offset;
+ struct ipa_hdr_proc_ctx_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
int id;
@@ -558,13 +558,13 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
- if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
- IPAERR("hdr_hdl is invalid\n");
+ if (!hdr_entry || (hdr_entry->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
@@ -580,7 +580,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->hdr = hdr_entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_PROC_HDR_COOKIE;
needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
@@ -592,7 +592,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -602,7 +602,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -640,6 +640,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -647,6 +648,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
+ipa_insert_failed:
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -659,7 +668,7 @@ bad_len:
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
int id;
@@ -667,12 +676,12 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -691,7 +700,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -704,7 +713,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -780,6 +789,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -804,10 +814,19 @@ fail_add_proc_ctx:
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
bad_hdr_len:
@@ -824,8 +843,8 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl,
struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
entry = ipa_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -833,7 +852,7 @@ static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl,
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -871,12 +890,12 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user)
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -888,7 +907,7 @@ int __ipa_del_hdr(u32 hdr_hdl, bool by_user)
htbl->hdr_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("hdr already deleted by user\n");
+ IPAERR_RL("hdr already deleted by user\n");
return -EINVAL;
}
@@ -937,12 +956,12 @@ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
int result = -EFAULT;
if (unlikely(!ipa_ctx)) {
- IPAERR("IPA driver was not initialized\n");
+ IPAERR_RL("IPA driver was not initialized\n");
return -EINVAL;
}
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -951,7 +970,7 @@ int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -992,14 +1011,14 @@ int ipa2_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1048,13 +1067,13 @@ int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
- IPAERR("Processing context not supported on IPA HW %d\n",
+ IPAERR_RL("Processing context not supported on IPA HW %d\n",
ipa_ctx->ipa_hw_type);
return -EFAULT;
}
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1063,7 +1082,7 @@ int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -1108,14 +1127,14 @@ int ipa2_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
}
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1352,7 +1371,7 @@ int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup)
}
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
@@ -1439,13 +1458,13 @@ int ipa2_put_hdr(u32 hdr_hdl)
entry = ipa_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1473,7 +1492,7 @@ int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy)
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index bb11230c960a..bfb1ce56412c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -37,7 +37,15 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
+
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
+
#define MTU_BYTE 1500
#define IPA_MAX_NUM_PIPES 0x14
@@ -87,6 +95,18 @@
} \
} while (0)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__, \
+ __LINE__, ## args);\
+ if (ipa_ctx) { \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
@@ -223,8 +243,8 @@ struct ipa_smmu_cb_ctx {
*/
struct ipa_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa_flt_tbl *tbl;
struct ipa_rt_tbl *rt_tbl;
u32 hw_len;
@@ -249,13 +269,13 @@ struct ipa_flt_entry {
*/
struct ipa_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa_rt_tbl_set *set;
- u32 cookie;
bool in_sys;
u32 sz;
struct ipa_mem_buffer curr_mem;
@@ -286,6 +306,7 @@ struct ipa_rt_tbl {
*/
struct ipa_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -295,7 +316,6 @@ struct ipa_hdr_entry {
dma_addr_t phys_base;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -368,10 +388,10 @@ struct ipa_hdr_proc_ctx_add_hdr_cmd_seq {
*/
struct ipa_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -427,8 +447,8 @@ struct ipa_flt_tbl {
*/
struct ipa_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa_rt_tbl *tbl;
struct ipa_hdr_entry *hdr;
struct ipa_hdr_proc_ctx_entry *proc_ctx;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index e3f987d47692..9c4fc0ce8cc1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -274,7 +274,7 @@ int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
if (!strncmp(entry->name, tx->name, IPA_RESOURCE_NAME_MAX)) {
/* add the entry check */
if (entry->num_tx_props != tx->num_tx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_tx_props,
tx->num_tx_props);
mutex_unlock(&ipa_ctx->lock);
@@ -315,7 +315,7 @@ int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
if (!strncmp(entry->name, rx->name, IPA_RESOURCE_NAME_MAX)) {
/* add the entry check */
if (entry->num_rx_props != rx->num_rx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_rx_props,
rx->num_rx_props);
mutex_unlock(&ipa_ctx->lock);
@@ -356,7 +356,7 @@ int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
if (!strcmp(entry->name, ext->name)) {
/* add the entry check */
if (entry->num_ext_props != ext->num_ext_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_ext_props,
ext->num_ext_props);
mutex_unlock(&ipa_ctx->lock);
@@ -405,13 +405,13 @@ int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -634,7 +634,7 @@ int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
index 5dd8b225217d..e7092e9acbc7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -252,8 +252,8 @@ int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -272,7 +272,7 @@ int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -335,17 +335,17 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->ipv4_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ (UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1)))) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Table Entry offset is not
beyond allocated size */
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -354,16 +354,16 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->expn_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table Entry offset is not
beyond allocated size */
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -372,16 +372,16 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Indx Table Entry offset is not
beyond allocated size */
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -389,17 +389,17 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_expn_offset >
- UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ (UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table entry offset is not
beyond allocated size */
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa_ctx->nat_mem.size);
return -EPERM;
@@ -444,16 +444,16 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_mem;
@@ -509,7 +509,7 @@ int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
desc[1].len = size;
IPADBG("posting v4 init command\n");
if (ipa_send_cmd(2, desc)) {
- IPAERR("Fail to send immediate command\n");
+ IPAERR_RL("Fail to send immediate command\n");
result = -EPERM;
goto free_mem;
}
@@ -574,7 +574,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -582,7 +582,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -593,7 +593,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -605,7 +605,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -617,7 +617,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -629,7 +629,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -638,7 +638,7 @@ int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
index f2909110d09f..011ca300cc09 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -853,7 +853,7 @@ int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
struct ipa_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -909,7 +909,7 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys = (ip == IPA_IP_v4) ?
!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
set->tbl_cnt++;
@@ -922,12 +922,16 @@ static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
@@ -940,13 +944,13 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
enum ipa_ip_type ip = IPA_IP_MAX;
u32 id;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -954,8 +958,11 @@ static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
+
if (!entry->in_sys) {
list_del(&entry->link);
@@ -994,13 +1001,14 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
if (rule->hdr_hdl) {
hdr = ipa_id_find(rule->hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rule->hdr_proc_ctx_hdl) {
proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
goto error;
}
@@ -1008,7 +1016,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
IPAERR("bad params\n");
goto error;
}
@@ -1029,7 +1037,7 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
goto error;
}
INIT_LIST_HEAD(&entry->link);
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_RULE_COOKIE;
entry->rule = *rule;
entry->tbl = tbl;
entry->hdr = hdr;
@@ -1082,7 +1090,7 @@ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1092,7 +1100,7 @@ int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1119,12 +1127,12 @@ int __ipa_del_rt_rule(u32 rule_hdl)
entry = ipa_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1138,7 +1146,7 @@ int __ipa_del_rt_rule(u32 rule_hdl)
entry->tbl->rule_cnt);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1165,14 +1173,14 @@ int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1205,7 +1213,7 @@ int ipa2_commit_rt(enum ipa_ip_type ip)
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1249,7 +1257,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1267,7 +1275,7 @@ int ipa2_reset_rt(enum ipa_ip_type ip)
* filtering rules point to routing tables
*/
if (ipa2_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa_ctx->rt_tbl_set[ip];
rset = &ipa_ctx->reap_rt_tbl_set[ip];
@@ -1353,14 +1361,14 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
if (entry->ref_cnt == U32_MAX) {
- IPAERR("fail: ref count crossed limit\n");
+ IPAERR_RL("fail: ref count crossed limit\n");
goto ret;
}
entry->ref_cnt++;
@@ -1368,7 +1376,7 @@ int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
/* commit for get */
if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
@@ -1396,13 +1404,13 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
mutex_lock(&ipa_ctx->lock);
entry = ipa_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1411,16 +1419,19 @@ int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v4;
else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ result = -EINVAL;
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa_ctx->ctrl->ipa_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1439,20 +1450,20 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
if (rtrule->rule.hdr_hdl) {
hdr = ipa_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
}
entry = ipa_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1485,14 +1496,14 @@ int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 56b4bf1a2d1e..23f802425cf0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1672,7 +1672,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
ipa_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1685,7 +1685,7 @@ int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index e0200fe50871..50a8e46d3b12 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -918,7 +918,7 @@ int ipa2_get_ep_mapping(enum ipa_client_type client)
}
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return INVALID_EP_MAPPING_INDEX;
}
@@ -1769,7 +1769,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
IPA_FLT_FLOW_LABEL) {
- IPAERR("v6 attrib's specified for v4 rule\n");
+ IPAERR_RL("v6 attrib's specified for v4 rule\n");
return -EPERM;
}
@@ -1781,7 +1781,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1801,7 +1801,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1815,7 +1815,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -1829,11 +1829,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1847,11 +1847,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1865,7 +1865,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1878,7 +1878,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1891,7 +1891,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -1905,7 +1905,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1919,7 +1919,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -1946,7 +1946,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1961,7 +1961,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1976,7 +1976,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -1991,7 +1991,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2006,7 +2006,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2024,7 +2024,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
/* error check */
if (attrib->attrib_mask & IPA_FLT_TOS ||
attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAERR("v4 attrib's specified for v6 rule\n");
+ IPAERR_RL("v4 attrib's specified for v6 rule\n");
return -EPERM;
}
@@ -2036,7 +2036,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TYPE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2049,7 +2049,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_CODE) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2062,7 +2062,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SPI) {
if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
+ IPAERR_RL("ran out of ihl_meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
@@ -2076,7 +2076,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2090,7 +2090,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2104,11 +2104,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
+ IPAERR_RL("bad src port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2122,11 +2122,11 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
+ IPAERR_RL("ran out of ihl_rng16 eq\n");
return -EPERM;
}
if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
+ IPAERR_RL("bad dst port range param\n");
return -EPERM;
}
*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
@@ -2140,7 +2140,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2166,7 +2166,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2198,7 +2198,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2243,7 +2243,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2258,7 +2258,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2273,7 +2273,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2288,7 +2288,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq128[ofst_meq128];
@@ -2303,7 +2303,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
+ IPAERR_RL("ran out of meq128 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -2316,7 +2316,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
}
} else {
- IPAERR("unsupported ip %d\n", ip);
+ IPAERR_RL("unsupported ip %d\n", ip);
return -EPERM;
}
@@ -2326,7 +2326,7 @@ int ipa_generate_flt_eq(enum ipa_ip_type ip,
*/
if (attrib->attrib_mask == 0) {
if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
+ IPAERR_RL("ran out of meq32 eq\n");
return -EPERM;
}
*en_rule |= ipa_ofst_meq32[ofst_meq32];
@@ -3617,19 +3617,19 @@ int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
}
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -3642,7 +3642,7 @@ int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index a50cd0b807a2..7ed83fb74fcc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -603,7 +603,7 @@ static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
msg_meta.msg_len = sizeof(struct ipa_wan_msg);
retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
if (retval) {
- IPAERR("ipa3_send_msg failed: %d\n", retval);
+ IPAERR_RL("ipa3_send_msg failed: %d\n", retval);
kfree(wan_msg);
return retval;
}
@@ -695,7 +695,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_nat_dma_cmd *)param)->entries,
pre_entry);
retval = -EFAULT;
@@ -742,7 +742,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr *)param)->num_hdrs,
pre_entry);
retval = -EFAULT;
@@ -781,7 +781,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -821,7 +821,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule *)param)->
num_rules,
pre_entry);
@@ -861,7 +861,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_rt_rule_after *)param)->
num_rules,
pre_entry);
@@ -903,7 +903,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_rt_rule *)param)->
num_rules,
pre_entry);
@@ -943,7 +943,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
pre_entry);
retval = -EFAULT;
@@ -982,7 +982,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1024,7 +1024,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_flt_rule_after *)param)->
num_rules,
pre_entry);
@@ -1065,7 +1065,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_flt_rule *)param)->
num_hdls,
pre_entry);
@@ -1105,7 +1105,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_mdfy_flt_rule *)param)->
num_rules,
pre_entry);
@@ -1243,7 +1243,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_tx_props *)
param)->num_tx_props, pre_entry);
retval = -EFAULT;
@@ -1288,7 +1288,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_rx_props *)
param)->num_rx_props, pre_entry);
retval = -EFAULT;
@@ -1333,7 +1333,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_query_intf_ext_props *)
param)->num_ext_props, pre_entry);
retval = -EFAULT;
@@ -1371,7 +1371,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_msg_meta *)param)->msg_len
!= pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_msg_meta *)param)->msg_len,
pre_entry);
retval = -EFAULT;
@@ -1511,7 +1511,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_add_hdr_proc_ctx *)
param)->num_proc_ctxs, pre_entry);
retval = -EFAULT;
@@ -1550,7 +1550,7 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* add check in case user-space module compromised */
if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
param)->num_hdls != pre_entry)) {
- IPAERR("current %d pre %d\n",
+ IPAERR_RL("current %d pre %d\n",
((struct ipa_ioc_del_hdr_proc_ctx *)param)->
num_hdls,
pre_entry);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 6a3a89e2cf8d..5fe425ea7655 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -1188,6 +1188,15 @@ static void ipa3_handle_rx(struct ipa3_sys_context *sys)
} else {
inactive_cycles = 0;
}
+
+ /*
+ * if pipe is out of buffers there is no point polling for
+ * completed descs; release the worker so delayed work can
+ * run in a timely manner
+ */
+ if (sys->len == 0)
+ break;
+
} while (inactive_cycles <= POLLING_INACTIVITY_RX);
trace_poll_to_intr3(sys->ep->client);
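
The ipa3_handle_rx() hunk above adds an early break out of the rx polling loop as soon as the pipe has no queued buffers (sys->len == 0), instead of spinning until the inactivity budget runs out, so the delayed replenish work can run sooner. Below is a minimal, hedged userspace sketch of the same control flow; names such as queue_len, reap_completed and INACTIVITY_BUDGET are illustrative stand-ins, not the driver's identifiers.

    #include <stdio.h>

    #define INACTIVITY_BUDGET 40   /* illustrative stand-in for POLLING_INACTIVITY_RX */

    /* stub: pretend to reap one completed descriptor; returns 1 if progress was made */
    static int reap_completed(int *queue_len)
    {
        if (*queue_len > 0) {
            (*queue_len)--;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        int queue_len = 3;          /* buffers currently queued to the pipe */
        int inactive_cycles = 0;

        do {
            if (reap_completed(&queue_len))
                inactive_cycles = 0;        /* progress made, reset the budget */
            else
                inactive_cycles++;

            /*
             * If the pipe is out of buffers there is nothing left to poll
             * for; leave the loop early so the refill work can run.
             */
            if (queue_len == 0)
                break;
        } while (inactive_cycles <= INACTIVITY_BUDGET);

        printf("left polling with %d queued buffers\n", queue_len);
        return 0;
    }
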
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index 41b29335d23b..c2fb87ab757b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -745,7 +745,7 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
goto error;
}
- if ((*rt_tbl)->cookie != IPA_COOKIE) {
+ if ((*rt_tbl)->cookie != IPA_RT_TBL_COOKIE) {
IPAERR("RT table cookie is invalid\n");
goto error;
}
@@ -787,7 +787,7 @@ static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
}
INIT_LIST_HEAD(&((*entry)->link));
(*entry)->rule = *rule;
- (*entry)->cookie = IPA_COOKIE;
+ (*entry)->cookie = IPA_FLT_COOKIE;
(*entry)->rt_tbl = rt_tbl;
(*entry)->tbl = tbl;
if (rule->rule_id) {
@@ -822,12 +822,18 @@ static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
*rule_hdl = id;
entry->id = id;
IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
return 0;
+ipa_insert_failed:
+ if (entry->rt_tbl)
+ entry->rt_tbl->ref_cnt--;
+ tbl->rule_cnt--;
+ return -EPERM;
}
static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
@@ -853,9 +859,16 @@ static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
list_add(&entry->link, &tbl->head_flt_rule_list);
}
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
error:
return -EPERM;
@@ -874,7 +887,7 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
goto error;
if (rule == NULL || rule_hdl == NULL) {
- IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p\n", rule,
rule_hdl);
goto error;
}
@@ -887,7 +900,8 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
list_add(&entry->link, &((*add_after_entry)->link));
- __ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+ if (__ipa_finish_flt_rule_add(tbl, entry, rule_hdl))
+ goto ipa_insert_failed;
/*
* prepare for next insertion
@@ -896,6 +910,13 @@ static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
return 0;
+ipa_insert_failed:
+ list_del(&entry->link);
+ /* if rule id was allocated from idr, remove it */
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
error:
*add_after_entry = NULL;
return -EPERM;
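
Both __ipa_add_flt_rule() and __ipa_add_flt_rule_after() above now check the return value of __ipa_finish_flt_rule_add() and unwind everything done so far (list link, idr-allocated rule id, slab allocation) instead of reporting success with a half-initialized rule. A compact, hedged userspace sketch of the same unwind-in-reverse-order idiom follows; the names and the simulated failure are illustrative only.

    #include <stdio.h>
    #include <stdlib.h>

    struct rule { int id; };

    /* stand-in for the final id insertion; simulate it failing */
    static int register_rule(struct rule *r)
    {
        (void)r;
        return -1;
    }

    static int add_rule(void)
    {
        struct rule *r = malloc(sizeof(*r));   /* stand-in for kmem_cache_zalloc() */
        if (!r)
            goto err_alloc;

        r->id = 42;                            /* stand-in for idr allocation */

        if (register_rule(r) < 0)
            goto err_register;

        return 0;

    err_register:
        /* undo in reverse order of what succeeded */
        free(r);
    err_alloc:
        return -1;
    }

    int main(void)
    {
        printf("add_rule() = %d\n", add_rule());
        return 0;
    }
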
@@ -908,12 +929,12 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
id = entry->id;
@@ -945,12 +966,12 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
entry = ipa3_id_find(frule->rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -960,25 +981,25 @@ static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
if (!frule->rule.eq_attrib_type) {
if (!frule->rule.rt_tbl_hdl) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
if (rt_tbl == NULL) {
- IPAERR("RT tbl not found\n");
+ IPAERR_RL("RT tbl not found\n");
goto error;
}
- if (rt_tbl->cookie != IPA_COOKIE) {
- IPAERR("RT table cookie is invalid\n");
+ if (rt_tbl->cookie != IPA_RT_TBL_COOKIE) {
+ IPAERR_RL("RT table cookie is invalid\n");
goto error;
}
} else {
if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
IPA_MEM_PART(v4_modem_rt_index_hi) :
IPA_MEM_PART(v6_modem_rt_index_hi))) {
- IPAERR("invalid RT tbl\n");
+ IPAERR_RL("invalid RT tbl\n");
goto error;
}
}
@@ -1023,7 +1044,7 @@ static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
int ipa_ep_idx;
if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+ IPAERR_RL("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
rule_hdl, ep);
return -EINVAL;
@@ -1053,7 +1074,7 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1068,7 +1089,7 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
result = -1;
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1076,7 +1097,7 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
}
if (rules->global) {
- IPAERR("no support for global filter rules\n");
+ IPAERR_RL("no support for global filter rules\n");
result = -EPERM;
goto bail;
}
@@ -1111,12 +1132,12 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
if (rules->ep >= IPA_CLIENT_MAX) {
- IPAERR("bad parms ep=%d\n", rules->ep);
+ IPAERR_RL("bad parms ep=%d\n", rules->ep);
return -EINVAL;
}
@@ -1131,20 +1152,20 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
entry = ipa3_id_find(rules->add_after_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given entry does not match the table\n");
+ IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
goto bail;
}
if (tbl->sticky_rear)
if (&entry->link == tbl->head_flt_rule_list.prev) {
- IPAERR("cannot add rule at end of a sticky table");
+ IPAERR_RL("cannot add rule at end of a sticky table");
result = -EINVAL;
goto bail;
}
@@ -1166,7 +1187,7 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
&entry);
if (result) {
- IPAERR("failed to add flt rule %d\n", i);
+ IPAERR_RL("failed to add flt rule %d\n", i);
rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1200,14 +1221,14 @@ int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del flt rule %i\n", i);
+ IPAERR_RL("failed to del flt rule %i\n", i);
hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1240,14 +1261,14 @@ int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
- IPAERR("failed to mdfy flt rule %i\n", i);
+ IPAERR_RL("failed to mdfy flt rule %i\n", i);
hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
@@ -1281,7 +1302,7 @@ int ipa3_commit_flt(enum ipa_ip_type ip)
int result;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1317,7 +1338,7 @@ int ipa3_reset_flt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index a5186f1aff35..f8529faf159d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -330,17 +330,17 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
proc_ctx->type, proc_ctx->hdr_hdl);
if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
- IPAERR("invalid processing type %d\n", proc_ctx->type);
+ IPAERR_RL("invalid processing type %d\n", proc_ctx->type);
return -EINVAL;
}
hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
if (!hdr_entry) {
- IPAERR("hdr_hdl is invalid\n");
+ IPAERR_RL("hdr_hdl is invalid\n");
return -EINVAL;
}
- if (hdr_entry->cookie != IPA_COOKIE) {
- IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+ if (hdr_entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("Invalid header cookie %u\n", hdr_entry->cookie);
WARN_ON(1);
return -EINVAL;
}
@@ -359,7 +359,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
entry->hdr = hdr_entry;
if (add_ref_hdr)
hdr_entry->ref_cnt++;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
@@ -369,7 +369,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
bin = IPA_HDR_PROC_CTX_BIN1;
} else {
- IPAERR("unexpected needed len %d\n", needed_len);
+ IPAERR_RL("unexpected needed len %d\n", needed_len);
WARN_ON(1);
goto bad_len;
}
@@ -379,7 +379,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
if (list_empty(&htbl->head_free_offset_list[bin])) {
if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
- IPAERR("hdr proc ctx table overflow\n");
+ IPAERR_RL("hdr proc ctx table overflow\n");
goto bad_len;
}
@@ -417,6 +417,7 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
proc_ctx->proc_ctx_hdl = id;
@@ -424,6 +425,14 @@ static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
return 0;
+ipa_insert_failed:
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ list_del(&entry->link);
+ htbl->proc_ctx_cnt--;
+
bad_len:
if (add_ref_hdr)
hdr_entry->ref_cnt--;
@@ -436,19 +445,19 @@ bad_len:
static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
{
struct ipa3_hdr_entry *entry;
- struct ipa_hdr_offset_entry *offset;
+ struct ipa_hdr_offset_entry *offset = NULL;
u32 bin;
struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
int id;
int mem_size;
if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
goto error;
}
if (!HDR_TYPE_IS_VALID(hdr->type)) {
- IPAERR("invalid hdr type %d\n", hdr->type);
+ IPAERR_RL("invalid hdr type %d\n", hdr->type);
goto error;
}
@@ -467,7 +476,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
entry->type = hdr->type;
entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
entry->eth2_ofst = hdr->eth2_ofst;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_HDR_COOKIE;
if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
bin = IPA_HDR_BIN0;
@@ -480,7 +489,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
bin = IPA_HDR_BIN4;
else {
- IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+ IPAERR_RL("unexpected hdr len %d\n", hdr->hdr_len);
goto bad_hdr_len;
}
@@ -546,6 +555,7 @@ static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
if (id < 0) {
IPAERR("failed to alloc id\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
hdr->hdr_hdl = id;
@@ -570,10 +580,19 @@ fail_add_proc_ctx:
entry->ref_cnt--;
hdr->hdr_hdl = 0;
ipa3_id_remove(id);
+ipa_insert_failed:
+ if (entry->is_hdr_proc_ctx) {
+ dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+ entry->hdr_len, DMA_TO_DEVICE);
+ } else {
+ if (offset)
+ list_move(&offset->link,
+ &htbl->head_free_offset_list[offset->bin]);
+ entry->offset_entry = NULL;
+ }
htbl->hdr_cnt--;
list_del(&entry->link);
- dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
- entry->hdr_len, DMA_TO_DEVICE);
+
fail_dma_mapping:
entry->is_hdr_proc_ctx = false;
@@ -591,8 +610,8 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
entry = ipa3_id_find(proc_ctx_hdl);
- if (!entry || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parm\n");
+ if (!entry || (entry->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -600,7 +619,7 @@ static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl,
htbl->proc_ctx_cnt, entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -638,12 +657,12 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad parm\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -656,7 +675,7 @@ int __ipa3_del_hdr(u32 hdr_hdl, bool by_user)
entry->offset_entry->offset);
if (by_user && entry->user_deleted) {
- IPAERR("proc_ctx already deleted by user\n");
+ IPAERR_RL("proc_ctx already deleted by user\n");
return -EINVAL;
}
@@ -705,7 +724,7 @@ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
int result = -EFAULT;
if (hdrs == NULL || hdrs->num_hdrs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -714,7 +733,7 @@ int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
hdrs->num_hdrs);
for (i = 0; i < hdrs->num_hdrs; i++) {
if (__ipa_add_hdr(&hdrs->hdr[i])) {
- IPAERR("failed to add hdr %d\n", i);
+ IPAERR_RL("failed to add hdr %d\n", i);
hdrs->hdr[i].status = -1;
} else {
hdrs->hdr[i].status = 0;
@@ -750,14 +769,14 @@ int ipa3_del_hdr_by_user(struct ipa_ioc_del_hdr *hdls, bool by_user)
int result = -EFAULT;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr(hdls->hdl[i].hdl, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -805,7 +824,7 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
int result = -EFAULT;
if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -814,7 +833,7 @@ int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
proc_ctxs->num_proc_ctxs);
for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
- IPAERR("failed to add hdr pric ctx %d\n", i);
+ IPAERR_RL("failed to add hdr pric ctx %d\n", i);
proc_ctxs->proc_ctx[i].status = -1;
} else {
proc_ctxs->proc_ctx[i].status = 0;
@@ -852,14 +871,14 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
int result;
if (hdls == NULL || hdls->num_hdls == 0) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true, by_user)) {
- IPAERR("failed to del hdr %i\n", i);
+ IPAERR_RL("failed to del hdr %i\n", i);
hdls->hdl[i].status = -1;
} else {
hdls->hdl[i].status = 0;
@@ -1066,7 +1085,7 @@ static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
struct ipa3_hdr_entry *entry;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Header name too long: %s\n", name);
+ IPAERR_RL("Header name too long: %s\n", name);
return NULL;
}
@@ -1096,7 +1115,7 @@ int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
int result = -1;
if (lookup == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
@@ -1183,13 +1202,13 @@ int ipa3_put_hdr(u32 hdr_hdl)
entry = ipa3_id_find(hdr_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto bail;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("invalid header entry\n");
+ if (entry->cookie != IPA_HDR_COOKIE) {
+ IPAERR_RL("invalid header entry\n");
result = -EINVAL;
goto bail;
}
@@ -1217,7 +1236,7 @@ int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
int result = -EFAULT;
if (copy == NULL) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index ac7ef6a21952..a890e88a8f61 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -40,6 +40,12 @@
#define DRV_NAME "ipa"
#define NAT_DEV_NAME "ipaNatTable"
#define IPA_COOKIE 0x57831603
+#define IPA_RT_RULE_COOKIE 0x57831604
+#define IPA_RT_TBL_COOKIE 0x57831605
+#define IPA_FLT_COOKIE 0x57831606
+#define IPA_HDR_COOKIE 0x57831607
+#define IPA_PROC_HDR_COOKIE 0x57831608
+
#define MTU_BYTE 1500
#define IPA_EP_NOT_ALLOCATED (-1)
@@ -89,6 +95,18 @@
} \
} while (0)
+#define IPAERR_RL(fmt, args...) \
+ do { \
+ pr_err_ratelimited(DRV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args);\
+ if (ipa3_ctx) { \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+ DRV_NAME " %s:%d " fmt, ## args); \
+ } \
+ } while (0)
+
#define WLAN_AMPDU_TX_EP 15
#define WLAN_PROD_TX_EP 19
#define WLAN1_CONS_RX_EP 14
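
The new IPAERR_RL macro above mirrors IPAERR but routes the console print through pr_err_ratelimited(), so an error path that userspace can trigger repeatedly (bad ioctl parameters, exhausted equation slots, and so on) cannot flood the kernel log, while the IPC logging buffers still record the events. A minimal userspace analogue of the rate-limiting idea is sketched below; the window and burst values are illustrative, not the kernel's ratelimit defaults.

    #include <stdio.h>
    #include <time.h>

    /* crude stand-in for ratelimit state: at most RL_BURST prints per RL_WINDOW_SEC */
    #define RL_WINDOW_SEC 5
    #define RL_BURST      3

    static int rl_allow(void)
    {
        static time_t window_start;
        static int printed;
        time_t now = time(NULL);

        if (now - window_start >= RL_WINDOW_SEC) {
            window_start = now;
            printed = 0;
        }
        if (printed < RL_BURST) {
            printed++;
            return 1;
        }
        return 0;
    }

    #define ERR_RL(fmt, ...)                                              \
        do {                                                              \
            if (rl_allow())                                               \
                fprintf(stderr, "%s:%d " fmt, __func__, __LINE__,         \
                        ##__VA_ARGS__);                                   \
        } while (0)

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            ERR_RL("bad parm %d\n", i);   /* only the first few land in the log */
        return 0;
    }
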
@@ -217,8 +235,8 @@ struct ipa_smmu_cb_ctx {
*/
struct ipa3_flt_entry {
struct list_head link;
- struct ipa_flt_rule rule;
u32 cookie;
+ struct ipa_flt_rule rule;
struct ipa3_flt_tbl *tbl;
struct ipa3_rt_tbl *rt_tbl;
u32 hw_len;
@@ -246,13 +264,13 @@ struct ipa3_flt_entry {
*/
struct ipa3_rt_tbl {
struct list_head link;
+ u32 cookie;
struct list_head head_rt_rule_list;
char name[IPA_RESOURCE_NAME_MAX];
u32 idx;
u32 rule_cnt;
u32 ref_cnt;
struct ipa3_rt_tbl_set *set;
- u32 cookie;
bool in_sys[IPA_RULE_TYPE_MAX];
u32 sz[IPA_RULE_TYPE_MAX];
struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
@@ -284,6 +302,7 @@ struct ipa3_rt_tbl {
*/
struct ipa3_hdr_entry {
struct list_head link;
+ u32 cookie;
u8 hdr[IPA_HDR_MAX_SIZE];
u32 hdr_len;
char name[IPA_RESOURCE_NAME_MAX];
@@ -293,7 +312,6 @@ struct ipa3_hdr_entry {
dma_addr_t phys_base;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
struct ipa_hdr_offset_entry *offset_entry;
- u32 cookie;
u32 ref_cnt;
int id;
u8 is_eth2_ofst_valid;
@@ -342,10 +360,10 @@ struct ipa3_hdr_proc_ctx_offset_entry {
*/
struct ipa3_hdr_proc_ctx_entry {
struct list_head link;
+ u32 cookie;
enum ipa_hdr_proc_type type;
struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
struct ipa3_hdr_entry *hdr;
- u32 cookie;
u32 ref_cnt;
int id;
bool user_deleted;
@@ -407,8 +425,8 @@ struct ipa3_flt_tbl {
*/
struct ipa3_rt_entry {
struct list_head link;
- struct ipa_rt_rule rule;
u32 cookie;
+ struct ipa_rt_rule rule;
struct ipa3_rt_tbl *tbl;
struct ipa3_hdr_entry *hdr;
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
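
The ipa_i.h changes above split the single IPA_COOKIE into per-object magic values (IPA_RT_TBL_COOKIE, IPA_FLT_COOKIE, IPA_HDR_COOKIE, ...) and move the cookie field to the front of each object, right after the list link, so a handle of one type can no longer satisfy the validity check of another. A hedged sketch of the idea is below; the struct layout and values are illustrative only, and like the driver it relies on the cookie being the first field of every object type.

    #include <stdio.h>
    #include <stdint.h>

    #define FLT_COOKIE 0x57831606u   /* one magic value per object type */
    #define HDR_COOKIE 0x57831607u

    struct flt_entry { uint32_t cookie; int rule; };
    struct hdr_entry { uint32_t cookie; int len; };

    /* accept an object looked up by handle only if its magic matches the expected type */
    static int check_flt(const void *obj)
    {
        const struct flt_entry *e = obj;   /* cookie is the first member by design */
        return e && e->cookie == FLT_COOKIE;
    }

    int main(void)
    {
        struct flt_entry f = { FLT_COOKIE, 1 };
        struct hdr_entry h = { HDR_COOKIE, 14 };

        printf("flt handle as flt: %d\n", check_flt(&f));   /* 1: accepted */
        printf("hdr handle as flt: %d\n", check_flt(&h));   /* 0: rejected, wrong type */
        return 0;
    }
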
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index 38e8d4e9d639..76f37162f495 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -227,7 +227,7 @@ int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", lookup->name);
+ IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
return result;
}
@@ -268,7 +268,7 @@ int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
}
if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", tx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", tx->name);
return result;
}
@@ -277,7 +277,7 @@ int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
if (!strcmp(entry->name, tx->name)) {
/* add the entry check */
if (entry->num_tx_props != tx->num_tx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_tx_props,
tx->num_tx_props);
mutex_unlock(&ipa3_ctx->lock);
@@ -315,7 +315,7 @@ int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
}
if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Interface name too long. (%s)\n", rx->name);
+ IPAERR_RL("Interface name too long. (%s)\n", rx->name);
return result;
}
@@ -324,7 +324,7 @@ int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
if (!strcmp(entry->name, rx->name)) {
/* add the entry check */
if (entry->num_rx_props != rx->num_rx_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_rx_props,
rx->num_rx_props);
mutex_unlock(&ipa3_ctx->lock);
@@ -366,7 +366,7 @@ int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
if (!strcmp(entry->name, ext->name)) {
/* add the entry check */
if (entry->num_ext_props != ext->num_ext_props) {
- IPAERR("invalid entry number(%u %u)\n",
+ IPAERR_RL("invalid entry number(%u %u)\n",
entry->num_ext_props,
ext->num_ext_props);
mutex_unlock(&ipa3_ctx->lock);
@@ -410,13 +410,13 @@ int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
- IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+ IPAERR_RL("invalid param meta=%p buff=%p, callback=%p\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
- IPAERR("unsupported message type %d\n", meta->msg_type);
+ IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
@@ -640,7 +640,7 @@ int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
- IPAERR("invalid param name=%p buff=%p count=%zu\n",
+ IPAERR_RL("invalid param name=%p buff=%p count=%zu\n",
meta, buff, count);
return result;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index e7e5cf114242..0256ff89ae24 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -254,8 +254,8 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
mutex_lock(&nat_ctx->lock);
if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
- IPAERR("Nat device name mismatch\n");
- IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+ IPAERR_RL("Nat device name mismatch\n");
+ IPAERR_RL("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
result = -EPERM;
goto bail;
}
@@ -274,7 +274,7 @@ int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
if (mem->size <= 0 ||
nat_ctx->is_dev_init == true) {
- IPAERR("Invalid Parameters or device is already init\n");
+ IPAERR_RL("Invalid Parameters or device is already init\n");
result = -EPERM;
goto bail;
}
@@ -337,16 +337,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->ipv4_rules_offset >
UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Table Entry offset is not
beyond allocated size */
tmp = init->ipv4_rules_offset +
(TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->ipv4_rules_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -354,17 +354,17 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->expn_rules_offset >
- UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ (UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries))) {
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table Entry offset is not
beyond allocated size */
tmp = init->expn_rules_offset +
(TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->expn_rules_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -373,16 +373,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Indx Table Entry offset is not
beyond allocated size */
tmp = init->index_offset +
(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_offset, (init->table_entries + 1),
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
@@ -391,16 +391,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
/* check for integer overflow */
if (init->index_expn_offset >
UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
- IPAERR("Detected overflow\n");
- return -EPERM;
+ IPAERR_RL("Detected overflow\n");
+ return -EPERM;
}
/* Check Expn Table entry offset is not
beyond allocated size */
tmp = init->index_expn_offset +
(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
if (tmp > ipa3_ctx->nat_mem.size) {
- IPAERR("Indx Expn Table rules offset not valid\n");
- IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+ IPAERR_RL("Indx Expn Table rules offset not valid\n");
+ IPAERR_RL("offset:%d entrys:%d size:%zu mem_size:%zu\n",
init->index_expn_offset, init->expn_table_entries,
tmp, ipa3_ctx->nat_mem.size);
return -EPERM;
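
The ipa_nat.c hunks above keep the existing pattern of rejecting an offset that would overflow before computing offset + entries * entry_size, and only then comparing the sum against the allocated size; the changes here rate-limit the error prints and tidy the parenthesization. A small standalone sketch of that order of checks follows; the sizes and names are made up for illustration.

    #include <stdio.h>
    #include <limits.h>

    #define ENTRY_SIZE 32u

    /* returns 0 if the table described by (offset, entries) fits inside mem_size */
    static int check_table(unsigned int offset, unsigned int entries,
                           unsigned int mem_size)
    {
        /* 1) overflow checks first: offset + entries * ENTRY_SIZE must not wrap */
        if (entries > UINT_MAX / ENTRY_SIZE)
            return -1;
        if (offset > UINT_MAX - entries * ENTRY_SIZE)
            return -1;

        /* 2) only now is the sum safe to compute and compare */
        if (offset + entries * ENTRY_SIZE > mem_size)
            return -1;

        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_table(0, 10, 4096));             /* fits */
        printf("%d\n", check_table(4000, 10, 4096));          /* out of bounds */
        printf("%d\n", check_table(UINT_MAX - 4, 10, 4096));  /* would overflow */
        return 0;
    }
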
@@ -437,16 +437,16 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
(init->expn_rules_offset > offset) ||
(init->index_offset > offset) ||
(init->index_expn_offset > offset)) {
- IPAERR("Failed due to integer overflow\n");
- IPAERR("nat.mem.dma_handle: 0x%pa\n",
+ IPAERR_RL("Failed due to integer overflow\n");
+ IPAERR_RL("nat.mem.dma_handle: 0x%pa\n",
&ipa3_ctx->nat_mem.dma_handle);
- IPAERR("ipv4_rules_offset: 0x%x\n",
+ IPAERR_RL("ipv4_rules_offset: 0x%x\n",
init->ipv4_rules_offset);
- IPAERR("expn_rules_offset: 0x%x\n",
+ IPAERR_RL("expn_rules_offset: 0x%x\n",
init->expn_rules_offset);
- IPAERR("index_offset: 0x%x\n",
+ IPAERR_RL("index_offset: 0x%x\n",
init->index_offset);
- IPAERR("index_expn_offset: 0x%x\n",
+ IPAERR_RL("index_expn_offset: 0x%x\n",
init->index_expn_offset);
result = -EPERM;
goto free_nop;
@@ -496,7 +496,7 @@ int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto free_nop;
}
@@ -576,7 +576,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
IPADBG("\n");
if (dma->entries <= 0) {
- IPAERR("Invalid number of commands %d\n",
+ IPAERR_RL("Invalid number of commands %d\n",
dma->entries);
ret = -EPERM;
goto bail;
@@ -584,7 +584,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
for (cnt = 0; cnt < dma->entries; cnt++) {
if (dma->dma[cnt].table_index >= 1) {
- IPAERR("Invalid table index %d\n",
+ IPAERR_RL("Invalid table index %d\n",
dma->dma[cnt].table_index);
ret = -EPERM;
goto bail;
@@ -595,7 +595,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -607,7 +607,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -619,7 +619,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
(ipa3_ctx->nat_mem.size_base_tables + 1) *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -631,7 +631,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
if (dma->dma[cnt].offset >=
ipa3_ctx->nat_mem.size_expansion_tables *
NAT_INTEX_TABLE_ENTRY_SIZE_BYTE) {
- IPAERR("Invalid offset %d\n",
+ IPAERR_RL("Invalid offset %d\n",
dma->dma[cnt].offset);
ret = -EPERM;
goto bail;
@@ -640,7 +640,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
break;
default:
- IPAERR("Invalid base_addr %d\n",
+ IPAERR_RL("Invalid base_addr %d\n",
dma->dma[cnt].base_addr);
ret = -EPERM;
goto bail;
@@ -679,7 +679,7 @@ int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_NAT_DMA, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct nat_dma imm cmd\n");
+ IPAERR_RL("Fail to construct nat_dma imm cmd\n");
continue;
}
desc[1].type = IPA_IMM_CMD_DESC;
@@ -796,7 +796,7 @@ int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
if (!cmd_pyld) {
- IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+ IPAERR_RL("Fail to construct ip_v4_nat_init imm cmd\n");
result = -EPERM;
goto destroy_regwrt_imm_cmd;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 6197c9f64ca5..bc7cc7060545 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -697,7 +697,7 @@ struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
struct ipa3_rt_tbl_set *set;
if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
- IPAERR("Name too long: %s\n", name);
+ IPAERR_RL("Name too long: %s\n", name);
return NULL;
}
@@ -723,7 +723,7 @@ int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
struct ipa3_rt_tbl *entry;
if (in->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -749,7 +749,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
int max_tbl_indx;
if (name == NULL) {
- IPAERR("no tbl name\n");
+ IPAERR_RL("no tbl name\n");
goto error;
}
@@ -762,7 +762,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
max(IPA_MEM_PART(v6_modem_rt_index_hi),
IPA_MEM_PART(v6_apps_rt_index_hi));
} else {
- IPAERR("bad ip family type\n");
+ IPAERR_RL("bad ip family type\n");
goto error;
}
@@ -796,7 +796,7 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
INIT_LIST_HEAD(&entry->link);
strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
entry->set = set;
- entry->cookie = IPA_COOKIE;
+ entry->cookie = IPA_RT_TBL_COOKIE;
entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
!ipa3_ctx->ip4_rt_tbl_hash_lcl :
!ipa3_ctx->ip6_rt_tbl_hash_lcl;
@@ -814,12 +814,16 @@ static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
if (id < 0) {
IPAERR("failed to add to tree\n");
WARN_ON(1);
+ goto ipa_insert_failed;
}
entry->id = id;
}
return entry;
-
+ipa_insert_failed:
+ set->tbl_cnt--;
+ list_del(&entry->link);
+ idr_destroy(&entry->rule_ids);
fail_rt_idx_alloc:
entry->cookie = 0;
kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
@@ -833,13 +837,13 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
u32 id;
struct ipa3_rt_tbl_set *rset;
- if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
- IPAERR("bad parms\n");
+ if (entry == NULL || (entry->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("bad parms\n");
return -EINVAL;
}
id = entry->id;
if (ipa3_id_find(id) == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EPERM;
}
@@ -847,8 +851,10 @@ static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ return -EPERM;
+ }
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -885,14 +891,14 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
if (rule->hdr_hdl) {
*hdr = ipa3_id_find(rule->hdr_hdl);
- if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+ if ((*hdr == NULL) || ((*hdr)->cookie != IPA_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid hdr\n");
return -EPERM;
}
} else if (rule->hdr_proc_ctx_hdl) {
*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
if ((*proc_ctx == NULL) ||
- ((*proc_ctx)->cookie != IPA_COOKIE)) {
+ ((*proc_ctx)->cookie != IPA_PROC_HDR_COOKIE)) {
IPAERR("rt rule does not point to valid proc ctx\n");
return -EPERM;
@@ -915,7 +921,7 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
goto error;
}
INIT_LIST_HEAD(&(*entry)->link);
- (*(entry))->cookie = IPA_COOKIE;
+ (*(entry))->cookie = IPA_RT_RULE_COOKIE;
(*(entry))->rule = *rule;
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
@@ -983,8 +989,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
tbl = __ipa_add_rt_tbl(ip, name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed adding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed adding rt tbl name = %s\n",
name ? name : "");
goto error;
}
@@ -994,8 +1000,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(tbl->rule_cnt > 0) && (at_rear != 0)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
- tbl->rule_cnt, at_rear);
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d at_rear=%d"
+ , tbl->rule_cnt, at_rear);
goto error;
}
@@ -1065,7 +1071,7 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
int ret;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1075,7 +1081,7 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
&rules->rules[i].rule,
rules->rules[i].at_rear,
&rules->rules[i].rt_rule_hdl)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1111,36 +1117,36 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
struct ipa3_rt_entry *entry = NULL;
if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
- if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
- IPAERR("failed finding rt tbl name = %s\n",
+ if (tbl == NULL || (tbl->cookie != IPA_RT_TBL_COOKIE)) {
+ IPAERR_RL("failed finding rt tbl name = %s\n",
rules->rt_tbl_name ? rules->rt_tbl_name : "");
ret = -EINVAL;
goto bail;
}
if (tbl->rule_cnt <= 0) {
- IPAERR("tbl->rule_cnt <= 0");
+ IPAERR_RL("tbl->rule_cnt <= 0");
ret = -EINVAL;
goto bail;
}
entry = ipa3_id_find(rules->add_after_hdl);
if (!entry) {
- IPAERR("failed finding rule %d in rt tbls\n",
+ IPAERR_RL("failed finding rule %d in rt tbls\n",
rules->add_after_hdl);
ret = -EINVAL;
goto bail;
}
if (entry->tbl != tbl) {
- IPAERR("given rt rule does not match the table\n");
+ IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
goto bail;
}
@@ -1151,7 +1157,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
*/
if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
(&entry->link == tbl->head_rt_rule_list.prev)) {
- IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+ IPAERR_RL("cannot add rule at end of tbl rule_cnt=%d\n",
tbl->rule_cnt);
ret = -EINVAL;
goto bail;
@@ -1168,7 +1174,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
&rules->rules[i].rule,
&rules->rules[i].rt_rule_hdl,
&entry)) {
- IPAERR("failed to add rt rule %d\n", i);
+ IPAERR_RL("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1177,7 +1183,7 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
if (rules->commit)
if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
- IPAERR("failed to commit\n");
+ IPAERR_RL("failed to commit\n");
ret = -EPERM;
goto bail;
}
@@ -1198,12 +1204,12 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
entry = ipa3_id_find(rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
return -EINVAL;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
return -EINVAL;
}
@@ -1219,7 +1225,7 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
}
entry->cookie = 0;
id = entry->id;
@@ -1246,14 +1252,14 @@ int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
int ret;
if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_hdls; i++) {
if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
- IPAERR("failed to del rt rule %i\n", i);
+ IPAERR_RL("failed to del rt rule %i\n", i);
hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
} else {
hdls->hdl[i].status = 0;
@@ -1286,7 +1292,7 @@ int ipa3_commit_rt(enum ipa_ip_type ip)
int ret;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1330,7 +1336,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
int id;
if (ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
@@ -1346,7 +1352,7 @@ int ipa3_reset_rt(enum ipa_ip_type ip)
* filtering rules point to routing tables
*/
if (ipa3_reset_flt(ip))
- IPAERR("fail to reset flt ip=%d\n", ip);
+ IPAERR_RL("fail to reset flt ip=%d\n", ip);
set = &ipa3_ctx->rt_tbl_set[ip];
rset = &ipa3_ctx->reap_rt_tbl_set[ip];
@@ -1435,14 +1441,14 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
int result = -EFAULT;
if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
- if (entry && entry->cookie == IPA_COOKIE) {
+ if (entry && entry->cookie == IPA_RT_TBL_COOKIE) {
if (entry->ref_cnt == U32_MAX) {
- IPAERR("fail: ref count crossed limit\n");
+ IPAERR_RL("fail: ref count crossed limit\n");
goto ret;
}
entry->ref_cnt++;
@@ -1450,7 +1456,7 @@ int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
/* commit for get */
if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
result = 0;
}
@@ -1478,13 +1484,13 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
mutex_lock(&ipa3_ctx->lock);
entry = ipa3_id_find(rt_tbl_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
result = -EINVAL;
goto ret;
}
- if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
- IPAERR("bad parms\n");
+ if ((entry->cookie != IPA_RT_TBL_COOKIE) || entry->ref_cnt == 0) {
+ IPAERR_RL("bad parms\n");
result = -EINVAL;
goto ret;
}
@@ -1493,18 +1499,20 @@ int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
ip = IPA_IP_v4;
else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
ip = IPA_IP_v6;
- else
+ else {
WARN_ON(1);
+ goto ret;
+ }
entry->ref_cnt--;
if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
entry->idx);
if (__ipa_del_rt_tbl(entry))
- IPAERR("fail to del RT tbl\n");
+ IPAERR_RL("fail to del RT tbl\n");
/* commit for put */
if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
- IPAERR("fail to commit RT tbl\n");
+ IPAERR_RL("fail to commit RT tbl\n");
}
result = 0;
@@ -1524,26 +1532,27 @@ static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
if (rtrule->rule.hdr_hdl) {
hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
- if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid hdr\n");
+ if ((hdr == NULL) || (hdr->cookie != IPA_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid hdr\n");
goto error;
}
} else if (rtrule->rule.hdr_proc_ctx_hdl) {
proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
- if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
- IPAERR("rt rule does not point to valid proc ctx\n");
+ if ((proc_ctx == NULL) ||
+ (proc_ctx->cookie != IPA_PROC_HDR_COOKIE)) {
+ IPAERR_RL("rt rule does not point to valid proc ctx\n");
goto error;
}
}
entry = ipa3_id_find(rtrule->rt_rule_hdl);
if (entry == NULL) {
- IPAERR("lookup failed\n");
+ IPAERR_RL("lookup failed\n");
goto error;
}
- if (entry->cookie != IPA_COOKIE) {
- IPAERR("bad params\n");
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("bad params\n");
goto error;
}
@@ -1584,14 +1593,14 @@ int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
int result;
if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
- IPAERR("bad parm\n");
+ IPAERR_RL("bad parm\n");
return -EINVAL;
}
mutex_lock(&ipa3_ctx->lock);
for (i = 0; i < hdls->num_rules; i++) {
if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
- IPAERR("failed to mdfy rt rule %i\n", i);
+ IPAERR_RL("failed to mdfy rt rule %i\n", i);
hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
} else {
hdls->rules[i].status = 0;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index e49bdc8c7083..d3837a05fdc2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -1664,7 +1664,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
- IPAERR("bad parm, %d\n", clnt_hdl);
+ IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
@@ -1677,7 +1677,7 @@ int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
ep = &ipa3_ctx->ep[clnt_hdl];
if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
- IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+ IPAERR_RL("WDI channel bad state %d\n", ep->uc_offload_state);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6647f919a577..1fa1196dda63 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -838,7 +838,7 @@ int ipa3_cfg_route(struct ipahal_reg_route *route)
*/
int ipa3_cfg_filter(u32 disable)
{
- IPAERR("Filter disable is not supported!\n");
+ IPAERR_RL("Filter disable is not supported!\n");
return -EPERM;
}
@@ -928,7 +928,7 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
int ipa_ep_idx;
if (client >= IPA_CLIENT_MAX || client < 0) {
- IPAERR("Bad client number! client =%d\n", client);
+ IPAERR_RL("Bad client number! client =%d\n", client);
return IPA_EP_NOT_ALLOCATED;
}
@@ -2001,19 +2001,19 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
int result = -EINVAL;
if (param_in->client >= IPA_CLIENT_MAX) {
- IPAERR("bad parm client:%d\n", param_in->client);
+ IPAERR_RL("bad parm client:%d\n", param_in->client);
goto fail;
}
ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
if (ipa_ep_idx == -1) {
- IPAERR("Invalid client.\n");
+ IPAERR_RL("Invalid client.\n");
goto fail;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (!ep->valid) {
- IPAERR("EP not allocated.\n");
+ IPAERR_RL("EP not allocated.\n");
goto fail;
}
@@ -2026,7 +2026,7 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
if (result)
- IPAERR("qmap_id %d write failed on ep=%d\n",
+ IPAERR_RL("qmap_id %d write failed on ep=%d\n",
meta.qmap_id, ipa_ep_idx);
result = 0;
}
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f4edd4..09cc64b3b695 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -807,6 +807,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 11:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c
index 43ad33ea234b..825c27e7a4c1 100644
--- a/drivers/power/qcom/msm-core.c
+++ b/drivers/power/qcom/msm-core.c
@@ -190,10 +190,12 @@ static void core_temp_notify(enum thermal_trip_type type,
struct cpu_activity_info *cpu_node =
(struct cpu_activity_info *) data;
+ temp /= scaling_factor;
+
trace_temp_notification(cpu_node->sensor_id,
type, temp, cpu_node->temp);
- cpu_node->temp = temp / scaling_factor;
+ cpu_node->temp = temp;
complete(&sampling_completion);
}
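
The msm-core hunk above divides the reported temperature by scaling_factor once, before it is either traced or stored, so the trace event and cpu_node->temp can no longer disagree on units. A trivial sketch of the ordering, with made-up names and values:

    #include <stdio.h>

    #define SCALING_FACTOR 1000   /* illustrative: sensor reports millidegrees */

    static void trace_temp(int t) { printf("trace: %d C\n", t); }

    int main(void)
    {
        int raw = 42000;                    /* raw sensor reading */
        int temp = raw / SCALING_FACTOR;    /* convert once, up front */

        trace_temp(temp);                   /* traced value ...           */
        int stored = temp;                  /* ... and stored value agree */
        printf("stored: %d C\n", stored);
        return 0;
    }
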
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 0908eea3b186..7b7f991ecba9 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -1847,6 +1847,12 @@ static int smb2_post_init(struct smb2 *chip)
struct smb_charger *chg = &chip->chg;
int rc;
+ /* In case the usb path is suspended, we would have missed disabling
+ * the icl change interrupt because the interrupt might not have
+ * been requested yet
+ */
+ rerun_election(chg->usb_icl_votable);
+
/* configure power role for dual-role */
rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
TYPEC_POWER_ROLE_CMD_MASK, 0);
@@ -2196,6 +2202,8 @@ static int smb2_request_interrupts(struct smb2 *chip)
return rc;
}
}
+ if (chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq)
+ chg->usb_icl_change_irq_enabled = true;
return rc;
}
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 16db425514c3..64d71727f7e6 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -427,6 +427,14 @@ static int step_charge_soc_update(struct smb_charger *chg, int capacity)
int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
{
int rc = 0;
+ int irq = chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq;
+
+ if (suspend && irq) {
+ if (chg->usb_icl_change_irq_enabled) {
+ disable_irq_nosync(irq);
+ chg->usb_icl_change_irq_enabled = false;
+ }
+ }
rc = smblib_masked_write(chg, USBIN_CMD_IL_REG, USBIN_SUSPEND_BIT,
suspend ? USBIN_SUSPEND_BIT : 0);
@@ -434,6 +442,13 @@ int smblib_set_usb_suspend(struct smb_charger *chg, bool suspend)
smblib_err(chg, "Couldn't write %s to USBIN_SUSPEND_BIT rc=%d\n",
suspend ? "suspend" : "resume", rc);
+ if (!suspend && irq) {
+ if (!chg->usb_icl_change_irq_enabled) {
+ enable_irq(irq);
+ chg->usb_icl_change_irq_enabled = true;
+ }
+ }
+
return rc;
}
@@ -885,7 +900,6 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
if (icl_ua < USBIN_25MA)
return smblib_set_usb_suspend(chg, true);
- disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
if (icl_ua == INT_MAX)
goto override_suspend_config;
@@ -943,7 +957,6 @@ override_suspend_config:
}
enable_icl_changed_interrupt:
- enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
return rc;
}
@@ -1319,68 +1332,110 @@ int smblib_vconn_regulator_is_enabled(struct regulator_dev *rdev)
#define MAX_RETRY 15
#define MIN_DELAY_US 2000
#define MAX_DELAY_US 9000
-static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+static int otg_current[] = {250000, 500000, 1000000, 1500000};
+static int smblib_enable_otg_wa(struct smb_charger *chg)
{
- struct smb_charger *chg = rdev_get_drvdata(rdev);
- int rc, retry_count = 0, min_delay = MIN_DELAY_US;
u8 stat;
+ int rc, i, retry_count = 0, min_delay = MIN_DELAY_US;
- smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
- rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
- ENG_BUCKBOOST_HALT1_8_MODE_BIT,
- ENG_BUCKBOOST_HALT1_8_MODE_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
- rc);
- return rc;
- }
+ for (i = 0; i < ARRAY_SIZE(otg_current); i++) {
+ smblib_dbg(chg, PR_OTG, "enabling OTG with %duA\n",
+ otg_current[i]);
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+ otg_current[i]);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg limit rc=%d\n", rc);
+ return rc;
+ }
- smblib_dbg(chg, PR_OTG, "enabling OTG\n");
- rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0) {
- smblib_err(chg, "Couldn't enable OTG regulator rc=%d\n", rc);
- return rc;
- }
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ return rc;
+ }
- if (chg->wa_flags & OTG_WA) {
- /* check for softstart */
+ retry_count = 0;
+ min_delay = MIN_DELAY_US;
do {
usleep_range(min_delay, min_delay + 100);
rc = smblib_read(chg, OTG_STATUS_REG, &stat);
if (rc < 0) {
- smblib_err(chg,
- "Couldn't read OTG status rc=%d\n",
- rc);
+ smblib_err(chg, "Couldn't read OTG status rc=%d\n",
+ rc);
goto out;
}
if (stat & BOOST_SOFTSTART_DONE_BIT) {
rc = smblib_set_charge_param(chg,
&chg->param.otg_cl, chg->otg_cl_ua);
- if (rc < 0)
- smblib_err(chg,
- "Couldn't set otg limit\n");
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set otg limit rc=%d\n",
+ rc);
+ goto out;
+ }
break;
}
-
/* increase the delay for following iterations */
if (retry_count > 5)
min_delay = MAX_DELAY_US;
+
} while (retry_count++ < MAX_RETRY);
if (retry_count >= MAX_RETRY) {
- smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
- goto out;
+ smblib_dbg(chg, PR_OTG, "OTG enable failed with %duA\n",
+ otg_current[i]);
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
+ smblib_err(chg, "disable OTG rc=%d\n", rc);
+ goto out;
+ }
+ } else {
+ smblib_dbg(chg, PR_OTG, "OTG enabled\n");
+ return 0;
}
}
+ if (i == ARRAY_SIZE(otg_current)) {
+ rc = -EINVAL;
+ goto out;
+ }
+
return 0;
out:
- /* disable OTG if softstart failed */
smblib_write(chg, CMD_OTG_REG, 0);
return rc;
}
+static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
+{
+ struct smb_charger *chg = rdev_get_drvdata(rdev);
+ int rc;
+
+ smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ smblib_dbg(chg, PR_OTG, "enabling OTG\n");
+
+ if (chg->wa_flags & OTG_WA) {
+ rc = smblib_enable_otg_wa(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ } else {
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable OTG rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
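
The reworked smblib_enable_otg_wa() above retries the boost enable at progressively larger current limits until soft-start completes. A minimal sketch of that retry shape, assuming hypothetical set_limit()/enable_boost()/poll_softstart_done()/disable_boost() helpers in place of the smblib register accessors:

    /* Illustrative sketch only: the helpers below are hypothetical
     * stand-ins for the smblib register writes and the soft-start poll.
     */
    static const int try_limits_ua[] = { 250000, 500000, 1000000, 1500000 };

    static int enable_otg_sketch(void)
    {
            int i, rc;

            for (i = 0; i < ARRAY_SIZE(try_limits_ua); i++) {
                    rc = set_limit(try_limits_ua[i]);  /* program OTG current limit */
                    if (rc < 0)
                            return rc;

                    rc = enable_boost();               /* turn the boost on */
                    if (rc < 0)
                            return rc;

                    if (poll_softstart_done())         /* poll with back-off, as above */
                            return 0;                  /* boost came up at this limit */

                    disable_boost();                   /* give up on this limit, escalate */
            }

            return -EINVAL;                            /* no limit worked */
    }
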
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index c97b84c34084..18714827b8d2 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -328,6 +328,7 @@ struct smb_charger {
int fake_input_current_limited;
bool pr_swap_in_progress;
int typec_mode;
+ int usb_icl_change_irq_enabled;
/* workaround flag */
u32 wa_flags;
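
The usb_icl_change_irq_enabled flag added above keeps enable_irq()/disable_irq_nosync() calls balanced when the suspend path can run repeatedly, or before the interrupt has been requested at all. A driver-agnostic sketch of that guard (the irq and flag parameters are placeholders):

    #include <linux/interrupt.h>

    /* Sketch: bracket the hardware suspend write with balanced IRQ state
     * changes; irq == 0 means the interrupt was never requested.
     */
    static void set_path_suspended(int irq, bool *irq_enabled, bool suspend)
    {
            if (suspend && irq && *irq_enabled) {
                    disable_irq_nosync(irq);   /* stop ICL-change handling while suspended */
                    *irq_enabled = false;
            }

            /* ... write the suspend bit to the charger here ... */

            if (!suspend && irq && !*irq_enabled) {
                    enable_irq(irq);           /* re-arm once the path is active again */
                    *irq_enabled = true;
            }
    }
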
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 59ced8864b2f..0e6aaef9a038 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3563,12 +3563,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f5aeda8f014f..38e90d9c2ced 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5887,18 +5887,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
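
NULLing each pointer right after kfree() in the unwind labels above keeps a later retry of the allocation routine (or the normal teardown path) from freeing stale pointers a second time. A small sketch of the same unwind style, with placeholder struct and field names:

    #include <linux/slab.h>
    #include <linux/bitops.h>

    struct res_ids {
            unsigned long *bmask;
            u16 *ids;
    };

    /* Sketch: every kfree() in the error path is paired with a NULL
     * assignment so the function can be retried safely.
     */
    static int alloc_res_ids(struct res_ids *r, unsigned int nbits,
                             unsigned int count)
    {
            r->bmask = kcalloc(BITS_TO_LONGS(nbits), sizeof(*r->bmask),
                               GFP_KERNEL);
            if (!r->bmask)
                    return -ENOMEM;

            r->ids = kcalloc(count, sizeof(*r->ids), GFP_KERNEL);
            if (!r->ids)
                    goto free_bmask;

            return 0;

    free_bmask:
            kfree(r->bmask);
            r->bmask = NULL;   /* avoid a double free on retry */
            return -ENOMEM;
    }
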
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0e59731f95ad..1f6a3b86965f 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2466,6 +2466,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f57d96984ae4..e6faa0b050d1 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2865,7 +2865,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9b3af788376c..8aa202faafb5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2497,7 +2497,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7dbbb29d24c6..03a2aadf0d3c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -533,7 +533,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
{
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ unsigned long flags;
int req_size;
+ int ret;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -561,8 +563,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
req_size = sizeof(cmd->req.cmd);
}
- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
return SCSI_MLQUEUE_HOST_BUSY;
+ }
return 0;
}
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 7d3af3eacf57..1ddba9ae8c0f 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -665,7 +665,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
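
The spi-davinci change switches the failure check to dma_mapping_error(): a handle of 0 can be a valid bus address on some platforms, so the result of dma_map_single() must not be tested against NULL. A minimal sketch of the idiom:

    #include <linux/dma-mapping.h>

    /* Sketch: map a receive buffer and detect mapping failure portably. */
    static int map_rx_buffer(struct device *dev, void *buf, size_t len,
                             dma_addr_t *dma)
    {
            *dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
            if (dma_mapping_error(dev, *dma))   /* never compare against 0/NULL */
                    return -ENOMEM;

            return 0;
    }
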
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 830ef92ffe80..fc9faaee3170 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -272,7 +272,7 @@ static struct of_device_id msm_hs_match_table[] = {
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
-#define UARTDM_NR 256
+#define UARTDM_NR 4
#define BAM_PIPE_MIN 0
#define BAM_PIPE_MAX 11
#define BUS_SCALING 1
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index c2d788bc4bc5..8e8c1a349e6a 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -54,6 +54,8 @@
#include "debug.h"
#include "xhci.h"
+#define SDP_CONNETION_CHECK_TIME 10000 /* in ms */
+
/* time out to wait for USB cable status notification (in ms)*/
#define SM_INIT_TIMEOUT 30000
@@ -227,6 +229,7 @@ struct dwc3_msm {
int pm_qos_latency;
struct pm_qos_request pm_qos_req_dma;
struct delayed_work perf_vote_work;
+ struct delayed_work sdp_check;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -2625,6 +2628,42 @@ done:
return NOTIFY_DONE;
}
+
+static void check_for_sdp_connection(struct work_struct *w)
+{
+ int ret;
+ union power_supply_propval pval = {0};
+ struct dwc3_msm *mdwc =
+ container_of(w, struct dwc3_msm, sdp_check.work);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+ if (!mdwc->vbus_active)
+ return;
+
+ /* floating D+/D- lines detected */
+ if (dwc->gadget.state < USB_STATE_DEFAULT &&
+ dwc3_gadget_get_link_state(dwc) != DWC3_LINK_STATE_CMPLY) {
+ if (!mdwc->usb_psy) {
+ mdwc->usb_psy = power_supply_get_by_name("usb");
+ if (!mdwc->usb_psy) {
+ dev_dbg(mdwc->dev,
+ "Could not get usb power_supply\n");
+ return;
+ }
+ }
+ pval.intval = -ETIMEDOUT;
+ ret = power_supply_set_property(mdwc->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
+ if (ret)
+ dev_dbg(mdwc->dev,
+ "power supply error when setting property\n");
+
+ mdwc->vbus_active = 0;
+ dbg_event(0xFF, "Q RW SPD CHK", mdwc->vbus_active);
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ }
+}
+
static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -2833,6 +2872,7 @@ static int dwc3_msm_probe(struct platform_device *pdev)
INIT_WORK(&mdwc->vbus_draw_work, dwc3_msm_vbus_draw_work);
INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
INIT_DELAYED_WORK(&mdwc->perf_vote_work, msm_dwc3_perf_vote_work);
+ INIT_DELAYED_WORK(&mdwc->sdp_check, check_for_sdp_connection);
mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
if (!mdwc->dwc3_wq) {
@@ -3586,28 +3626,38 @@ static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
return 0;
}
-static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
+int get_psy_type(struct dwc3_msm *mdwc)
{
union power_supply_propval pval = {0};
- int ret;
if (mdwc->charging_disabled)
- return 0;
-
- if (mdwc->max_power == mA)
- return 0;
+ return -EINVAL;
if (!mdwc->usb_psy) {
mdwc->usb_psy = power_supply_get_by_name("usb");
if (!mdwc->usb_psy) {
- dev_warn(mdwc->dev, "Could not get usb power_supply\n");
+ dev_err(mdwc->dev, "Could not get usb psy\n");
return -ENODEV;
}
}
- power_supply_get_property(mdwc->usb_psy,
- POWER_SUPPLY_PROP_REAL_TYPE, &pval);
- if (pval.intval != POWER_SUPPLY_TYPE_USB)
+ power_supply_get_property(mdwc->usb_psy, POWER_SUPPLY_PROP_REAL_TYPE,
+ &pval);
+
+ return pval.intval;
+}
+
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
+{
+ union power_supply_propval pval = {0};
+ int ret, psy_type;
+
+ if (mdwc->max_power == mA)
+ return 0;
+
+ psy_type = get_psy_type(mdwc);
+ if (psy_type != POWER_SUPPLY_TYPE_USB &&
+ psy_type != POWER_SUPPLY_TYPE_USB_FLOAT)
return 0;
dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
@@ -3684,6 +3734,10 @@ static void dwc3_otg_sm_work(struct work_struct *w)
work = 1;
} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "b_sess_vld\n");
+ if (get_psy_type(mdwc) == POWER_SUPPLY_TYPE_USB_FLOAT)
+ queue_delayed_work(mdwc->dwc3_wq,
+ &mdwc->sdp_check,
+ msecs_to_jiffies(SDP_CONNETION_CHECK_TIME));
/*
* Increment pm usage count upon cable connect. Count
* is decremented in OTG_STATE_B_PERIPHERAL state on
@@ -3707,6 +3761,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
!test_bit(ID, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "!id || !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
+ cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
/*
* Decrement pm usage count upon cable disconnect
@@ -3739,6 +3794,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
mdwc->otg_state = OTG_STATE_B_IDLE;
+ cancel_delayed_work_sync(&mdwc->sdp_check);
dwc3_otg_start_peripheral(mdwc, 0);
} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
dev_dbg(mdwc->dev, "BSUSP !susp\n");
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 03aeec2e878c..3f1c2b32abb8 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -182,7 +182,7 @@ static void *usbpd_ipc_log;
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
-#define SWAP_SOURCE_START_TIME 20
+#define FIRST_SOURCE_CAP_TIME 200
#define VDM_BUSY_TIME 50
#define VCONN_ON_TIME 100
@@ -790,17 +790,27 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->pd_phy_opened = true;
}
- pd->current_state = PE_SRC_SEND_CAPABILITIES;
if (pd->in_pr_swap) {
- kick_sm(pd, SWAP_SOURCE_START_TIME);
pd->in_pr_swap = false;
val.intval = 0;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PR_SWAP, &val);
- break;
}
- /* fall-through */
+ /*
+ * A sink might remove its terminations (during some Type-C
+ * compliance tests or a sink attempting to do Try.SRC)
+ * at this point just after we enabled VBUS. Sending PD
+ * messages now would delay detecting the detach beyond the
+ * required timing. Instead, delay sending out the first
+ * source capabilities to allow for the other side to
+ * completely settle CC debounce and allow HW to detect detach
+ * sooner in the meantime. PD spec allows up to
+ * tFirstSourceCap (250ms).
+ */
+ pd->current_state = PE_SRC_SEND_CAPABILITIES;
+ kick_sm(pd, FIRST_SOURCE_CAP_TIME);
+ break;
case PE_SRC_SEND_CAPABILITIES:
kick_sm(pd, 0);
@@ -910,6 +920,7 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP ||
+ pd->psy_type == POWER_SUPPLY_TYPE_USB_FLOAT ||
usb_compliance_mode)
start_usb_peripheral(pd);
}
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 1a9f18b40be6..34e4b3ad8b92 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1163,6 +1163,10 @@ static int tce_iommu_attach_group(void *iommu_data,
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index d6e8a213c215..5548f0f09f8a 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -547,6 +547,7 @@ struct mdss_data_type {
u32 sec_session_cnt;
wait_queue_head_t secure_waitq;
struct cx_ipeak_client *mdss_cx_ipeak;
+ struct mult_factor bus_throughput_factor;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 6936c4c1f3cc..6fb32761a767 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -4527,6 +4527,15 @@ static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-clk-factor",
&mdata->clk_factor);
+ /*
+ * Bus throughput factor will be used during high downscale cases.
+ * The recommended default factor is 1.1.
+ */
+ mdata->bus_throughput_factor.numer = 11;
+ mdata->bus_throughput_factor.denom = 10;
+ mdss_mdp_parse_dt_fudge_factors(pdev, "qcom,mdss-bus-througput-factor",
+ &mdata->bus_throughput_factor);
+
rc = of_property_read_u32(pdev->dev.of_node,
"qcom,max-bandwidth-low-kbps", &mdata->max_bw_low);
if (rc)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index efd681a5d954..0165c48a0467 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -31,6 +31,7 @@
#define MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM 2
#define NUM_MIXERCFG_REGS 3
#define MDSS_MDP_WB_OUTPUT_BPP 3
+#define MIN_BUS_THROUGHPUT_SCALE_FACTOR 35
struct mdss_mdp_mixer_cfg {
u32 config_masks[NUM_MIXERCFG_REGS];
bool border_enabled;
@@ -622,11 +623,21 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
}
static inline bool __is_vert_downscaling(u32 src_h,
- struct mdss_rect dst){
-
+ struct mdss_rect dst)
+{
return (src_h > dst.h);
}
+static inline bool __is_bus_throughput_factor_required(u32 src_h,
+ struct mdss_rect dst)
+{
+ u32 scale_factor = src_h * 10;
+
+ do_div(scale_factor, dst.h);
+ return (__is_vert_downscaling(src_h, dst) &&
+ (scale_factor >= MIN_BUS_THROUGHPUT_SCALE_FACTOR));
+}
+
static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
struct mdss_rect src, struct mdss_rect dst,
u32 fps, u32 v_total, u32 flags)
@@ -673,6 +684,15 @@ static u32 get_pipe_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
}
}
+ /*
+ * If the downscale factor is >= 3.5 for a 32 BPP surface,
+ * it is recommended to add a 10% bus throughput factor to
+ * the clock rate.
+ */
+ if ((pipe->src_fmt->bpp == 4) &&
+ __is_bus_throughput_factor_required(src_h, dst))
+ rate = apply_fudge_factor(rate, &mdata->bus_throughput_factor);
+
if (flags & PERF_CALC_PIPE_APPLY_CLK_FUDGE)
rate = mdss_mdp_clk_fudge_factor(mixer, rate);
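
The helper above expresses the 3.5x downscale threshold in tenths (scale_factor >= 35) to stay in integer math. A quick worked sketch of the same comparison: with src_h = 2160 and dst.h = 600, 2160 * 10 / 600 = 36 >= 35, so the 11/10 bus throughput factor is applied to the clock rate.

    /* Sketch: decide whether the extra bus factor applies, integer-only. */
    static bool needs_bus_factor(unsigned int src_h, unsigned int dst_h)
    {
            return dst_h && src_h > dst_h && (src_h * 10) / dst_h >= 35;
    }
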
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index e44edb8fbea7..3ccc09d58480 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -3131,6 +3131,14 @@ int mdss_mdp_layer_pre_commit_wfd(struct msm_fb_data_type *mfd,
sync_pt_data = &mfd->mdp_sync_pt_data;
mutex_lock(&sync_pt_data->sync_mutex);
count = sync_pt_data->acq_fen_cnt;
+
+ if (count >= MDP_MAX_FENCE_FD) {
+ pr_err("Reached maximum possible value for fence count\n");
+ mutex_unlock(&sync_pt_data->sync_mutex);
+ rc = -EINVAL;
+ goto input_layer_err;
+ }
+
sync_pt_data->acq_fen[count] = fence;
sync_pt_data->acq_fen_cnt++;
mutex_unlock(&sync_pt_data->sync_mutex);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
index 017a2f10dfbc..a5ec7097e0f6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp_cache_config.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -194,8 +194,12 @@ static int pp_hist_lut_cache_params_pipe_v1_7(struct mdp_hist_lut_data *config,
return -EINVAL;
}
- memcpy(&hist_lut_usr_config, config->cfg_payload,
- sizeof(struct mdp_hist_lut_data_v1_7));
+ if (copy_from_user(&hist_lut_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(hist_lut_usr_config))) {
+ pr_err("failed to copy hist lut config\n");
+ return -EFAULT;
+ }
hist_lut_cache_data = pipe->pp_res.hist_lut_cfg_payload;
if (!hist_lut_cache_data) {
@@ -606,8 +610,12 @@ static int pp_pcc_cache_params_pipe_v1_7(struct mdp_pcc_cfg_data *config,
return -EINVAL;
}
- memcpy(&v17_usr_config, config->cfg_payload,
- sizeof(v17_usr_config));
+ if (copy_from_user(&v17_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(v17_usr_config))) {
+ pr_err("failed to copy pcc config\n");
+ return -EFAULT;
+ }
if (!(config->ops & MDP_PP_OPS_WRITE)) {
pr_debug("write ops not set value of flag is %d\n",
@@ -861,8 +869,12 @@ static int pp_igc_lut_cache_params_pipe_v1_7(struct mdp_igc_lut_data *config,
goto igc_config_exit;
}
- memcpy(&v17_usr_config, config->cfg_payload,
- sizeof(v17_usr_config));
+ if (copy_from_user(&v17_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(v17_usr_config))) {
+ pr_err("failed to copy igc usr config\n");
+ return -EFAULT;
+ }
if (!(config->ops & MDP_PP_OPS_WRITE)) {
pr_debug("op for gamut %d\n", config->ops);
@@ -1272,8 +1284,12 @@ static int pp_pa_cache_params_pipe_v1_7(struct mdp_pa_v2_cfg_data *config,
return -EINVAL;
}
- memcpy(&pa_usr_config, config->cfg_payload,
- sizeof(struct mdp_pa_data_v1_7));
+ if (copy_from_user(&pa_usr_config,
+ (void __user *) config->cfg_payload,
+ sizeof(pa_usr_config))) {
+ pr_err("failed to copy pa usr config\n");
+ return -EFAULT;
+ }
pa_cache_data = pipe->pp_res.pa_cfg_payload;
if (!pa_cache_data) {
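
The memcpy()-to-copy_from_user() conversions above matter because cfg_payload arrives through an ioctl and points at user memory; copying it with memcpy() lets userspace hand the kernel an arbitrary address. A minimal sketch of the safe pattern (struct and field names are placeholders):

    #include <linux/uaccess.h>
    #include <linux/types.h>

    struct lut_cfg_v1 {
            u32 len;
            u64 data_ptr;
    };

    /* Sketch: snapshot a userspace payload into kernel memory before
     * validating or caching any of its fields.
     */
    static int cache_user_cfg(const void __user *payload, struct lut_cfg_v1 *out)
    {
            if (copy_from_user(out, payload, sizeof(*out)))
                    return -EFAULT;   /* bad user pointer: fail, don't oops */

            /* validate out->len, out->data_ptr, ... before use */
            return 0;
    }
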
diff --git a/drivers/video/fbdev/msm/msm_mdss_io_8974.c b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
index 80dbe83972d7..bb3b4b3fa929 100644
--- a/drivers/video/fbdev/msm/msm_mdss_io_8974.c
+++ b/drivers/video/fbdev/msm/msm_mdss_io_8974.c
@@ -2587,7 +2587,8 @@ int mdss_dsi_post_clkon_cb(void *priv,
}
if (clk & MDSS_DSI_LINK_CLK) {
/* toggle the resync FIFO everytime clock changes */
- if (ctrl->shared_data->phy_rev == DSI_PHY_REV_30)
+ if ((ctrl->shared_data->phy_rev == DSI_PHY_REV_30) &&
+ !pdata->panel_info.cont_splash_enabled)
mdss_dsi_phy_v3_toggle_resync_fifo(ctrl);
if (ctrl->ulps) {
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index e0c98423f2c9..11a72bc2c71b 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
if (!wdt)
return -ENOMEM;
+ spin_lock_init(&wdt->lock);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->base = devm_ioremap_resource(dev, res);
if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
return ret;
}
- spin_lock_init(&wdt->lock);
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
bcm_kona_wdt_wdd.parent = &pdev->dev;
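
Moving spin_lock_init() ahead of the resource setup above guarantees the lock is valid before anything that might take it (the register access helpers, debugfs, the watchdog core) can run. A probe-ordering sketch, assuming a hypothetical driver struct:

    #include <linux/spinlock.h>
    #include <linux/io.h>

    struct my_wdt {
            spinlock_t lock;
            void __iomem *base;
    };

    /* Sketch: initialise synchronisation primitives first, then touch
     * anything that could end up using them.
     */
    static int my_wdt_init(struct my_wdt *wdt)
    {
            spin_lock_init(&wdt->lock);   /* usable from here on */

            /* ... map registers, register debugfs/sysfs, request IRQs ... */
            return 0;
    }
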
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 7399782c0998..8a58bbc14de2 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed