summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGhanim Fodi <gfodi@codeaurora.org>2016-06-06 16:59:03 +0300
committerKyle Yan <kyan@codeaurora.org>2016-06-21 15:11:32 -0700
commit38d03cd71e0be00ec4d1479b7274b0513b80a160 (patch)
tree4004633fa58e87aa2fbbcf90f72c23c2d55b8ed4
parentc876c09f5817c12b9162cf2389be730e94c35d75 (diff)
msm: ipa3: Move IPA FnR building to IPAHAL
Building IPA Filtering and Routing (FnR) rules and tables is logic specific to the IPA H/W. Accordingly, migrate this logic into IPAHAL (the H/W abstraction layer) of the IPA driver and adapt the core driver code to use it. A new internal S/W API is added to access IPAHAL for building and clearing Filtering and Routing rules and tables. CRs-Fixed: 1006485 Change-Id: I23a95be86412987f72287138817235d3f1f9bc61 Signed-off-by: Ghanim Fodi <gfodi@codeaurora.org>
-rw-r--r--drivers/platform/msm/ipa/ipa_api.c118
-rw-r--r--drivers/platform/msm/ipa/ipa_common_i.h7
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_utils.c64
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c389
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c299
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c818
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h72
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h94
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_rt.c690
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c1738
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c34
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h4
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c3200
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h288
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h143
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h4
19 files changed, 4622 insertions, 3349 deletions
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 82402d7d1545..208a4ce1e40e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -166,6 +166,124 @@ const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
__stringify(IPA_CLIENT_TEST4_CONS),
};
+/**
+ * ipa_write_64() - convert 64 bit value to byte array
+ * @w: 64 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_64(u64 w, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_64: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+ *dest++ = (u8)((w >> 32) & 0xFF);
+ *dest++ = (u8)((w >> 40) & 0xFF);
+ *dest++ = (u8)((w >> 48) & 0xFF);
+ *dest++ = (u8)((w >> 56) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_32: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (u8)((w) & 0xFF);
+ *dest++ = (u8)((w >> 8) & 0xFF);
+ *dest++ = (u8)((w >> 16) & 0xFF);
+ *dest++ = (u8)((w >> 24) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_16: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (u8)((hw) & 0xFF);
+ *dest++ = (u8)((hw >> 8) & 0xFF);
+
+ return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @hw: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: converted value
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+ if (unlikely(dest == NULL)) {
+ pr_err("ipa_write_8: NULL address!\n");
+ return dest;
+ }
+ *dest++ = (b) & 0xFF;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_64() - pad byte array to 64 bit value
+ * @dest: byte array
+ *
+ * Return value: padded value
+ */
+u8 *ipa_pad_to_64(u8 *dest)
+{
+ int i = (long)dest & 0x7;
+ int j;
+
+ if (i)
+ for (j = 0; j < (8 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: padded value
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+ int i = (long)dest & 0x3;
+ int j;
+
+ if (i)
+ for (j = 0; j < (4 - i); j++)
+ *dest++ = 0;
+
+ return dest;
+}
/**
* ipa_connect() - low-level IPA client connect
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
index 55aa1993d30e..060613281e4c 100644
--- a/drivers/platform/msm/ipa/ipa_common_i.h
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -343,4 +343,11 @@ void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
void ipa_set_tag_process_before_gating(bool val);
bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+u8 *ipa_write_64(u64 w, u8 *dest);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_64(u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+
#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index f0b25132df33..f2dcb2438b9a 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1722,10 +1722,6 @@ int ipa_generate_hw_rule(enum ipa_ip_type ip,
const struct ipa_rule_attrib *attrib,
u8 **buf,
u16 *en_rule);
-u8 *ipa_write_32(u32 w, u8 *dest);
-u8 *ipa_write_16(u16 hw, u8 *dest);
-u8 *ipa_write_8(u8 b, u8 *dest);
-u8 *ipa_pad_to_32(u8 *dest);
int ipa_init_hw(void);
struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
int ipa_set_single_ndp_per_mbim(bool);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index ae709c54cec1..0c5d47ddd9c8 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -1034,70 +1034,6 @@ enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
return ipa_ctx->ep[pipe_idx].client;
}
-/**
- * ipa_write_32() - convert 32 bit value to byte array
- * @w: 32 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa_write_32(u32 w, u8 *dest)
-{
- *dest++ = (u8)((w) & 0xFF);
- *dest++ = (u8)((w >> 8) & 0xFF);
- *dest++ = (u8)((w >> 16) & 0xFF);
- *dest++ = (u8)((w >> 24) & 0xFF);
-
- return dest;
-}
-
-/**
- * ipa_write_16() - convert 16 bit value to byte array
- * @hw: 16 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa_write_16(u16 hw, u8 *dest)
-{
- *dest++ = (u8)((hw) & 0xFF);
- *dest++ = (u8)((hw >> 8) & 0xFF);
-
- return dest;
-}
-
-/**
- * ipa_write_8() - convert 8 bit value to byte array
- * @hw: 8 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa_write_8(u8 b, u8 *dest)
-{
- *dest++ = (b) & 0xFF;
-
- return dest;
-}
-
-/**
- * ipa_pad_to_32() - pad byte array to 32 bit value
- * @dest: byte array
- *
- * Return value: padded value
- */
-u8 *ipa_pad_to_32(u8 *dest)
-{
- int i = (long)dest & 0x3;
- int j;
-
- if (i)
- for (j = 0; j < (4 - i); j++)
- *dest++ = 0;
-
- return dest;
-}
-
void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
const uint8_t mac_addr_mask[ETH_ALEN],
const uint8_t mac_addr[ETH_ALEN])
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 8578417477bb..546c6fbfcaa9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -41,6 +41,7 @@
#include "ipa_i.h"
#include "../ipa_rm_i.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
#define CREATE_TRACE_POINTS
#include "ipa_trace.h"
@@ -1216,8 +1217,8 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = -EFAULT;
break;
}
- if (ipa3_generate_flt_eq(flt_eq.ip, &flt_eq.attrib,
- &flt_eq.eq_attrib)) {
+ if (ipahal_flt_generate_equation(flt_eq.ip,
+ &flt_eq.attrib, &flt_eq.eq_attrib)) {
retval = -EFAULT;
break;
}
@@ -1660,10 +1661,10 @@ static void ipa3_q6_avoid_holb(void)
}
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
- enum ipa_rule_type rlt, const struct ipa_mem_buffer *mem)
+ enum ipa_rule_type rlt)
{
struct ipa3_desc *desc;
- struct ipahal_imm_cmd_dma_shared_mem cmd;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
struct ipahal_imm_cmd_pyld **cmd_pyld;
int retval = 0;
int pipe_idx;
@@ -1671,12 +1672,13 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
int num_cmds = 0;
int index;
u32 lcl_addr_mem_part;
+ u32 lcl_hdr_sz;
+ struct ipa_mem_buffer mem;
IPADBG("Entry\n");
- if (!mem || (ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
- IPAERR("Input Err: mem=%p ; ip=%d ; rlt=%d\n",
- mem, ip, rlt);
+ if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+ IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
return -EINVAL;
}
@@ -1696,16 +1698,30 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
goto free_desc;
}
- if (ip == IPA_IP_v4)
- lcl_addr_mem_part =
- rlt == IPA_RULE_HASHABLE ?
- IPA_MEM_PART(v4_flt_hash_ofst) :
- IPA_MEM_PART(v4_flt_nhash_ofst);
- else
- lcl_addr_mem_part =
- rlt == IPA_RULE_HASHABLE ?
- IPA_MEM_PART(v6_flt_hash_ofst) :
- IPA_MEM_PART(v6_flt_nhash_ofst);
+ if (ip == IPA_IP_v4) {
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+ }
+ } else {
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+ }
+ }
+
+ retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
+ 0, &mem);
+ if (retval) {
+ IPAERR("failed to generate flt single tbl empty img\n");
+ goto free_cmd_pyld;
+ }
for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
if (!ipa_is_ep_support_flt(pipe_idx))
@@ -1721,19 +1737,19 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
cmd.is_read = false;
cmd.skip_pipeline_clear = false;
cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- cmd.size = mem->size;
- cmd.system_addr = mem->phys_base;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
cmd.local_addr =
ipa3_ctx->smem_restricted_bytes +
lcl_addr_mem_part +
- IPA_HW_TBL_HDR_WIDTH +
- flt_idx * IPA_HW_TBL_HDR_WIDTH;
+ ipahal_get_hw_tbl_hdr_width() +
+ flt_idx * ipahal_get_hw_tbl_hdr_width();
cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
if (!cmd_pyld[num_cmds]) {
IPAERR("fail construct dma_shared_mem cmd\n");
retval = -ENOMEM;
- goto free_cmd_pyld;
+ goto free_empty_img;
}
desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
IPA_IMM_CMD_DMA_SHARED_MEM);
@@ -1753,6 +1769,8 @@ static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
retval = -EFAULT;
}
+free_empty_img:
+ ipahal_free_dma_mem(&mem);
free_cmd_pyld:
for (index = 0; index < num_cmds; index++)
ipahal_destroy_imm_cmd(cmd_pyld[index]);
@@ -1763,152 +1781,139 @@ free_desc:
}
static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
- enum ipa_rule_type rlt, const struct ipa_mem_buffer *mem)
+ enum ipa_rule_type rlt)
{
struct ipa3_desc *desc;
- struct ipahal_imm_cmd_dma_shared_mem cmd;
- struct ipahal_imm_cmd_pyld **cmd_pyld;
- int tbls_cnt;
+ struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
int retval = 0;
- int num_cmds = 0;
- int index;
u32 modem_rt_index_lo;
u32 modem_rt_index_hi;
u32 lcl_addr_mem_part;
+ u32 lcl_hdr_sz;
+ struct ipa_mem_buffer mem;
IPADBG("Entry\n");
- if (!mem || (ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
- IPAERR("Input Err: mem=%p ; ip=%d ; rlt=%d\n",
- mem, ip, rlt);
+ if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+ IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
return -EINVAL;
}
if (ip == IPA_IP_v4) {
modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
- lcl_addr_mem_part =
- rlt == IPA_RULE_HASHABLE ?
- IPA_MEM_PART(v4_rt_hash_ofst) :
- IPA_MEM_PART(v4_rt_nhash_ofst);
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+ }
} else {
modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
- lcl_addr_mem_part =
- rlt == IPA_RULE_HASHABLE ?
- IPA_MEM_PART(v6_rt_hash_ofst) :
- IPA_MEM_PART(v6_rt_nhash_ofst);
+ if (rlt == IPA_RULE_HASHABLE) {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+ } else {
+ lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
+ lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+ }
}
- tbls_cnt = modem_rt_index_hi - modem_rt_index_lo + 1;
- desc = kcalloc(tbls_cnt, sizeof(struct ipa3_desc), GFP_KERNEL);
- if (!desc) {
- IPAERR("failed to allocate memory\n");
+ retval = ipahal_rt_generate_empty_img(
+ modem_rt_index_hi - modem_rt_index_lo + 1,
+ lcl_hdr_sz, lcl_hdr_sz, &mem);
+ if (retval) {
+ IPAERR("fail generate empty rt img\n");
return -ENOMEM;
}
- cmd_pyld = kcalloc(tbls_cnt, sizeof(struct ipahal_imm_cmd_pyld *),
- GFP_KERNEL);
- if (!cmd_pyld) {
+ desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+ if (!desc) {
IPAERR("failed to allocate memory\n");
- retval = -ENOMEM;
- goto free_desc;
+ goto free_empty_img;
}
- for (index = modem_rt_index_lo; index <= modem_rt_index_hi; index++) {
- cmd.is_read = false;
- cmd.skip_pipeline_clear = false;
- cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- cmd.size = mem->size;
- cmd.system_addr = mem->phys_base;
- cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
- lcl_addr_mem_part +
- index * IPA_HW_TBL_HDR_WIDTH;
- cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+ cmd.is_read = false;
+ cmd.skip_pipeline_clear = false;
+ cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ cmd.size = mem.size;
+ cmd.system_addr = mem.phys_base;
+ cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+ lcl_addr_mem_part +
+ modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
+ cmd_pyld = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
- if (!cmd_pyld[num_cmds]) {
- IPAERR("failed to construct dma_shared_mem imm cmd\n");
- retval = -ENOMEM;
- goto free_cmd_pyld;
- }
- desc[num_cmds].opcode =
- ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
- desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
- desc[num_cmds].len = cmd_pyld[num_cmds]->len;
- desc[num_cmds].type = IPA_IMM_CMD_DESC;
- num_cmds++;
+ if (!cmd_pyld) {
+ IPAERR("failed to construct dma_shared_mem imm cmd\n");
+ retval = -ENOMEM;
+ goto free_desc;
}
+ desc->opcode =
+ ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+ desc->pyld = cmd_pyld->data;
+ desc->len = cmd_pyld->len;
+ desc->type = IPA_IMM_CMD_DESC;
- IPADBG("Sending %d descriptors for rt tbl clearing\n", num_cmds);
- retval = ipa3_send_cmd(num_cmds, desc);
+ IPADBG("Sending 1 descriptor for rt tbl clearing\n");
+ retval = ipa3_send_cmd(1, desc);
if (retval) {
IPAERR("failed to send immediate command (err %d)\n", retval);
retval = -EFAULT;
}
-free_cmd_pyld:
- for (index = 0; index < num_cmds; index++)
- ipahal_destroy_imm_cmd(cmd_pyld[index]);
- kfree(cmd_pyld);
+ ipahal_destroy_imm_cmd(cmd_pyld);
free_desc:
kfree(desc);
+free_empty_img:
+ ipahal_free_dma_mem(&mem);
return retval;
}
static int ipa3_q6_clean_q6_tables(void)
{
struct ipa3_desc *desc;
- struct ipahal_imm_cmd_pyld *cmd_pyld;
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
int retval;
- struct ipa_mem_buffer mem = { 0 };
struct ipahal_reg_fltrt_hash_flush flush;
struct ipahal_reg_valmask valmask;
- u64 *entry;
IPADBG("Entry\n");
- mem.size = IPA_HW_TBL_HDR_WIDTH;
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
- &mem.phys_base, GFP_KERNEL);
- if (!mem.base) {
- IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
- return -ENOMEM;
- }
- entry = mem.base;
- *entry = ipa3_ctx->empty_rt_tbl_mem.phys_base;
-
- if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
goto bail_desc;
}
- if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE, &mem)) {
+ if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
goto bail_desc;
}
@@ -1954,7 +1959,6 @@ static int ipa3_q6_clean_q6_tables(void)
bail_desc:
kfree(desc);
bail_dma:
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
IPADBG("Done - retval = %d\n", retval);
return retval;
}
@@ -2277,7 +2281,6 @@ int _ipa_init_rt4_v3(void)
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
- u64 *entry;
int i;
int rc = 0;
@@ -2287,18 +2290,12 @@ int _ipa_init_rt4_v3(void)
ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
- mem.size = IPA_MEM_PART(v4_rt_nhash_size);
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
- GFP_KERNEL);
- if (!mem.base) {
- IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
- return -ENOMEM;
- }
-
- entry = mem.base;
- for (i = 0; i < IPA_MEM_PART(v4_rt_num_index); i++) {
- *entry = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- entry++;
+ rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
+ IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v4 rt img\n");
+ return rc;
}
v4_cmd.hash_rules_addr = mem.phys_base;
@@ -2336,7 +2333,7 @@ int _ipa_init_rt4_v3(void)
ipahal_destroy_imm_cmd(cmd_pyld);
free_mem:
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ ipahal_free_dma_mem(&mem);
return rc;
}
@@ -2351,7 +2348,6 @@ int _ipa_init_rt6_v3(void)
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
- u64 *entry;
int i;
int rc = 0;
@@ -2361,18 +2357,12 @@ int _ipa_init_rt6_v3(void)
ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
- mem.size = IPA_MEM_PART(v6_rt_nhash_size);
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
- GFP_KERNEL);
- if (!mem.base) {
- IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
- return -ENOMEM;
- }
-
- entry = mem.base;
- for (i = 0; i < IPA_MEM_PART(v6_rt_num_index); i++) {
- *entry = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- entry++;
+ rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
+ IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v6 rt img\n");
+ return rc;
}
v6_cmd.hash_rules_addr = mem.phys_base;
@@ -2410,7 +2400,7 @@ int _ipa_init_rt6_v3(void)
ipahal_destroy_imm_cmd(cmd_pyld);
free_mem:
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ ipahal_free_dma_mem(&mem);
return rc;
}
@@ -2425,48 +2415,15 @@ int _ipa_init_flt4_v3(void)
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
- u64 *entry;
- int i;
- int rc = 0;
- int flt_spc;
-
- flt_spc = IPA_MEM_PART(v4_flt_hash_size);
- /* bitmap word */
- flt_spc -= IPA_HW_TBL_HDR_WIDTH;
- flt_spc /= IPA_HW_TBL_HDR_WIDTH;
- if (ipa3_ctx->ep_flt_num > flt_spc) {
- IPAERR("space for v4 hash flt hdr is too small\n");
- WARN_ON(1);
- return -EPERM;
- }
- flt_spc = IPA_MEM_PART(v4_flt_nhash_size);
- /* bitmap word */
- flt_spc -= IPA_HW_TBL_HDR_WIDTH;
- flt_spc /= IPA_HW_TBL_HDR_WIDTH;
- if (ipa3_ctx->ep_flt_num > flt_spc) {
- IPAERR("space for v4 non-hash flt hdr is too small\n");
- WARN_ON(1);
- return -EPERM;
- }
-
- /* +1 for filtering header bitmap */
- mem.size = (ipa3_ctx->ep_flt_num + 1) * IPA_HW_TBL_HDR_WIDTH;
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
- GFP_KERNEL);
- if (!mem.base) {
- IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
- return -ENOMEM;
- }
-
- entry = mem.base;
-
- *entry = ((u64)ipa3_ctx->ep_flt_bitmap) << 1;
- IPADBG("v4 flt bitmap 0x%llx\n", *entry);
- entry++;
+ int rc;
- for (i = 0; i <= ipa3_ctx->ep_flt_num; i++) {
- *entry = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- entry++;
+ rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+ IPA_MEM_PART(v4_flt_hash_size),
+ IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v4 flt img\n");
+ return rc;
}
v4_cmd.hash_rules_addr = mem.phys_base;
@@ -2503,7 +2460,7 @@ int _ipa_init_flt4_v3(void)
ipahal_destroy_imm_cmd(cmd_pyld);
free_mem:
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ ipahal_free_dma_mem(&mem);
return rc;
}
@@ -2518,48 +2475,15 @@ int _ipa_init_flt6_v3(void)
struct ipa_mem_buffer mem;
struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld;
- u64 *entry;
- int i;
- int rc = 0;
- int flt_spc;
-
- flt_spc = IPA_MEM_PART(v6_flt_hash_size);
- /* bitmap word */
- flt_spc -= IPA_HW_TBL_HDR_WIDTH;
- flt_spc /= IPA_HW_TBL_HDR_WIDTH;
- if (ipa3_ctx->ep_flt_num > flt_spc) {
- IPAERR("space for v6 hash flt hdr is too small\n");
- WARN_ON(1);
- return -EPERM;
- }
- flt_spc = IPA_MEM_PART(v6_flt_nhash_size);
- /* bitmap word */
- flt_spc -= IPA_HW_TBL_HDR_WIDTH;
- flt_spc /= IPA_HW_TBL_HDR_WIDTH;
- if (ipa3_ctx->ep_flt_num > flt_spc) {
- IPAERR("space for v6 non-hash flt hdr is too small\n");
- WARN_ON(1);
- return -EPERM;
- }
-
- /* +1 for filtering header bitmap */
- mem.size = (ipa3_ctx->ep_flt_num + 1) * IPA_HW_TBL_HDR_WIDTH;
- mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
- GFP_KERNEL);
- if (!mem.base) {
- IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
- return -ENOMEM;
- }
-
- entry = mem.base;
-
- *entry = ((u64)ipa3_ctx->ep_flt_bitmap) << 1;
- IPADBG("v6 flt bitmap 0x%llx\n", *entry);
- entry++;
+ int rc;
- for (i = 0; i <= ipa3_ctx->ep_flt_num; i++) {
- *entry = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- entry++;
+ rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+ IPA_MEM_PART(v6_flt_hash_size),
+ IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+ &mem);
+ if (rc) {
+ IPAERR("fail generate empty v6 flt img\n");
+ return rc;
}
v6_cmd.hash_rules_addr = mem.phys_base;
@@ -2597,7 +2521,7 @@ int _ipa_init_flt6_v3(void)
ipahal_destroy_imm_cmd(cmd_pyld);
free_mem:
- dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+ ipahal_free_dma_mem(&mem);
return rc;
}
@@ -3806,10 +3730,6 @@ fail_register_device:
unregister_chrdev_region(ipa3_ctx->dev_num, 1);
if (ipa3_ctx->pipe_mem_pool)
gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
- dma_free_coherent(ipa3_ctx->pdev,
- ipa3_ctx->empty_rt_tbl_mem.size,
- ipa3_ctx->empty_rt_tbl_mem.base,
- ipa3_ctx->empty_rt_tbl_mem.phys_base);
ipa3_destroy_flt_tbl_idrs();
idr_destroy(&ipa3_ctx->ipa_idr);
kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
@@ -4094,7 +4014,8 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
goto fail_remap;
}
- if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio)) {
+ if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+ ipa3_ctx->pdev)) {
IPAERR("fail to init ipahal\n");
result = -EFAULT;
goto fail_ipahal;
@@ -4312,33 +4233,6 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
- /*
- * setup an empty routing table in system memory, this will be used
- * to delete a routing table cleanly and safely
- */
- ipa3_ctx->empty_rt_tbl_mem.size = IPA_HW_TBL_WIDTH;
-
- ipa3_ctx->empty_rt_tbl_mem.base =
- dma_alloc_coherent(ipa3_ctx->pdev,
- ipa3_ctx->empty_rt_tbl_mem.size,
- &ipa3_ctx->empty_rt_tbl_mem.phys_base,
- GFP_KERNEL);
- if (!ipa3_ctx->empty_rt_tbl_mem.base) {
- IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
- ipa3_ctx->empty_rt_tbl_mem.size);
- result = -ENOMEM;
- goto fail_empty_rt_tbl_alloc;
- }
- if (ipa3_ctx->empty_rt_tbl_mem.phys_base &
- IPA_HW_TBL_SYSADDR_ALIGNMENT) {
- IPAERR("Empty rt-table buf is not address aligned 0x%pad\n",
- &ipa3_ctx->empty_rt_tbl_mem.phys_base);
- result = -EFAULT;
- goto fail_empty_rt_tbl;
- }
- memset(ipa3_ctx->empty_rt_tbl_mem.base, 0,
- ipa3_ctx->empty_rt_tbl_mem.size);
- IPADBG("empty routing table was allocated in system memory");
/* setup the IPA pipe mem pool */
if (resource_p->ipa_pipe_mem_size)
@@ -4455,12 +4349,6 @@ fail_device_create:
fail_alloc_chrdev_region:
if (ipa3_ctx->pipe_mem_pool)
gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
-fail_empty_rt_tbl:
- dma_free_coherent(ipa3_ctx->pdev,
- ipa3_ctx->empty_rt_tbl_mem.size,
- ipa3_ctx->empty_rt_tbl_mem.base,
- ipa3_ctx->empty_rt_tbl_mem.phys_base);
-fail_empty_rt_tbl_alloc:
ipa3_destroy_flt_tbl_idrs();
idr_destroy(&ipa3_ctx->ipa_idr);
fail_dma_pool:
@@ -4486,17 +4374,16 @@ fail_flt_rule_cache:
fail_create_transport_wq:
destroy_workqueue(ipa3_ctx->power_mgmt_wq);
fail_init_hw:
+ ipahal_destroy();
+fail_ipahal:
iounmap(ipa3_ctx->mmio);
fail_remap:
ipa3_disable_clks();
-fail_init_active_client:
ipa3_active_clients_log_destroy();
+fail_init_active_client:
fail_clk:
msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
-fail_ipahal:
- ipa3_bus_scale_table = NULL;
fail_bus_reg:
- ipahal_destroy();
fail_init_mem_partition:
fail_bind:
kfree(ipa3_ctx->ctrl);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 12127a2304bb..0319c5c78b0d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -511,65 +511,65 @@ static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
if (attrib->protocol_eq_present)
pr_err("protocol:%d ", attrib->protocol_eq);
- for (i = 0; i < attrib->num_ihl_offset_range_16; i++) {
+ if (attrib->tc_eq_present)
+ pr_err("tc:%d ", attrib->tc_eq);
+
+ for (i = 0; i < attrib->num_offset_meq_128; i++) {
+ for (j = 0; j < 16; j++) {
+ addr[j] = attrib->offset_meq_128[i].value[j];
+ mask[j] = attrib->offset_meq_128[i].mask[j];
+ }
pr_err(
- "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
- attrib->ihl_offset_range_16[i].offset,
- attrib->ihl_offset_range_16[i].range_low,
- attrib->ihl_offset_range_16[i].range_high);
+ "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+ attrib->offset_meq_128[i].offset,
+ mask, addr);
}
- for (i = 0; i < attrib->num_offset_meq_32; i++) {
+ for (i = 0; i < attrib->num_offset_meq_32; i++)
pr_err(
"(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
attrib->offset_meq_32[i].offset,
attrib->offset_meq_32[i].mask,
attrib->offset_meq_32[i].value);
- }
- if (attrib->tc_eq_present)
- pr_err("tc:%d ", attrib->tc_eq);
-
- if (attrib->fl_eq_present)
- pr_err("flow_label:%d ", attrib->fl_eq);
+ for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
+ pr_err(
+ "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+ attrib->ihl_offset_meq_32[i].offset,
+ attrib->ihl_offset_meq_32[i].mask,
+ attrib->ihl_offset_meq_32[i].value);
- if (attrib->ihl_offset_eq_16_present) {
+ if (attrib->metadata_meq32_present)
pr_err(
- "(ihl_ofst_eq16:%d val:0x%x) ",
- attrib->ihl_offset_eq_16.offset,
- attrib->ihl_offset_eq_16.value);
- }
+ "(metadata: ofst:%u mask:0x%x val:0x%x) ",
+ attrib->metadata_meq32.offset,
+ attrib->metadata_meq32.mask,
+ attrib->metadata_meq32.value);
- for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) {
+ for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
pr_err(
- "(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
- attrib->ihl_offset_meq_32[i].offset,
- attrib->ihl_offset_meq_32[i].mask,
- attrib->ihl_offset_meq_32[i].value);
- }
+ "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+ attrib->ihl_offset_range_16[i].offset,
+ attrib->ihl_offset_range_16[i].range_low,
+ attrib->ihl_offset_range_16[i].range_high);
- for (i = 0; i < attrib->num_offset_meq_128; i++) {
- for (j = 0; j < 16; j++) {
- addr[j] = attrib->offset_meq_128[i].value[j];
- mask[j] = attrib->offset_meq_128[i].mask[j];
- }
+ if (attrib->ihl_offset_eq_32_present)
pr_err(
- "(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
- attrib->offset_meq_128[i].offset,
- mask + 0,
- addr + 0);
- }
+ "(ihl_ofst_eq32:%d val:0x%x) ",
+ attrib->ihl_offset_eq_32.offset,
+ attrib->ihl_offset_eq_32.value);
- if (attrib->metadata_meq32_present) {
+ if (attrib->ihl_offset_eq_16_present)
pr_err(
- "(metadata: ofst:%u mask:0x%x val:0x%x) ",
- attrib->metadata_meq32.offset,
- attrib->metadata_meq32.mask,
- attrib->metadata_meq32.value);
- }
+ "(ihl_ofst_eq16:%d val:0x%x) ",
+ attrib->ihl_offset_eq_16.offset,
+ attrib->ihl_offset_eq_16.value);
+
+ if (attrib->fl_eq_present)
+ pr_err("flow_label:%d ", attrib->fl_eq);
if (attrib->ipv4_frag_eq_present)
- pr_err("frg ");
+ pr_err("frag ");
pr_err("\n");
return 0;
@@ -678,74 +678,107 @@ static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
- int i;
- int j;
- int num_rules;
- struct ipa3_debugfs_rt_entry *entry;
enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
- int num_tbls;
+ int tbls_num;
+ int rules_num;
+ int tbl;
+ int rl;
+ int res = 0;
+ struct ipahal_rt_rule_entry *rules = NULL;
+
+ switch (ip) {
+ case IPA_IP_v4:
+ tbls_num = IPA_MEM_PART(v4_rt_num_index);
+ break;
+ case IPA_IP_v6:
+ tbls_num = IPA_MEM_PART(v6_rt_num_index);
+ break;
+ default:
+ IPAERR("ip type error %d\n", ip);
+ return -EINVAL;
+ };
- if (ip == IPA_IP_v4)
- num_tbls = IPA_MEM_PART(v4_rt_num_index);
- else
- num_tbls = IPA_MEM_PART(v6_rt_num_index);
+ IPADBG("Tring to parse %d H/W routing tables - IP=%d\n", tbls_num, ip);
- entry = kzalloc(sizeof(*entry) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
- if (!entry)
+ rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+ if (!rules) {
+ IPAERR("failed to allocate mem for tbl rules\n");
return -ENOMEM;
+ }
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
mutex_lock(&ipa3_ctx->lock);
- for (j = 0; j < num_tbls; j++) {
- pr_err("== NON HASHABLE TABLE tbl:%d ==\n", j);
- num_rules = IPA_DBG_MAX_RULE_IN_TBL;
- ipa3_rt_read_tbl_from_hw(j, ip, false, entry, &num_rules);
- for (i = 0; i < num_rules; i++) {
+
+ for (tbl = 0 ; tbl < tbls_num ; tbl++) {
+ pr_err("=== Routing Table %d = Hashable Rules ===\n", tbl);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem system table\n");
+
+ for (rl = 0 ; rl < rules_num ; rl++) {
pr_err("rule_idx:%d dst ep:%d L:%u ",
- i, entry[i].dst, entry[i].system);
+ rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
- if (entry[i].is_proc_ctx)
- pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
- entry[i].hdr_ofset,
- entry[i].eq_attrib.rule_eq_bitmap);
+ if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+ pr_err("proc_ctx:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
else
- pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
- entry[i].hdr_ofset,
- entry[i].eq_attrib.rule_eq_bitmap);
+ pr_err("hdr_ofst:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
pr_err("rule_id:%u prio:%u retain_hdr:%u ",
- entry[i].rule_id, entry[i].prio,
- entry[i].retain_hdr);
- ipa3_attrib_dump_eq(&entry[i].eq_attrib);
+ rules[rl].id, rules[rl].priority,
+ rules[rl].retain_hdr);
+ ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
}
- pr_err("== HASHABLE TABLE tbl:%d ==\n", j);
- num_rules = IPA_DBG_MAX_RULE_IN_TBL;
- ipa3_rt_read_tbl_from_hw(j, ip, true, entry, &num_rules);
- for (i = 0; i < num_rules; i++) {
+ pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem system table\n");
+
+ for (rl = 0 ; rl < rules_num ; rl++) {
pr_err("rule_idx:%d dst ep:%d L:%u ",
- i, entry[i].dst, entry[i].system);
+ rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
- if (entry[i].is_proc_ctx)
- pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
- entry[i].hdr_ofset,
- entry[i].eq_attrib.rule_eq_bitmap);
+ if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+ pr_err("proc_ctx:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
else
- pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
- entry[i].hdr_ofset,
- entry[i].eq_attrib.rule_eq_bitmap);
-
- pr_err("rule_id:%u prio:%u retain_hdr:%u ",
- entry[i].rule_id, entry[i].prio,
- entry[i].retain_hdr);
- ipa3_attrib_dump_eq(&entry[i].eq_attrib);
+ pr_err("hdr_ofst:%u attrib_mask:%08x ",
+ rules[rl].hdr_ofst,
+ rules[rl].eq_attrib.rule_eq_bitmap);
+
+ pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
+ rules[rl].id, rules[rl].priority,
+ rules[rl].retain_hdr);
+ ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
}
+ pr_err("\n");
}
+
+bail:
mutex_unlock(&ipa3_ctx->lock);
- kfree(entry);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-
- return 0;
+ kfree(rules);
+ return res;
}
static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf,
@@ -856,59 +889,85 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
- int i;
- int j;
- int num_rules;
- struct ipa3_flt_entry *entry;
+ int pipe;
+ int rl;
+ int rules_num;
+ struct ipahal_flt_rule_entry *rules;
enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
u32 rt_tbl_idx;
u32 bitmap;
+ int res = 0;
- entry = kzalloc(sizeof(*entry) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
- if (!entry)
+ IPADBG("Tring to parse %d H/W filtering tables - IP=%d\n",
+ ipa3_ctx->ep_flt_num, ip);
+
+ rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+ if (!rules) {
+ IPAERR("failed to allocate mem for tbl rules\n");
return -ENOMEM;
+ }
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
mutex_lock(&ipa3_ctx->lock);
- for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) {
- if (!ipa_is_ep_support_flt(j))
+ for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) {
+ if (!ipa_is_ep_support_flt(pipe))
continue;
- pr_err("== NON HASHABLE TABLE ep:%d ==\n", j);
- num_rules = IPA_DBG_MAX_RULE_IN_TBL;
- ipa3_flt_read_tbl_from_hw(j, ip, false, entry, &num_rules);
- for (i = 0; i < num_rules; i++) {
- rt_tbl_idx = entry[i].rule.rt_tbl_idx;
- bitmap = entry[i].rule.eq_attrib.rule_eq_bitmap;
+ pr_err("=== Filtering Table ep:%d = Hashable Rules ===\n",
+ pipe);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem sys table\n");
+
+ for (rl = 0; rl < rules_num; rl++) {
+ rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+ bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
- j, i, entry[i].rule.action, rt_tbl_idx);
+ pipe, rl, rules[rl].rule.action, rt_tbl_idx);
pr_err("attrib_mask:%08x retain_hdr:%d ",
- bitmap, entry[i].rule.retain_hdr);
+ bitmap, rules[rl].rule.retain_hdr);
pr_err("rule_id:%u prio:%u ",
- entry[i].rule_id, entry[i].prio);
- ipa3_attrib_dump_eq(&entry[i].rule.eq_attrib);
+ rules[rl].id, rules[rl].priority);
+ ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
}
- pr_err("== HASHABLE TABLE ep:%d ==\n", j);
- num_rules = IPA_DBG_MAX_RULE_IN_TBL;
- ipa3_flt_read_tbl_from_hw(j, ip, true, entry, &num_rules);
- for (i = 0; i < num_rules; i++) {
- rt_tbl_idx = entry[i].rule.rt_tbl_idx;
- bitmap = entry[i].rule.eq_attrib.rule_eq_bitmap;
+ pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
+ pipe);
+ rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+ res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules,
+ &rules_num);
+ if (res) {
+ pr_err("ERROR - Check the logs\n");
+ IPAERR("failed reading tbl from hw\n");
+ goto bail;
+ }
+ if (!rules_num)
+ pr_err("-->No rules. Empty tbl or modem sys table\n");
+ for (rl = 0; rl < rules_num; rl++) {
+ rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+ bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
- j, i, entry[i].rule.action, rt_tbl_idx);
+ pipe, rl, rules[rl].rule.action, rt_tbl_idx);
pr_err("attrib_mask:%08x retain_hdr:%d ",
- bitmap, entry[i].rule.retain_hdr);
- pr_err("rule_id:%u max_prio:%u prio:%u ",
- entry[i].rule_id,
- entry[i].rule.max_prio, entry[i].prio);
- ipa3_attrib_dump_eq(&entry[i].rule.eq_attrib);
+ bitmap, rules[rl].rule.retain_hdr);
+ pr_err("rule_id:%u prio:%u ",
+ rules[rl].id, rules[rl].priority);
+ ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
}
+ pr_err("\n");
}
+
+bail:
mutex_unlock(&ipa3_ctx->lock);
- kfree(entry);
+ kfree(rules);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-
- return 0;
+ return res;
}
static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 429574878d1a..11ae7efa4a0a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -19,6 +19,7 @@
#include "ipa_i.h"
#include "ipa_trace.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
#define IPA_LAST_DESC_CNT 0xFFFF
#define POLLING_INACTIVITY_RX 40
@@ -2562,7 +2563,7 @@ begin:
*/
if (status.exception ==
IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
- status.rt_rule_id == IPA_RULE_ID_INVALID)
+ ipahal_is_rule_miss_id(status.rt_rule_id))
sys->drop_packet = true;
if (skb->len == pkt_status_sz &&
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index f4307d2bf1a0..aff39fc18f67 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -12,6 +12,7 @@
#include "ipa_i.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
@@ -24,161 +25,6 @@
(IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
)
-static int ipa3_generate_hw_rule_from_eq(
- const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
-{
- int num_offset_meq_32 = attrib->num_offset_meq_32;
- int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
- int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
- int num_offset_meq_128 = attrib->num_offset_meq_128;
- int i;
- int extra_bytes;
- u8 *extra;
- u8 *rest;
-
- extra_bytes = ipa3_calc_extra_wrd_bytes(attrib);
- /* only 3 eq does not have extra word param, 13 out of 16 is the number
- * of equations that needs extra word param*/
- if (extra_bytes > 13) {
- IPAERR("too much extra bytes\n");
- return -EPERM;
- } else if (extra_bytes > IPA_HW_TBL_HDR_WIDTH) {
- /* two extra words */
- extra = *buf;
- rest = *buf + IPA_HW_TBL_HDR_WIDTH * 2;
- } else if (extra_bytes > 0) {
- /* single exra word */
- extra = *buf;
- rest = *buf + IPA_HW_TBL_HDR_WIDTH;
- } else {
- /* no extra words */
- extra = NULL;
- rest = *buf;
- }
-
- if (attrib->tos_eq_present)
- extra = ipa3_write_8(attrib->tos_eq, extra);
-
- if (attrib->protocol_eq_present)
- extra = ipa3_write_8(attrib->protocol_eq, extra);
-
- if (attrib->tc_eq_present)
- extra = ipa3_write_8(attrib->tc_eq, extra);
-
- if (num_offset_meq_128) {
- extra = ipa3_write_8(attrib->offset_meq_128[0].offset, extra);
- for (i = 0; i < 8; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[0].mask[i],
- rest);
- for (i = 0; i < 8; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[0].value[i],
- rest);
- for (i = 8; i < 16; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[0].mask[i],
- rest);
- for (i = 8; i < 16; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[0].value[i],
- rest);
- num_offset_meq_128--;
- }
-
- if (num_offset_meq_128) {
- extra = ipa3_write_8(attrib->offset_meq_128[1].offset, extra);
- for (i = 0; i < 8; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[1].mask[i],
- rest);
- for (i = 0; i < 8; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[1].value[i],
- rest);
- for (i = 8; i < 16; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[1].mask[i],
- rest);
- for (i = 8; i < 16; i++)
- rest = ipa3_write_8(attrib->offset_meq_128[1].value[i],
- rest);
- num_offset_meq_128--;
- }
-
- if (num_offset_meq_32) {
- extra = ipa3_write_8(attrib->offset_meq_32[0].offset, extra);
- rest = ipa3_write_32(attrib->offset_meq_32[0].mask, rest);
- rest = ipa3_write_32(attrib->offset_meq_32[0].value, rest);
- num_offset_meq_32--;
- }
-
- if (num_offset_meq_32) {
- extra = ipa3_write_8(attrib->offset_meq_32[1].offset, extra);
- rest = ipa3_write_32(attrib->offset_meq_32[1].mask, rest);
- rest = ipa3_write_32(attrib->offset_meq_32[1].value, rest);
- num_offset_meq_32--;
- }
-
- if (num_ihl_offset_meq_32) {
- extra = ipa3_write_8(attrib->ihl_offset_meq_32[0].offset,
- extra);
-
- rest = ipa3_write_32(attrib->ihl_offset_meq_32[0].mask, rest);
- rest = ipa3_write_32(attrib->ihl_offset_meq_32[0].value, rest);
- num_ihl_offset_meq_32--;
- }
-
- if (num_ihl_offset_meq_32) {
- extra = ipa3_write_8(attrib->ihl_offset_meq_32[1].offset,
- extra);
-
- rest = ipa3_write_32(attrib->ihl_offset_meq_32[1].mask, rest);
- rest = ipa3_write_32(attrib->ihl_offset_meq_32[1].value, rest);
- num_ihl_offset_meq_32--;
- }
-
- if (attrib->metadata_meq32_present) {
- rest = ipa3_write_32(attrib->metadata_meq32.mask, rest);
- rest = ipa3_write_32(attrib->metadata_meq32.value, rest);
- }
-
- if (num_ihl_offset_range_16) {
- extra = ipa3_write_8(attrib->ihl_offset_range_16[0].offset,
- extra);
-
- rest = ipa3_write_16(attrib->ihl_offset_range_16[0].range_high,
- rest);
- rest = ipa3_write_16(attrib->ihl_offset_range_16[0].range_low,
- rest);
- num_ihl_offset_range_16--;
- }
-
- if (num_ihl_offset_range_16) {
- extra = ipa3_write_8(attrib->ihl_offset_range_16[1].offset,
- extra);
-
- rest = ipa3_write_16(attrib->ihl_offset_range_16[1].range_high,
- rest);
- rest = ipa3_write_16(attrib->ihl_offset_range_16[1].range_low,
- rest);
- num_ihl_offset_range_16--;
- }
-
- if (attrib->ihl_offset_eq_32_present) {
- extra = ipa3_write_8(attrib->ihl_offset_eq_32.offset, extra);
- rest = ipa3_write_32(attrib->ihl_offset_eq_32.value, rest);
- }
-
- if (attrib->ihl_offset_eq_16_present) {
- extra = ipa3_write_8(attrib->ihl_offset_eq_16.offset, extra);
- rest = ipa3_write_16(attrib->ihl_offset_eq_16.value, rest);
- rest = ipa3_write_16(0, rest);
- }
-
- if (attrib->fl_eq_present)
- rest = ipa3_write_32(attrib->fl_eq & 0xFFFFF, rest);
-
- extra = ipa3_pad_to_64(extra);
- rest = ipa3_pad_to_64(rest);
- *buf = rest;
-
- return 0;
-}
-
/**
* ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
* @ip: the ip address family type
@@ -200,72 +46,24 @@ static int ipa3_generate_hw_rule_from_eq(
static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
struct ipa3_flt_entry *entry, u8 *buf)
{
- struct ipa3_flt_rule_hw_hdr *hdr;
- const struct ipa_flt_rule *rule =
- (const struct ipa_flt_rule *)&entry->rule;
- u16 en_rule = 0;
- u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
- u8 *start;
-
- if (buf == NULL) {
- memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
- buf = (u8 *)tmp;
- } else {
- if ((long)buf & IPA_HW_RULE_START_ALIGNMENT) {
- IPAERR("buff is not rule start aligned\n");
- return -EPERM;
- }
- }
+ struct ipahal_flt_rule_gen_params gen_params;
+ int res = 0;
- start = buf;
- hdr = (struct ipa3_flt_rule_hw_hdr *)buf;
- hdr->u.hdr.action = entry->rule.action;
- hdr->u.hdr.retain_hdr = entry->rule.retain_hdr;
+ memset(&gen_params, 0, sizeof(gen_params));
+
+ gen_params.ipt = ip;
if (entry->rt_tbl)
- hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+ gen_params.rt_tbl_idx = entry->rt_tbl->idx;
else
- hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx;
- hdr->u.hdr.rsvd1 = 0;
- hdr->u.hdr.rsvd2 = 0;
- hdr->u.hdr.rsvd3 = 0;
- BUG_ON(entry->prio & ~0x3FF);
- hdr->u.hdr.priority = entry->prio;
- BUG_ON(entry->rule_id & ~0x3FF);
- hdr->u.hdr.rule_id = entry->rule_id;
- buf += sizeof(struct ipa3_flt_rule_hw_hdr);
-
- if (rule->eq_attrib_type) {
- if (ipa3_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) {
- IPAERR("fail to generate hw rule from eq\n");
- return -EPERM;
- }
- en_rule = rule->eq_attrib.rule_eq_bitmap;
- } else {
- if (ipa3_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
- IPAERR("fail to generate hw rule\n");
- return -EPERM;
- }
- }
-
- IPADBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
- en_rule,
- hdr->u.hdr.action,
- hdr->u.hdr.rt_tbl_idx,
- hdr->u.hdr.retain_hdr);
- IPADBG_LOW("priority=%d, rule_id=%d\n",
- hdr->u.hdr.priority,
- hdr->u.hdr.rule_id);
+ gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
- hdr->u.hdr.en_rule = en_rule;
- ipa3_write_64(hdr->u.word, (u8 *)hdr);
+ gen_params.priority = entry->prio;
+ gen_params.id = entry->rule_id;
+ gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
- if (entry->hw_len == 0) {
- entry->hw_len = buf - start;
- } else if (entry->hw_len != (buf - start)) {
- IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%td\n",
- entry->hw_len, (buf - start));
- return -EPERM;
- }
+ res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+ if (res)
+ IPAERR("failed to generate flt h/w rule\n");
return 0;
}
@@ -284,83 +82,20 @@ static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
tbl = &ipa3_ctx->flt_tbl[i][ip];
if (tbl->prev_mem[rlt].phys_base) {
IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
- dma_free_coherent(ipa3_ctx->pdev,
- tbl->prev_mem[rlt].size,
- tbl->prev_mem[rlt].base,
- tbl->prev_mem[rlt].phys_base);
- memset(&tbl->prev_mem[rlt], 0,
- sizeof(tbl->prev_mem[rlt]));
+ ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
}
if (list_empty(&tbl->head_flt_rule_list)) {
if (tbl->curr_mem[rlt].phys_base) {
IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
i);
- dma_free_coherent(ipa3_ctx->pdev,
- tbl->curr_mem[rlt].size,
- tbl->curr_mem[rlt].base,
- tbl->curr_mem[rlt].phys_base);
- memset(&tbl->curr_mem[rlt], 0,
- sizeof(tbl->curr_mem[rlt]));
+ ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
}
}
}
}
/**
- * ipa_alloc_init_flt_tbl_hdr() - allocate and initialize buffers for
- * flt tables headers to be filled into sram
- * @ip: the ip address family type
- * @hash_hdr: address of the dma buffer for the hashable flt tbl header
- * @nhash_hdr: address of the dma buffer for the non-hashable flt tbl header
- *
- * Return: 0 on success, negative on failure
- */
-static int ipa_alloc_init_flt_tbl_hdr(enum ipa_ip_type ip,
- struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr)
-{
- int num_hdrs;
- u64 *hash_entr;
- u64 *nhash_entr;
- int i;
-
- num_hdrs = ipa3_ctx->ep_flt_num;
-
- hash_hdr->size = num_hdrs * IPA_HW_TBL_HDR_WIDTH;
- hash_hdr->base = dma_alloc_coherent(ipa3_ctx->pdev, hash_hdr->size,
- &hash_hdr->phys_base, GFP_KERNEL);
- if (!hash_hdr->base) {
- IPAERR("fail to alloc DMA buff of size %d\n", hash_hdr->size);
- goto err;
- }
-
- nhash_hdr->size = num_hdrs * IPA_HW_TBL_HDR_WIDTH;
- nhash_hdr->base = dma_alloc_coherent(ipa3_ctx->pdev, nhash_hdr->size,
- &nhash_hdr->phys_base, GFP_KERNEL);
- if (!nhash_hdr->base) {
- IPAERR("fail to alloc DMA buff of size %d\n", nhash_hdr->size);
- goto nhash_alloc_fail;
- }
-
- hash_entr = (u64 *)hash_hdr->base;
- nhash_entr = (u64 *)nhash_hdr->base;
- for (i = 0; i < num_hdrs; i++) {
- *hash_entr = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- *nhash_entr = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- hash_entr++;
- nhash_entr++;
- }
-
- return 0;
-
-nhash_alloc_fail:
- dma_free_coherent(ipa3_ctx->pdev, hash_hdr->size,
- hash_hdr->base, hash_hdr->phys_base);
-err:
- return -ENOMEM;
-}
-
-/**
* ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
* assign priorities to the rules, calculate their sizes and calculate
* the overall table size
@@ -374,27 +109,35 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
struct ipa3_flt_tbl *tbl, int pipe_idx)
{
struct ipa3_flt_entry *entry;
- u16 prio_i = 1;
+ int prio_i;
+ int max_prio;
+ u32 hdr_width;
tbl->sz[IPA_RULE_HASHABLE] = 0;
tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
- list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+ max_prio = ipahal_get_rule_max_priority();
- entry->prio = entry->rule.max_prio ?
- IPA_RULE_MAX_PRIORITY : prio_i++;
+ prio_i = max_prio;
+ list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
- if (entry->prio > IPA_RULE_MIN_PRIORITY) {
- IPAERR("cannot allocate new priority for flt rule\n");
- return -EPERM;
+ if (entry->rule.max_prio) {
+ entry->prio = max_prio;
+ } else {
+ if (ipahal_rule_decrease_priority(&prio_i)) {
+ IPAERR("cannot decrease rule priority - %d\n",
+ prio_i);
+ return -EPERM;
+ }
+ entry->prio = prio_i;
}
if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
IPAERR("failed to calculate HW FLT rule size\n");
return -EPERM;
}
- IPADBG_LOW("pipe %d hw_len %d priority %u\n",
- pipe_idx, entry->hw_len, entry->prio);
+ IPADBG("pipe %d rule_id (handle) %u hw_len %d priority %u\n",
+ pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
if (entry->rule.hashable)
tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
@@ -409,11 +152,13 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
return 0;
}
+ hdr_width = ipahal_get_hw_tbl_hdr_width();
+
/* for the header word */
if (tbl->sz[IPA_RULE_HASHABLE])
- tbl->sz[IPA_RULE_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
+ tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
if (tbl->sz[IPA_RULE_NON_HASHABLE])
- tbl->sz[IPA_RULE_NON_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
+ tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
@@ -422,51 +167,6 @@ static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
}
/**
- * ipa_get_flt_tbl_lcl_bdy_size() - calc the overall memory needed on sram
- * to hold the hashable and non-hashable flt rules tables bodies
- * @ip: the ip address family type
- * @hash_bdy_sz[OUT]: size on local sram for all tbls hashable rules
- * @nhash_bdy_sz[OUT]: size on local sram for all tbls non-hashable rules
- *
- * Return: none
- */
-static void ipa_get_flt_tbl_lcl_bdy_size(enum ipa_ip_type ip,
- u32 *hash_bdy_sz, u32 *nhash_bdy_sz)
-{
- struct ipa3_flt_tbl *tbl;
- int i;
-
- *hash_bdy_sz = 0;
- *nhash_bdy_sz = 0;
-
- for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
- if (!ipa_is_ep_support_flt(i))
- continue;
- tbl = &ipa3_ctx->flt_tbl[i][ip];
- if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
- tbl->sz[IPA_RULE_HASHABLE]) {
- *hash_bdy_sz += tbl->sz[IPA_RULE_HASHABLE];
- *hash_bdy_sz -= IPA_HW_TBL_HDR_WIDTH;
- /* for table terminator */
- *hash_bdy_sz += IPA_HW_TBL_WIDTH;
- /* align the start of local rule-set */
- *hash_bdy_sz += IPA_HW_TBL_LCLADDR_ALIGNMENT;
- *hash_bdy_sz &= ~IPA_HW_TBL_LCLADDR_ALIGNMENT;
- }
- if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
- tbl->sz[IPA_RULE_NON_HASHABLE]) {
- *nhash_bdy_sz += tbl->sz[IPA_RULE_NON_HASHABLE];
- *nhash_bdy_sz -= IPA_HW_TBL_HDR_WIDTH;
- /* for table terminator */
- *nhash_bdy_sz += IPA_HW_TBL_WIDTH;
- /* align the start of local rule-set */
- *nhash_bdy_sz += IPA_HW_TBL_LCLADDR_ALIGNMENT;
- *nhash_bdy_sz &= ~IPA_HW_TBL_LCLADDR_ALIGNMENT;
- }
- }
-}
-
-/**
* ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
* (rules and tables) to HW format and fill it in the given buffers
* @ip: the ip address family type
@@ -504,28 +204,22 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
continue;
}
if (tbl->in_sys[rlt]) {
- /* only body (no header) with rule-set terminator */
+ /* only body (no header) */
tbl_mem.size = tbl->sz[rlt] -
- IPA_HW_TBL_HDR_WIDTH + IPA_HW_TBL_WIDTH;
- tbl_mem.base =
- dma_alloc_coherent(ipa3_ctx->pdev, tbl_mem.size,
- &tbl_mem.phys_base, GFP_KERNEL);
- if (!tbl_mem.base) {
- IPAERR("fail to alloc DMA buf of size %d\n",
+ ipahal_get_hw_tbl_hdr_width();
+ if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+ IPAERR("fail to alloc sys tbl of size %d\n",
tbl_mem.size);
goto err;
}
- if (tbl_mem.phys_base & IPA_HW_TBL_SYSADDR_ALIGNMENT) {
- IPAERR("sys rt tbl address is not aligned\n");
- goto align_err;
- }
- /* update the hdr at the right index */
- ipa3_write_64(tbl_mem.phys_base, hdr +
- hdr_idx * IPA_HW_TBL_HDR_WIDTH);
+ if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+ hdr, hdr_idx, true)) {
+ IPAERR("fail to wrt sys tbl addr to hdr\n");
+ goto hdr_update_fail;
+ }
tbl_mem_buf = tbl_mem.base;
- memset(tbl_mem_buf, 0, tbl_mem.size);
/* generate the rule-set */
list_for_each_entry(entry, &tbl->head_flt_rule_list,
@@ -536,14 +230,11 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
ip, entry, tbl_mem_buf);
if (res) {
IPAERR("failed to gen HW FLT rule\n");
- goto align_err;
+ goto hdr_update_fail;
}
tbl_mem_buf += entry->hw_len;
}
- /* write the rule-set terminator */
- tbl_mem_buf = ipa3_write_64(0, tbl_mem_buf);
-
if (tbl->curr_mem[rlt].phys_base) {
WARN_ON(tbl->prev_mem[rlt].phys_base);
tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
@@ -551,15 +242,13 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
tbl->curr_mem[rlt] = tbl_mem;
} else {
offset = body_i - base + body_ofst;
- if (offset & IPA_HW_TBL_LCLADDR_ALIGNMENT) {
- IPAERR("ofst isn't lcl addr aligned %llu\n",
- offset);
- goto err;
- }
/* update the hdr at the right index */
- ipa3_write_64(IPA_HW_TBL_OFSET_TO_LCLADDR(offset),
- hdr + hdr_idx * IPA_HW_TBL_HDR_WIDTH);
+ if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+ hdr_idx, true)) {
+ IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+ goto hdr_update_fail;
+ }
/* generate the rule-set */
list_for_each_entry(entry, &tbl->head_flt_rule_list,
@@ -575,25 +264,21 @@ static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
body_i += entry->hw_len;
}
- /* write the rule-set terminator */
- body_i = ipa3_write_64(0, body_i);
-
/**
* advance body_i to next table alignment as local tables
* are order back-to-back
*/
- body_i += IPA_HW_TBL_LCLADDR_ALIGNMENT;
+ body_i += ipahal_get_lcl_tbl_addr_alignment();
body_i = (u8 *)((long)body_i &
- ~IPA_HW_TBL_LCLADDR_ALIGNMENT);
+ ~ipahal_get_lcl_tbl_addr_alignment());
}
hdr_idx++;
}
return 0;
-align_err:
- dma_free_coherent(ipa3_ctx->pdev, tbl_mem.size,
- tbl_mem.base, tbl_mem.phys_base);
+hdr_update_fail:
+ ipahal_free_dma_mem(&tbl_mem);
err:
return -EPERM;
}
@@ -603,24 +288,16 @@ err:
* headers and bodies are being created into buffers that will be filled into
* the local memory (sram)
* @ip: the ip address family type
- * @hash_hdr: address of the dma buffer containing hashable rules tbl headers
- * @nhash_hdr: address of the dma buffer containing
- * non-hashable rules tbl headers
- * @hash_bdy: address of the dma buffer containing hashable local rules
- * @nhash_bdy: address of the dma buffer containing non-hashable local rules
+ * @alloc_params: In and Out parameters for the allocations of the buffers
+ * 4 buffers: hdr and bdy, each hashable and non-hashable
*
* Return: 0 on success, negative on failure
*/
static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
- struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr,
- struct ipa_mem_buffer *hash_bdy, struct ipa_mem_buffer *nhash_bdy)
+ struct ipahal_fltrt_alloc_imgs_params *alloc_params)
{
u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
- u32 hash_bdy_sz;
- u32 nhash_bdy_sz;
- struct ipa3_flt_tbl *tbl;
int rc = 0;
- int i;
if (ip == IPA_IP_v4) {
nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
@@ -634,63 +311,22 @@ static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
IPA_MEM_PART(v6_flt_hash_ofst);
}
- if (ipa_alloc_init_flt_tbl_hdr(ip, hash_hdr, nhash_hdr)) {
- IPAERR("fail to alloc and init flt tbl hdr\n");
+ if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+ IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
rc = -ENOMEM;
- goto no_flt_tbls;
- }
-
- for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
- if (!ipa_is_ep_support_flt(i))
- continue;
- tbl = &ipa3_ctx->flt_tbl[i][ip];
- if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
- rc = -EPERM;
- goto prep_failed;
- }
- }
-
- ipa_get_flt_tbl_lcl_bdy_size(ip, &hash_bdy_sz, &nhash_bdy_sz);
- IPADBG_LOW("total flt tbl local body sizes: hash %u nhash %u\n",
- hash_bdy_sz, nhash_bdy_sz);
-
- hash_bdy->size = hash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
- hash_bdy->size &= ~IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
- nhash_bdy->size = nhash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
- nhash_bdy->size &= ~IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
-
- if (hash_bdy->size) {
- hash_bdy->base = dma_alloc_coherent(ipa3_ctx->pdev,
- hash_bdy->size, &hash_bdy->phys_base, GFP_KERNEL);
- if (!hash_bdy->base) {
- IPAERR("fail to alloc DMA buff of size %d\n",
- hash_bdy->size);
- rc = -ENOMEM;
- goto prep_failed;
- }
- memset(hash_bdy->base, 0, hash_bdy->size);
- }
-
- if (nhash_bdy->size) {
- nhash_bdy->base = dma_alloc_coherent(ipa3_ctx->pdev,
- nhash_bdy->size, &nhash_bdy->phys_base, GFP_KERNEL);
- if (!nhash_bdy->base) {
- IPAERR("fail to alloc DMA buff of size %d\n",
- hash_bdy->size);
- rc = -ENOMEM;
- goto nhash_bdy_fail;
- }
- memset(nhash_bdy->base, 0, nhash_bdy->size);
+ goto allocate_failed;
}
if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
- hash_bdy->base, hash_hdr->base, hash_bdy_start_ofst)) {
+ alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+ hash_bdy_start_ofst)) {
IPAERR("fail to translate hashable flt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
}
if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
- nhash_bdy->base, nhash_hdr->base, nhash_bdy_start_ofst)) {
+ alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+ nhash_bdy_start_ofst)) {
IPAERR("fail to translate non-hash flt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
@@ -699,19 +335,14 @@ static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
return rc;
translate_fail:
- if (nhash_bdy->size)
- dma_free_coherent(ipa3_ctx->pdev, nhash_bdy->size,
- nhash_bdy->base, nhash_bdy->phys_base);
-nhash_bdy_fail:
- if (hash_bdy->size)
- dma_free_coherent(ipa3_ctx->pdev, hash_bdy->size,
- hash_bdy->base, hash_bdy->phys_base);
-prep_failed:
- dma_free_coherent(ipa3_ctx->pdev, hash_hdr->size,
- hash_hdr->base, hash_hdr->phys_base);
- dma_free_coherent(ipa3_ctx->pdev, nhash_hdr->size,
- nhash_hdr->base, nhash_hdr->phys_base);
-no_flt_tbls:
+ if (alloc_params->hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params->hash_hdr);
+ ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+ if (alloc_params->hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->hash_bdy);
+ if (alloc_params->nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_failed:
return rc;
}
@@ -827,8 +458,7 @@ static bool ipa_flt_skip_pipe_config(int pipe)
*/
int __ipa_commit_flt_v3(enum ipa_ip_type ip)
{
- struct ipa_mem_buffer hash_bdy, nhash_bdy;
- struct ipa_mem_buffer hash_hdr, nhash_hdr;
+ struct ipahal_fltrt_alloc_imgs_params alloc_params;
int rc = 0;
struct ipa3_desc *desc;
struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
@@ -842,14 +472,21 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
bool lcl_hash, lcl_nhash;
struct ipahal_reg_fltrt_hash_flush flush;
struct ipahal_reg_valmask valmask;
+ u32 tbl_hdr_width;
+ struct ipa3_flt_tbl *tbl;
+
+ tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+ memset(&alloc_params, 0, sizeof(alloc_params));
+ alloc_params.ipt = ip;
+ alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
if (ip == IPA_IP_v4) {
lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_flt_hash_ofst) +
- IPA_HW_TBL_HDR_WIDTH; /* to skip the bitmap */
+ tbl_hdr_width; /* to skip the bitmap */
lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_flt_nhash_ofst) +
- IPA_HW_TBL_HDR_WIDTH; /* to skip the bitmap */
+ tbl_hdr_width; /* to skip the bitmap */
lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_v4_flt_hash_ofst);
lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
@@ -859,10 +496,10 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
} else {
lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_flt_hash_ofst) +
- IPA_HW_TBL_HDR_WIDTH; /* to skip the bitmap */
+ tbl_hdr_width; /* to skip the bitmap */
lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_flt_nhash_ofst) +
- IPA_HW_TBL_HDR_WIDTH; /* to skip the bitmap */
+ tbl_hdr_width; /* to skip the bitmap */
lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_v6_flt_hash_ofst);
lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
@@ -871,19 +508,44 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
}
- if (ipa_generate_flt_hw_tbl_img(ip,
- &hash_hdr, &nhash_hdr, &hash_bdy, &nhash_bdy)) {
+ for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+ if (!ipa_is_ep_support_flt(i))
+ continue;
+ tbl = &ipa3_ctx->flt_tbl[i][ip];
+ if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
+ rc = -EPERM;
+ goto prep_failed;
+ }
+ if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+ tbl->sz[IPA_RULE_HASHABLE]) {
+ alloc_params.num_lcl_hash_tbls++;
+ alloc_params.total_sz_lcl_hash_tbls +=
+ tbl->sz[IPA_RULE_HASHABLE];
+ alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+
+ }
+ if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+ tbl->sz[IPA_RULE_NON_HASHABLE]) {
+ alloc_params.num_lcl_nhash_tbls++;
+ alloc_params.total_sz_lcl_nhash_tbls +=
+ tbl->sz[IPA_RULE_NON_HASHABLE];
+ alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+ }
+ }
+
+ if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
rc = -EFAULT;
- goto fail_gen;
+ goto prep_failed;
}
- if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE, &hash_bdy)) {
+ if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+ &alloc_params.hash_bdy)) {
rc = -EFAULT;
goto fail_size_valid;
}
if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
- &nhash_bdy)) {
+ &alloc_params.nhash_bdy)) {
rc = -EFAULT;
goto fail_size_valid;
}
@@ -936,11 +598,11 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = IPA_HW_TBL_HDR_WIDTH;
- mem_cmd.system_addr = nhash_hdr.phys_base +
- hdr_idx * IPA_HW_TBL_HDR_WIDTH;
+ mem_cmd.size = tbl_hdr_width;
+ mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
+ hdr_idx * tbl_hdr_width;
mem_cmd.local_addr = lcl_nhash_hdr +
- hdr_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_idx * tbl_hdr_width;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
@@ -957,11 +619,11 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = IPA_HW_TBL_HDR_WIDTH;
- mem_cmd.system_addr = hash_hdr.phys_base +
- hdr_idx * IPA_HW_TBL_HDR_WIDTH;
+ mem_cmd.size = tbl_hdr_width;
+ mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
+ hdr_idx * tbl_hdr_width;
mem_cmd.local_addr = lcl_hash_hdr +
- hdr_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_idx * tbl_hdr_width;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
if (!cmd_pyld[num_cmd]) {
@@ -982,8 +644,8 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = nhash_bdy.size;
- mem_cmd.system_addr = nhash_bdy.phys_base;
+ mem_cmd.size = alloc_params.nhash_bdy.size;
+ mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
mem_cmd.local_addr = lcl_nhash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
@@ -1002,8 +664,8 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = hash_bdy.size;
- mem_cmd.system_addr = hash_bdy.phys_base;
+ mem_cmd.size = alloc_params.hash_bdy.size;
+ mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
mem_cmd.local_addr = lcl_hash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
@@ -1026,21 +688,25 @@ int __ipa_commit_flt_v3(enum ipa_ip_type ip)
}
IPADBG_LOW("Hashable HEAD\n");
- IPA_DUMP_BUFF(hash_hdr.base, hash_hdr.phys_base, hash_hdr.size);
+ IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+ alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
IPADBG_LOW("Non-Hashable HEAD\n");
- IPA_DUMP_BUFF(nhash_hdr.base, nhash_hdr.phys_base, nhash_hdr.size);
+ IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+ alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
- if (hash_bdy.size) {
+ if (alloc_params.hash_bdy.size) {
IPADBG_LOW("Hashable BODY\n");
- IPA_DUMP_BUFF(hash_bdy.base,
- hash_bdy.phys_base, hash_bdy.size);
+ IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+ alloc_params.hash_bdy.phys_base,
+ alloc_params.hash_bdy.size);
}
- if (nhash_bdy.size) {
+ if (alloc_params.nhash_bdy.size) {
IPADBG_LOW("Non-Hashable BODY\n");
- IPA_DUMP_BUFF(nhash_bdy.base,
- nhash_bdy.phys_base, nhash_bdy.size);
+ IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+ alloc_params.nhash_bdy.phys_base,
+ alloc_params.nhash_bdy.size);
}
__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
@@ -1053,19 +719,14 @@ fail_reg_write_construct:
kfree(desc);
kfree(cmd_pyld);
fail_size_valid:
- dma_free_coherent(ipa3_ctx->pdev, hash_hdr.size,
- hash_hdr.base, hash_hdr.phys_base);
- dma_free_coherent(ipa3_ctx->pdev, nhash_hdr.size,
- nhash_hdr.base, nhash_hdr.phys_base);
-
- if (hash_bdy.size)
- dma_free_coherent(ipa3_ctx->pdev, hash_bdy.size,
- hash_bdy.base, hash_bdy.phys_base);
-
- if (nhash_bdy.size)
- dma_free_coherent(ipa3_ctx->pdev, nhash_bdy.size,
- nhash_bdy.base, nhash_bdy.phys_base);
-fail_gen:
+ if (alloc_params.hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params.hash_hdr);
+ ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+ if (alloc_params.hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.hash_bdy);
+ if (alloc_params.nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+prep_failed:
return rc;
}
@@ -1100,13 +761,10 @@ static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
}
if (rule->rule_id) {
- if (rule->rule_id >= IPA_RULE_ID_MIN_VAL &&
- rule->rule_id <= IPA_RULE_ID_MAX_VAL) {
+ if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
IPAERR("invalid rule_id provided 0x%x\n"
- "rule_id 0x%x - 0x%x are auto generated\n",
- rule->rule_id,
- IPA_RULE_ID_MIN_VAL,
- IPA_RULE_ID_MAX_VAL);
+ "rule_id with bit 0x%x are auto generated\n",
+ rule->rule_id, ipahal_get_rule_id_hi_bit());
goto error;
}
}
@@ -1269,9 +927,9 @@ static int __ipa_del_flt_rule(u32 rule_hdl)
entry->tbl->rule_cnt, entry->rule_id);
entry->cookie = 0;
/* if rule id was allocated from idr, remove it */
- if (entry->rule_id >= IPA_RULE_ID_MIN_VAL &&
- entry->rule_id <= IPA_RULE_ID_MAX_VAL)
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+
kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
/* remove the handle from the database */
@@ -1397,7 +1055,6 @@ int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
if (rules == NULL || rules->num_rules == 0 ||
rules->ip >= IPA_IP_MAX) {
IPAERR("bad parm\n");
-
return -EINVAL;
}
@@ -1683,10 +1340,9 @@ int ipa3_reset_flt(enum ipa_ip_type ip)
if (entry->rt_tbl)
entry->rt_tbl->ref_cnt--;
/* if rule id was allocated from idr, remove it */
- if (entry->rule_id >= IPA_RULE_ID_MIN_VAL &&
- entry->rule_id <= IPA_RULE_ID_MAX_VAL)
+ if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
idr_remove(&entry->tbl->rule_ids,
- entry->rule_id);
+ entry->rule_id);
entry->cookie = 0;
id = entry->id;
kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
@@ -1804,30 +1460,29 @@ int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
*
* This function reads the filtering table from IPA SRAM and prepares an array
* of entries. This function is mainly used for debugging purposes.
-
+ *
+ * If empty table or Modem Apps table, zero entries will be returned.
+ *
* Returns: 0 on success, negative on failure
*/
-int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
- enum ipa_ip_type ip_type,
- bool hashable,
- struct ipa3_flt_entry entry[],
- int *num_entry)
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
+ bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
{
+ void *ipa_sram_mmio;
+ u64 hdr_base_ofst;
int tbl_entry_idx;
- u64 tbl_entry_in_hdr_ofst;
- u64 *tbl_entry_in_hdr;
- struct ipa3_flt_rule_hw_hdr *hdr;
- u8 *buf;
- int rule_idx;
- u8 rule_size;
int i;
- void *ipa_sram_mmio;
+ int res = 0;
+ u64 tbl_addr;
+ bool is_sys;
+ u8 *rule_addr;
+ struct ipa_mem_buffer *sys_tbl_mem;
+ int rule_idx;
- IPADBG("pipe_idx=%d ip_type=%d hashable=%d\n",
- pipe_idx, ip_type, hashable);
+ IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+ pipe_idx, ip_type, hashable, entry, num_entry);
- if (pipe_idx >= ipa3_ctx->ipa_num_pipes ||
- ip_type >= IPA_IP_MAX ||
+ if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
!entry || !num_entry) {
IPAERR("Invalid params\n");
return -EFAULT;
@@ -1840,99 +1495,98 @@ int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
/* map IPA SRAM */
ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
- ipa3_ctx->ctrl->ipa_reg_base_ofst +
- ipahal_get_reg_n_ofst(
- IPA_SRAM_DIRECT_ACCESS_n,
- 0),
- ipa3_ctx->smem_sz);
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ ipa3_ctx->smem_restricted_bytes / 4),
+ ipa3_ctx->smem_sz);
if (!ipa_sram_mmio) {
IPAERR("fail to ioremap IPA SRAM\n");
return -ENOMEM;
}
memset(entry, 0, sizeof(*entry) * (*num_entry));
- /* calculate the offset of the tbl entry */
- tbl_entry_idx = 1; /* to skip the bitmap */
- for (i = 0; i < pipe_idx; i++)
- if (ipa3_ctx->ep_flt_bitmap & (1 << i))
- tbl_entry_idx++;
-
if (hashable) {
if (ip_type == IPA_IP_v4)
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v4_flt_hash_ofst) +
- tbl_entry_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_flt_hash_ofst);
else
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v6_flt_hash_ofst) +
- tbl_entry_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_flt_hash_ofst);
} else {
if (ip_type == IPA_IP_v4)
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v4_flt_nhash_ofst) +
- tbl_entry_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_flt_nhash_ofst);
else
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v6_flt_nhash_ofst) +
- tbl_entry_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_flt_nhash_ofst);
}
- IPADBG("tbl_entry_in_hdr_ofst=0x%llx\n", tbl_entry_in_hdr_ofst);
+ /* calculate the index of the tbl entry */
+ tbl_entry_idx = 1; /* skip the bitmap */
+ for (i = 0; i < pipe_idx; i++)
+ if (ipa3_ctx->ep_flt_bitmap & (1 << i))
+ tbl_entry_idx++;
- tbl_entry_in_hdr = ipa_sram_mmio + tbl_entry_in_hdr_ofst;
+ IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
+ hdr_base_ofst, tbl_entry_idx);
+
+ res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+ tbl_entry_idx, &tbl_addr, &is_sys);
+ if (res) {
+ IPAERR("failed to read table address from header structure\n");
+ goto bail;
+ }
+ IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
+ pipe_idx, tbl_addr, is_sys);
+ if (!tbl_addr) {
+ IPAERR("invalid flt tbl addr\n");
+ res = -EFAULT;
+ goto bail;
+ }
/* for tables resides in DDR access it from the virtual memory */
- if (*tbl_entry_in_hdr & 0x1) {
- /* local */
- hdr = (void *)((u8 *)tbl_entry_in_hdr -
- tbl_entry_idx * IPA_HW_TBL_HDR_WIDTH +
- (*tbl_entry_in_hdr - 1) / 16);
- } else {
- /* system */
- if (hashable)
- hdr = ipa3_ctx->flt_tbl[pipe_idx][ip_type].
- curr_mem[IPA_RULE_HASHABLE].base;
+ if (is_sys) {
+ sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
+ curr_mem[hashable ? IPA_RULE_HASHABLE :
+ IPA_RULE_NON_HASHABLE];
+ if (sys_tbl_mem->phys_base &&
+ sys_tbl_mem->phys_base != tbl_addr) {
+ IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
+ tbl_addr, &sys_tbl_mem->phys_base);
+ }
+ if (sys_tbl_mem->phys_base)
+ rule_addr = sys_tbl_mem->base;
else
- hdr = ipa3_ctx->flt_tbl[pipe_idx][ip_type].
- curr_mem[IPA_RULE_NON_HASHABLE].base;
+ rule_addr = NULL;
+ } else {
+ rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+ }
+
+ IPADBG("First rule addr 0x%p\n", rule_addr);
- if (!hdr)
- hdr = ipa3_ctx->empty_rt_tbl_mem.base;
+ if (!rule_addr) {
+ /* Modem table in system memory or empty table */
+ *num_entry = 0;
+ goto bail;
}
- IPADBG("*tbl_entry_in_hdr=0x%llx\n", *tbl_entry_in_hdr);
- IPADBG("hdr=0x%p\n", hdr);
rule_idx = 0;
while (rule_idx < *num_entry) {
- IPADBG("*((u64 *)hdr)=0x%llx\n", *((u64 *)hdr));
- if (*((u64 *)hdr) == 0)
+ res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+ if (res) {
+ IPAERR("failed parsing flt rule\n");
+ goto bail;
+ }
+
+ IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+ if (!entry[rule_idx].rule_size)
break;
- entry[rule_idx].rule.eq_attrib_type = true;
- entry[rule_idx].rule.eq_attrib.rule_eq_bitmap =
- hdr->u.hdr.en_rule;
- entry[rule_idx].rule.action = hdr->u.hdr.action;
- entry[rule_idx].rule.retain_hdr = hdr->u.hdr.retain_hdr;
- entry[rule_idx].rule.rt_tbl_idx = hdr->u.hdr.rt_tbl_idx;
- entry[rule_idx].prio = hdr->u.hdr.priority;
- entry[rule_idx].rule_id = entry->rule.rule_id =
- hdr->u.hdr.rule_id;
- buf = (u8 *)(hdr + 1);
- IPADBG("buf=0x%p\n", buf);
-
- ipa3_generate_eq_from_hw_rule(&entry[rule_idx].rule.eq_attrib,
- buf, &rule_size);
- IPADBG("rule_size=%d\n", rule_size);
- hdr = (void *)(buf + rule_size);
- IPADBG("hdr=0x%p\n", hdr);
+
+ rule_addr += entry[rule_idx].rule_size;
rule_idx++;
}
-
*num_entry = rule_idx;
+bail:
iounmap(ipa_sram_mmio);
-
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
index 38a63f8a67ba..dff3a3fdfd5a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
@@ -16,78 +16,6 @@
/* This header defines various HW related data types */
-#define IPA_RULE_ID_INVALID 0x3FF
-
-/**
- * struct ipa3_flt_rule_hw_hdr - HW header of IPA filter rule
- * @word: filtering rule properties
- * @en_rule: enable rule
- * @action: post routing action
- * @rt_tbl_idx: index in routing table
- * @retain_hdr: added to add back to the packet the header removed
- * as part of header removal. This will be done as part of
- * header insertion block.
- * @rsvd1: reserved bits
- * @priority: Rule priority. Added to distinguish rules order
- * at the integrated table consisting from hashable and
- * non-hashable parts
- * @rsvd2: reserved bits
- * @rule_id: rule ID that will be returned in the packet status
- * @rsvd3: reserved bits
- */
-struct ipa3_flt_rule_hw_hdr {
- union {
- u64 word;
- struct {
- u64 en_rule:16;
- u64 action:5;
- u64 rt_tbl_idx:5;
- u64 retain_hdr:1;
- u64 rsvd1:5;
- u64 priority:10;
- u64 rsvd2:6;
- u64 rule_id:10;
- u64 rsvd3:6;
- } hdr;
- } u;
-};
-
-/**
- * struct ipa3_rt_rule_hw_hdr - HW header of IPA routing rule
- * @word: routing rule properties
- * @en_rule: enable rule
- * @pipe_dest_idx: destination pipe index
- * @system: changed from local to system due to HW change
- * @hdr_offset: header offset
- * @proc_ctx: whether hdr_offset points to header table or to
- * header processing context table
- * @priority: Rule priority. Added to distinguish rules order
- * at the integrated table consisting from hashable and
- * non-hashable parts
- * @rsvd1: reserved bits
- * @retain_hdr: added to add back to the packet the header removed
- * as part of header removal. This will be done as part of
- * header insertion block.
- * @rule_id: rule ID that will be returned in the packet status
- * @rsvd2: reserved bits
- */
-struct ipa3_rt_rule_hw_hdr {
- union {
- u64 word;
- struct {
- u64 en_rule:16;
- u64 pipe_dest_idx:5;
- u64 system:1;
- u64 hdr_offset:9;
- u64 proc_ctx:1;
- u64 priority:10;
- u64 rsvd1:5;
- u64 retain_hdr:1;
- u64 rule_id:10;
- u64 rsvd2:6;
- } hdr;
- } u;
-};
#define IPA_A5_MUX_HDR_EXCP_FLAG_IP BIT(7)
#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT BIT(6)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 36cb5cbb8d34..e50a6be4d186 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -33,6 +33,7 @@
#include "../ipa_api.h"
#include "ipahal/ipahal_reg.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
#include "../ipa_common_i.h"
#define DRV_NAME "ipa"
@@ -115,23 +116,6 @@
#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0)
#endif
-#define IPA_TOS_EQ BIT(0)
-#define IPA_PROTOCOL_EQ BIT(1)
-#define IPA_TC_EQ BIT(2)
-#define IPA_OFFSET_MEQ128_0 BIT(3)
-#define IPA_OFFSET_MEQ128_1 BIT(4)
-#define IPA_OFFSET_MEQ32_0 BIT(5)
-#define IPA_OFFSET_MEQ32_1 BIT(6)
-#define IPA_IHL_OFFSET_MEQ32_0 BIT(7)
-#define IPA_IHL_OFFSET_MEQ32_1 BIT(8)
-#define IPA_METADATA_COMPARE BIT(9)
-#define IPA_IHL_OFFSET_RANGE16_0 BIT(10)
-#define IPA_IHL_OFFSET_RANGE16_1 BIT(11)
-#define IPA_IHL_OFFSET_EQ_32 BIT(12)
-#define IPA_IHL_OFFSET_EQ_16 BIT(13)
-#define IPA_FL_EQ BIT(14)
-#define IPA_IS_FRAG BIT(15)
-
#define IPA_HDR_BIN0 0
#define IPA_HDR_BIN1 1
#define IPA_HDR_BIN2 2
@@ -159,35 +143,11 @@
#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
#define IPA_INVALID_L4_PROTOCOL 0xFF
-#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
- (((start_ofst) + 127) & ~127)
-#define IPA_RT_FLT_HW_RULE_BUF_SIZE (256)
-
-#define IPA_HW_TBL_WIDTH (8)
-#define IPA_HW_TBL_SYSADDR_ALIGNMENT (127)
-#define IPA_HW_TBL_LCLADDR_ALIGNMENT (7)
-#define IPA_HW_TBL_ADDR_MASK (127)
-#define IPA_HW_TBL_BLK_SIZE_ALIGNMENT (127)
-#define IPA_HW_TBL_HDR_WIDTH (8)
-#define IPA_HW_RULE_START_ALIGNMENT (7)
-
-/*
- * for local tables (at sram) offsets is used as tables addresses
- * offset need to be in 8B units (local address aligned) and
- * left shifted to its place. Local bit need to be enabled.
- */
-#define IPA_HW_TBL_OFSET_TO_LCLADDR(__ofst) \
- ( \
- (((__ofst)/(IPA_HW_TBL_LCLADDR_ALIGNMENT+1)) * \
- (IPA_HW_TBL_ADDR_MASK + 1)) + 1 \
- )
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
-#define IPA_RULE_MAX_PRIORITY (0)
-#define IPA_RULE_MIN_PRIORITY (1023)
-
-#define IPA_RULE_ID_MIN_VAL (0x01)
-#define IPA_RULE_ID_MAX_VAL (0x1FF)
-#define IPA_RULE_ID_RULE_MISS (0x3FF)
+#define IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst) \
+ (((start_ofst) + 127) & ~127)
#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
@@ -906,28 +866,6 @@ struct ipa3_tag_completion {
atomic_t cnt;
};
-/**
- * struct ipa3_debugfs_rt_entry - IPA routing table entry for debugfs
- * @eq_attrib: equation attributes for the rule
- * @retain_hdr: retain header when hit this rule
- * @prio: rule 10bit priority which defines the order of the rule
- * @rule_id: rule 10bit ID to be returned in packet status
- * @dst: destination endpoint
- * @hdr_ofset: header offset to be added
- * @system: rule resides in system memory
- * @is_proc_ctx: indicates whether the rules points to proc_ctx or header
- */
-struct ipa3_debugfs_rt_entry {
- struct ipa_ipfltri_rule_eq eq_attrib;
- uint8_t retain_hdr;
- u16 prio;
- u16 rule_id;
- u8 dst;
- u8 hdr_ofset;
- u8 system;
- u8 is_proc_ctx;
-};
-
struct ipa3_controller;
/**
@@ -1317,7 +1255,6 @@ struct ipa3_ready_cb_info {
* @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
* @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
* @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
- * @empty_rt_tbl_mem: empty routing tables memory
* @power_mgmt_wq: workqueue for power management
* @transport_power_mgmt_wq: workqueue transport related power management
* @tag_process_before_gating: indicates whether to start tag process before
@@ -1409,7 +1346,6 @@ struct ipa3_context {
bool ip4_flt_tbl_nhash_lcl;
bool ip6_flt_tbl_hash_lcl;
bool ip6_flt_tbl_nhash_lcl;
- struct ipa_mem_buffer empty_rt_tbl_mem;
struct gen_pool *pipe_mem_pool;
struct dma_pool *dma_pool;
struct ipa3_active_clients ipa3_active_clients;
@@ -1689,8 +1625,6 @@ struct ipa3_controller {
int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
int (*ipa3_commit_flt)(enum ipa_ip_type ip);
int (*ipa3_commit_rt)(enum ipa_ip_type ip);
- int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip,
- struct ipa3_rt_entry *entry, u8 *buf);
int (*ipa3_commit_hdr)(void);
void (*ipa3_enable_clks)(void);
void (*ipa3_disable_clks)(void);
@@ -2057,12 +1991,6 @@ int ipa3_generate_hw_rule(enum ipa_ip_type ip,
const struct ipa_rule_attrib *attrib,
u8 **buf,
u16 *en_rule);
-u8 *ipa3_write_64(u64 w, u8 *dest);
-u8 *ipa3_write_32(u32 w, u8 *dest);
-u8 *ipa3_write_16(u16 hw, u8 *dest);
-u8 *ipa3_write_8(u8 b, u8 *dest);
-u8 *ipa3_pad_to_32(u8 *dest);
-u8 *ipa3_pad_to_64(u8 *dest);
int ipa3_init_hw(void);
struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
int ipa3_set_single_ndp_per_mbim(bool);
@@ -2140,13 +2068,8 @@ int _ipa_init_flt6_v3(void);
int __ipa_commit_flt_v3(enum ipa_ip_type ip);
int __ipa_commit_rt_v3(enum ipa_ip_type ip);
-int __ipa_generate_rt_hw_rule_v3_0(enum ipa_ip_type ip,
- struct ipa3_rt_entry *entry, u8 *buf);
int __ipa_commit_hdr_v3_0(void);
-int ipa3_generate_flt_eq(enum ipa_ip_type ip,
- const struct ipa_rule_attrib *attrib,
- struct ipa_ipfltri_rule_eq *eq_attrib);
void ipa3_skb_recycle(struct sk_buff *skb);
void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx);
void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx);
@@ -2246,19 +2169,16 @@ void ipa3_set_resorce_groups_min_max_limits(void);
void ipa3_suspend_apps_pipes(bool suspend);
void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
uint32_t qmap_id);
-int ipa3_generate_eq_from_hw_rule(
- struct ipa_ipfltri_rule_eq *attrib, u8 *buf, u8 *rule_size);
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
enum ipa_ip_type ip_type,
bool hashable,
- struct ipa3_flt_entry entry[],
+ struct ipahal_flt_rule_entry entry[],
int *num_entry);
int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
enum ipa_ip_type ip_type,
bool hashable,
- struct ipa3_debugfs_rt_entry entry[],
+ struct ipahal_rt_rule_entry entry[],
int *num_entry);
-int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib);
int ipa3_restore_suspend_handler(void);
int ipa3_inject_dma_task_for_gsi(void);
int ipa3_uc_panic_notifier(struct notifier_block *this,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index 34c5bdc9c65f..138db3dbde84 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -14,6 +14,7 @@
#include <linux/idr.h>
#include "ipa_i.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
#define IPA_RT_INDEX_BITMAP_SIZE (32)
#define IPA_RT_STATUS_OF_ADD_FAILED (-1)
@@ -27,47 +28,35 @@
)
/**
- * __ipa_generate_rt_hw_rule_v3_0() - generates the routing hardware rule
+ * ipa_generate_rt_hw_rule() - Generated the RT H/W single rule
+ * This func will do the preparation core driver work and then calls
+ * the HAL layer for the real work.
* @ip: the ip address family type
* @entry: routing entry
* @buf: output buffer, buf == NULL means
- * caller wants to know the size of the rule as seen
- * by HW so they did not pass a valid buffer, we will use a
- * scratch buffer instead.
- * With this scheme we are going to
- * generate the rule twice, once to know size using scratch
- * buffer and second to write the rule to the actual caller
- * supplied buffer which is of required size
+ * caller wants to know the size of the rule as seen
+ * by HW so they did not pass a valid buffer, we will use a
+ * scratch buffer instead.
+ * With this scheme we are going to
+ * generate the rule twice, once to know size using scratch
+ * buffer and second to write the rule to the actual caller
+ * supplied buffer which is of required size
*
- * Returns: 0 on success, negative on failure
+ * Returns: 0 on success, negative on failure
*
* caller needs to hold any needed locks to ensure integrity
- *
*/
-int __ipa_generate_rt_hw_rule_v3_0(enum ipa_ip_type ip,
- struct ipa3_rt_entry *entry, u8 *buf)
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+ struct ipa3_rt_entry *entry, u8 *buf)
{
- struct ipa3_rt_rule_hw_hdr *rule_hdr;
- const struct ipa_rt_rule *rule =
- (const struct ipa_rt_rule *)&entry->rule;
- u16 en_rule = 0;
- u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
- u8 *start;
- int pipe_idx;
-
- if (buf == NULL) {
- memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
- buf = (u8 *)tmp;
- } else
- if ((long)buf & IPA_HW_RULE_START_ALIGNMENT) {
- IPAERR("buff is not rule start aligned\n");
- return -EPERM;
- }
+ struct ipahal_rt_rule_gen_params gen_params;
+ int res = 0;
- start = buf;
- rule_hdr = (struct ipa3_rt_rule_hw_hdr *)buf;
- pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
- if (pipe_idx == -1) {
+ memset(&gen_params, 0, sizeof(gen_params));
+
+ gen_params.ipt = ip;
+ gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
+ if (gen_params.dst_pipe_idx == -1) {
IPAERR("Wrong destination pipe specified in RT rule\n");
WARN_ON(1);
return -EPERM;
@@ -75,56 +64,36 @@ int __ipa_generate_rt_hw_rule_v3_0(enum ipa_ip_type ip,
if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
IPAERR("No RT rule on IPA_client_producer pipe.\n");
IPAERR("pipe_idx: %d dst_pipe: %d\n",
- pipe_idx, entry->rule.dst);
+ gen_params.dst_pipe_idx, entry->rule.dst);
WARN_ON(1);
return -EPERM;
}
- rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+
if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
struct ipa3_hdr_proc_ctx_entry *proc_ctx;
proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
- rule_hdr->u.hdr.system = !ipa3_ctx->hdr_proc_ctx_tbl_lcl;
- BUG_ON(proc_ctx->offset_entry->offset & 31);
- rule_hdr->u.hdr.proc_ctx = 1;
- rule_hdr->u.hdr.hdr_offset =
- (proc_ctx->offset_entry->offset +
- ipa3_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
+ gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl;
+ gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+ gen_params.hdr_ofst = proc_ctx->offset_entry->offset +
+ ipa3_ctx->hdr_proc_ctx_tbl.start_offset;
} else if (entry->hdr) {
- rule_hdr->u.hdr.system = !ipa3_ctx->hdr_tbl_lcl;
- BUG_ON(entry->hdr->offset_entry->offset & 3);
- rule_hdr->u.hdr.proc_ctx = 0;
- rule_hdr->u.hdr.hdr_offset =
- entry->hdr->offset_entry->offset >> 2;
+ gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl;
+ gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+ gen_params.hdr_ofst = entry->hdr->offset_entry->offset;
} else {
- rule_hdr->u.hdr.proc_ctx = 0;
- rule_hdr->u.hdr.hdr_offset = 0;
- }
- BUG_ON(entry->prio & ~0x3FF);
- rule_hdr->u.hdr.priority = entry->prio;
- rule_hdr->u.hdr.retain_hdr = rule->retain_hdr;
- BUG_ON(entry->rule_id & ~0x3FF);
- rule_hdr->u.hdr.rule_id = entry->rule_id;
- buf += sizeof(struct ipa3_rt_rule_hw_hdr);
-
- if (ipa3_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
- IPAERR("fail to generate hw rule\n");
- return -EPERM;
+ gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
+ gen_params.hdr_ofst = 0;
}
- IPADBG_LOW("en_rule 0x%x\n", en_rule);
-
- rule_hdr->u.hdr.en_rule = en_rule;
- ipa3_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+ gen_params.priority = entry->prio;
+ gen_params.id = entry->rule_id;
+ gen_params.rule = (const struct ipa_rt_rule *)&entry->rule;
- if (entry->hw_len == 0) {
- entry->hw_len = buf - start;
- } else if (entry->hw_len != (buf - start)) {
- IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%td\n",
- entry->hw_len, (buf - start));
- return -EPERM;
- }
+ res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+ if (res)
+ IPAERR("failed to generate rt h/w rule\n");
- return 0;
+ return res;
}
/**
@@ -135,7 +104,7 @@ int __ipa_generate_rt_hw_rule_v3_0(enum ipa_ip_type ip,
* @base: the rules body buffer to be filled
* @hdr: the rules header (addresses/offsets) buffer to be filled
* @body_ofst: the offset of the rules body from the rules header at
- * ipa sram
+ * ipa sram (for local body usage)
* @apps_start_idx: the first rt table index of apps tables
*
* Returns: 0 on success, negative on failure
@@ -162,49 +131,37 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
if (tbl->sz[rlt] == 0)
continue;
if (tbl->in_sys[rlt]) {
- /* only body (no header) with rule-set terminator */
+ /* only body (no header) */
tbl_mem.size = tbl->sz[rlt] -
- IPA_HW_TBL_HDR_WIDTH + IPA_HW_TBL_WIDTH;
- tbl_mem.base =
- dma_alloc_coherent(ipa3_ctx->pdev, tbl_mem.size,
- &tbl_mem.phys_base, GFP_KERNEL);
- if (!tbl_mem.base) {
- IPAERR("fail to alloc DMA buf of size %d\n",
+ ipahal_get_hw_tbl_hdr_width();
+ if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+ IPAERR("fail to alloc sys tbl of size %d\n",
tbl_mem.size);
goto err;
}
- if (tbl_mem.phys_base & IPA_HW_TBL_SYSADDR_ALIGNMENT) {
- IPAERR("sys rt tbl address is not aligned\n");
- goto align_err;
- }
- /* update the hdr at the right index */
- ipa3_write_64(tbl_mem.phys_base,
- hdr + ((tbl->idx - apps_start_idx) *
- IPA_HW_TBL_HDR_WIDTH));
+ if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+ hdr, tbl->idx - apps_start_idx, true)) {
+ IPAERR("fail to wrt sys tbl addr to hdr\n");
+ goto hdr_update_fail;
+ }
tbl_mem_buf = tbl_mem.base;
- memset(tbl_mem_buf, 0, tbl_mem.size);
/* generate the rule-set */
list_for_each_entry(entry, &tbl->head_rt_rule_list,
link) {
if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
continue;
- res = ipa3_ctx->ctrl->ipa_generate_rt_hw_rule(
- ip,
- entry,
+ res = ipa_generate_rt_hw_rule(ip, entry,
tbl_mem_buf);
if (res) {
IPAERR("failed to gen HW RT rule\n");
- goto align_err;
+ goto hdr_update_fail;
}
tbl_mem_buf += entry->hw_len;
}
- /* write the rule-set terminator */
- tbl_mem_buf = ipa3_write_64(0, tbl_mem_buf);
-
if (tbl->curr_mem[rlt].phys_base) {
WARN_ON(tbl->prev_mem[rlt].phys_base);
tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
@@ -212,25 +169,20 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
tbl->curr_mem[rlt] = tbl_mem;
} else {
offset = body_i - base + body_ofst;
- if (offset & IPA_HW_TBL_LCLADDR_ALIGNMENT) {
- IPAERR("ofst isn't lcl addr aligned %llu\n",
- offset);
- goto err;
- }
/* update the hdr at the right index */
- ipa3_write_64(IPA_HW_TBL_OFSET_TO_LCLADDR(offset),
- hdr + ((tbl->idx - apps_start_idx) *
- IPA_HW_TBL_HDR_WIDTH));
+ if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+ tbl->idx - apps_start_idx, true)) {
+ IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+ goto hdr_update_fail;
+ }
/* generate the rule-set */
list_for_each_entry(entry, &tbl->head_rt_rule_list,
link) {
if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
continue;
- res = ipa3_ctx->ctrl->ipa_generate_rt_hw_rule(
- ip,
- entry,
+ res = ipa_generate_rt_hw_rule(ip, entry,
body_i);
if (res) {
IPAERR("failed to gen HW RT rule\n");
@@ -239,24 +191,20 @@ static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
body_i += entry->hw_len;
}
- /* write the rule-set terminator */
- body_i = ipa3_write_64(0, body_i);
-
/**
* advance body_i to next table alignment as local tables
* are order back-to-back
*/
- body_i += IPA_HW_TBL_LCLADDR_ALIGNMENT;
+ body_i += ipahal_get_lcl_tbl_addr_alignment();
body_i = (u8 *)((long)body_i &
- ~IPA_HW_TBL_LCLADDR_ALIGNMENT);
+ ~ipahal_get_lcl_tbl_addr_alignment());
}
}
return 0;
-align_err:
- dma_free_coherent(ipa3_ctx->pdev, tbl_mem.size,
- tbl_mem.base, tbl_mem.phys_base);
+hdr_update_fail:
+ ipahal_free_dma_mem(&tbl_mem);
err:
return -EPERM;
}
@@ -275,10 +223,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
IPADBG_LOW(
"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
tbl->name, ip, i);
- dma_free_coherent(ipa3_ctx->pdev,
- tbl->prev_mem[i].size,
- tbl->prev_mem[i].base,
- tbl->prev_mem[i].phys_base);
+ ipahal_free_dma_mem(&tbl->prev_mem[i]);
memset(&tbl->prev_mem[i], 0,
sizeof(tbl->prev_mem[i]));
}
@@ -293,10 +238,7 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
IPADBG_LOW(
"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
tbl->name, ip, i);
- dma_free_coherent(ipa3_ctx->pdev,
- tbl->curr_mem[i].size,
- tbl->curr_mem[i].base,
- tbl->curr_mem[i].phys_base);
+ ipahal_free_dma_mem(&tbl->curr_mem[i]);
}
}
list_del(&tbl->link);
@@ -305,64 +247,6 @@ static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
}
/**
- * ipa_alloc_init_rt_tbl_hdr() - allocate and initialize buffers for
- * rt tables headers to be filled into sram
- * @ip: the ip address family type
- * @hash_hdr: address of the dma buffer for the hashable rt tbl header
- * @nhash_hdr: address of the dma buffer for the non-hashable rt tbl header
- *
- * Return: 0 on success, negative on failure
- */
-static int ipa_alloc_init_rt_tbl_hdr(enum ipa_ip_type ip,
- struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr)
-{
- int num_index;
- u64 *hash_entr;
- u64 *nhash_entr;
- int i;
-
- if (ip == IPA_IP_v4)
- num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
- IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
- else
- num_index = IPA_MEM_PART(v6_apps_rt_index_hi) -
- IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
-
- hash_hdr->size = num_index * IPA_HW_TBL_HDR_WIDTH;
- hash_hdr->base = dma_alloc_coherent(ipa3_ctx->pdev, hash_hdr->size,
- &hash_hdr->phys_base, GFP_KERNEL);
- if (!hash_hdr->base) {
- IPAERR("fail to alloc DMA buff of size %d\n", hash_hdr->size);
- goto err;
- }
-
- nhash_hdr->size = num_index * IPA_HW_TBL_HDR_WIDTH;
- nhash_hdr->base = dma_alloc_coherent(ipa3_ctx->pdev, nhash_hdr->size,
- &nhash_hdr->phys_base, GFP_KERNEL);
- if (!nhash_hdr->base) {
- IPAERR("fail to alloc DMA buff of size %d\n", nhash_hdr->size);
- goto nhash_alloc_fail;
- }
-
- hash_entr = (u64 *)hash_hdr->base;
- nhash_entr = (u64 *)nhash_hdr->base;
- for (i = 0; i < num_index; i++) {
- *hash_entr = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- *nhash_entr = ipa3_ctx->empty_rt_tbl_mem.phys_base;
- hash_entr++;
- nhash_entr++;
- }
-
- return 0;
-
-nhash_alloc_fail:
- dma_free_coherent(ipa3_ctx->pdev, hash_hdr->size,
- hash_hdr->base, hash_hdr->phys_base);
-err:
- return -ENOMEM;
-}
-
-/**
* ipa_prep_rt_tbl_for_cmt() - preparing the rt table for commit
* assign priorities to the rules, calculate their sizes and calculate
* the overall table size
@@ -375,32 +259,37 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
struct ipa3_rt_tbl *tbl)
{
struct ipa3_rt_entry *entry;
- u16 prio_i = 1;
+ int prio_i;
int res;
+ int max_prio;
+ u32 hdr_width;
tbl->sz[IPA_RULE_HASHABLE] = 0;
tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
- list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+ max_prio = ipahal_get_rule_max_priority();
- entry->prio = entry->rule.max_prio ?
- IPA_RULE_MAX_PRIORITY : prio_i++;
+ prio_i = max_prio;
+ list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
- if (entry->prio > IPA_RULE_MIN_PRIORITY) {
- IPAERR("cannot allocate new priority for rule\n");
- return -EPERM;
+ if (entry->rule.max_prio) {
+ entry->prio = max_prio;
+ } else {
+ if (ipahal_rule_decrease_priority(&prio_i)) {
+ IPAERR("cannot rule decrease priority - %d\n",
+ prio_i);
+ return -EPERM;
+ }
+ entry->prio = prio_i;
}
- res = ipa3_ctx->ctrl->ipa_generate_rt_hw_rule(
- ip,
- entry,
- NULL);
+ res = ipa_generate_rt_hw_rule(ip, entry, NULL);
if (res) {
IPAERR("failed to calculate HW RT rule size\n");
return -EPERM;
}
- IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
+ IPADBG("RT rule id (handle) %d hw_len %u priority %u\n",
entry->id, entry->hw_len, entry->prio);
if (entry->rule.hashable)
@@ -415,83 +304,35 @@ static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
IPAERR("rt tbl %s is with zero total size\n", tbl->name);
}
+ hdr_width = ipahal_get_hw_tbl_hdr_width();
+
if (tbl->sz[IPA_RULE_HASHABLE])
- tbl->sz[IPA_RULE_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
+ tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
if (tbl->sz[IPA_RULE_NON_HASHABLE])
- tbl->sz[IPA_RULE_NON_HASHABLE] += IPA_HW_TBL_HDR_WIDTH;
+ tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
- IPADBG_LOW("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
+ IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
return 0;
}
/**
- * ipa_get_rt_tbl_lcl_bdy_size() - calc the overall memory needed on sram
- * to hold the hashable and non-hashable rt rules tables bodies
- * @ip: the ip address family type
- * @hash_bdy_sz[OUT]: size on local sram for all tbls hashable rules
- * @nhash_bdy_sz[OUT]: size on local sram for all tbls non-hashable rules
- *
- * Return: none
- */
-static void ipa_get_rt_tbl_lcl_bdy_size(enum ipa_ip_type ip,
- u32 *hash_bdy_sz, u32 *nhash_bdy_sz)
-{
- struct ipa3_rt_tbl_set *set;
- struct ipa3_rt_tbl *tbl;
-
- *hash_bdy_sz = 0;
- *nhash_bdy_sz = 0;
-
- set = &ipa3_ctx->rt_tbl_set[ip];
- list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
- if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
- tbl->sz[IPA_RULE_HASHABLE]) {
- *hash_bdy_sz += tbl->sz[IPA_RULE_HASHABLE];
- *hash_bdy_sz -= IPA_HW_TBL_HDR_WIDTH;
- /* for table terminator */
- *hash_bdy_sz += IPA_HW_TBL_WIDTH;
- /* align the start of local rule-set */
- *hash_bdy_sz += IPA_HW_TBL_LCLADDR_ALIGNMENT;
- *hash_bdy_sz &= ~IPA_HW_TBL_LCLADDR_ALIGNMENT;
- }
- if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
- tbl->sz[IPA_RULE_NON_HASHABLE]) {
- *nhash_bdy_sz += tbl->sz[IPA_RULE_NON_HASHABLE];
- *nhash_bdy_sz -= IPA_HW_TBL_HDR_WIDTH;
- /* for table terminator */
- *nhash_bdy_sz += IPA_HW_TBL_WIDTH;
- /* align the start of local rule-set */
- *nhash_bdy_sz += IPA_HW_TBL_LCLADDR_ALIGNMENT;
- *nhash_bdy_sz &= ~IPA_HW_TBL_LCLADDR_ALIGNMENT;
- }
- }
-}
-
-/**
* ipa_generate_rt_hw_tbl_img() - generates the rt hw tbls.
- * headers and bodies are being created into buffers that will be filled into
- * the local memory (sram)
+ * headers and bodies (sys bodies) are being created into buffers that will
+ * be filled into the local memory (sram)
* @ip: the ip address family type
- * @hash_hdr: address of the dma buffer containing hashable rules tbl headers
- * @nhash_hdr: address of the dma buffer containing
- * non-hashable rules tbl headers
- * @hash_bdy: address of the dma buffer containing hashable local rules
- * @nhash_bdy: address of the dma buffer containing non-hashable local rules
+ * @alloc_params: IN/OUT parameters to hold info regard the tables headers
+ * and bodies on DDR (DMA buffers), and needed info for the allocation
+ * that the HAL needs
*
* Return: 0 on success, negative on failure
*/
static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
- struct ipa_mem_buffer *hash_hdr, struct ipa_mem_buffer *nhash_hdr,
- struct ipa_mem_buffer *hash_bdy, struct ipa_mem_buffer *nhash_bdy)
+ struct ipahal_fltrt_alloc_imgs_params *alloc_params)
{
u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
u32 apps_start_idx;
- struct ipa3_rt_tbl_set *set;
- struct ipa3_rt_tbl *tbl;
- u32 hash_bdy_sz;
- u32 nhash_bdy_sz;
int rc = 0;
if (ip == IPA_IP_v4) {
@@ -508,68 +349,21 @@ static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
}
- if (!ipa3_ctx->rt_idx_bitmap[ip]) {
- IPAERR("no rt tbls present\n");
- rc = -EPERM;
- goto no_rt_tbls;
- }
-
- if (ipa_alloc_init_rt_tbl_hdr(ip, hash_hdr, nhash_hdr)) {
- IPAERR("fail to alloc and init rt tbl hdr\n");
+ if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+ IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip);
rc = -ENOMEM;
- goto no_rt_tbls;
- }
-
- set = &ipa3_ctx->rt_tbl_set[ip];
- list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
- if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) {
- rc = -EPERM;
- goto prep_failed;
- }
- }
-
- ipa_get_rt_tbl_lcl_bdy_size(ip, &hash_bdy_sz, &nhash_bdy_sz);
- IPADBG_LOW("total rt tbl local body sizes: hash %u nhash %u\n",
- hash_bdy_sz, nhash_bdy_sz);
-
- hash_bdy->size = hash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
- hash_bdy->size &= ~IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
- nhash_bdy->size = nhash_bdy_sz + IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
- nhash_bdy->size &= ~IPA_HW_TBL_BLK_SIZE_ALIGNMENT;
-
- if (hash_bdy->size) {
- hash_bdy->base = dma_alloc_coherent(ipa3_ctx->pdev,
- hash_bdy->size, &hash_bdy->phys_base, GFP_KERNEL);
- if (!hash_bdy->base) {
- IPAERR("fail to alloc DMA buff of size %d\n",
- hash_bdy->size);
- rc = -ENOMEM;
- goto prep_failed;
- }
- memset(hash_bdy->base, 0, hash_bdy->size);
- }
-
- if (nhash_bdy->size) {
- nhash_bdy->base = dma_alloc_coherent(ipa3_ctx->pdev,
- nhash_bdy->size, &nhash_bdy->phys_base, GFP_KERNEL);
- if (!nhash_bdy->base) {
- IPAERR("fail to alloc DMA buff of size %d\n",
- hash_bdy->size);
- rc = -ENOMEM;
- goto nhash_bdy_fail;
- }
- memset(nhash_bdy->base, 0, nhash_bdy->size);
+ goto allocate_fail;
}
if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
- hash_bdy->base, hash_hdr->base,
+ alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
hash_bdy_start_ofst, apps_start_idx)) {
IPAERR("fail to translate hashable rt tbls to hw format\n");
rc = -EPERM;
goto translate_fail;
}
if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
- nhash_bdy->base, nhash_hdr->base,
+ alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
nhash_bdy_start_ofst, apps_start_idx)) {
IPAERR("fail to translate non-hashable rt tbls to hw format\n");
rc = -EPERM;
@@ -579,19 +373,14 @@ static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
return rc;
translate_fail:
- if (nhash_bdy->size)
- dma_free_coherent(ipa3_ctx->pdev, nhash_bdy->size,
- nhash_bdy->base, nhash_bdy->phys_base);
-nhash_bdy_fail:
- if (hash_bdy->size)
- dma_free_coherent(ipa3_ctx->pdev, hash_bdy->size,
- hash_bdy->base, hash_bdy->phys_base);
-prep_failed:
- dma_free_coherent(ipa3_ctx->pdev, hash_hdr->size,
- hash_hdr->base, hash_hdr->phys_base);
- dma_free_coherent(ipa3_ctx->pdev, nhash_hdr->size,
- nhash_hdr->base, nhash_hdr->phys_base);
-no_rt_tbls:
+ if (alloc_params->hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params->hash_hdr);
+ ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+ if (alloc_params->hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->hash_bdy);
+ if (alloc_params->nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_fail:
return rc;
}
@@ -639,8 +428,7 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
struct ipahal_imm_cmd_pyld *cmd_pyld[5];
int num_cmd = 0;
- struct ipa_mem_buffer hash_bdy, nhash_bdy;
- struct ipa_mem_buffer hash_hdr, nhash_hdr;
+ struct ipahal_fltrt_alloc_imgs_params alloc_params;
u32 num_modem_rt_index;
int rc = 0;
u32 lcl_hash_hdr, lcl_nhash_hdr;
@@ -649,9 +437,15 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
struct ipahal_reg_fltrt_hash_flush flush;
struct ipahal_reg_valmask valmask;
int i;
+ struct ipa3_rt_tbl_set *set;
+ struct ipa3_rt_tbl *tbl;
+ u32 tbl_hdr_width;
+ tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
memset(desc, 0, sizeof(desc));
memset(cmd_pyld, 0, sizeof(cmd_pyld));
+ memset(&alloc_params, 0, sizeof(alloc_params));
+ alloc_params.ipt = ip;
if (ip == IPA_IP_v4) {
num_modem_rt_index =
@@ -659,47 +453,79 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_rt_hash_ofst) +
- num_modem_rt_index * IPA_HW_TBL_HDR_WIDTH;
+ num_modem_rt_index * tbl_hdr_width;
lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v4_rt_nhash_ofst) +
- num_modem_rt_index * IPA_HW_TBL_HDR_WIDTH;
+ num_modem_rt_index * tbl_hdr_width;
lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_v4_rt_hash_ofst);
lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_v4_rt_nhash_ofst);
lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl;
lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl;
+ alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) -
+ IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
} else {
num_modem_rt_index =
IPA_MEM_PART(v6_modem_rt_index_hi) -
IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_rt_hash_ofst) +
- num_modem_rt_index * IPA_HW_TBL_HDR_WIDTH;
+ num_modem_rt_index * tbl_hdr_width;
lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(v6_rt_nhash_ofst) +
- num_modem_rt_index * IPA_HW_TBL_HDR_WIDTH;
+ num_modem_rt_index * tbl_hdr_width;
lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_v6_rt_hash_ofst);
lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
IPA_MEM_PART(apps_v6_rt_nhash_ofst);
lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl;
lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+ alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) -
+ IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
}
- if (ipa_generate_rt_hw_tbl_img(ip,
- &hash_hdr, &nhash_hdr, &hash_bdy, &nhash_bdy)) {
- IPAERR("fail to generate RT HW TBL image. IP %d\n", ip);
+ if (!ipa3_ctx->rt_idx_bitmap[ip]) {
+ IPAERR("no rt tbls present\n");
+ rc = -EPERM;
+ goto no_rt_tbls;
+ }
+
+ set = &ipa3_ctx->rt_tbl_set[ip];
+ list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+ if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) {
+ rc = -EPERM;
+ goto no_rt_tbls;
+ }
+ if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+ tbl->sz[IPA_RULE_HASHABLE]) {
+ alloc_params.num_lcl_hash_tbls++;
+ alloc_params.total_sz_lcl_hash_tbls +=
+ tbl->sz[IPA_RULE_HASHABLE];
+ alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+ }
+ if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+ tbl->sz[IPA_RULE_NON_HASHABLE]) {
+ alloc_params.num_lcl_nhash_tbls++;
+ alloc_params.total_sz_lcl_nhash_tbls +=
+ tbl->sz[IPA_RULE_NON_HASHABLE];
+ alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+ }
+ }
+
+ if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) {
+ IPAERR("fail to generate RT HW TBL images. IP %d\n", ip);
rc = -EFAULT;
- goto fail_gen;
+ goto no_rt_tbls;
}
- if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE, &hash_bdy)) {
+ if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+ &alloc_params.hash_bdy)) {
rc = -EFAULT;
goto fail_size_valid;
}
if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
- &nhash_bdy)) {
+ &alloc_params.nhash_bdy)) {
rc = -EFAULT;
goto fail_size_valid;
}
@@ -732,8 +558,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = nhash_hdr.size;
- mem_cmd.system_addr = nhash_hdr.phys_base;
+ mem_cmd.size = alloc_params.nhash_hdr.size;
+ mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base;
mem_cmd.local_addr = lcl_nhash_hdr;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
@@ -751,8 +577,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = hash_hdr.size;
- mem_cmd.system_addr = hash_hdr.phys_base;
+ mem_cmd.size = alloc_params.hash_hdr.size;
+ mem_cmd.system_addr = alloc_params.hash_hdr.phys_base;
mem_cmd.local_addr = lcl_hash_hdr;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
@@ -771,8 +597,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = nhash_bdy.size;
- mem_cmd.system_addr = nhash_bdy.phys_base;
+ mem_cmd.size = alloc_params.nhash_bdy.size;
+ mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
mem_cmd.local_addr = lcl_nhash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
@@ -792,8 +618,8 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
mem_cmd.is_read = false;
mem_cmd.skip_pipeline_clear = false;
mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
- mem_cmd.size = hash_bdy.size;
- mem_cmd.system_addr = hash_bdy.phys_base;
+ mem_cmd.size = alloc_params.hash_bdy.size;
+ mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
mem_cmd.local_addr = lcl_hash_bdy;
cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
@@ -816,22 +642,26 @@ int __ipa_commit_rt_v3(enum ipa_ip_type ip)
goto fail_imm_cmd_construct;
}
- IPADBG_LOW("Hashable HEAD\n");
- IPA_DUMP_BUFF(hash_hdr.base, hash_hdr.phys_base, hash_hdr.size);
+ IPADBG("Hashable HEAD\n");
+ IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+ alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
- IPADBG_LOW("Non-Hashable HEAD\n");
- IPA_DUMP_BUFF(nhash_hdr.base, nhash_hdr.phys_base, nhash_hdr.size);
+ IPADBG("Non-Hashable HEAD\n");
+ IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+ alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
- if (hash_bdy.size) {
+ if (alloc_params.hash_bdy.size) {
IPADBG("Hashable BODY\n");
- IPA_DUMP_BUFF(hash_bdy.base,
- hash_bdy.phys_base, hash_bdy.size);
+ IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+ alloc_params.hash_bdy.phys_base,
+ alloc_params.hash_bdy.size);
}
- if (nhash_bdy.size) {
+ if (alloc_params.nhash_bdy.size) {
IPADBG("Non-Hashable BODY\n");
- IPA_DUMP_BUFF(nhash_bdy.base,
- nhash_bdy.phys_base, nhash_bdy.size);
+ IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+ alloc_params.nhash_bdy.phys_base,
+ alloc_params.nhash_bdy.size);
}
__ipa_reap_sys_rt_tbls(ip);
@@ -840,19 +670,15 @@ fail_imm_cmd_construct:
for (i = 0 ; i < num_cmd ; i++)
ipahal_destroy_imm_cmd(cmd_pyld[i]);
fail_size_valid:
- dma_free_coherent(ipa3_ctx->pdev, hash_hdr.size,
- hash_hdr.base, hash_hdr.phys_base);
- dma_free_coherent(ipa3_ctx->pdev, nhash_hdr.size,
- nhash_hdr.base, nhash_hdr.phys_base);
-
- if (hash_bdy.size)
- dma_free_coherent(ipa3_ctx->pdev, hash_bdy.size,
- hash_bdy.base, hash_bdy.phys_base);
-
- if (nhash_bdy.size)
- dma_free_coherent(ipa3_ctx->pdev, nhash_bdy.size,
- nhash_bdy.base, nhash_bdy.phys_base);
-fail_gen:
+ if (alloc_params.hash_hdr.size)
+ ipahal_free_dma_mem(&alloc_params.hash_hdr);
+ ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+ if (alloc_params.hash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.hash_bdy);
+ if (alloc_params.nhash_bdy.size)
+ ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+
+no_rt_tbls:
return rc;
}
@@ -1826,27 +1652,27 @@ int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
* entry array size. Then set by this function as an output parameter to
* indicate the number of entries in the array
*
- * This function reads the filtering table from IPA SRAM and prepares an array
+ * This function reads the routing table from IPA SRAM and prepares an array
* of entries. This function is mainly used for debugging purposes.
*
+ * If empty table or Modem Apps table, zero entries will be returned.
+ *
* Returns: 0 on success, negative on failure
*/
-int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
- enum ipa_ip_type ip_type,
- bool hashable,
- struct ipa3_debugfs_rt_entry entry[],
- int *num_entry)
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
+ bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry)
{
- u64 tbl_entry_in_hdr_ofst;
- u64 *tbl_entry_in_hdr;
- struct ipa3_rt_rule_hw_hdr *hdr;
- u8 *buf;
- int rule_idx;
- u8 rule_size;
void *ipa_sram_mmio;
+ u64 hdr_base_ofst;
+ int res = 0;
+ u64 tbl_addr;
+ bool is_sys;
+ struct ipa_mem_buffer *sys_tbl_mem;
+ u8 *rule_addr;
+ int rule_idx;
- IPADBG("tbl_idx=%d ip_type=%d hashable=%d\n",
- tbl_idx, ip_type, hashable);
+ IPADBG("tbl_idx=%d ip_type=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+ tbl_idx, ip_type, hashable, entry, num_entry);
if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
IPAERR("Invalid params\n");
@@ -1860,11 +1686,10 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
/* map IPA SRAM */
ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
- ipa3_ctx->ctrl->ipa_reg_base_ofst +
- ipahal_get_reg_n_ofst(
- IPA_SRAM_DIRECT_ACCESS_n,
- 0),
- ipa3_ctx->smem_sz);
+ ipa3_ctx->ctrl->ipa_reg_base_ofst +
+ ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+ ipa3_ctx->smem_restricted_bytes / 4),
+ ipa3_ctx->smem_sz);
if (!ipa_sram_mmio) {
IPAERR("fail to ioremap IPA SRAM\n");
return -ENOMEM;
@@ -1873,86 +1698,89 @@ int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
memset(entry, 0, sizeof(*entry) * (*num_entry));
if (hashable) {
if (ip_type == IPA_IP_v4)
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v4_rt_hash_ofst) +
- tbl_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_rt_hash_ofst);
else
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v6_rt_hash_ofst) +
- tbl_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_rt_hash_ofst);
} else {
if (ip_type == IPA_IP_v4)
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v4_rt_nhash_ofst) +
- tbl_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v4_rt_nhash_ofst);
else
- tbl_entry_in_hdr_ofst =
- ipa3_ctx->smem_restricted_bytes +
- IPA_MEM_PART(v6_rt_nhash_ofst) +
- tbl_idx * IPA_HW_TBL_HDR_WIDTH;
+ hdr_base_ofst =
+ IPA_MEM_PART(v6_rt_nhash_ofst);
}
- IPADBG("tbl_entry_in_hdr_ofst=0x%llx\n", tbl_entry_in_hdr_ofst);
+ IPADBG("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
- tbl_entry_in_hdr = ipa_sram_mmio + tbl_entry_in_hdr_ofst;
+ res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+ tbl_idx, &tbl_addr, &is_sys);
+ if (res) {
+ IPAERR("failed to read table address from header structure\n");
+ goto bail;
+ }
+ IPADBG("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+ tbl_idx, tbl_addr, is_sys);
+ if (!tbl_addr) {
+ IPAERR("invalid rt tbl addr\n");
+ res = -EFAULT;
+ goto bail;
+ }
/* for tables which reside in DDR access it from the virtual memory */
- if (!(*tbl_entry_in_hdr & 0x1)) {
- /* system */
+ if (is_sys) {
struct ipa3_rt_tbl_set *set;
struct ipa3_rt_tbl *tbl;
set = &ipa3_ctx->rt_tbl_set[ip_type];
- hdr = NULL;
+ rule_addr = NULL;
list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
- if (tbl->idx == tbl_idx)
- hdr = tbl->curr_mem[hashable ?
+ if (tbl->idx == tbl_idx) {
+ sys_tbl_mem = &(tbl->curr_mem[hashable ?
IPA_RULE_HASHABLE :
- IPA_RULE_NON_HASHABLE].base;
+ IPA_RULE_NON_HASHABLE]);
+ if (sys_tbl_mem->phys_base &&
+ sys_tbl_mem->phys_base != tbl_addr) {
+ IPAERR("mismatch:parsed=%llx sw=%pad\n"
+ , tbl_addr,
+ &sys_tbl_mem->phys_base);
+ }
+ if (sys_tbl_mem->phys_base)
+ rule_addr = sys_tbl_mem->base;
+ else
+ rule_addr = NULL;
+ }
}
-
- if (!hdr)
- hdr = ipa3_ctx->empty_rt_tbl_mem.base;
} else {
- /* local */
- hdr = (void *)((u8 *)tbl_entry_in_hdr -
- tbl_idx * IPA_HW_TBL_HDR_WIDTH +
- (*tbl_entry_in_hdr - 1) / 16);
+ rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
}
- IPADBG("*tbl_entry_in_hdr=0x%llx\n", *tbl_entry_in_hdr);
- IPADBG("hdr=0x%p\n", hdr);
+ IPADBG("First rule addr 0x%p\n", rule_addr);
+
+ if (!rule_addr) {
+ /* Modem table in system memory or empty table */
+ *num_entry = 0;
+ goto bail;
+ }
rule_idx = 0;
while (rule_idx < *num_entry) {
- IPADBG("*((u64 *)hdr)=0x%llx\n", *((u64 *)hdr));
- if (*((u64 *)hdr) == 0)
+ res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+ if (res) {
+ IPAERR("failed parsing rt rule\n");
+ goto bail;
+ }
+
+ IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+ if (!entry[rule_idx].rule_size)
break;
- entry[rule_idx].eq_attrib.rule_eq_bitmap = hdr->u.hdr.en_rule;
- entry[rule_idx].retain_hdr = hdr->u.hdr.retain_hdr;
- entry[rule_idx].prio = hdr->u.hdr.priority;
- entry[rule_idx].rule_id = hdr->u.hdr.rule_id;
- entry[rule_idx].dst = hdr->u.hdr.pipe_dest_idx;
- entry[rule_idx].hdr_ofset = hdr->u.hdr.hdr_offset;
- entry[rule_idx].is_proc_ctx = hdr->u.hdr.proc_ctx;
- entry[rule_idx].system = hdr->u.hdr.system;
- buf = (u8 *)(hdr + 1);
- IPADBG("buf=0x%p\n", buf);
-
- ipa3_generate_eq_from_hw_rule(&entry[rule_idx].eq_attrib, buf,
- &rule_size);
- IPADBG("rule_size=%d\n", rule_size);
- hdr = (void *)(buf + rule_size);
- IPADBG("hdr=0x%p\n", hdr);
+ rule_addr += entry[rule_idx].rule_size;
rule_idx++;
}
-
*num_entry = rule_idx;
+bail:
iounmap(ipa_sram_mmio);
-
- return 0;
+ return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index c2a70bca80b1..18ac51701e4c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -20,6 +20,7 @@
#include <linux/elf.h>
#include "ipa_i.h"
#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
#include "../ipa_rm_i.h"
#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
@@ -169,14 +170,6 @@ static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
};
-static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
- IPA_OFFSET_MEQ32_1, -1 };
-static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
- IPA_OFFSET_MEQ128_1, -1 };
-static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
- IPA_IHL_OFFSET_RANGE16_1, -1 };
-static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
- IPA_IHL_OFFSET_MEQ32_1, -1 };
enum ipa_ver {
IPA_3_0,
IPA_VER_MAX,
@@ -1114,1482 +1107,6 @@ bool ipa_is_ep_support_flt(int pipe_idx)
}
/**
- * ipa3_write_64() - convert 64 bit value to byte array
- * @w: 64 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa3_write_64(u64 w, u8 *dest)
-{
- if (unlikely(dest == NULL)) {
- IPAERR("NULL address!\n");
- return dest;
- }
- *dest++ = (u8)((w) & 0xFF);
- *dest++ = (u8)((w >> 8) & 0xFF);
- *dest++ = (u8)((w >> 16) & 0xFF);
- *dest++ = (u8)((w >> 24) & 0xFF);
- *dest++ = (u8)((w >> 32) & 0xFF);
- *dest++ = (u8)((w >> 40) & 0xFF);
- *dest++ = (u8)((w >> 48) & 0xFF);
- *dest++ = (u8)((w >> 56) & 0xFF);
-
- return dest;
-}
-
-/**
- * ipa3_write_32() - convert 32 bit value to byte array
- * @w: 32 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa3_write_32(u32 w, u8 *dest)
-{
- if (unlikely(dest == NULL)) {
- IPAERR("NULL address!\n");
- return dest;
- }
- *dest++ = (u8)((w) & 0xFF);
- *dest++ = (u8)((w >> 8) & 0xFF);
- *dest++ = (u8)((w >> 16) & 0xFF);
- *dest++ = (u8)((w >> 24) & 0xFF);
-
- return dest;
-}
-
-/**
- * ipa3_write_16() - convert 16 bit value to byte array
- * @hw: 16 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa3_write_16(u16 hw, u8 *dest)
-{
- if (unlikely(dest == NULL)) {
- IPAERR("NULL address!\n");
- return dest;
- }
- *dest++ = (u8)((hw) & 0xFF);
- *dest++ = (u8)((hw >> 8) & 0xFF);
-
- return dest;
-}
-
-/**
- * ipa3_write_8() - convert 8 bit value to byte array
- * @hw: 8 bit integer
- * @dest: byte array
- *
- * Return value: converted value
- */
-u8 *ipa3_write_8(u8 b, u8 *dest)
-{
- if (unlikely(dest == NULL)) {
- IPAERR("NULL address!\n");
- return dest;
- }
- *dest++ = (b) & 0xFF;
-
- return dest;
-}
-
-/**
- * ipa3_pad_to_32() - pad byte array to 32 bit value
- * @dest: byte array
- *
- * Return value: padded value
- */
-u8 *ipa3_pad_to_32(u8 *dest)
-{
- int i = (long)dest & 0x3;
- int j;
-
- if (i)
- for (j = 0; j < (4 - i); j++)
- *dest++ = 0;
-
- return dest;
-}
-
-/**
- * ipa3_pad_to_64() - pad byte array to 64 bit value
- * @dest: byte array
- *
- * Return value: padded value
- */
-u8 *ipa3_pad_to_64(u8 *dest)
-{
- int i = (long)dest & 0x7;
- int j;
-
- if (i)
- for (j = 0; j < (8 - i); j++)
- *dest++ = 0;
-
- return dest;
-}
-
-void ipa3_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
- u8 hdr_mac_addr_offset,
- const uint8_t mac_addr_mask[ETH_ALEN],
- const uint8_t mac_addr[ETH_ALEN])
-{
- int i;
-
- *extra = ipa3_write_8(hdr_mac_addr_offset, *extra);
-
- /* LSB MASK and ADDR */
- *rest = ipa3_write_64(0, *rest);
- *rest = ipa3_write_64(0, *rest);
-
- /* MSB MASK and ADDR */
- *rest = ipa3_write_16(0, *rest);
- for (i = 5; i >= 0; i--)
- *rest = ipa3_write_8(mac_addr_mask[i], *rest);
- *rest = ipa3_write_16(0, *rest);
- for (i = 5; i >= 0; i--)
- *rest = ipa3_write_8(mac_addr[i], *rest);
-}
-
-/**
- * ipa_rule_generation_err_check() - check basic validity on the rule
- * attribs before starting building it
- * checks if not not using ipv4 attribs on ipv6 and vice-versa
- * @ip: IP address type
- * @attrib: IPA rule attribute
- *
- * Return: 0 on success, negative on failure
- */
-static int ipa_rule_generation_err_check(
- enum ipa_ip_type ip, const struct ipa_rule_attrib *attrib)
-{
- if (ip == IPA_IP_v4) {
- if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
- attrib->attrib_mask & IPA_FLT_TC ||
- attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
- IPAERR("v6 attrib's specified for v4 rule\n");
- return -EPERM;
- }
- } else if (ip == IPA_IP_v6) {
- if (attrib->attrib_mask & IPA_FLT_TOS ||
- attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- IPAERR("v4 attrib's specified for v6 rule\n");
- return -EPERM;
- }
- } else {
- IPAERR("unsupported ip %d\n", ip);
- return -EPERM;
- }
-
- return 0;
-}
-
-static int ipa3_generate_hw_rule_ip4(u16 *en_rule,
- const struct ipa_rule_attrib *attrib,
- u8 **extra_wrds, u8 **rest_wrds)
-{
- u8 *extra = *extra_wrds;
- u8 *rest = *rest_wrds;
- u8 ofst_meq32 = 0;
- u8 ihl_ofst_rng16 = 0;
- u8 ihl_ofst_meq32 = 0;
- u8 ofst_meq128 = 0;
- int rc = 0;
-
- if (attrib->attrib_mask & IPA_FLT_TOS) {
- *en_rule |= IPA_TOS_EQ;
- extra = ipa3_write_8(attrib->u.v4.tos, extra);
- }
-
- if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- *en_rule |= IPA_PROTOCOL_EQ;
- extra = ipa3_write_8(attrib->u.v4.protocol, extra);
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -14,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -8,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -22,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -16,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- /* 0 => offset of TOS in v4 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_32((attrib->tos_mask << 16), rest);
- rest = ipa3_write_32((attrib->tos_value << 16), rest);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- /* 12 => offset of src ip in v4 header */
- extra = ipa3_write_8(12, extra);
- rest = ipa3_write_32(attrib->u.v4.src_addr_mask, rest);
- rest = ipa3_write_32(attrib->u.v4.src_addr, rest);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- /* 16 => offset of dst ip in v4 header */
- extra = ipa3_write_8(16, extra);
- rest = ipa3_write_32(attrib->u.v4.dst_addr_mask, rest);
- rest = ipa3_write_32(attrib->u.v4.dst_addr, rest);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- /* -2 => offset of ether type in L2 hdr */
- extra = ipa3_write_8((u8)-2, extra);
- rest = ipa3_write_16(0, rest);
- rest = ipa3_write_16(htons(attrib->ether_type), rest);
- rest = ipa3_write_16(0, rest);
- rest = ipa3_write_16(htons(attrib->ether_type), rest);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TYPE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- /* 0 => offset of type after v4 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_32(0xFF, rest);
- rest = ipa3_write_32(attrib->type, rest);
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_CODE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- /* 1 => offset of code after v4 header */
- extra = ipa3_write_8(1, extra);
- rest = ipa3_write_32(0xFF, rest);
- rest = ipa3_write_32(attrib->code, rest);
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SPI) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- /* 0 => offset of SPI after v4 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_32(0xFFFFFFFF, rest);
- rest = ipa3_write_32(attrib->spi, rest);
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_META_DATA) {
- *en_rule |= IPA_METADATA_COMPARE;
- rest = ipa3_write_32(attrib->meta_data_mask, rest);
- rest = ipa3_write_32(attrib->meta_data, rest);
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 0 => offset of src port after v4 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_16(attrib->src_port_hi, rest);
- rest = ipa3_write_16(attrib->src_port_lo, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 2 => offset of dst port after v4 header */
- extra = ipa3_write_8(2, extra);
- rest = ipa3_write_16(attrib->dst_port_hi, rest);
- rest = ipa3_write_16(attrib->dst_port_lo, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 0 => offset of src port after v4 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_16(attrib->src_port, rest);
- rest = ipa3_write_16(attrib->src_port, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 2 => offset of dst port after v4 header */
- extra = ipa3_write_8(2, extra);
- rest = ipa3_write_16(attrib->dst_port, rest);
- rest = ipa3_write_16(attrib->dst_port, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
- *en_rule |= IPA_IS_FRAG;
-
- goto done;
-
-err:
- rc = -EPERM;
-done:
- *extra_wrds = extra;
- *rest_wrds = rest;
- return rc;
-}
-
-static int ipa3_generate_hw_rule_ip6(u16 *en_rule,
- const struct ipa_rule_attrib *attrib,
- u8 **extra_wrds, u8 **rest_wrds)
-{
- u8 *extra = *extra_wrds;
- u8 *rest = *rest_wrds;
- u8 ofst_meq32 = 0;
- u8 ihl_ofst_rng16 = 0;
- u8 ihl_ofst_meq32 = 0;
- u8 ofst_meq128 = 0;
- int rc = 0;
-
- /* v6 code below assumes no extension headers TODO: fix this */
-
- if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
- *en_rule |= IPA_PROTOCOL_EQ;
- extra = ipa3_write_8(attrib->u.v6.next_hdr, extra);
- }
-
- if (attrib->attrib_mask & IPA_FLT_TC) {
- *en_rule |= IPA_TC_EQ;
- extra = ipa3_write_8(attrib->u.v6.tc, extra);
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
- /* 8 => offset of src ip in v6 header */
- extra = ipa3_write_8(8, extra);
- rest = ipa3_write_32(attrib->u.v6.src_addr_mask[3], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr_mask[2], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr[3], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr[2], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr_mask[1], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr_mask[0], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr[1], rest);
- rest = ipa3_write_32(attrib->u.v6.src_addr[0], rest);
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
- /* 24 => offset of dst ip in v6 header */
- extra = ipa3_write_8(24, extra);
- rest = ipa3_write_32(attrib->u.v6.dst_addr_mask[3], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr_mask[2], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr[3], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr[2], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr_mask[1], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr_mask[0], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr[1], rest);
- rest = ipa3_write_32(attrib->u.v6.dst_addr[0], rest);
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
- /* 0 => offset of TOS in v6 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_64(0, rest);
- rest = ipa3_write_64(0, rest);
- rest = ipa3_write_32(0, rest);
- rest = ipa3_write_32((attrib->tos_mask << 20), rest);
- rest = ipa3_write_32(0, rest);
- rest = ipa3_write_32((attrib->tos_value << 20), rest);
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -14,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -8,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -22,
- attrib->dst_mac_addr_mask,
- attrib->dst_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa3_generate_mac_addr_hw_rule(
- &extra,
- &rest,
- -16,
- attrib->src_mac_addr_mask,
- attrib->src_mac_addr);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- /* -2 => offset of ether type in L2 hdr */
- extra = ipa3_write_8((u8)-2, extra);
- rest = ipa3_write_16(0, rest);
- rest = ipa3_write_16(htons(attrib->ether_type), rest);
- rest = ipa3_write_16(0, rest);
- rest = ipa3_write_16(htons(attrib->ether_type), rest);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TYPE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- /* 0 => offset of type after v6 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_32(0xFF, rest);
- rest = ipa3_write_32(attrib->type, rest);
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_CODE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- /* 1 => offset of code after v6 header */
- extra = ipa3_write_8(1, extra);
- rest = ipa3_write_32(0xFF, rest);
- rest = ipa3_write_32(attrib->code, rest);
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SPI) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- /* 0 => offset of SPI after v6 header FIXME */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_32(0xFFFFFFFF, rest);
- rest = ipa3_write_32(attrib->spi, rest);
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_META_DATA) {
- *en_rule |= IPA_METADATA_COMPARE;
- rest = ipa3_write_32(attrib->meta_data_mask, rest);
- rest = ipa3_write_32(attrib->meta_data, rest);
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 0 => offset of src port after v6 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_16(attrib->src_port, rest);
- rest = ipa3_write_16(attrib->src_port, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 2 => offset of dst port after v6 header */
- extra = ipa3_write_8(2, extra);
- rest = ipa3_write_16(attrib->dst_port, rest);
- rest = ipa3_write_16(attrib->dst_port, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 0 => offset of src port after v6 header */
- extra = ipa3_write_8(0, extra);
- rest = ipa3_write_16(attrib->src_port_hi, rest);
- rest = ipa3_write_16(attrib->src_port_lo, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- goto err;
- }
- if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
- goto err;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- /* 2 => offset of dst port after v6 header */
- extra = ipa3_write_8(2, extra);
- rest = ipa3_write_16(attrib->dst_port_hi, rest);
- rest = ipa3_write_16(attrib->dst_port_lo, rest);
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
- *en_rule |= IPA_FL_EQ;
- rest = ipa3_write_32(attrib->u.v6.flow_label & 0xFFFFF,
- rest);
- }
-
- if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
- *en_rule |= IPA_IS_FRAG;
-
- goto done;
-
-err:
- rc = -EPERM;
-done:
- *extra_wrds = extra;
- *rest_wrds = rest;
- return rc;
-}
-
-static u8 *ipa3_copy_mem(u8 *src, u8 *dst, int cnt)
-{
- while (cnt--)
- *dst++ = *src++;
-
- return dst;
-}
-
-/**
- * ipa3_generate_hw_rule() - generate HW rule
- * @ip: IP address type
- * @attrib: IPA rule attribute
- * @buf: output buffer
- * @en_rule: enable rule
- *
- * Return codes:
- * 0: success
- * -EPERM: wrong input
- */
-int ipa3_generate_hw_rule(enum ipa_ip_type ip,
- const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
-{
- int sz;
- int rc = 0;
- u8 *extra_wrd_buf;
- u8 *rest_wrd_buf;
- u8 *extra_wrd_start;
- u8 *rest_wrd_start;
- u8 *extra_wrd_i;
- u8 *rest_wrd_i;
-
- sz = IPA_HW_TBL_WIDTH * 2 + IPA_HW_RULE_START_ALIGNMENT;
- extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
- if (!extra_wrd_buf) {
- IPAERR("failed to allocate %d bytes\n", sz);
- rc = -ENOMEM;
- goto fail_extra_alloc;
- }
-
- sz = IPA_RT_FLT_HW_RULE_BUF_SIZE + IPA_HW_RULE_START_ALIGNMENT;
- rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
- if (!rest_wrd_buf) {
- IPAERR("failed to allocate %d bytes\n", sz);
- rc = -ENOMEM;
- goto fail_rest_alloc;
- }
-
- extra_wrd_start = extra_wrd_buf + IPA_HW_RULE_START_ALIGNMENT;
- extra_wrd_start = (u8 *)((long)extra_wrd_start &
- ~IPA_HW_RULE_START_ALIGNMENT);
-
- rest_wrd_start = rest_wrd_buf + IPA_HW_RULE_START_ALIGNMENT;
- rest_wrd_start = (u8 *)((long)rest_wrd_start &
- ~IPA_HW_RULE_START_ALIGNMENT);
-
- extra_wrd_i = extra_wrd_start;
- rest_wrd_i = rest_wrd_start;
-
- rc = ipa_rule_generation_err_check(ip, attrib);
- if (rc) {
- IPAERR("ipa_rule_generation_err_check() failed\n");
- goto fail_err_check;
- }
-
- if (ip == IPA_IP_v4) {
- if (ipa3_generate_hw_rule_ip4(en_rule, attrib,
- &extra_wrd_i, &rest_wrd_i)) {
- IPAERR("failed to build ipv4 hw rule\n");
- rc = -EPERM;
- goto fail_err_check;
- }
-
- } else if (ip == IPA_IP_v6) {
- if (ipa3_generate_hw_rule_ip6(en_rule, attrib,
- &extra_wrd_i, &rest_wrd_i)) {
-
- IPAERR("failed to build ipv6 hw rule\n");
- rc = -EPERM;
- goto fail_err_check;
- }
- } else {
- IPAERR("unsupported ip %d\n", ip);
- goto fail_err_check;
- }
-
- /*
- * default "rule" means no attributes set -> map to
- * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
- */
- if (attrib->attrib_mask == 0) {
- IPADBG_LOW("building default rule\n");
- *en_rule |= ipa_ofst_meq32[0];
- extra_wrd_i = ipa3_write_8(0, extra_wrd_i); /* offset */
- rest_wrd_i = ipa3_write_32(0, rest_wrd_i); /* mask */
- rest_wrd_i = ipa3_write_32(0, rest_wrd_i); /* val */
- }
-
- IPADBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
- IPADBG_LOW("extra_word_2 0x%llx\n",
- *(u64 *)(extra_wrd_start + IPA_HW_TBL_WIDTH));
-
- extra_wrd_i = ipa3_pad_to_64(extra_wrd_i);
- sz = extra_wrd_i - extra_wrd_start;
- IPADBG_LOW("extra words params sz %d\n", sz);
- *buf = ipa3_copy_mem(extra_wrd_start, *buf, sz);
-
- rest_wrd_i = ipa3_pad_to_64(rest_wrd_i);
- sz = rest_wrd_i - rest_wrd_start;
- IPADBG_LOW("non extra words params sz %d\n", sz);
- *buf = ipa3_copy_mem(rest_wrd_start, *buf, sz);
-
-fail_err_check:
- kfree(rest_wrd_buf);
-fail_rest_alloc:
- kfree(extra_wrd_buf);
-fail_extra_alloc:
- return rc;
-}
-
-void ipa3_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
- u8 hdr_mac_addr_offset, const uint8_t mac_addr_mask[ETH_ALEN],
- const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
-{
- int i;
-
- eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
-
- /* LSB MASK and ADDR */
- memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8);
- memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8);
-
- /* MSB MASK and ADDR */
- memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2);
- for (i = 0; i <= 5; i++)
- eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
- mac_addr_mask[i];
-
- memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2);
- for (i = 0; i <= 0; i++)
- eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
- mac_addr[i];
-}
-
-int ipa3_generate_flt_eq_ip4(enum ipa_ip_type ip,
- const struct ipa_rule_attrib *attrib,
- struct ipa_ipfltri_rule_eq *eq_atrb)
-{
- u8 ofst_meq32 = 0;
- u8 ihl_ofst_rng16 = 0;
- u8 ihl_ofst_meq32 = 0;
- u8 ofst_meq128 = 0;
- u16 eq_bitmap = 0;
- u16 *en_rule = &eq_bitmap;
-
- if (attrib->attrib_mask & IPA_FLT_TOS) {
- *en_rule |= IPA_TOS_EQ;
- eq_atrb->tos_eq_present = 1;
- eq_atrb->tos_eq = attrib->u.v4.tos;
- }
-
- if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
- *en_rule |= IPA_PROTOCOL_EQ;
- eq_atrb->protocol_eq_present = 1;
- eq_atrb->protocol_eq = attrib->u.v4.protocol;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -14,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -8,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -22,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -16,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
- eq_atrb->offset_meq_32[ofst_meq32].mask =
- attrib->tos_mask << 16;
- eq_atrb->offset_meq_32[ofst_meq32].value =
- attrib->tos_value << 16;
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
- eq_atrb->offset_meq_32[ofst_meq32].mask =
- attrib->u.v4.src_addr_mask;
- eq_atrb->offset_meq_32[ofst_meq32].value =
- attrib->u.v4.src_addr;
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
- eq_atrb->offset_meq_32[ofst_meq32].mask =
- attrib->u.v4.dst_addr_mask;
- eq_atrb->offset_meq_32[ofst_meq32].value =
- attrib->u.v4.dst_addr;
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
- eq_atrb->offset_meq_32[ofst_meq32].mask =
- htons(attrib->ether_type);
- eq_atrb->offset_meq_32[ofst_meq32].value =
- htons(attrib->ether_type);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TYPE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
- attrib->type;
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_CODE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
- attrib->code;
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SPI) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
- 0xFFFFFFFF;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
- attrib->spi;
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_META_DATA) {
- *en_rule |= IPA_METADATA_COMPARE;
- eq_atrb->metadata_meq32_present = 1;
- eq_atrb->metadata_meq32.offset = 0;
- eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
- eq_atrb->metadata_meq32.value = attrib->meta_data;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->src_port_lo;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->src_port_hi;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->dst_port_lo;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->dst_port_hi;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->src_port;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->src_port;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->dst_port;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->dst_port;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
- *en_rule |= IPA_IS_FRAG;
- eq_atrb->ipv4_frag_eq_present = 1;
- }
-
- eq_atrb->rule_eq_bitmap = *en_rule;
- eq_atrb->num_offset_meq_32 = ofst_meq32;
- eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
- eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
- eq_atrb->num_offset_meq_128 = ofst_meq128;
-
- return 0;
-}
-
-/* This is called only before sending ipa_install_fltr_rule_req_msg to Q6 */
-int ipa3_generate_flt_eq_ip6(enum ipa_ip_type ip,
- const struct ipa_rule_attrib *attrib,
- struct ipa_ipfltri_rule_eq *eq_atrb)
-{
- u8 ofst_meq32 = 0;
- u8 ihl_ofst_rng16 = 0;
- u8 ihl_ofst_meq32 = 0;
- u8 ofst_meq128 = 0;
- u16 eq_bitmap = 0;
- u16 *en_rule = &eq_bitmap;
-
- if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
- *en_rule |= IPA_PROTOCOL_EQ;
- eq_atrb->protocol_eq_present = 1;
- eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TC) {
- *en_rule |= IPA_FLT_TC;
- eq_atrb->tc_eq_present = 1;
- eq_atrb->tc_eq = attrib->u.v6.tc;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
- eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
- /* use the same word order as in ipa v2 */
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
- = attrib->u.v6.src_addr_mask[0];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
- = attrib->u.v6.src_addr_mask[1];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
- = attrib->u.v6.src_addr_mask[2];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
- = attrib->u.v6.src_addr_mask[3];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
- = attrib->u.v6.src_addr[0];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
- = attrib->u.v6.src_addr[1];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
- = attrib->u.v6.src_addr[2];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
- 12) = attrib->u.v6.src_addr[3];
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
- eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
- /* use the same word order as in ipa v2 */
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
- = attrib->u.v6.dst_addr_mask[0];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
- = attrib->u.v6.dst_addr_mask[1];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
- = attrib->u.v6.dst_addr_mask[2];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
- = attrib->u.v6.dst_addr_mask[3];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
- = attrib->u.v6.dst_addr[0];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
- = attrib->u.v6.dst_addr[1];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
- = attrib->u.v6.dst_addr[2];
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
- 12) = attrib->u.v6.dst_addr[3];
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
- eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
- memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
- = attrib->tos_mask << 20;
- memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
- *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
- 12) = attrib->tos_value << 20;
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -14 => offset of dst mac addr in Ethernet II hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -14,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -8 => offset of src mac addr in Ethernet II hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -8,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -22 => offset of dst mac addr in 802.3 hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -22,
- attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
- if (ipa_ofst_meq128[ofst_meq128] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq128[ofst_meq128];
-
- /* -16 => offset of src mac addr in 802.3 hdr */
- ipa3_generate_flt_mac_addr_eq(eq_atrb, -16,
- attrib->src_mac_addr_mask, attrib->src_mac_addr,
- ofst_meq128);
-
- ofst_meq128++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
- if (ipa_ofst_meq32[ofst_meq32] == -1) {
- IPAERR("ran out of meq128 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ofst_meq32[ofst_meq32];
- eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
- eq_atrb->offset_meq_32[ofst_meq32].mask =
- htons(attrib->ether_type);
- eq_atrb->offset_meq_32[ofst_meq32].value =
- htons(attrib->ether_type);
- ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_TYPE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
- attrib->type;
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_CODE) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
- attrib->code;
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SPI) {
- if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
- IPAERR("ran out of ihl_meq32 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
- 0xFFFFFFFF;
- eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
- attrib->spi;
- ihl_ofst_meq32++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_META_DATA) {
- *en_rule |= IPA_METADATA_COMPARE;
- eq_atrb->metadata_meq32_present = 1;
- eq_atrb->metadata_meq32.offset = 0;
- eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
- eq_atrb->metadata_meq32.value = attrib->meta_data;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->src_port;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->src_port;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->dst_port;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->dst_port;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- if (attrib->src_port_hi < attrib->src_port_lo) {
- IPAERR("bad src port range param\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->src_port_lo;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->src_port_hi;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
- if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
- IPAERR("ran out of ihl_rng16 eq\n");
- return -EPERM;
- }
- if (attrib->dst_port_hi < attrib->dst_port_lo) {
- IPAERR("bad dst port range param\n");
- return -EPERM;
- }
- *en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
- = attrib->dst_port_lo;
- eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
- = attrib->dst_port_hi;
- ihl_ofst_rng16++;
- }
-
- if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
- *en_rule |= IPA_FLT_FLOW_LABEL;
- eq_atrb->fl_eq_present = 1;
- eq_atrb->fl_eq = attrib->u.v6.flow_label;
- }
-
- if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
- *en_rule |= IPA_IS_FRAG;
- eq_atrb->ipv4_frag_eq_present = 1;
- }
-
- eq_atrb->rule_eq_bitmap = *en_rule;
- eq_atrb->num_offset_meq_32 = ofst_meq32;
- eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
- eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
- eq_atrb->num_offset_meq_128 = ofst_meq128;
-
- return 0;
-}
-
-int ipa3_generate_flt_eq(enum ipa_ip_type ip,
- const struct ipa_rule_attrib *attrib,
- struct ipa_ipfltri_rule_eq *eq_atrb)
-{
- if (ipa_rule_generation_err_check(ip, attrib))
- return -EPERM;
-
- if (ip == IPA_IP_v4) {
- if (ipa3_generate_flt_eq_ip4(ip, attrib, eq_atrb)) {
- IPAERR("failed to build ipv4 flt eq rule\n");
- return -EPERM;
- }
-
- } else if (ip == IPA_IP_v6) {
- if (ipa3_generate_flt_eq_ip6(ip, attrib, eq_atrb)) {
- IPAERR("failed to build ipv6 flt eq rule\n");
- return -EPERM;
- }
- } else {
- IPAERR("unsupported ip %d\n", ip);
- return -EPERM;
- }
-
- /*
- * default "rule" means no attributes set -> map to
- * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
- */
- if (attrib->attrib_mask == 0) {
- eq_atrb->rule_eq_bitmap = 0;
- eq_atrb->rule_eq_bitmap |= ipa_ofst_meq32[0];
- eq_atrb->offset_meq_32[0].offset = 0;
- eq_atrb->offset_meq_32[0].mask = 0;
- eq_atrb->offset_meq_32[0].value = 0;
- }
-
- return 0;
-}
-
-/**
* ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
* @clnt_hdl: [in] opaque client handle assigned by IPA to client
*
@@ -3490,7 +2007,7 @@ int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
goto fail;
}
- aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+ aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
aligned_size = size - (aligned_start_ofst - start_ofst);
IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
@@ -3955,7 +2472,6 @@ int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
- ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v3_0;
return 0;
}
@@ -3975,8 +2491,11 @@ void ipa3_skb_recycle(struct sk_buff *skb)
int ipa3_alloc_rule_id(struct idr *rule_ids)
{
+	/* There are two groups of rule-Ids, Modem ones and Apps ones.
+ * Distinction by high bit: Modem Ids are high bit asserted.
+ */
return idr_alloc(rule_ids, NULL,
- IPA_RULE_ID_MIN_VAL, IPA_RULE_ID_MAX_VAL + 1,
+ ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
GFP_KERNEL);
}
@@ -4996,251 +3515,6 @@ end_sequence:
}
/**
- * ipa3_calc_extra_wrd_bytes()- Calculate the number of extra words for eq
- * @attrib: equation attribute
- *
- * Return value: 0 on success, negative otherwise
- */
-int ipa3_calc_extra_wrd_bytes(const struct ipa_ipfltri_rule_eq *attrib)
-{
- int num = 0;
-
- if (attrib->tos_eq_present)
- num++;
- if (attrib->protocol_eq_present)
- num++;
- if (attrib->tc_eq_present)
- num++;
- num += attrib->num_offset_meq_128;
- num += attrib->num_offset_meq_32;
- num += attrib->num_ihl_offset_meq_32;
- num += attrib->num_ihl_offset_range_16;
- if (attrib->ihl_offset_eq_32_present)
- num++;
- if (attrib->ihl_offset_eq_16_present)
- num++;
-
- IPADBG_LOW("extra bytes number %d\n", num);
-
- return num;
-}
-
-/**
- * ipa3_calc_extra_wrd_bytes()- generate an equation from rule read from IPA HW
- * @attrib: equation attribute
- * @buf: raw rule in IPA SRAM
- * @rule_size: size of the rule pointed by buf
- *
- * Return value: 0 on success, negative otherwise
- */
-int ipa3_generate_eq_from_hw_rule(
- struct ipa_ipfltri_rule_eq *attrib, u8 *buf, u8 *rule_size)
-{
- int num_offset_meq_32;
- int num_ihl_offset_range_16;
- int num_ihl_offset_meq_32;
- int num_offset_meq_128;
- int extra_bytes;
- u8 *extra;
- u8 *rest;
- int i;
-
- IPADBG("rule_eq_bitmap=0x%x\n", attrib->rule_eq_bitmap);
- if (attrib->rule_eq_bitmap & IPA_TOS_EQ)
- attrib->tos_eq_present = true;
- if (attrib->rule_eq_bitmap & IPA_PROTOCOL_EQ)
- attrib->protocol_eq_present = true;
- if (attrib->rule_eq_bitmap & IPA_OFFSET_MEQ32_0)
- attrib->num_offset_meq_32++;
- if (attrib->rule_eq_bitmap & IPA_OFFSET_MEQ32_1)
- attrib->num_offset_meq_32++;
- if (attrib->rule_eq_bitmap & IPA_IHL_OFFSET_RANGE16_0)
- attrib->num_ihl_offset_range_16++;
- if (attrib->rule_eq_bitmap & IPA_IHL_OFFSET_RANGE16_1)
- attrib->num_ihl_offset_range_16++;
- if (attrib->rule_eq_bitmap & IPA_IHL_OFFSET_EQ_16)
- attrib->ihl_offset_eq_16_present = true;
- if (attrib->rule_eq_bitmap & IPA_IHL_OFFSET_EQ_32)
- attrib->ihl_offset_eq_32_present = true;
- if (attrib->rule_eq_bitmap & IPA_IHL_OFFSET_MEQ32_0)
- attrib->num_ihl_offset_meq_32++;
- if (attrib->rule_eq_bitmap & IPA_OFFSET_MEQ128_0)
- attrib->num_offset_meq_128++;
- if (attrib->rule_eq_bitmap & IPA_OFFSET_MEQ128_1)
- attrib->num_offset_meq_128++;
- if (attrib->rule_eq_bitmap & IPA_TC_EQ)
- attrib->tc_eq_present = true;
- if (attrib->rule_eq_bitmap & IPA_FL_EQ)
- attrib->fl_eq_present = true;
- if (attrib->rule_eq_bitmap & IPA_PROTOCOL_EQ)
- attrib->protocol_eq_present = true;
- if (attrib->rule_eq_bitmap & IPA_IHL_OFFSET_MEQ32_1)
- attrib->num_ihl_offset_meq_32++;
- if (attrib->rule_eq_bitmap & IPA_METADATA_COMPARE)
- attrib->metadata_meq32_present = true;
- if (attrib->rule_eq_bitmap & IPA_IS_FRAG)
- attrib->ipv4_frag_eq_present = true;
-
- extra_bytes = ipa3_calc_extra_wrd_bytes(attrib);
- /*
- * only 3 eq does not have extra word param, 13 out of 16 is the number
- * of equations that needs extra word param
- */
- if (extra_bytes > 13) {
- IPAERR("too much extra bytes\n");
- return -EPERM;
- } else if (extra_bytes > IPA_HW_TBL_HDR_WIDTH) {
- /* two extra words */
- extra = buf;
- rest = buf + IPA_HW_TBL_HDR_WIDTH * 2;
- } else if (extra_bytes > 0) {
- /* single exra word */
- extra = buf;
- rest = buf + IPA_HW_TBL_HDR_WIDTH;
- } else {
- /* no extra words */
- extra = NULL;
- rest = buf;
- }
- IPADBG("buf=0x%p extra=0x%p rest=0x%p\n", buf, extra, rest);
-
- num_offset_meq_32 = attrib->num_offset_meq_32;
- num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
- num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
- num_offset_meq_128 = attrib->num_offset_meq_128;
-
- if (attrib->tos_eq_present)
- attrib->tos_eq = *extra++;
-
- if (attrib->protocol_eq_present)
- attrib->protocol_eq = *extra++;
-
- if (attrib->tc_eq_present)
- attrib->tc_eq = *extra++;
-
- if (num_offset_meq_128) {
- attrib->offset_meq_128[0].offset = *extra++;
- for (i = 0; i < 8; i++)
- attrib->offset_meq_128[0].mask[i] = *rest++;
- for (i = 0; i < 8; i++)
- attrib->offset_meq_128[0].value[i] = *rest++;
- for (i = 8; i < 16; i++)
- attrib->offset_meq_128[0].mask[i] = *rest++;
- for (i = 8; i < 16; i++)
- attrib->offset_meq_128[0].value[i] = *rest++;
- num_offset_meq_128--;
- }
-
- if (num_offset_meq_128) {
- attrib->offset_meq_128[1].offset = *extra++;
- for (i = 0; i < 8; i++)
- attrib->offset_meq_128[1].mask[i] = *rest++;
- for (i = 0; i < 8; i++)
- attrib->offset_meq_128[1].value[i] = *rest++;
- for (i = 8; i < 16; i++)
- attrib->offset_meq_128[1].mask[i] = *rest++;
- for (i = 8; i < 16; i++)
- attrib->offset_meq_128[1].value[i] = *rest++;
- num_offset_meq_128--;
- }
-
- if (num_offset_meq_32) {
- attrib->offset_meq_32[0].offset = *extra++;
- attrib->offset_meq_32[0].mask = *((u32 *)rest);
- rest += 4;
- attrib->offset_meq_32[0].value = *((u32 *)rest);
- rest += 4;
- num_offset_meq_32--;
- }
- IPADBG("buf=0x%p extra=0x%p rest=0x%p\n", buf, extra, rest);
-
- if (num_offset_meq_32) {
- attrib->offset_meq_32[1].offset = *extra++;
- attrib->offset_meq_32[1].mask = *((u32 *)rest);
- rest += 4;
- attrib->offset_meq_32[1].value = *((u32 *)rest);
- rest += 4;
- num_offset_meq_32--;
- }
- IPADBG("buf=0x%p extra=0x%p rest=0x%p\n", buf, extra, rest);
-
- if (num_ihl_offset_meq_32) {
- attrib->ihl_offset_meq_32[0].offset = *extra++;
- attrib->ihl_offset_meq_32[0].mask = *((u32 *)rest);
- rest += 4;
- attrib->ihl_offset_meq_32[0].value = *((u32 *)rest);
- rest += 4;
- num_ihl_offset_meq_32--;
- }
-
- if (num_ihl_offset_meq_32) {
- attrib->ihl_offset_meq_32[1].offset = *extra++;
- attrib->ihl_offset_meq_32[1].mask = *((u32 *)rest);
- rest += 4;
- attrib->ihl_offset_meq_32[1].value = *((u32 *)rest);
- rest += 4;
- num_ihl_offset_meq_32--;
- }
-
- if (attrib->metadata_meq32_present) {
- attrib->metadata_meq32.mask = *((u32 *)rest);
- rest += 4;
- attrib->metadata_meq32.value = *((u32 *)rest);
- rest += 4;
- }
-
- if (num_ihl_offset_range_16) {
- attrib->ihl_offset_range_16[0].offset = *extra++;
- attrib->ihl_offset_range_16[0].range_high = *((u16 *)rest);
- rest += 2;
- attrib->ihl_offset_range_16[0].range_low = *((u16 *)rest);
- rest += 2;
- num_ihl_offset_range_16--;
- }
- if (num_ihl_offset_range_16) {
- attrib->ihl_offset_range_16[1].offset = *extra++;
- attrib->ihl_offset_range_16[1].range_high = *((u16 *)rest);
- rest += 2;
- attrib->ihl_offset_range_16[1].range_low = *((u16 *)rest);
- rest += 2;
- num_ihl_offset_range_16--;
- }
-
- if (attrib->ihl_offset_eq_32_present) {
- attrib->ihl_offset_eq_32.offset = *extra++;
- attrib->ihl_offset_eq_32.value = *((u32 *)rest);
- rest += 4;
- }
-
- if (attrib->ihl_offset_eq_16_present) {
- attrib->ihl_offset_eq_16.offset = *extra++;
- attrib->ihl_offset_eq_16.value = *((u16 *)rest);
- rest += 4;
- }
-
- if (attrib->fl_eq_present) {
- attrib->fl_eq = *((u32 *)rest);
- rest += 4;
- }
-
- IPADBG("before align buf=0x%p extra=0x%p rest=0x%p\n",
- buf, extra, rest);
- /* align to 64 bit */
- rest = (u8 *)(((unsigned long)rest + IPA_HW_RULE_START_ALIGNMENT) &
- ~IPA_HW_RULE_START_ALIGNMENT);
-
- IPADBG("after align buf=0x%p extra=0x%p rest=0x%p\n",
- buf, extra, rest);
-
- *rule_size = rest - buf;
-
- IPADBG("rest - buf=0x%llx\n", (u64) (rest - buf));
- IPADBG("*rule_size=0x%x\n", *rule_size);
-
- return 0;
-}
-
-/**
* ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
*
* @firmware: Structure which contains the FW data from the user space.
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
index f927f68cfeb6..b945eb06699c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_IPA3) += ipa_hal.o
-ipa_hal-y := ipahal.o ipahal_reg.o
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index fa34b57b2d73..a702a2e52e39 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -14,6 +14,7 @@
#include "ipahal.h"
#include "ipahal_i.h"
#include "ipahal_reg_i.h"
+#include "ipahal_fltrt_i.h"
struct ipahal_context *ipahal_ctx;
@@ -1242,12 +1243,14 @@ int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
return res;
}
-int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base)
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+ struct device *ipa_pdev)
{
int result;
- IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p\n",
- ipa_hw_type, base);
+ IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p ipa_pdev=%p\n",
+ ipa_hw_type, base, ipa_pdev);
ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
if (!ipahal_ctx) {
@@ -1268,8 +1271,15 @@ int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base)
goto bail_free_ctx;
}
+ if (!ipa_pdev) {
+ IPAHAL_ERR("invalid IPA platform device\n");
+ result = -EINVAL;
+ goto bail_free_ctx;
+ }
+
ipahal_ctx->hw_type = ipa_hw_type;
ipahal_ctx->base = base;
+ ipahal_ctx->ipa_pdev = ipa_pdev;
if (ipahal_reg_init(ipa_hw_type)) {
IPAHAL_ERR("failed to init ipahal reg\n");
@@ -1291,6 +1301,12 @@ int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base)
ipahal_hdr_init(ipa_hw_type);
+ if (ipahal_fltrt_init(ipa_hw_type)) {
+ IPAHAL_ERR("failed to init ipahal flt rt\n");
+ result = -EFAULT;
+ goto bail_free_ctx;
+ }
+
ipahal_debugfs_init();
return 0;
@@ -1305,7 +1321,19 @@ bail_err_exit:
void ipahal_destroy(void)
{
IPAHAL_DBG("Entry\n");
+ ipahal_fltrt_destroy();
ipahal_debugfs_remove();
kfree(ipahal_ctx);
ipahal_ctx = NULL;
}
+
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
+{
+ if (likely(mem)) {
+ dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+ mem->phys_base);
+ mem->size = 0;
+ mem->base = NULL;
+ mem->phys_base = 0;
+ }
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index ec7eec5ba963..654977511814 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -634,7 +634,9 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
*/
int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
-int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base);
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+ struct device *ipa_pdev);
void ipahal_destroy(void);
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
#endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
new file mode 100644
index 000000000000..e355d9db3777
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -0,0 +1,3200 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipc_logging.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "ipahal.h"
+#include "ipahal_fltrt.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_i.h"
+#include "../../ipa_common_i.h"
+
+/*
+ * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
+ * @support_hash: Is hashable tables supported
+ * @tbl_width: Width of table in bytes
+ * @sysaddr_alignment: System table address alignment
+ * @lcladdr_alignment: Local table offset alignment
+ * @blk_sz_alignment: Rules block size alignment
+ * @rule_start_alignment: Rule start address alignment
+ * @tbl_hdr_width: Width of the header structure in bytes
+ * @tbl_addr_mask: Masking for Table address
+ * @rule_max_prio: Max possible priority of a rule
+ * @rule_min_prio: Min possible priority of a rule
+ * @low_rule_id: Low value of Rule ID that can be used
+ * @rule_id_bit_len: Rule ID bit length
+ * @rule_buf_size: Max size rule may utilize.
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given raw table address, create H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the support equations
+ */
+struct ipahal_fltrt_obj {
+ bool support_hash;
+ u32 tbl_width;
+ u32 sysaddr_alignment;
+ u32 lcladdr_alignment;
+ u32 blk_sz_alignment;
+ u32 rule_start_alignment;
+ u32 tbl_hdr_width;
+ u32 tbl_addr_mask;
+ int rule_max_prio;
+ int rule_min_prio;
+ u32 low_rule_id;
+ u32 rule_id_bit_len;
+ u32 rule_buf_size;
+ u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+ u64 (*create_flt_bitmap)(u64 ep_bitmap);
+ u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+ void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+ int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+ int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+ int (*flt_generate_eq)(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+ int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+ int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+ u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+	/* At IPA3, global configuration is possible but not used */
+ return (ep_bitmap << 1) & ~0x1;
+}
+
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+ if (is_sys) {
+ if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+ IPAHAL_ERR(
+ "sys addr is not aligned accordingly addr=0x%pad\n",
+ &addr);
+ ipa_assert();
+ return 0;
+ }
+ } else {
+ if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+ IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+ addr);
+ ipa_assert();
+ return 0;
+ }
+ /*
+ * for local tables (at sram) offsets is used as tables
+ * addresses. offset need to be in 8B units
+ * (local address aligned) and left shifted to its place.
+ * Local bit need to be enabled.
+ */
+ addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+ addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+ addr += 1;
+ }
+
+ return addr;
+}
+
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+ IPAHAL_DBG("Parsing hwaddr 0x%llx\n", hwaddr);
+
+ *is_sys = !(hwaddr & 0x1);
+ hwaddr &= (~0ULL - 1);
+ if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+ IPAHAL_ERR(
+ "sys addr is not aligned accordingly addr=0x%pad\n",
+ &hwaddr);
+ ipa_assert();
+ return;
+ }
+
+ if (!*is_sys) {
+ hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+ hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+ }
+
+ *addr = hwaddr;
+}
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+ struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+ struct ipahal_flt_rule_entry *rule);
+
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+ (ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+ (BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity on the rule
+ * attribs before starting to build it
+ * checks that ipv4 attribs are not used on ipv6 and vice-versa
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+ enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+ if (ipt == IPA_IP_v4) {
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+ attrib->attrib_mask & IPA_FLT_TC ||
+ attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+ return -EPERM;
+ }
+ } else if (ipt == IPA_IP_v6) {
+ if (attrib->attrib_mask & IPA_FLT_TOS ||
+ attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+ return -EPERM;
+ }
+ } else {
+ IPAHAL_ERR("unsupported ip %d\n", ipt);
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+ u8 *start;
+ u16 en_rule = 0;
+
+ start = buf;
+ rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+ ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+ rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+ switch (params->hdr_type) {
+ case IPAHAL_RT_RULE_HDR_PROC_CTX:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+ rule_hdr->u.hdr.proc_ctx = 1;
+ ipa_assert_on(params->hdr_ofst & 31);
+ rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+ break;
+ case IPAHAL_RT_RULE_HDR_RAW:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+ rule_hdr->u.hdr.proc_ctx = 0;
+ ipa_assert_on(params->hdr_ofst & 3);
+ rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+ break;
+ case IPAHAL_RT_RULE_HDR_NONE:
+ rule_hdr->u.hdr.system = !params->hdr_lcl;
+ rule_hdr->u.hdr.proc_ctx = 0;
+ rule_hdr->u.hdr.hdr_offset = 0;
+ break;
+ default:
+ IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+ WARN_ON(1);
+ return -EINVAL;
+ };
+
+ ipa_assert_on(params->priority & ~0x3FF);
+ rule_hdr->u.hdr.priority = params->priority;
+ rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+ ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ rule_hdr->u.hdr.rule_id = params->id;
+
+ buf += sizeof(struct ipa3_0_rt_rule_hw_hdr);
+
+ if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+ &buf, &en_rule)) {
+ IPAHAL_ERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+ rule_hdr->u.hdr.en_rule = en_rule;
+
+ IPAHAL_DBG("en_rule 0x%x\n", en_rule);
+ ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (*hw_len == 0) {
+ *hw_len = buf - start;
+ } else if (*hw_len != (buf - start)) {
+ IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+ *hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+ u8 *start;
+ u16 en_rule = 0;
+
+ start = buf;
+ rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf;
+
+ switch (params->rule->action) {
+ case IPA_PASS_TO_ROUTING:
+ rule_hdr->u.hdr.action = 0x0;
+ break;
+ case IPA_PASS_TO_SRC_NAT:
+ rule_hdr->u.hdr.action = 0x1;
+ break;
+ case IPA_PASS_TO_DST_NAT:
+ rule_hdr->u.hdr.action = 0x2;
+ break;
+ case IPA_PASS_TO_EXCEPTION:
+ rule_hdr->u.hdr.action = 0x3;
+ break;
+ default:
+ IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+ rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+ rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+ rule_hdr->u.hdr.rsvd1 = 0;
+ rule_hdr->u.hdr.rsvd2 = 0;
+ rule_hdr->u.hdr.rsvd3 = 0;
+
+ ipa_assert_on(params->priority & ~0x3FF);
+ rule_hdr->u.hdr.priority = params->priority;
+ ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+ rule_hdr->u.hdr.rule_id = params->id;
+
+ buf += sizeof(struct ipa3_0_flt_rule_hw_hdr);
+
+ if (params->rule->eq_attrib_type) {
+ if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+ &params->rule->eq_attrib, &buf)) {
+ IPAHAL_ERR("fail to generate hw rule from eq\n");
+ return -EPERM;
+ }
+ en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+ } else {
+ if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+ &params->rule->attrib, &buf, &en_rule)) {
+ IPAHAL_ERR("fail to generate hw rule\n");
+ return -EPERM;
+ }
+ }
+ rule_hdr->u.hdr.en_rule = en_rule;
+
+ IPAHAL_DBG("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+ en_rule,
+ rule_hdr->u.hdr.action,
+ rule_hdr->u.hdr.rt_tbl_idx,
+ rule_hdr->u.hdr.retain_hdr);
+ IPAHAL_DBG("priority=%d, rule_id=%d\n",
+ rule_hdr->u.hdr.priority,
+ rule_hdr->u.hdr.rule_id);
+
+ ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+ if (*hw_len == 0) {
+ *hw_len = buf - start;
+ } else if (*hw_len != (buf - start)) {
+ IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+ *hw_len, (buf - start));
+ return -EPERM;
+ }
+
+ return 0;
+}
+
+/*
+ * This array contains the FLT/RT info for IPAv3 and later.
+ * All the information on IPAv3 are statically defined below.
+ * If information is missing regarding on some IPA version,
+ * the init function will fill it with the information from the previous
+ * IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
+ /* IPAv3 */
+ [IPA_HW_v3_0] = {
+ true,
+ IPA3_0_HW_TBL_WIDTH,
+ IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+ IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+ IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+ IPA3_0_HW_RULE_START_ALIGNMENT,
+ IPA3_0_HW_TBL_HDR_WIDTH,
+ IPA3_0_HW_TBL_ADDR_MASK,
+ IPA3_0_RULE_MAX_PRIORITY,
+ IPA3_0_RULE_MIN_PRIORITY,
+ IPA3_0_LOW_RULE_ID,
+ IPA3_0_RULE_ID_BIT_LEN,
+ IPA3_0_HW_RULE_BUF_SIZE,
+ ipa_write_64,
+ ipa_fltrt_create_flt_bitmap,
+ ipa_fltrt_create_tbl_addr,
+ ipa_fltrt_parse_tbl_addr,
+ ipa_rt_gen_hw_rule,
+ ipa_flt_gen_hw_rule,
+ ipa_flt_generate_eq,
+ ipa_rt_parse_hw_rule,
+ ipa_flt_parse_hw_rule,
+ {
+ [IPA_TOS_EQ] = 0,
+ [IPA_PROTOCOL_EQ] = 1,
+ [IPA_TC_EQ] = 2,
+ [IPA_OFFSET_MEQ128_0] = 3,
+ [IPA_OFFSET_MEQ128_1] = 4,
+ [IPA_OFFSET_MEQ32_0] = 5,
+ [IPA_OFFSET_MEQ32_1] = 6,
+ [IPA_IHL_OFFSET_MEQ32_0] = 7,
+ [IPA_IHL_OFFSET_MEQ32_1] = 8,
+ [IPA_METADATA_COMPARE] = 9,
+ [IPA_IHL_OFFSET_RANGE16_0] = 10,
+ [IPA_IHL_OFFSET_RANGE16_1] = 11,
+ [IPA_IHL_OFFSET_EQ_32] = 12,
+ [IPA_IHL_OFFSET_EQ_16] = 13,
+ [IPA_FL_EQ] = 14,
+ [IPA_IS_FRAG] = 15,
+ },
+ },
+};
+
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ if (ipa_fltrt_rule_generation_err_check(ipt, attrib))
+ return -EPERM;
+
+ if (ipt == IPA_IP_v4) {
+ if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) {
+ IPAHAL_ERR("failed to build ipv4 flt eq rule\n");
+ return -EPERM;
+ }
+ } else if (ipt == IPA_IP_v6) {
+ if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) {
+ IPAHAL_ERR("failed to build ipv6 flt eq rule\n");
+ return -EPERM;
+ }
+ } else {
+ IPAHAL_ERR("unsupported ip %d\n", ipt);
+ return -EPERM;
+ }
+
+ /*
+ * default "rule" means no attributes set -> map to
+ * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+ */
+ if (attrib->attrib_mask == 0) {
+ eq_atrb->rule_eq_bitmap = 0;
+ eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_OFFSET_MEQ32_0);
+ eq_atrb->offset_meq_32[0].offset = 0;
+ eq_atrb->offset_meq_32[0].mask = 0;
+ eq_atrb->offset_meq_32[0].value = 0;
+ }
+
+ return 0;
+}
+
+static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
+ u8 hdr_mac_addr_offset,
+ const uint8_t mac_addr_mask[ETH_ALEN],
+ const uint8_t mac_addr[ETH_ALEN])
+{
+ int i;
+
+ *extra = ipa_write_8(hdr_mac_addr_offset, *extra);
+
+ /* LSB MASK and ADDR */
+ *rest = ipa_write_64(0, *rest);
+ *rest = ipa_write_64(0, *rest);
+
+ /* MSB MASK and ADDR */
+ *rest = ipa_write_16(0, *rest);
+ for (i = 5; i >= 0; i--)
+ *rest = ipa_write_8(mac_addr_mask[i], *rest);
+ *rest = ipa_write_16(0, *rest);
+ for (i = 5; i >= 0; i--)
+ *rest = ipa_write_8(mac_addr[i], *rest);
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
+ const struct ipa_rule_attrib *attrib,
+ u8 **extra_wrds, u8 **rest_wrds)
+{
+ u8 *extra = *extra_wrds;
+ u8 *rest = *rest_wrds;
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ int rc = 0;
+
+ if (attrib->attrib_mask & IPA_FLT_TOS) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+ extra = ipa_write_8(attrib->u.v4.tos, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+ extra = ipa_write_8(attrib->u.v4.protocol, extra);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -14,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -8,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -22,
+ attrib->dst_mac_addr_mask,
+ attrib->dst_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_fltrt_generate_mac_addr_hw_rule(
+ &extra,
+ &rest,
+ -16,
+ attrib->src_mac_addr_mask,
+ attrib->src_mac_addr);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* 0 => offset of TOS in v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32((attrib->tos_mask << 16), rest);
+ rest = ipa_write_32((attrib->tos_value << 16), rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* 12 => offset of src ip in v4 header */
+ extra = ipa_write_8(12, extra);
+ rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest);
+ rest = ipa_write_32(attrib->u.v4.src_addr, rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* 16 => offset of dst ip in v4 header */
+ extra = ipa_write_8(16, extra);
+ rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+ rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ /* -2 => offset of ether type in L2 hdr */
+ extra = ipa_write_8((u8)-2, extra);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ rest = ipa_write_16(0, rest);
+ rest = ipa_write_16(htons(attrib->ether_type), rest);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of type after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->type, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 1 => offset of code after v4 header */
+ extra = ipa_write_8(1, extra);
+ rest = ipa_write_32(0xFF, rest);
+ rest = ipa_write_32(attrib->code, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ /* 0 => offset of SPI after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_32(0xFFFFFFFF, rest);
+ rest = ipa_write_32(attrib->spi, rest);
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+ rest = ipa_write_32(attrib->meta_data_mask, rest);
+ rest = ipa_write_32(attrib->meta_data, rest);
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port_hi, rest);
+ rest = ipa_write_16(attrib->src_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v4 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port_hi, rest);
+ rest = ipa_write_16(attrib->dst_port_lo, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 0 => offset of src port after v4 header */
+ extra = ipa_write_8(0, extra);
+ rest = ipa_write_16(attrib->src_port, rest);
+ rest = ipa_write_16(attrib->src_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ goto err;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ /* 2 => offset of dst port after v4 header */
+ extra = ipa_write_8(2, extra);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ rest = ipa_write_16(attrib->dst_port, rest);
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+ goto done;
+
+err:
+ rc = -EPERM;
+done:
+ *extra_wrds = extra;
+ *rest_wrds = rest;
+ return rc;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy_ip6() - build H/W equations for an IPv6 rule
+ * @en_rule: bitmap of enabled equations; the bit of every equation used
+ *  here is OR-ed in
+ * @attrib: S/W rule attributes to translate
+ * @extra_wrds: in/out cursor into the "extra words" stream (equation
+ *  offset bytes); advanced past everything written
+ * @rest_wrds: in/out cursor into the rest-of-rule stream (mask/value
+ *  bytes); advanced past everything written
+ *
+ * For each attribute present in attrib_mask, the next free equation of
+ * the matching class (ofst_meq128 / ofst_meq32 / ihl_ofst_meq32 /
+ * ihl_ofst_rng16) is claimed and its offset/mask/value bytes are
+ * serialized. On success both cursors are written back; on failure the
+ * cursors are still written back (partially advanced) and -EPERM is
+ * returned.
+ *
+ * Return: 0 on success, -EPERM when an equation class is exhausted or a
+ * port range is inverted.
+ */
+static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	/* v6 code below assumes no extension headers TODO: fix this */
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v6.next_hdr, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ);
+		extra = ipa_write_8(attrib->u.v6.tc, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 8 => offset of src ip in v6 header */
+		extra = ipa_write_8(8, extra);
+		/*
+		 * upper 64-bit word (mask then value) is written before the
+		 * lower one — H/W expected layout for a 128-bit compare
+		 */
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 24 => offset of dst ip in v6 header */
+		extra = ipa_write_8(24, extra);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 0 => offset of TOS in v6 header */
+		extra = ipa_write_8(0, extra);
+		/*
+		 * traffic-class bits sit at bit offset 20 of the first v6
+		 * word; everything else in the 128-bit window is masked off
+		 */
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_mask << 20), rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_value << 20), rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-14,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-8,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-22,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-16,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of type after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1 => offset of code after v6 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of SPI after v6 header FIXME */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		/* exact-match port: degenerate range [port, port] */
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		/* flow label is 20 bits wide */
+		rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
+			rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+/*
+ * ipa_fltrt_copy_mem() - byte-wise copy of cnt bytes from src to dst
+ *
+ * Returns the destination pointer advanced past the copied bytes, so
+ * callers can keep appending to the same buffer.
+ */
+static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt)
+{
+	for (; cnt; cnt--)
+		*dst++ = *src++;
+
+	return dst;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header)
+ * @ipt: IP address type (IPA_IP_v4 or IPA_IP_v6)
+ * @attrib: IPA rule attribute
+ * @buf: output buffer. Advance it after building the rule
+ * @en_rule: bitmap of the equations enabled for this rule (output)
+ *
+ * The rule body is serialized into two temporary streams - the "extra
+ * words" (per-equation offset bytes) and the "rest" words (mask/value
+ * bytes). Both are 64-bit padded and then copied, extra words first,
+ * into *buf.
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ * -ENOMEM: failed to allocate a temporary work buffer
+ */
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	int sz;
+	int rc = 0;
+	u8 *extra_wrd_buf;
+	u8 *rest_wrd_buf;
+	u8 *extra_wrd_start;
+	u8 *rest_wrd_start;
+	u8 *extra_wrd_i;
+	u8 *rest_wrd_i;
+
+	/* over-allocate so the working start can be aligned below */
+	sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!extra_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_extra_alloc;
+	}
+
+	sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!rest_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_rest_alloc;
+	}
+
+	/*
+	 * align work pointers up; IPA3_0_HW_RULE_START_ALIGNMENT is used
+	 * as an alignment mask (2^n - 1) here
+	 */
+	extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_start = (u8 *)((long)extra_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_start = (u8 *)((long)rest_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	extra_wrd_i = extra_wrd_start;
+	rest_wrd_i = rest_wrd_start;
+
+	rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
+	if (rc) {
+		IPAHAL_ERR("rule generation err check failed\n");
+		goto fail_err_check;
+	}
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv4 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv6 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		/*
+		 * rc must be set explicitly here: err check above returned
+		 * 0, and falling through with rc == 0 would wrongly report
+		 * success for an unsupported IP type
+		 */
+		rc = -EPERM;
+		goto fail_err_check;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		IPAHAL_DBG("building default rule\n");
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
+		extra_wrd_i = ipa_write_8(0, extra_wrd_i);  /* offset */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* mask */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* val */
+	}
+
+	IPAHAL_DBG("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+	IPAHAL_DBG("extra_word_2 0x%llx\n",
+		*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
+
+	/* pad both streams to 64-bit and append them: extra words first */
+	extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
+	sz = extra_wrd_i - extra_wrd_start;
+	IPAHAL_DBG("extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
+
+	rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
+	sz = rest_wrd_i - rest_wrd_start;
+	IPAHAL_DBG("non extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
+
+fail_err_check:
+	kfree(rest_wrd_buf);
+fail_rest_alloc:
+	kfree(extra_wrd_buf);
+fail_extra_alloc:
+	return rc;
+}
+
+
+/**
+ * ipa_fltrt_calc_extra_wrd_bytes() - count the extra-word bytes consumed
+ * by a set of equations
+ * @attrib: equation attributes
+ *
+ * Every meq/range equation and every present single-compare equation
+ * contributes one byte to the rule's "extra words" area (an offset or
+ * value byte); metadata and flow-label equations contribute none.
+ *
+ * Return value: the number of extra bytes (non-negative)
+ */
+static int ipa_fltrt_calc_extra_wrd_bytes(
+	const struct ipa_ipfltri_rule_eq *attrib)
+{
+	int num;
+
+	num = attrib->num_offset_meq_128 +
+		attrib->num_offset_meq_32 +
+		attrib->num_ihl_offset_meq_32 +
+		attrib->num_ihl_offset_range_16;
+
+	if (attrib->tos_eq_present)
+		num++;
+	if (attrib->protocol_eq_present)
+		num++;
+	if (attrib->tc_eq_present)
+		num++;
+	if (attrib->ihl_offset_eq_32_present)
+		num++;
+	if (attrib->ihl_offset_eq_16_present)
+		num++;
+
+	IPAHAL_DBG("extra bytes number %d\n", num);
+
+	return num;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy_from_eq() - serialize pre-built equations
+ * into a H/W rule body
+ * @attrib: the equation attributes to serialize
+ * @buf: output buffer cursor; advanced past the 64-bit padded rule body
+ *
+ * Writes the per-equation offset bytes ("extra" stream) and the
+ * mask/value bytes ("rest" stream) into *buf. The rest stream starts
+ * after one or two table-header-width extra words, depending on how
+ * many extra bytes the equations need.
+ *
+ * Return: 0 on success, -EPERM if the equations need more extra bytes
+ * than a rule can carry.
+ */
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+	int num_offset_meq_32 = attrib->num_offset_meq_32;
+	int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+	int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+	int num_offset_meq_128 = attrib->num_offset_meq_128;
+	int i;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib);
+	/* only 3 eq does not have extra word param, 13 out of 16 is the number
+	 * of equations that needs extra word param
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too much extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		extra = NULL;
+		rest = *buf;
+	}
+
+	/*
+	 * NOTE: the write order below (tos, protocol, tc, then the meq128 /
+	 * meq32 / ihl equations) must match the H/W expected layout; do not
+	 * reorder
+	 */
+	if (attrib->tos_eq_present)
+		extra = ipa_write_8(attrib->tos_eq, extra);
+
+	if (attrib->protocol_eq_present)
+		extra = ipa_write_8(attrib->protocol_eq, extra);
+
+	if (attrib->tc_eq_present)
+		extra = ipa_write_8(attrib->tc_eq, extra);
+
+	/*
+	 * meq128 mask/value are emitted as low 8 bytes of mask, low 8 of
+	 * value, then high 8 of mask, high 8 of value
+	 */
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[0].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[1].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset,
+		extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset,
+		extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (attrib->metadata_meq32_present) {
+		rest = ipa_write_32(attrib->metadata_meq32.mask, rest);
+		rest = ipa_write_32(attrib->metadata_meq32.value, rest);
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset,
+		extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset,
+		extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (attrib->ihl_offset_eq_32_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra);
+		rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest);
+	}
+
+	if (attrib->ihl_offset_eq_16_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra);
+		rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest);
+		/* pad the 16-bit compare value to 32 bits */
+		rest = ipa_write_16(0, rest);
+	}
+
+	if (attrib->fl_eq_present)
+		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
+
+	extra = ipa_pad_to_64(extra);
+	rest = ipa_pad_to_64(rest);
+	*buf = rest;
+
+	return 0;
+}
+
+/*
+ * ipa_flt_generate_mac_addr_eq() - fill a 128-bit meq equation with a MAC
+ * address compare
+ * @eq_atrb: equation attributes to fill
+ * @hdr_mac_addr_offset: offset of the MAC address relative to the L2 hdr
+ * @mac_addr_mask: MAC address mask bytes
+ * @mac_addr: MAC address bytes
+ * @ofst_meq128: index of the meq128 slot to populate
+ *
+ * Resulting layout: bytes 0-9 of mask/value are zero (don't-care); the
+ * 6-byte MAC occupies bytes 10-15 in reversed byte order.
+ */
+static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+	u8 hdr_mac_addr_offset,	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+	int i;
+
+	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+
+	/* zero the unused LSB and MSB-pad bytes (0-9) of mask and value */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 10);
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 10);
+
+	/* MAC bytes go into positions 15..10, i.e. reversed */
+	for (i = 0; i < ETH_ALEN; i++) {
+		eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
+			mac_addr_mask[i];
+		eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
+			mac_addr[i];
+	}
+}
+
+/*
+ * ipa_flt_generate_eq_ip4() - translate IPv4 S/W rule attributes into
+ * equation attributes
+ * @ip: IP type; not referenced inside this function (kept for signature
+ *  symmetry with the IPv6 variant — NOTE(review): confirm with callers)
+ * @attrib: S/W rule attributes to translate
+ * @eq_atrb: output equation attributes; equation slots, presence flags,
+ *  counts and the rule_eq_bitmap are filled here
+ *
+ * Each attribute present in attrib_mask claims the next free equation
+ * of its class (meq128 / meq32 / ihl_meq32 / ihl_rng16) and records its
+ * offset/mask/value in @eq_atrb.
+ *
+ * Return: 0 on success, -EPERM when an equation class is exhausted or a
+ * port range is inverted.
+ */
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		eq_atrb->tos_eq_present = 1;
+		eq_atrb->tos_eq = attrib->u.v4.tos;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v4.protocol;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* TOS bits sit at bit offset 16 of the first v4 word */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->tos_mask << 16;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->tos_value << 16;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 12 => offset of src ip in v4 header */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.src_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.src_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 16 => offset of dst ip in v4 header */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of type after v4 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1 => offset of code after v4 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0 => offset of SPI after v4 header */
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0 => offset of src port after v4 header */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2 => offset of dst port after v4 header */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* exact-match port: degenerate range [port, port] */
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+			ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	/* publish the consumed equation counts and the enable bitmap */
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ u8 ofst_meq32 = 0;
+ u8 ihl_ofst_rng16 = 0;
+ u8 ihl_ofst_meq32 = 0;
+ u8 ofst_meq128 = 0;
+ u16 eq_bitmap = 0;
+ u16 *en_rule = &eq_bitmap;
+
+ if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_PROTOCOL_EQ);
+ eq_atrb->protocol_eq_present = 1;
+ eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TC) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_TC_EQ);
+ eq_atrb->tc_eq_present = 1;
+ eq_atrb->tc_eq = attrib->u.v6.tc;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ /* use the same word order as in ipa v2 */
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->u.v6.src_addr_mask[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = attrib->u.v6.src_addr_mask[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = attrib->u.v6.src_addr_mask[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->u.v6.src_addr_mask[3];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->u.v6.src_addr[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = attrib->u.v6.src_addr[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = attrib->u.v6.src_addr[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->u.v6.src_addr[3];
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+ /* use the same word order as in ipa v2 */
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+ = attrib->u.v6.dst_addr_mask[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+ = attrib->u.v6.dst_addr_mask[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+ = attrib->u.v6.dst_addr_mask[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->u.v6.dst_addr_mask[3];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+ = attrib->u.v6.dst_addr[0];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+ = attrib->u.v6.dst_addr[1];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+ = attrib->u.v6.dst_addr[2];
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->u.v6.dst_addr[3];
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+ eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+ memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+ = attrib->tos_mask << 20;
+ memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
+ *(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+ 12) = attrib->tos_value << 20;
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -14 => offset of dst mac addr in Ethernet II hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -8 => offset of src mac addr in Ethernet II hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -22 => offset of dst mac addr in 802.3 hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+ attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+ IPAHAL_ERR("ran out of meq128 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq128[ofst_meq128]);
+
+ /* -16 => offset of src mac addr in 802.3 hdr */
+ ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+ attrib->src_mac_addr_mask, attrib->src_mac_addr,
+ ofst_meq128);
+
+ ofst_meq128++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+ IPAHAL_ERR("ran out of meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ofst_meq32[ofst_meq32]);
+ eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+ eq_atrb->offset_meq_32[ofst_meq32].mask =
+ htons(attrib->ether_type);
+ eq_atrb->offset_meq_32[ofst_meq32].value =
+ htons(attrib->ether_type);
+ ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_TYPE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->type;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_CODE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->code;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SPI) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+ ihl_ofst_meq32)) {
+ IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+ 0xFFFFFFFF;
+ eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+ attrib->spi;
+ ihl_ofst_meq32++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_METADATA_COMPARE);
+ eq_atrb->metadata_meq32_present = 1;
+ eq_atrb->metadata_meq32.offset = 0;
+ eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+ eq_atrb->metadata_meq32.value = attrib->meta_data;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->src_port_hi < attrib->src_port_lo) {
+ IPAHAL_ERR("bad src port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->src_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->src_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+ if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+ ihl_ofst_rng16)) {
+ IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+ return -EPERM;
+ }
+ if (attrib->dst_port_hi < attrib->dst_port_lo) {
+ IPAHAL_ERR("bad dst port range param\n");
+ return -EPERM;
+ }
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+ = attrib->dst_port_lo;
+ eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+ = attrib->dst_port_hi;
+ ihl_ofst_rng16++;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+ eq_atrb->fl_eq_present = 1;
+ eq_atrb->fl_eq = attrib->u.v6.flow_label;
+ }
+
+ if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+ *en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+ IPA_IS_FRAG);
+ eq_atrb->ipv4_frag_eq_present = 1;
+ }
+
+ eq_atrb->rule_eq_bitmap = *en_rule;
+ eq_atrb->num_offset_meq_32 = ofst_meq32;
+ eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+ eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+ eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+ return 0;
+}
+
+/*
+ * ipa_fltrt_parse_hw_rule_eq() - Decode the equation part of a raw H/W
+ *  flt/rt rule into its S/W representation.
+ * @addr: start address of the raw rule (rule header included)
+ * @hdr_sz: size of the rule header; equation data starts right after it
+ * @atrb: out param. Caller must pre-set rule_eq_bitmap; all other fields
+ *  (presence flags, counters, per-eq payloads) are filled here
+ * @rule_size: out param. Total rule size in bytes, including the padding
+ *  needed to align the start of the next rule
+ *
+ * Return: 0 on success, negative on bad input or malformed rule
+ */
+static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
+ struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size)
+{
+ u16 eq_bitmap;
+ int extra_bytes;
+ u8 *extra;
+ u8 *rest;
+ int i;
+ u8 dummy_extra_wrd;
+
+ if (!addr || !atrb || !rule_size) {
+ IPAHAL_ERR("Input error: addr=%p atrb=%p rule_size=%p\n",
+ addr, atrb, rule_size);
+ return -EINVAL;
+ }
+
+ eq_bitmap = atrb->rule_eq_bitmap;
+
+ IPAHAL_DBG("eq_bitmap=0x%x\n", eq_bitmap);
+
+ /* Translate the eq bitmap into presence flags / occurrence counters */
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
+ atrb->tos_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ))
+ atrb->protocol_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ))
+ atrb->tc_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0))
+ atrb->num_offset_meq_128++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1))
+ atrb->num_offset_meq_128++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0))
+ atrb->num_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1))
+ atrb->num_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0))
+ atrb->num_ihl_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1))
+ atrb->num_ihl_offset_meq_32++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE))
+ atrb->metadata_meq32_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0))
+ atrb->num_ihl_offset_range_16++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1))
+ atrb->num_ihl_offset_range_16++;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32))
+ atrb->ihl_offset_eq_32_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16))
+ atrb->ihl_offset_eq_16_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ))
+ atrb->fl_eq_present = true;
+ if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG))
+ atrb->ipv4_frag_eq_present = true;
+
+ /* Locate the two raw sub-blobs: "extra" holds the one-byte extra-word
+ * params, "rest" holds the mask/value/range payloads that follow them.
+ * With no extra words, "extra" points at a dummy byte so the *extra++
+ * reads below are harmless (none will execute in that case).
+ */
+ extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb);
+ /* only 3 eq does not have extra word param, 13 out of 16 is the number
+ * of equations that needs extra word param
+ */
+ if (extra_bytes > 13) {
+ IPAHAL_ERR("too much extra bytes\n");
+ return -EPERM;
+ } else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+ /* two extra words */
+ extra = addr + hdr_sz;
+ rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+ } else if (extra_bytes > 0) {
+ /* single extra word */
+ extra = addr + hdr_sz;
+ rest = extra + IPA3_0_HW_TBL_HDR_WIDTH;
+ } else {
+ /* no extra words */
+ dummy_extra_wrd = 0;
+ extra = &dummy_extra_wrd;
+ rest = addr + hdr_sz;
+ }
+ IPAHAL_DBG("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+
+ /* Consume the payloads in the same fixed order used when the rule
+ * was generated; the order must not change.
+ */
+ if (atrb->tos_eq_present)
+ atrb->tos_eq = *extra++;
+ if (atrb->protocol_eq_present)
+ atrb->protocol_eq = *extra++;
+ if (atrb->tc_eq_present)
+ atrb->tc_eq = *extra++;
+
+ /* meq128 payload is stored as 8B mask, 8B value, 8B mask, 8B value */
+ if (atrb->num_offset_meq_128 > 0) {
+ atrb->offset_meq_128[0].offset = *extra++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[0].mask[i] = *rest++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[0].value[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[0].mask[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[0].value[i] = *rest++;
+ }
+ if (atrb->num_offset_meq_128 > 1) {
+ atrb->offset_meq_128[1].offset = *extra++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[1].mask[i] = *rest++;
+ for (i = 0; i < 8; i++)
+ atrb->offset_meq_128[1].value[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[1].mask[i] = *rest++;
+ for (i = 8; i < 16; i++)
+ atrb->offset_meq_128[1].value[i] = *rest++;
+ }
+
+ if (atrb->num_offset_meq_32 > 0) {
+ atrb->offset_meq_32[0].offset = *extra++;
+ atrb->offset_meq_32[0].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->offset_meq_32[0].value = *((u32 *)rest);
+ rest += 4;
+ }
+ if (atrb->num_offset_meq_32 > 1) {
+ atrb->offset_meq_32[1].offset = *extra++;
+ atrb->offset_meq_32[1].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->offset_meq_32[1].value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ if (atrb->num_ihl_offset_meq_32 > 0) {
+ atrb->ihl_offset_meq_32[0].offset = *extra++;
+ atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->ihl_offset_meq_32[0].value = *((u32 *)rest);
+ rest += 4;
+ }
+ if (atrb->num_ihl_offset_meq_32 > 1) {
+ atrb->ihl_offset_meq_32[1].offset = *extra++;
+ atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest);
+ rest += 4;
+ atrb->ihl_offset_meq_32[1].value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ /* metadata eq has no extra-word byte - payload only */
+ if (atrb->metadata_meq32_present) {
+ atrb->metadata_meq32.mask = *((u32 *)rest);
+ rest += 4;
+ atrb->metadata_meq32.value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ /* range16 is stored high half-word first */
+ if (atrb->num_ihl_offset_range_16 > 0) {
+ atrb->ihl_offset_range_16[0].offset = *extra++;
+ atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest);
+ rest += 2;
+ atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest);
+ rest += 2;
+ }
+ if (atrb->num_ihl_offset_range_16 > 1) {
+ atrb->ihl_offset_range_16[1].offset = *extra++;
+ atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest);
+ rest += 2;
+ atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest);
+ rest += 2;
+ }
+
+ if (atrb->ihl_offset_eq_32_present) {
+ atrb->ihl_offset_eq_32.offset = *extra++;
+ atrb->ihl_offset_eq_32.value = *((u32 *)rest);
+ rest += 4;
+ }
+
+ /* NOTE(review): only 16 bits are read but 4 bytes are consumed -
+ * presumably the eq16 payload occupies a full word in the blob;
+ * confirm against the matching generate path
+ */
+ if (atrb->ihl_offset_eq_16_present) {
+ atrb->ihl_offset_eq_16.offset = *extra++;
+ atrb->ihl_offset_eq_16.value = *((u16 *)rest);
+ rest += 4;
+ }
+
+ /* flow label is 20 bits wide */
+ if (atrb->fl_eq_present) {
+ atrb->fl_eq = *((u32 *)rest);
+ atrb->fl_eq &= 0xfffff;
+ rest += 4;
+ }
+
+ /* Round up to the next rule start boundary; the alignment constant
+ * is used as a mask (alignment value minus one)
+ */
+ IPAHAL_DBG("before rule alignment rest=0x%p\n", rest);
+ rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
+ ~IPA3_0_HW_RULE_START_ALIGNMENT);
+ IPAHAL_DBG("after rule alignment rest=0x%p\n", rest);
+
+ *rule_size = rest - addr;
+ IPAHAL_DBG("rule_size=0x%x\n", *rule_size);
+
+ return 0;
+}
+
+/*
+ * ipa_rt_parse_hw_rule() - Parse a raw IPA3.0 routing rule into the
+ *  S/W entry representation.
+ * @addr: start address of the raw rule in the table
+ * @rule: out param. Filled with header fields; equation attribs and
+ *  total rule size are filled by ipa_fltrt_parse_hw_rule_eq()
+ *
+ * An all-zero rule header is the table terminator; it is reported as
+ * rule_size == 0 and success.
+ */
+static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
+{
+ struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+ struct ipa_ipfltri_rule_eq *atrb;
+
+ IPAHAL_DBG("Entry\n");
+
+ rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
+ atrb = &rule->eq_attrib;
+
+ IPAHAL_DBG("read hdr 0x%llx\n", rule_hdr->u.word);
+
+ if (rule_hdr->u.word == 0) {
+ /* table termintator - empty table */
+ rule->rule_size = 0;
+ return 0;
+ }
+
+ rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
+ /* hdr_offset field units differ by header type: presumably 32B units
+ * for proc-ctx (<<5) and 4B units for raw header (<<2) - matches the
+ * inverse of the generate path; confirm against H/W spec
+ */
+ if (rule_hdr->u.hdr.proc_ctx) {
+ rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+ rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+ } else {
+ rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+ rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+ }
+ /* "system" bit set means hdr resides in DDR, clear means local SRAM */
+ rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+ rule->priority = rule_hdr->u.hdr.priority;
+ rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+ rule->id = rule_hdr->u.hdr.rule_id;
+
+ /* equations are encoded after the rule header */
+ atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+ return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+ atrb, &rule->rule_size);
+}
+
+/*
+ * ipa_flt_parse_hw_rule() - Parse a raw IPA3.0 filter rule into the
+ *  S/W entry representation.
+ * @addr: start address of the raw rule in the table
+ * @rule: out param. Filled with action/header fields; equation attribs
+ *  and total rule size are filled by ipa_fltrt_parse_hw_rule_eq()
+ *
+ * An all-zero rule header is the table terminator; it is reported as
+ * rule_size == 0 and success.
+ */
+static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
+{
+ struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+ struct ipa_ipfltri_rule_eq *atrb;
+
+ IPAHAL_DBG("Entry\n");
+
+ rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
+ atrb = &rule->rule.eq_attrib;
+
+ if (rule_hdr->u.word == 0) {
+ /* table termintator - empty table */
+ rule->rule_size = 0;
+ return 0;
+ }
+
+ /* map raw 2-bit action encoding to the S/W enum */
+ switch (rule_hdr->u.hdr.action) {
+ case 0x0:
+ rule->rule.action = IPA_PASS_TO_ROUTING;
+ break;
+ case 0x1:
+ rule->rule.action = IPA_PASS_TO_SRC_NAT;
+ break;
+ case 0x2:
+ rule->rule.action = IPA_PASS_TO_DST_NAT;
+ break;
+ case 0x3:
+ rule->rule.action = IPA_PASS_TO_EXCEPTION;
+ break;
+ default:
+ /* unknown encoding: warn but keep the raw value so the
+ * caller can still see what H/W reported
+ */
+ IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+ WARN_ON(1);
+ rule->rule.action = rule_hdr->u.hdr.action;
+ }
+
+ rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+ rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+ rule->priority = rule_hdr->u.hdr.priority;
+ rule->id = rule_hdr->u.hdr.rule_id;
+
+ /* mark attribs as eq-based; equations follow the rule header */
+ atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+ rule->rule.eq_attrib_type = 1;
+ return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+ atrb, &rule->rule_size);
+}
+
+/*
+ * ipahal_fltrt_init() - Build the FLT/RT information table
+ * See ipahal_fltrt_objs[] comments
+ *
+ * Note: As global variables are initialized with zero, any un-overridden
+ * register entry will be zero. By this we recognize them.
+ */
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type)
+{
+ struct ipahal_fltrt_obj zero_obj;
+ int i;
+ struct ipa_mem_buffer *mem;
+ int rc = -EFAULT;
+
+ IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+ if (ipa_hw_type >= IPA_HW_MAX) {
+ IPAHAL_ERR("Invalid H/W type\n");
+ return -EFAULT;
+ }
+
+ /* Propagate each H/W version's FLT/RT info to the next version,
+ * unless that version explicitly overrides it (non-zero entry).
+ * Overridden entries are sanity-checked field by field.
+ */
+ memset(&zero_obj, 0, sizeof(zero_obj));
+ for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+ if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj,
+ sizeof(struct ipahal_fltrt_obj))) {
+ memcpy(&ipahal_fltrt_objs[i+1],
+ &ipahal_fltrt_objs[i],
+ sizeof(struct ipahal_fltrt_obj));
+ } else {
+ /*
+ * explicitly overridden FLT RT info
+ * Check validity
+ */
+ if (!ipahal_fltrt_objs[i+1].tbl_width) {
+ IPAHAL_ERR(
+ "Zero tbl width ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) {
+ IPAHAL_ERR(
+ "No tbl sysaddr alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) {
+ IPAHAL_ERR(
+ "No tbl lcladdr alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) {
+ IPAHAL_ERR(
+ "No blk sz alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rule_start_alignment) {
+ IPAHAL_ERR(
+ "No rule start alignment ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) {
+ IPAHAL_ERR(
+ "Zero tbl hdr width ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ /* fixed copy-paste: this check is on tbl_addr_mask,
+ * not on tbl_hdr_width
+ */
+ if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) {
+ IPAHAL_ERR(
+ "Zero tbl addr mask ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) {
+ IPAHAL_ERR(
+ "Too little bits for rule_id ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rule_buf_size) {
+ IPAHAL_ERR(
+ "zero rule buf size ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) {
+ IPAHAL_ERR(
+ "No write_val_to_hdr CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) {
+ IPAHAL_ERR(
+ "No create_flt_bitmap CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].create_tbl_addr) {
+ IPAHAL_ERR(
+ "No create_tbl_addr CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) {
+ IPAHAL_ERR(
+ "No parse_tbl_addr CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) {
+ IPAHAL_ERR(
+ "No rt_generate_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) {
+ IPAHAL_ERR(
+ "No flt_generate_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].flt_generate_eq) {
+ IPAHAL_ERR(
+ "No flt_generate_eq CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) {
+ IPAHAL_ERR(
+ "No rt_parse_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) {
+ IPAHAL_ERR(
+ "No flt_parse_hw_rule CB ipaver=%d\n",
+ i+1);
+ WARN_ON(1);
+ }
+ }
+ }
+
+ mem = &ipahal_ctx->empty_fltrt_tbl;
+
+ /* setup an empty table in system memory; This will
+ * be used, for example, to delete a rt tbl safely
+ */
+ mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width;
+ mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n",
+ mem->size);
+ return -ENOMEM;
+ }
+
+ /* sysaddr_alignment is used as a mask (alignment - 1) */
+ if (mem->phys_base &
+ ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) {
+ IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n",
+ &mem->phys_base);
+ rc = -EFAULT;
+ goto clear_empty_tbl;
+ }
+
+ memset(mem->base, 0, mem->size);
+ IPAHAL_DBG("empty table allocated in system memory");
+
+ return 0;
+
+clear_empty_tbl:
+ dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+ mem->phys_base);
+ return rc;
+}
+
+/* Release the empty flt/rt table DMA buffer allocated at init time */
+void ipahal_fltrt_destroy(void)
+{
+ struct ipa_mem_buffer *tbl;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!ipahal_ctx)
+ return;
+
+ tbl = &ipahal_ctx->empty_fltrt_tbl;
+ if (tbl->base)
+ dma_free_coherent(ipahal_ctx->ipa_pdev, tbl->size,
+ tbl->base, tbl->phys_base);
+}
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+ return obj->tbl_hdr_width;
+}
+
+/* Get the H/W local table (SRAM) address alignment
+ * Tables headers references to local tables via offsets in SRAM
+ * This function return the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+ return obj->lcladdr_alignment;
+}
+
+/*
+ * Rule priority is used to distinguish rules order
+ * at the integrated table consisting from hashable and
+ * non-hashable tables. Max priority are rules that once are
+ * scanned by IPA, IPA will not look for further rules and use it.
+ */
+int ipahal_get_rule_max_priority(void)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+ return obj->rule_max_prio;
+}
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio)
+{
+ struct ipahal_fltrt_obj *obj;
+ int new_prio;
+
+ if (!prio) {
+ IPAHAL_ERR("Invalid Input\n");
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ /* Priority logic is reverse. 0 priority considred max priority */
+ if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) {
+ IPAHAL_ERR("Invalid given priority %d\n", *prio);
+ return -EINVAL;
+ }
+
+ /* compute the candidate first; commit only if still in range */
+ new_prio = *prio + 1;
+ if (new_prio > obj->rule_min_prio) {
+ IPAHAL_ERR("Cannot decrease priority. Already on min\n");
+ return -EFAULT;
+ }
+
+ *prio = new_prio;
+ return 0;
+}
+
+/* Does the given ID represents rule miss?
+ * Rule miss ID, is always the max ID possible in the bit-pattern
+ */
+bool ipahal_is_rule_miss_id(u32 id)
+{
+ u32 bit_len;
+ u32 miss_id;
+
+ bit_len = ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len;
+ miss_id = (1U << bit_len) - 1;
+ return id == miss_id;
+}
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+ return BIT(obj->rule_id_bit_len - 1);
+}
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+ return obj->low_rule_id;
+}
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ * Creates routing header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ *
+ * Return: 0 on success, negative on bad input, space shortage or
+ * allocation failure. On success the caller owns mem and must free it.
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+{
+ int i;
+ u64 addr;
+ struct ipahal_fltrt_obj *obj;
+
+ IPAHAL_DBG("Entry\n");
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!tbls_num || !nhash_hdr_size || !mem) {
+ IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+ tbls_num, nhash_hdr_size, mem);
+ return -EINVAL;
+ }
+ /* hash hdr size only matters on H/W that supports hashed tables */
+ if (obj->support_hash && !hash_hdr_size) {
+ IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+ return -EINVAL;
+ }
+
+ /* verify all table entries fit in the given SRAM header blocks */
+ if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) {
+ IPAHAL_ERR("No enough spc at non-hash hdr blk for all tbls\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ if (obj->support_hash &&
+ (hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) {
+ IPAHAL_ERR("No enough spc at hash hdr blk for all tbls\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mem->size = tbls_num * obj->tbl_hdr_width;
+ mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ /* every header entry points to the shared empty table on DDR;
+ * the loop writes all of mem, so no memset is needed
+ */
+ addr = obj->create_tbl_addr(true,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+ for (i = 0; i < tbls_num; i++)
+ obj->write_val_to_hdr(addr,
+ mem->base + i * obj->tbl_hdr_width);
+
+ return 0;
+}
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ * Creates filter header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ * should be: bit0->EP0, bit1->EP1
+ * If bitmap is zero -> create tbl without bitmap entry
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ *
+ * Return: 0 on success, negative on bad input, space shortage or
+ * allocation failure. On success the caller owns mem and must free it.
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+{
+ int flt_spc;
+ u64 flt_bitmap;
+ int i;
+ u64 addr;
+ struct ipahal_fltrt_obj *obj;
+
+ IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!tbls_num || !nhash_hdr_size || !mem) {
+ IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+ tbls_num, nhash_hdr_size, mem);
+ return -EINVAL;
+ }
+ if (obj->support_hash && !hash_hdr_size) {
+ IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+ return -EINVAL;
+ }
+
+ /* space check: one header word is reserved for the EP bitmap
+ * (when present), the rest hold one entry per table
+ */
+ if (obj->support_hash) {
+ flt_spc = hash_hdr_size;
+ /* bitmap word */
+ if (ep_bitmap)
+ flt_spc -= obj->tbl_hdr_width;
+ flt_spc /= obj->tbl_hdr_width;
+ if (tbls_num > flt_spc) {
+ IPAHAL_ERR("space for hash flt hdr is too small\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+ }
+
+ flt_spc = nhash_hdr_size;
+ /* bitmap word */
+ if (ep_bitmap)
+ flt_spc -= obj->tbl_hdr_width;
+ flt_spc /= obj->tbl_hdr_width;
+ if (tbls_num > flt_spc) {
+ IPAHAL_ERR("space for non-hash flt hdr is too small\n");
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mem->size = tbls_num * obj->tbl_hdr_width;
+ if (ep_bitmap)
+ mem->size += obj->tbl_hdr_width;
+ mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+ &mem->phys_base, GFP_KERNEL);
+ if (!mem->base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+ return -ENOMEM;
+ }
+
+ /* bitmap word (if any) occupies the first header slot */
+ if (ep_bitmap) {
+ flt_bitmap = obj->create_flt_bitmap(ep_bitmap);
+ IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap);
+ obj->write_val_to_hdr(flt_bitmap, mem->base);
+ }
+
+ addr = obj->create_tbl_addr(true,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+
+ /* table entries start at slot 1 when a bitmap word is present;
+ * every slot is written, so no memset is needed
+ */
+ if (ep_bitmap) {
+ for (i = 1; i <= tbls_num; i++)
+ obj->write_val_to_hdr(addr,
+ mem->base + i * obj->tbl_hdr_width);
+ } else {
+ for (i = 0; i < tbls_num; i++)
+ obj->write_val_to_hdr(addr,
+ mem->base + i * obj->tbl_hdr_width);
+ }
+
+ return 0;
+}
+
+/*
+ * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for
+ * flt/rt tables headers to be filled into sram. Init each table to point
+ * to empty system table
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_init_tbl_hdr(
+ struct ipahal_fltrt_alloc_imgs_params *params)
+{
+ u64 addr;
+ int i;
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!params) {
+ IPAHAL_ERR("Input error: params=%p\n", params);
+ return -EINVAL;
+ }
+
+ params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+ params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+ params->nhash_hdr.size,
+ &params->nhash_hdr.phys_base, GFP_KERNEL);
+ /* check the returned base address, not the requested size:
+ * a NULL base is the allocation-failure indication
+ */
+ if (!params->nhash_hdr.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->nhash_hdr.size);
+ goto nhash_alloc_fail;
+ }
+
+ if (obj->support_hash) {
+ params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+ params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+ params->hash_hdr.size, &params->hash_hdr.phys_base,
+ GFP_KERNEL);
+ if (!params->hash_hdr.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->hash_hdr.size);
+ goto hash_alloc_fail;
+ }
+ }
+
+ /* point every table entry at the shared empty table on DDR */
+ addr = obj->create_tbl_addr(true,
+ ipahal_ctx->empty_fltrt_tbl.phys_base);
+ for (i = 0; i < params->tbls_num; i++) {
+ obj->write_val_to_hdr(addr,
+ params->nhash_hdr.base + i * obj->tbl_hdr_width);
+ if (obj->support_hash)
+ obj->write_val_to_hdr(addr,
+ params->hash_hdr.base +
+ i * obj->tbl_hdr_width);
+ }
+
+ return 0;
+
+hash_alloc_fail:
+ ipahal_free_dma_mem(&params->nhash_hdr);
+nhash_alloc_fail:
+ return -ENOMEM;
+}
+
+/*
+ * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
+ * local flt/rt tables bodies to be filled into sram
+ * @params: Allocate IN and OUT params
+ *
+ * A non-zero nhash_bdy.size / hash_bdy.size on entry signals that the
+ * corresponding body is needed; the actual allocation size is computed
+ * here from the total effective sizes plus alignment/terminator padding.
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_lcl_bdy(
+ struct ipahal_fltrt_alloc_imgs_params *params)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ /* The HAL allocates larger sizes than the given effective ones
+ * for alignments and border indications
+ */
+ IPAHAL_DBG("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+ params->total_sz_lcl_hash_tbls,
+ params->total_sz_lcl_nhash_tbls);
+
+ IPAHAL_DBG("lcl tbl bdy count: hash=%u nhash=%u\n",
+ params->num_lcl_hash_tbls,
+ params->num_lcl_nhash_tbls);
+
+ /* Align the sizes to coop with termination word
+ * and H/W local table start offset alignment
+ */
+ if (params->nhash_bdy.size) {
+ params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+ /* for table terminator */
+ params->nhash_bdy.size += obj->tbl_width *
+ params->num_lcl_nhash_tbls;
+ /* align the start of local rule-set */
+ params->nhash_bdy.size += obj->lcladdr_alignment *
+ params->num_lcl_nhash_tbls;
+ /* SRAM block size alignment - alignment value used as mask */
+ params->nhash_bdy.size += obj->blk_sz_alignment;
+ params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
+
+ IPAHAL_DBG("nhash lcl tbl bdy total h/w size = %u\n",
+ params->nhash_bdy.size);
+
+ params->nhash_bdy.base = dma_alloc_coherent(
+ ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
+ &params->nhash_bdy.phys_base, GFP_KERNEL);
+ if (!params->nhash_bdy.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->nhash_bdy.size);
+ return -ENOMEM;
+ }
+ memset(params->nhash_bdy.base, 0, params->nhash_bdy.size);
+ }
+
+ /* a hash body request on H/W without hash support is ignored */
+ if (!obj->support_hash && params->hash_bdy.size) {
+ IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n");
+ WARN_ON(1);
+ }
+
+ if (obj->support_hash && params->hash_bdy.size) {
+ params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+ /* for table terminator */
+ params->hash_bdy.size += obj->tbl_width *
+ params->num_lcl_hash_tbls;
+ /* align the start of local rule-set */
+ params->hash_bdy.size += obj->lcladdr_alignment *
+ params->num_lcl_hash_tbls;
+ /* SRAM block size alignment */
+ params->hash_bdy.size += obj->blk_sz_alignment;
+ params->hash_bdy.size &= ~(obj->blk_sz_alignment);
+
+ IPAHAL_DBG("hash lcl tbl bdy total h/w size = %u\n",
+ params->hash_bdy.size);
+
+ params->hash_bdy.base = dma_alloc_coherent(
+ ipahal_ctx->ipa_pdev, params->hash_bdy.size,
+ &params->hash_bdy.phys_base, GFP_KERNEL);
+ if (!params->hash_bdy.base) {
+ IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+ params->hash_bdy.size);
+ goto hash_bdy_fail;
+ }
+ memset(params->hash_bdy.base, 0, params->hash_bdy.size);
+ }
+
+ return 0;
+
+hash_bdy_fail:
+ /* roll back the nhash body allocation if it was made */
+ if (params->nhash_bdy.size)
+ ipahal_free_dma_mem(&params->nhash_bdy);
+
+ return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ * Used usually during commit.
+ * Allocates header structures and init them to point to empty DDR table
+ * Allocate body strucutres for local bodies tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+ struct ipahal_fltrt_alloc_imgs_params *params)
+{
+ IPAHAL_DBG("Entry\n");
+
+ /* Input validation */
+ if (!params) {
+ IPAHAL_ERR("Input err: no params\n");
+ return -EINVAL;
+ }
+ if (params->ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ return -EINVAL;
+ }
+
+ if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
+ IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+ return -ENOMEM;
+ }
+
+ if (!ipa_fltrt_alloc_lcl_bdy(params))
+ return 0;
+
+ /* body allocation failed - roll back the header allocations */
+ IPAHAL_ERR("fail to alloc tbl bodies\n");
+ ipahal_free_dma_mem(&params->nhash_hdr);
+ if (params->hash_hdr.size)
+ ipahal_free_dma_mem(&params->hash_hdr);
+ return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the
+ * allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ *
+ * Return: 0 on success, -EINVAL on bad input, -ENOMEM on allocation failure,
+ * -EPERM if the DMA address violates the H/W alignment requirement.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
+{
+ struct ipahal_fltrt_obj *obj;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!tbl_mem) {
+ IPAHAL_ERR("Input err\n");
+ return -EINVAL;
+ }
+
+ if (!tbl_mem->size) {
+ IPAHAL_ERR("Input err: zero table size\n");
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ /* add word for rule-set terminator */
+ tbl_mem->size += obj->tbl_width;
+
+ tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
+ &tbl_mem->phys_base, GFP_KERNEL);
+ if (!tbl_mem->base) {
+ IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
+ tbl_mem->size);
+ return -ENOMEM;
+ }
+ /* sysaddr_alignment is a mask; any set bit means misaligned start */
+ if (tbl_mem->phys_base & obj->sysaddr_alignment) {
+ IPAHAL_ERR("sys rt tbl address is not aligned\n");
+ goto align_err;
+ }
+
+ memset(tbl_mem->base, 0, tbl_mem->size);
+
+ return 0;
+
+align_err:
+ ipahal_free_dma_mem(tbl_mem);
+ return -EPERM;
+}
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ * Given table addr/offset, adapt it to IPA H/W format and write it
+ * to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ *
+ * Return: 0 on success, -EINVAL on bad input.
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+ bool is_sys)
+{
+ struct ipahal_fltrt_obj *obj;
+ u64 hwaddr;
+ u8 *hdr;
+
+ IPAHAL_DBG("Entry\n");
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!addr || !hdr_base) {
+ IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%p\n",
+ addr, hdr_base);
+ return -EINVAL;
+ }
+
+ /* Each header entry is tbl_hdr_width bytes wide; step to entry idx */
+ hdr = (u8 *)hdr_base;
+ hdr += hdr_idx * obj->tbl_hdr_width;
+ /* Convert to H/W representation (system addr vs local offset) */
+ hwaddr = obj->create_tbl_addr(is_sys, addr);
+ obj->write_val_to_hdr(hwaddr, hdr);
+
+ return 0;
+}
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ * content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ *
+ * Return: 0 on success, -EINVAL on bad input.
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+ bool *is_sys)
+{
+ struct ipahal_fltrt_obj *obj;
+ u64 hwaddr;
+ u8 *hdr;
+
+ IPAHAL_DBG("Entry\n");
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (!addr || !hdr_base || !is_sys) {
+ IPAHAL_ERR("Input err: addr=%p hdr_base=%p is_sys=%p\n",
+ addr, hdr_base, is_sys);
+ return -EINVAL;
+ }
+
+ hdr = (u8 *)hdr_base;
+ hdr += hdr_idx * obj->tbl_hdr_width;
+ /* Raw u64 read; assumes hdr_base is 8-byte aligned - TODO confirm */
+ hwaddr = *((u64 *)hdr);
+ obj->parse_tbl_addr(hwaddr, addr, is_sys);
+ return 0;
+}
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ *
+ * Return: 0 on success, -EINVAL on bad input, -ENOMEM on temp buf
+ * allocation failure, -EPERM if the given buf is not rule-start aligned.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipahal_fltrt_obj *obj;
+ u8 *tmp = NULL;
+ int rc;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!params || !hw_len) {
+ IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+ return -EINVAL;
+ }
+ if (!params->rule) {
+ IPAHAL_ERR("Input err: invalid rule\n");
+ return -EINVAL;
+ }
+ if (params->ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (buf == NULL) {
+ /* Size-query mode: build the rule into a scratch buffer */
+ tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+ if (!tmp) {
+ IPAHAL_ERR("failed to alloc %u bytes\n",
+ obj->rule_buf_size);
+ return -ENOMEM;
+ }
+ buf = tmp;
+ } else if ((long)buf & obj->rule_start_alignment) {
+ IPAHAL_ERR("buf is not rule start aligned\n");
+ return -EPERM;
+ }
+
+ rc = obj->rt_generate_hw_rule(params, hw_len, buf);
+ if (!tmp && !rc) {
+ /* write the rule-set terminator */
+ memset(buf + *hw_len, 0, obj->tbl_width);
+ }
+
+ kfree(tmp);
+
+ return rc;
+}
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ *
+ * Return: 0 on success, -EINVAL on bad input, -ENOMEM on temp buf
+ * allocation failure, -EPERM if the given buf is not rule-start aligned.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf)
+{
+ struct ipahal_fltrt_obj *obj;
+ u8 *tmp = NULL;
+ int rc;
+
+ IPAHAL_DBG("Entry\n");
+
+ if (!params || !hw_len) {
+ IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+ return -EINVAL;
+ }
+ if (!params->rule) {
+ IPAHAL_ERR("Input err: invalid rule\n");
+ return -EINVAL;
+ }
+ if (params->ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+ return -EINVAL;
+ }
+
+ obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+ if (buf == NULL) {
+ /* Size-query mode: build the rule into a scratch buffer */
+ tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+ if (!tmp) {
+ IPAHAL_ERR("failed to alloc %u bytes\n",
+ obj->rule_buf_size);
+ return -ENOMEM;
+ }
+ buf = tmp;
+ } else if ((long)buf & obj->rule_start_alignment) {
+ IPAHAL_ERR("buf is not rule start aligned\n");
+ return -EPERM;
+ }
+
+ rc = obj->flt_generate_hw_rule(params, hw_len, buf);
+ if (!tmp && !rc) {
+ /* write the rule-set terminator */
+ memset(buf + *hw_len, 0, obj->tbl_width);
+ }
+
+ kfree(tmp);
+
+ return rc;
+}
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ * Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ * for inter-subsystems rules exchange.
+ *
+ * Return: 0 on success, -EINVAL on bad input, else the H/W-specific
+ * flt_generate_eq callback's return code.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (ipt >= IPA_IP_MAX) {
+ IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+ return -EINVAL;
+ }
+
+ if (!attrib || !eq_atrb) {
+ IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+ attrib, eq_atrb);
+ return -EINVAL;
+ }
+
+ /* Delegate to the H/W-type specific equation generator */
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt,
+ attrib, eq_atrb);
+
+}
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ *
+ * Return: 0 on success, -EINVAL on bad input, else the H/W-specific
+ * rt_parse_hw_rule callback's return code.
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_rt_rule_entry *rule)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (!rule_addr || !rule) {
+ IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+ rule_addr, rule);
+ return -EINVAL;
+ }
+
+ /* Delegate to the H/W-type specific parser */
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule(
+ rule_addr, rule);
+}
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ *
+ * Return: 0 on success, -EINVAL on bad input, else the H/W-specific
+ * flt_parse_hw_rule callback's return code.
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_flt_rule_entry *rule)
+{
+ IPAHAL_DBG("Entry\n");
+
+ if (!rule_addr || !rule) {
+ IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+ rule_addr, rule);
+ return -EINVAL;
+ }
+
+ /* Delegate to the H/W-type specific parser */
+ return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule(
+ rule_addr, rule);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
new file mode 100644
index 000000000000..ee2704d62b50
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -0,0 +1,288 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_H_
+#define _IPAHAL_FLTRT_H_
+
+/*
+ * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
+ * The allocation logic will allocate DMA memory representing the header.
+ * If the bodies are local (SRAM) the allocation will allocate
+ * a DMA buffers that would contain the content of these local tables in raw
+ * @ipt: IP version type
+ * @tbls_num: Number of tables to represent by the header
+ * @num_lcl_hash_tbls: Number of local (sram) hashable tables
+ * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
+ * @total_sz_lcl_hash_tbls: Total size of local hashable tables
+ * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
+ * @hash_hdr/nhash_hdr: OUT params for the header structures
+ * @hash_bdy/nhash_bdy: OUT params for the local body structures
+ */
+struct ipahal_fltrt_alloc_imgs_params {
+ enum ipa_ip_type ipt;
+ u32 tbls_num;
+ u32 num_lcl_hash_tbls;
+ u32 num_lcl_nhash_tbls;
+ u32 total_sz_lcl_hash_tbls;
+ u32 total_sz_lcl_nhash_tbls;
+
+ /* OUT PARAMS */
+ struct ipa_mem_buffer hash_hdr;
+ struct ipa_mem_buffer nhash_hdr;
+ struct ipa_mem_buffer hash_bdy;
+ struct ipa_mem_buffer nhash_bdy;
+};
+
+/*
+ * enum ipahal_rt_rule_hdr_type - Header type used in rt rules
+ * @IPAHAL_RT_RULE_HDR_NONE: No header is used
+ * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
+ * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
+ */
+enum ipahal_rt_rule_hdr_type {
+ IPAHAL_RT_RULE_HDR_NONE,
+ IPAHAL_RT_RULE_HDR_RAW,
+ IPAHAL_RT_RULE_HDR_PROC_CTX,
+};
+
+/*
+ * struct ipahal_rt_rule_gen_params - Params for generating rt rule
+ * @ipt: IP family version
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_type: Header type to be used
+ * @hdr_lcl: Is the header located in local (SRAM) or system memory?
+ * @hdr_ofst: Offset of the header in the header table
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_rt_rule_gen_params {
+ enum ipa_ip_type ipt;
+ int dst_pipe_idx;
+ enum ipahal_rt_rule_hdr_type hdr_type;
+ bool hdr_lcl;
+ u32 hdr_ofst;
+ u32 priority;
+ u32 id;
+ const struct ipa_rt_rule *rule;
+};
+
+/*
+ * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_lcl: Is the referenced header located in SRAM or system memory?
+ * @hdr_ofst: Offset of the header in the header table
+ * @hdr_type: Header type to be used
+ * @priority: Rule priority
+ * @retain_hdr: to retain the removed header in header removal
+ * @id: Rule ID
+ * @eq_attrib: Equations and their params in the rule
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_rt_rule_entry {
+ int dst_pipe_idx;
+ bool hdr_lcl;
+ u32 hdr_ofst;
+ enum ipahal_rt_rule_hdr_type hdr_type;
+ u32 priority;
+ bool retain_hdr;
+ u32 id;
+ struct ipa_ipfltri_rule_eq eq_attrib;
+ u32 rule_size;
+};
+
+/*
+ * struct ipahal_flt_rule_gen_params - Params for generating flt rule
+ * @ipt: IP family version
+ * @rt_tbl_idx: Routing table the rule pointing to
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_flt_rule_gen_params {
+ enum ipa_ip_type ipt;
+ u32 rt_tbl_idx;
+ u32 priority;
+ u32 id;
+ const struct ipa_flt_rule *rule;
+};
+
+/*
+ * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
+ * @rule: Rule info
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_flt_rule_entry {
+ struct ipa_flt_rule rule;
+ u32 priority;
+ u32 id;
+ u32 rule_size;
+};
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void);
+
+/* Get the H/W local table (SRAM) address alignment
+ * Tables headers references to local tables via offsets in SRAM
+ * This function return the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void);
+
+/*
+ * Rule priority is used to distinguish rules order
+ * at the integrated table consisting from hashable and
+ * non-hashable tables. Max priority are rules that once are
+ * scanned by IPA, IPA will not look for further rules and use it.
+ */
+int ipahal_get_rule_max_priority(void);
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio);
+
+/* Does the given ID represents rule miss? */
+bool ipahal_is_rule_miss_id(u32 id);
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void);
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void);
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ * Creates routing header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ * Creates filter header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ * should be: bit0->EP0, bit1->EP1
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+ u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ * Used usually during commit.
+ * Allocates header structures and init them to point to empty DDR table
+ * Allocate body structures for local body tables
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+ struct ipahal_fltrt_alloc_imgs_params *params);
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. size for effective table size. Pointer, for the
+ * allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ * Given table addr/offset, adapt it to IPA H/W format and write it
+ * to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+ bool is_sys);
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ * content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+ bool *is_sys);
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ * be built in internal temp buf. This is used e.g. to get the rule size
+ * only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+ u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ * Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ * for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+ const struct ipa_rule_attrib *attrib,
+ struct ipa_ipfltri_rule_eq *eq_atrb);
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_rt_rule_entry *rule);
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ * Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+ struct ipahal_flt_rule_entry *rule);
+
+
+#endif /* _IPAHAL_FLTRT_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
new file mode 100644
index 000000000000..0c0637d4dd60
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_I_H_
+#define _IPAHAL_FLTRT_I_H_
+
+/*
+ * enum ipa_fltrt_equations - RULE equations
+ * These are named values for the equations that can be used
+ * The HAL layer holds mapping between these names and H/W
+ * presentation.
+ */
+enum ipa_fltrt_equations {
+ IPA_TOS_EQ,
+ IPA_PROTOCOL_EQ,
+ IPA_TC_EQ,
+ IPA_OFFSET_MEQ128_0,
+ IPA_OFFSET_MEQ128_1,
+ IPA_OFFSET_MEQ32_0,
+ IPA_OFFSET_MEQ32_1,
+ IPA_IHL_OFFSET_MEQ32_0,
+ IPA_IHL_OFFSET_MEQ32_1,
+ IPA_METADATA_COMPARE,
+ IPA_IHL_OFFSET_RANGE16_0,
+ IPA_IHL_OFFSET_RANGE16_1,
+ IPA_IHL_OFFSET_EQ_32,
+ IPA_IHL_OFFSET_EQ_16,
+ IPA_FL_EQ,
+ IPA_IS_FRAG,
+ IPA_EQ_MAX,
+};
+
+/* Width and Alignment values for H/W structures.
+ * Specific for IPA version.
+ */
+#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
+#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_WIDTH (8)
+#define IPA3_0_HW_TBL_HDR_WIDTH (8)
+#define IPA3_0_HW_TBL_ADDR_MASK (127)
+#define IPA3_0_HW_RULE_BUF_SIZE (256)
+#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
+
+
+/*
+ * Rules Priority.
+ * Needed due to rules classification to hashable and non-hashable.
+ * Higher priority is lower in number. i.e. 0 is highest priority
+ */
+#define IPA3_0_RULE_MAX_PRIORITY (0)
+#define IPA3_0_RULE_MIN_PRIORITY (1023)
+
+/*
+ * RULE ID, bit length (e.g. 10 bits).
+ */
+#define IPA3_0_RULE_ID_BIT_LEN (10)
+#define IPA3_0_LOW_RULE_ID (1)
+
+/**
+ * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is the referenced header in lcl (SRAM) or sys memory?
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ * header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ * at the integrated table consisting from hashable and
+ * non-hashable parts
+ * @rsvd1: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ * as part of header removal. This will be done as part of
+ * header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd2: reserved bits
+ */
+struct ipa3_0_rt_rule_hw_hdr {
+ union {
+ u64 word;
+ struct {
+ u64 en_rule:16;
+ u64 pipe_dest_idx:5;
+ u64 system:1;
+ u64 hdr_offset:9;
+ u64 proc_ctx:1;
+ u64 priority:10;
+ u64 rsvd1:5;
+ u64 retain_hdr:1;
+ u64 rule_id:10;
+ u64 rsvd2:6;
+ } hdr;
+ } u;
+};
+
+/**
+ * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: added to add back to the packet the header removed
+ * as part of header removal. This will be done as part of
+ * header insertion block.
+ * @rsvd1: reserved bits
+ * @priority: Rule priority. Added to distinguish rules order
+ * at the integrated table consisting from hashable and
+ * non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa3_0_flt_rule_hw_hdr {
+ union {
+ u64 word;
+ struct {
+ u64 en_rule:16;
+ u64 action:5;
+ u64 rt_tbl_idx:5;
+ u64 retain_hdr:1;
+ u64 rsvd1:5;
+ u64 priority:10;
+ u64 rsvd2:6;
+ u64 rule_id:10;
+ u64 rsvd3:6;
+ } hdr;
+ } u;
+};
+
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
+void ipahal_fltrt_destroy(void);
+
+#endif /* _IPAHAL_FLTRT_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 6a22240e951b..4c4b6661e8fc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -53,11 +53,15 @@
* I/O memory mapped address.
* Controlled by debugfs. default is off
* @dent: Debugfs folder dir entry
+ * @ipa_pdev: IPA Platform Device. Will be used for DMA memory
+ * @empty_fltrt_tbl: Empty table to be used at tables init.
*/
struct ipahal_context {
enum ipa_hw_type hw_type;
void __iomem *base;
struct dentry *dent;
+ struct device *ipa_pdev;
+ struct ipa_mem_buffer empty_fltrt_tbl;
};
extern struct ipahal_context *ipahal_ctx;