summaryrefslogtreecommitdiff
path: root/drivers/platform
diff options
context:
space:
mode:
authorRavi Gummadidala <rgummadi@codeaurora.org>2015-07-21 11:37:30 -0700
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 21:11:44 -0700
commit5dadfa9238463612908c7e2f1872eb60f85668da (patch)
treec1f5e9766f1ad15de3381fcf0c1b9d185354859b /drivers/platform
parent9d60c70de9ae7be75052c16d20e307d8f880358c (diff)
msm: ipa: add support for TX of sk_buff's with paged data
This support is needed to enable GSO (Generic Segmentation Offload).

Change-Id: Ib6e7a5a5f3139697e0b6c68782134377a7bb2dc6
Signed-off-by: Ravi Gummadidala <rgummadi@codeaurora.org>
Diffstat (limited to 'drivers/platform')
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_dp.c134
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h4
3 files changed, 117 insertions, 23 deletions
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 2b7f815f1acf..15340f49adc6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -860,6 +860,7 @@ static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"sw_tx=%u\n"
"hw_tx=%u\n"
+ "tx_non_linear=%u\n"
"tx_compl=%u\n"
"wan_rx=%u\n"
"stat_compl=%u\n"
@@ -875,6 +876,7 @@ static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
"flow_disable=%u\n",
ipa_ctx->stats.tx_sw_pkts,
ipa_ctx->stats.tx_hw_pkts,
+ ipa_ctx->stats.tx_non_linear,
ipa_ctx->stats.tx_pkts_compl,
ipa_ctx->stats.rx_pkts,
ipa_ctx->stats.stat_compl,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index bd38d99ab1be..e65ac24c6c3c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -94,11 +94,19 @@ static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
list_del(&tx_pkt_expected->link);
sys->len--;
spin_unlock_bh(&sys->spinlock);
- if (!tx_pkt_expected->no_unmap_dma)
- dma_unmap_single(ipa_ctx->pdev,
+ if (!tx_pkt_expected->no_unmap_dma) {
+ if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa_ctx->pdev,
+ tx_pkt_expected->mem.phys_base,
+ tx_pkt_expected->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa_ctx->pdev,
tx_pkt_expected->mem.phys_base,
tx_pkt_expected->mem.size,
DMA_TO_DEVICE);
+ }
+ }
if (tx_pkt_expected->callback)
tx_pkt_expected->callback(tx_pkt_expected->user1,
tx_pkt_expected->user2);
@@ -473,18 +481,34 @@ int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
INIT_LIST_HEAD(&tx_pkt->link);
tx_pkt->type = desc[i].type;
- tx_pkt->mem.base = desc[i].pyld;
- tx_pkt->mem.size = desc[i].len;
+ if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
- if (!desc->dma_address_valid) {
- tx_pkt->mem.phys_base =
- dma_map_single(ipa_ctx->pdev,
- tx_pkt->mem.base,
- tx_pkt->mem.size,
- DMA_TO_DEVICE);
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ dma_map_single(ipa_ctx->pdev,
+ tx_pkt->mem.base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ tx_pkt->mem.phys_base = desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
} else {
- tx_pkt->mem.phys_base = desc->dma_address;
- tx_pkt->no_unmap_dma = true;
+ tx_pkt->mem.base = desc[i].frag;
+ tx_pkt->mem.size = skb_frag_size(desc[i].frag);
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ skb_frag_dma_map(ipa_ctx->pdev,
+ desc[i].frag,
+ 0, tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ tx_pkt->mem.phys_base = desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
}
if (!tx_pkt->mem.phys_base) {
@@ -550,9 +574,15 @@ failure:
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
+ if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
+ }
kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
@@ -1479,24 +1509,40 @@ static void ipa_tx_cmd_comp(void *user1, int user2)
int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa_tx_meta *meta)
{
- struct ipa_desc desc[2];
+ struct ipa_desc *desc;
+ struct ipa_desc _desc[2];
int dst_ep_idx;
struct ipa_ip_packet_init *cmd;
struct ipa_sys_context *sys;
int src_ep_idx;
+ int num_frags, f;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
return -EINVAL;
}
- memset(desc, 0, 2 * sizeof(struct ipa_desc));
-
if (skb->len == 0) {
IPAERR("packet size is 0\n");
return -EINVAL;
}
+ num_frags = skb_shinfo(skb)->nr_frags;
+ if (num_frags) {
+ /* 1 desc is needed for the linear portion of skb;
+ * 1 desc may be needed for the PACKET_INIT;
+ * 1 desc for each frag
+ */
+ desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC);
+ if (!desc) {
+ IPAERR("failed to alloc desc array\n");
+ goto fail_mem;
+ }
+ } else {
+ memset(_desc, 0, 2 * sizeof(struct ipa_desc));
+ desc = &_desc[0];
+ }
+
/*
* USB_CONS: PKT_INIT ep_idx = dst pipe
* Q6_CONS: PKT_INIT ep_idx = sender pipe
@@ -1549,7 +1595,7 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[0].callback = ipa_tx_cmd_comp;
desc[0].user1 = cmd;
desc[1].pyld = skb->data;
- desc[1].len = skb->len;
+ desc[1].len = skb_headlen(skb);
desc[1].type = IPA_DATA_DESC_SKB;
desc[1].callback = ipa_tx_comp_usr_notify_release;
desc[1].user1 = skb;
@@ -1562,15 +1608,29 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[1].dma_address = meta->dma_address;
}
- if (ipa_send(sys, 2, desc, true)) {
- IPAERR("fail to send immediate command\n");
+ for (f = 0; f < num_frags; f++) {
+ desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+ }
+
+ /* don't free skb till frag mappings are released */
+ if (num_frags) {
+ desc[2+f-1].callback = desc[1].callback;
+ desc[2+f-1].user1 = desc[1].user1;
+ desc[2+f-1].user2 = desc[1].user2;
+ desc[1].callback = NULL;
+ }
+
+ if (ipa_send(sys, num_frags + 2, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u SWP\n",
+ skb, num_frags);
goto fail_send;
}
IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
} else {
/* HW data path */
desc[0].pyld = skb->data;
- desc[0].len = skb->len;
+ desc[0].len = skb_headlen(skb);
desc[0].type = IPA_DATA_DESC_SKB;
desc[0].callback = ipa_tx_comp_usr_notify_release;
desc[0].user1 = skb;
@@ -1581,18 +1641,46 @@ int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[0].dma_address = meta->dma_address;
}
- if (ipa_send_one(sys, &desc[0], true)) {
- IPAERR("fail to send skb\n");
- goto fail_gen;
+ if (num_frags == 0) {
+ if (ipa_send_one(sys, desc, true)) {
+ IPAERR("fail to send skb %p HWP\n", skb);
+ goto fail_gen;
+ }
+ } else {
+ for (f = 0; f < num_frags; f++) {
+ desc[1+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[1+f].type = IPA_DATA_DESC_SKB_PAGED;
+ }
+
+ /* don't free skb till frag mappings are released */
+ desc[1+f-1].callback = desc[0].callback;
+ desc[1+f-1].user1 = desc[0].user1;
+ desc[1+f-1].user2 = desc[0].user2;
+ desc[0].callback = NULL;
+
+ if (ipa_send(sys, num_frags + 1, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u HWP\n",
+ skb, num_frags);
+ goto fail_gen;
+ }
}
+
IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
}
+ if (num_frags) {
+ kfree(desc);
+ IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
+ }
+
return 0;
fail_send:
kfree(cmd);
fail_gen:
+ if (num_frags)
+ kfree(desc);
+fail_mem:
return -EFAULT;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 834fa31fbd03..16b2c518a5c3 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -615,6 +615,7 @@ struct ipa_sys_context {
enum ipa_desc_type {
IPA_DATA_DESC,
IPA_DATA_DESC_SKB,
+ IPA_DATA_DESC_SKB_PAGED,
IPA_IMM_CMD_DESC
};
@@ -657,6 +658,7 @@ struct ipa_tx_pkt_wrapper {
* struct ipa_desc - IPA descriptor
* @type: skb or immediate command or plain old data
* @pyld: points to skb
+ * @frag: points to paged fragment
* or kmalloc'ed immediate command parameters/plain old data
* @dma_address: dma mapped address of pyld
* @dma_address_valid: valid field for dma_address
@@ -670,6 +672,7 @@ struct ipa_tx_pkt_wrapper {
struct ipa_desc {
enum ipa_desc_type type;
void *pyld;
+ skb_frag_t *frag;
dma_addr_t dma_address;
bool dma_address_valid;
u16 len;
@@ -779,6 +782,7 @@ struct ipa_stats {
u32 lan_repl_rx_empty;
u32 flow_enable;
u32 flow_disable;
+ u32 tx_non_linear;
};
struct ipa_active_clients {