summaryrefslogtreecommitdiff
path: root/drivers/net/ethernet
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h6
-rw-r--r--drivers/net/ethernet/msm/Kconfig31
-rw-r--r--drivers/net/ethernet/msm/Makefile7
-rw-r--r--drivers/net/ethernet/msm/ecm_ipa.c1436
-rw-r--r--drivers/net/ethernet/msm/msm_rmnet_mhi.c1397
-rw-r--r--drivers/net/ethernet/msm/rndis_ipa.c2455
-rw-r--r--drivers/net/ethernet/msm/rndis_ipa_trace.h81
11 files changed, 5431 insertions, 25 deletions
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 31c5e476fd64..0e6c8f249125 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -110,6 +110,7 @@ source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/msm/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 071f84eb6f3f..34cd0537a6df 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_ARCH_QCOM) += msm/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index c08d34f618b9..3e77c3e843bb 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2182,21 +2182,20 @@ static int atl1c_tx_map(struct atl1c_adapter *adapter,
use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
}
-
- /* The last tpd */
- use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
- /* The last buffer info contain the skb address,
- so it will be free after unmap */
- buffer_info->skb = skb;
-
- return 0;
-
+ if (use_tpd && buffer_info) {
+ /*The last tpd*/
+ use_tpd->word1 |= 1 << TPD_EOP_SHIFT;
+		/*The last buffer info contains the skb address,
+		 *so it will be freed after unmap
+ */
+ buffer_info->skb = skb;
+ return 0;
+ }
err_dma:
buffer_info->dma = 0;
buffer_info->length = 0;
return -1;
}
-
static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
{
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 6d649e7b45a9..bd5c22986343 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -97,9 +97,9 @@ static void unfill_desc(struct hnae_ring *ring)
ring_ptr_move_bw(ring, next_to_use);
}
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data)
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_ring *ring = ring_data->ring;
@@ -169,6 +169,10 @@ int hns_nic_net_xmit_hw(struct net_device *ndev,
dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
netdev_tx_sent_queue(dev_queue, skb->len);
+ ndev->trans_start = jiffies;
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
@@ -1086,17 +1090,11 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- int ret;
assert(skb->queue_mapping < ndev->ae_handle->q_num);
- ret = hns_nic_net_xmit_hw(ndev, skb,
- &tx_ring_data(priv, skb->queue_mapping));
- if (ret == NETDEV_TX_OK) {
- ndev->trans_start = jiffies;
- ndev->stats.tx_bytes += skb->len;
- ndev->stats.tx_packets++;
- }
- return (netdev_tx_t)ret;
+
+ return hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
}
static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index dae0ed19ac6d..b4489ffdb4de 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -77,8 +77,8 @@ void hns_ethtool_set_ops(struct net_device *ndev);
void hns_nic_net_reset(struct net_device *ndev);
void hns_nic_net_reinit(struct net_device *netdev);
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
-int hns_nic_net_xmit_hw(struct net_device *ndev,
- struct sk_buff *skb,
- struct hns_nic_ring_data *ring_data);
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
#endif /**__HNS_ENET_H */
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
new file mode 100644
index 000000000000..b0fe6a6d7115
--- /dev/null
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -0,0 +1,31 @@
+#
+# msm network device configuration
+#
+
+config MSM_RMNET_MHI
+ bool "RMNET MHI Driver"
+ depends on MSM_MHI
+ help
+ Implements RMNET over MHI interface.
+ RMNET provides a virtual ethernet interface
+ for routing IP packets within the MSM using
+ BAM as a physical transport.
+
+config ECM_IPA
+ tristate "STD ECM LAN Driver support"
+ depends on IPA || IPA3
+ help
+ Enables LAN between applications processor and a tethered
+ host using the STD ECM protocol.
+ This Network interface is aimed to allow data path go through
+ IPA core while using STD ECM protocol.
+
+config RNDIS_IPA
+ tristate "RNDIS_IPA Network Interface Driver support"
+ depends on IPA || IPA3
+ help
+ Enables LAN between applications processor and a tethered
+ host using the RNDIS protocol.
+ This Network interface is aimed to allow data path go through
+ IPA core while using RNDIS protocol.
+
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
new file mode 100644
index 000000000000..5bf934e82fce
--- /dev/null
+++ b/drivers/net/ethernet/msm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the msm networking support.
+#
+
+obj-$(CONFIG_MSM_RMNET_MHI) += msm_rmnet_mhi.o
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
+obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
new file mode 100644
index 000000000000..a89face0e891
--- /dev/null
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -0,0 +1,1436 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/ecm_ipa.h>
+
+#define DRIVER_NAME "ecm_ipa"
+#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
+#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
+#define INACTIVITY_MSEC_DELAY 100
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+#define TX_TIMEOUT (5 * HZ)
+
+
+#define ECM_IPA_DEBUG(fmt, args...) \
+ pr_debug("ctx:%s: "\
+ fmt, current->comm, ## args)
+
+#define ECM_IPA_INFO(fmt, args...) \
+ pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+ fmt, __func__, __LINE__, current->comm, ## args)
+
+#define ECM_IPA_ERROR(fmt, args...) \
+ pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+ fmt, __func__, __LINE__, current->comm, ## args)
+
+#define NULL_CHECK(ptr) \
+ do { \
+ if (!(ptr)) { \
+ ECM_IPA_ERROR("null pointer #ptr\n"); \
+ return -EINVAL; \
+ } \
+ } \
+ while (0)
+
+#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n")
+#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n")
+
+/**
+ * enum ecm_ipa_state - specify the current driver internal state
+ * which is guarded by a state machine.
+ *
+ * The driver internal state changes due to its external API usage.
+ * The driver saves its internal state to guard from caller illegal
+ * call sequence.
+ * states:
+ * UNLOADED is the first state which is the default one and is also the state
+ * after the driver gets unloaded(cleanup).
+ * INITIALIZED is the driver state once it finished registering
+ * the network device and all internal data struct were initialized
+ * CONNECTED is the driver state once the USB pipes were connected to IPA
+ * UP is the driver state after the interface mode was set to UP but the
+ * pipes are not connected yet - this state is meta-stable state.
+ * CONNECTED_AND_UP is the driver state when the pipe were connected and
+ * the interface got UP request from the network stack. this is the driver
+ * idle operation state which allows it to transmit/receive data.
+ * INVALID is a state which is not allowed.
+ */
+enum ecm_ipa_state {
+ ECM_IPA_UNLOADED = 0,
+ ECM_IPA_INITIALIZED,
+ ECM_IPA_CONNECTED,
+ ECM_IPA_UP,
+ ECM_IPA_CONNECTED_AND_UP,
+ ECM_IPA_INVALID,
+};
+
+/**
+ * enum ecm_ipa_operation - enumerations used to describe the API operation
+ *
+ * Those enums are used as input for the driver state machine.
+ */
+enum ecm_ipa_operation {
+ ECM_IPA_INITIALIZE,
+ ECM_IPA_CONNECT,
+ ECM_IPA_OPEN,
+ ECM_IPA_STOP,
+ ECM_IPA_DISCONNECT,
+ ECM_IPA_CLEANUP,
+};
+
+#define ECM_IPA_STATE_DEBUG(ecm_ipa_ctx) \
+ ECM_IPA_DEBUG("Driver state - %s\n",\
+ ecm_ipa_state_string(ecm_ipa_ctx->state));
+
+/**
+ * struct ecm_ipa_dev - main driver context parameters
+ * @net: network interface struct implemented by this driver
+ * @directory: debugfs directory for various debugging switches
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: save handle for IPA pipe operations
+ * @ipa_to_usb_hdl: save handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets which shall cause
+ * to netdev queue start (after stopped due to outstanding_high reached)
+ * @state: current state of ecm_ipa driver
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the Netdev internal
+ * state is changed to RNDIS_IPA_CONNECTED_AND_UP
+ * @ipa_to_usb_client: consumer client
+ * @usb_to_ipa_client: producer client
+ * @ipa_rm_resource_name_prod: IPA resource manager producer resource
+ * @ipa_rm_resource_name_cons: IPA resource manager consumer resource
+ */
+struct ecm_ipa_dev {
+ struct net_device *net;
+ struct dentry *directory;
+ uint32_t eth_ipv4_hdr_hdl;
+ uint32_t eth_ipv6_hdr_hdl;
+ u32 usb_to_ipa_hdl;
+ u32 ipa_to_usb_hdl;
+ atomic_t outstanding_pkts;
+ u8 outstanding_high;
+ u8 outstanding_low;
+ enum ecm_ipa_state state;
+ void (*device_ready_notify)(void);
+ enum ipa_client_type ipa_to_usb_client;
+ enum ipa_client_type usb_to_ipa_client;
+ enum ipa_rm_resource_name ipa_rm_resource_name_prod;
+ enum ipa_rm_resource_name ipa_rm_resource_name_cons;
+};
+
+static int ecm_ipa_open(struct net_device *net);
+static void ecm_ipa_packet_receive_notify(void *priv,
+ enum ipa_dp_evt_type evt, unsigned long data);
+static void ecm_ipa_tx_complete_notify(void *priv,
+ enum ipa_dp_evt_type evt, unsigned long data);
+static void ecm_ipa_tx_timeout(struct net_device *net);
+static int ecm_ipa_stop(struct net_device *net);
+static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *ecm_ipa_ctx,
+ const void *dst_mac, const void *src_mac);
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_deregister_properties(void);
+static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+ unsigned long data);
+static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_destory_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
+static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
+ struct net_device *net);
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos);
+static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
+static int ecm_ipa_set_device_ethernet_addr(u8 *dev_ethaddr,
+ u8 device_ethaddr[]);
+static enum ecm_ipa_state ecm_ipa_next_state(enum ecm_ipa_state current_state,
+ enum ecm_ipa_operation operation);
+static const char *ecm_ipa_state_string(enum ecm_ipa_state state);
+static int ecm_ipa_init_module(void);
+static void ecm_ipa_cleanup_module(void);
+
+static const struct net_device_ops ecm_ipa_netdev_ops = {
+ .ndo_open = ecm_ipa_open,
+ .ndo_stop = ecm_ipa_stop,
+ .ndo_start_xmit = ecm_ipa_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_tx_timeout = ecm_ipa_tx_timeout,
+ .ndo_get_stats = ecm_ipa_get_stats,
+};
+
+const struct file_operations ecm_ipa_debugfs_atomic_ops = {
+ .open = ecm_ipa_debugfs_atomic_open,
+ .read = ecm_ipa_debugfs_atomic_read,
+};
+
+static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
+{
+ kfree(buff);
+}
+
+/**
+ * ecm_ipa_init() - create network device and initializes internal
+ * data structures
+ * @params: in/out parameters required for ecm_ipa initialization
+ *
+ * Shall be called prior to pipe connection.
+ * The out parameters (the callbacks) shall be supplied to ipa_connect.
+ * Detailed description:
+ * - allocate the network device
+ * - set default values for driver internals
+ * - create debugfs folder and files
+ * - create IPA resource manager client
+ * - add header insertion rules for IPA driver (based on host/device
+ * Ethernet addresses given in input params)
+ * - register tx/rx properties to IPA driver (will be later used
+ * by IPA configuration manager to configure reset of the IPA rules)
+ * - set the carrier state to "off" (until ecm_ipa_connect is called)
+ * - register the network device
+ * - set the out parameters
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_init(struct ecm_ipa_params *params)
+{
+ int result = 0;
+ struct net_device *net;
+ struct ecm_ipa_dev *ecm_ipa_ctx;
+
+ ECM_IPA_LOG_ENTRY();
+
+ ECM_IPA_DEBUG("%s initializing\n", DRIVER_NAME);
+ NULL_CHECK(params);
+
+ ECM_IPA_DEBUG("host_ethaddr=%pM, device_ethaddr=%pM\n",
+ params->host_ethaddr,
+ params->device_ethaddr);
+
+ net = alloc_etherdev(sizeof(struct ecm_ipa_dev));
+ if (!net) {
+ result = -ENOMEM;
+ ECM_IPA_ERROR("fail to allocate etherdev\n");
+ goto fail_alloc_etherdev;
+ }
+ ECM_IPA_DEBUG("network device was successfully allocated\n");
+
+ ecm_ipa_ctx = netdev_priv(net);
+ if (!ecm_ipa_ctx) {
+ ECM_IPA_ERROR("fail to extract netdev priv\n");
+ result = -ENOMEM;
+ goto fail_netdev_priv;
+ }
+ memset(ecm_ipa_ctx, 0, sizeof(*ecm_ipa_ctx));
+ ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %p\n", ecm_ipa_ctx);
+
+ ecm_ipa_ctx->net = net;
+ ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+ ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+ atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+ snprintf(net->name, sizeof(net->name), "%s%%d", "ecm");
+ net->netdev_ops = &ecm_ipa_netdev_ops;
+ net->watchdog_timeo = TX_TIMEOUT;
+ ECM_IPA_DEBUG("internal data structures were intialized\n");
+
+ if (!params->device_ready_notify)
+ ECM_IPA_DEBUG("device_ready_notify() was not supplied");
+ ecm_ipa_ctx->device_ready_notify = params->device_ready_notify;
+
+ ecm_ipa_debugfs_init(ecm_ipa_ctx);
+
+ result = ecm_ipa_set_device_ethernet_addr(net->dev_addr,
+ params->device_ethaddr);
+ if (result) {
+ ECM_IPA_ERROR("set device MAC failed\n");
+ goto fail_set_device_ethernet;
+ }
+ ECM_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
+
+ result = ecm_ipa_rules_cfg(ecm_ipa_ctx, params->host_ethaddr,
+ params->device_ethaddr);
+ if (result) {
+ ECM_IPA_ERROR("fail on ipa rules set\n");
+ goto fail_rules_cfg;
+ }
+ ECM_IPA_DEBUG("Ethernet header insertion set\n");
+
+ netif_carrier_off(net);
+ ECM_IPA_DEBUG("netif_carrier_off() was called\n");
+
+ netif_stop_queue(ecm_ipa_ctx->net);
+ ECM_IPA_DEBUG("netif_stop_queue() was called");
+
+ result = register_netdev(net);
+ if (result) {
+ ECM_IPA_ERROR("register_netdev failed: %d\n", result);
+ goto fail_register_netdev;
+ }
+ ECM_IPA_DEBUG("register_netdev succeeded\n");
+
+ params->ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify;
+ params->ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify;
+ params->private = (void *)ecm_ipa_ctx;
+ params->skip_ep_cfg = false;
+ ecm_ipa_ctx->state = ECM_IPA_INITIALIZED;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ ECM_IPA_INFO("ECM_IPA was initialized successfully\n");
+
+ ECM_IPA_LOG_EXIT();
+
+ return 0;
+
+fail_register_netdev:
+ ecm_ipa_rules_destroy(ecm_ipa_ctx);
+fail_set_device_ethernet:
+fail_rules_cfg:
+ ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
+fail_netdev_priv:
+ free_netdev(net);
+fail_alloc_etherdev:
+ return result;
+}
+EXPORT_SYMBOL(ecm_ipa_init);
+
+/**
+ * ecm_ipa_connect() - notify ecm_ipa for IPA<->USB pipes connection
+ * @usb_to_ipa_hdl: handle of IPA driver client for USB->IPA
+ * @ipa_to_usb_hdl: handle of IPA driver client for IPA->USB
+ * @priv: same value that was set by ecm_ipa_init(), this
+ * parameter holds the network device pointer.
+ *
+ * Once USB driver finishes the pipe connection between IPA core
+ * and USB core this method shall be called in order to
+ * allow ecm_ipa complete the data path configurations.
+ * Caller should make sure that it is calling this function
+ * from a context that allows it to handle device_ready_notify().
+ * Detailed description:
+ * - configure the IPA end-points register
+ * - notify the Linux kernel for "carrier_on"
+ * After this function is done the driver state changes to "Connected".
+ * This API is expected to be called after ecm_ipa_init() or
+ * after a call to ecm_ipa_disconnect.
+ */
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
+{
+ struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+ int next_state;
+ struct ipa_ecm_msg *ecm_msg;
+ struct ipa_msg_meta msg_meta;
+ int retval;
+
+ ECM_IPA_LOG_ENTRY();
+ NULL_CHECK(priv);
+ ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d, priv=0x%p\n",
+ usb_to_ipa_hdl, ipa_to_usb_hdl, priv);
+
+ next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CONNECT);
+ if (next_state == ECM_IPA_INVALID) {
+ ECM_IPA_ERROR("can't call connect before calling initialize\n");
+ return -EPERM;
+ }
+ ecm_ipa_ctx->state = next_state;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ if (!ipa_is_client_handle_valid(usb_to_ipa_hdl)) {
+ ECM_IPA_ERROR("usb_to_ipa_hdl(%d) is not a valid ipa handle\n",
+ usb_to_ipa_hdl);
+ return -EINVAL;
+ }
+ if (!ipa_is_client_handle_valid(ipa_to_usb_hdl)) {
+ ECM_IPA_ERROR("ipa_to_usb_hdl(%d) is not a valid ipa handle\n",
+ ipa_to_usb_hdl);
+ return -EINVAL;
+ }
+
+ ecm_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
+ ecm_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
+
+ ecm_ipa_ctx->ipa_to_usb_client = ipa_get_client_mapping(ipa_to_usb_hdl);
+ if (ecm_ipa_ctx->ipa_to_usb_client < 0) {
+ ECM_IPA_ERROR(
+ "Error getting IPA->USB client from handle %d\n",
+ ecm_ipa_ctx->ipa_to_usb_client);
+ return -EINVAL;
+ }
+ ECM_IPA_DEBUG("ipa_to_usb_client = %d\n",
+ ecm_ipa_ctx->ipa_to_usb_client);
+
+ ecm_ipa_ctx->usb_to_ipa_client = ipa_get_client_mapping(usb_to_ipa_hdl);
+ if (ecm_ipa_ctx->usb_to_ipa_client < 0) {
+ ECM_IPA_ERROR(
+ "Error getting USB->IPA client from handle %d\n",
+ ecm_ipa_ctx->usb_to_ipa_client);
+ return -EINVAL;
+ }
+ ECM_IPA_DEBUG("usb_to_ipa_client = %d\n",
+ ecm_ipa_ctx->usb_to_ipa_client);
+
+ ecm_ipa_ctx->ipa_rm_resource_name_cons =
+ ipa_get_rm_resource_from_ep(ipa_to_usb_hdl);
+ if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) {
+ ECM_IPA_ERROR("Error getting CONS RM resource from handle %d\n",
+ ecm_ipa_ctx->ipa_rm_resource_name_cons);
+ return -EINVAL;
+ }
+ ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n",
+ ecm_ipa_ctx->ipa_rm_resource_name_cons);
+
+ ecm_ipa_ctx->ipa_rm_resource_name_prod =
+ ipa_get_rm_resource_from_ep(usb_to_ipa_hdl);
+ if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) {
+ ECM_IPA_ERROR("Error getting PROD RM resource from handle %d\n",
+ ecm_ipa_ctx->ipa_rm_resource_name_prod);
+ return -EINVAL;
+ }
+ ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n",
+ ecm_ipa_ctx->ipa_rm_resource_name_prod);
+
+ retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx);
+ if (retval) {
+ ECM_IPA_ERROR("fail on RM create\n");
+ goto fail_create_rm;
+ }
+ ECM_IPA_DEBUG("RM resource was created\n");
+
+ retval = ecm_ipa_register_properties(ecm_ipa_ctx);
+ if (retval) {
+ ECM_IPA_ERROR("fail on properties set\n");
+ goto fail_create_rm;
+ }
+ ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n");
+
+ retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl);
+ if (retval) {
+ ECM_IPA_ERROR("fail on ep cfg\n");
+ goto fail;
+ }
+ ECM_IPA_DEBUG("end-point configured\n");
+
+ netif_carrier_on(ecm_ipa_ctx->net);
+
+ ecm_msg = kzalloc(sizeof(struct ipa_ecm_msg), GFP_KERNEL);
+ if (!ecm_msg) {
+ ECM_IPA_ERROR("can't alloc msg mem\n");
+ retval = -ENOMEM;
+ goto fail;
+ }
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = ECM_CONNECT;
+ msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+ strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+ IPA_RESOURCE_NAME_MAX);
+ ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+ retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+ if (retval) {
+ ECM_IPA_ERROR("fail to send ECM_CONNECT message\n");
+ kfree(ecm_msg);
+ goto fail;
+ }
+
+ if (!netif_carrier_ok(ecm_ipa_ctx->net)) {
+ ECM_IPA_ERROR("netif_carrier_ok error\n");
+ retval = -EBUSY;
+ goto fail;
+ }
+ ECM_IPA_DEBUG("carrier_on notified\n");
+
+ if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP)
+ ecm_ipa_enable_data_path(ecm_ipa_ctx);
+ else
+ ECM_IPA_DEBUG("data path was not enabled yet\n");
+
+ ECM_IPA_INFO("ECM_IPA was connected successfully\n");
+
+ ECM_IPA_LOG_EXIT();
+
+ return 0;
+
+fail:
+ ecm_ipa_deregister_properties();
+fail_create_rm:
+ ecm_ipa_destory_rm_resource(ecm_ipa_ctx);
+ return retval;
+}
+EXPORT_SYMBOL(ecm_ipa_connect);
+
+/**
+ * ecm_ipa_open() - notify Linux network stack to start sending packets
+ * @net: the network interface supplied by the network stack
+ *
+ * Linux uses this API to notify the driver that the network interface
+ * transitions to the up state.
+ * The driver will instruct the Linux network stack to start
+ * delivering data packets.
+ */
+static int ecm_ipa_open(struct net_device *net)
+{
+ struct ecm_ipa_dev *ecm_ipa_ctx;
+ int next_state;
+
+ ECM_IPA_LOG_ENTRY();
+
+ ecm_ipa_ctx = netdev_priv(net);
+
+ next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_OPEN);
+ if (next_state == ECM_IPA_INVALID) {
+ ECM_IPA_ERROR("can't bring driver up before initialize\n");
+ return -EPERM;
+ }
+ ecm_ipa_ctx->state = next_state;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP)
+ ecm_ipa_enable_data_path(ecm_ipa_ctx);
+ else
+ ECM_IPA_DEBUG("data path was not enabled yet\n");
+
+ ECM_IPA_LOG_EXIT();
+
+ return 0;
+}
+
+/**
+ * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ * @skb: packet received from Linux network stack
+ * @net: the network device being used to send this packet
+ *
+ * Several conditions needed in order to send the packet to IPA:
+ * - Transmit queue for the network driver is currently
+ * in "send" state
+ * - The driver internal state is in "UP" state.
+ * - Filter Tx switch is turned off
+ * - The IPA resource manager state for the driver producer client
+ * is "Granted" which implies that all the resources in the dependency
+ * graph are valid for data flow.
+ * - outstanding high boundary did not reach.
+ *
+ * In case all of the above conditions are met, the network driver will
+ * send the packet by using the IPA API for Tx.
+ * In case the outstanding packet high boundary is reached, the driver will
+ * stop the send queue until enough packet were proceeded by the IPA core.
+ */
+static netdev_tx_t ecm_ipa_start_xmit(struct sk_buff *skb,
+ struct net_device *net)
+{
+ int ret;
+ netdev_tx_t status = NETDEV_TX_BUSY;
+ struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+
+ net->trans_start = jiffies;
+
+ ECM_IPA_DEBUG("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
+ skb->len, skb->protocol,
+ atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+ if (unlikely(netif_queue_stopped(net))) {
+ ECM_IPA_ERROR("interface queue is stopped\n");
+ goto out;
+ }
+
+ if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+ ECM_IPA_ERROR("Missing pipe connected and/or iface up\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ ret = resource_request(ecm_ipa_ctx);
+ if (ret) {
+ ECM_IPA_DEBUG("Waiting to resource\n");
+ netif_stop_queue(net);
+ goto resource_busy;
+ }
+
+ if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >=
+ ecm_ipa_ctx->outstanding_high) {
+ ECM_IPA_DEBUG("outstanding high (%d)- stopping\n",
+ ecm_ipa_ctx->outstanding_high);
+ netif_stop_queue(net);
+ status = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
+ if (ret) {
+ ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+ goto fail_tx_packet;
+ }
+
+ atomic_inc(&ecm_ipa_ctx->outstanding_pkts);
+
+ status = NETDEV_TX_OK;
+ goto out;
+
+fail_tx_packet:
+out:
+ resource_release(ecm_ipa_ctx);
+resource_busy:
+ return status;
+}
+
+/**
+ * ecm_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data pointing
+ * to Ethernet packet frame.
+ */
+static void ecm_ipa_packet_receive_notify(void *priv,
+ enum ipa_dp_evt_type evt,
+ unsigned long data)
+{
+ struct sk_buff *skb = (struct sk_buff *)data;
+ struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+ int result;
+ unsigned int packet_len;
+
+ if (!skb) {
+ ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
+ return;
+ }
+
+ packet_len = skb->len;
+ ECM_IPA_DEBUG("packet RX, len=%d\n", skb->len);
+
+ if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+ ECM_IPA_DEBUG("Missing pipe connected and/or iface up\n");
+ return;
+ }
+
+ if (evt != IPA_RECEIVE) {
+ ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n");
+ return;
+ }
+
+ skb->dev = ecm_ipa_ctx->net;
+ skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
+
+ result = netif_rx(skb);
+ if (result)
+ ECM_IPA_ERROR("fail on netif_rx\n");
+ ecm_ipa_ctx->net->stats.rx_packets++;
+ ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
+
+ return;
+}
+
+/** ecm_ipa_stop() - called when network device transitions to the down
+ * state.
+ * @net: the network device being stopped.
+ *
+ * This API is used by Linux network stack to notify the network driver that
+ * its state was changed to "down"
+ * The driver will stop the "send" queue and change its internal
+ * state to "Connected".
+ */
+static int ecm_ipa_stop(struct net_device *net)
+{
+ struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+ int next_state;
+
+ ECM_IPA_LOG_ENTRY();
+
+ next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_STOP);
+ if (next_state == ECM_IPA_INVALID) {
+ ECM_IPA_ERROR("can't do network interface down without up\n");
+ return -EPERM;
+ }
+ ecm_ipa_ctx->state = next_state;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ netif_stop_queue(net);
+ ECM_IPA_DEBUG("network device stopped\n");
+
+ ECM_IPA_LOG_EXIT();
+ return 0;
+}
+
+/** ecm_ipa_disconnect() - called when the USB cable is unplugged.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ * parameter holds the network device pointer.
+ *
+ * Once the USB cable is unplugged the USB driver will notify the network
+ * interface driver.
+ * The internal driver state will returned to its initialized state and
+ * Linux network stack will be informed for carrier off and the send queue
+ * will be stopped.
+ */
+int ecm_ipa_disconnect(void *priv)
+{
+ struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+ int next_state;
+ struct ipa_ecm_msg *ecm_msg;
+ struct ipa_msg_meta msg_meta;
+ int retval;
+ int outstanding_dropped_pkts;
+
+ ECM_IPA_LOG_ENTRY();
+ NULL_CHECK(ecm_ipa_ctx);
+ ECM_IPA_DEBUG("priv=0x%p\n", priv);
+
+ next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_DISCONNECT);
+ if (next_state == ECM_IPA_INVALID) {
+ ECM_IPA_ERROR("can't disconnect before connect\n");
+ return -EPERM;
+ }
+ ecm_ipa_ctx->state = next_state;
+ ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+ netif_carrier_off(ecm_ipa_ctx->net);
+ ECM_IPA_DEBUG("carrier_off notifcation was sent\n");
+
+ ecm_msg = kzalloc(sizeof(struct ipa_ecm_msg), GFP_KERNEL);
+ if (!ecm_msg) {
+ ECM_IPA_ERROR("can't alloc msg mem\n");
+ return -ENOMEM;
+ }
+
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ msg_meta.msg_type = ECM_DISCONNECT;
+ msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+ strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+ IPA_RESOURCE_NAME_MAX);
+ ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+ retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+ if (retval) {
+ ECM_IPA_ERROR("fail to send ECM_DISCONNECT message\n");
+ kfree(ecm_msg);
+ return -EPERM;
+ }
+
+ netif_stop_queue(ecm_ipa_ctx->net);
+ ECM_IPA_DEBUG("queue stopped\n");
+
+ ecm_ipa_destory_rm_resource(ecm_ipa_ctx);
+
+ outstanding_dropped_pkts =
+ atomic_read(&ecm_ipa_ctx->outstanding_pkts);
+ ecm_ipa_ctx->net->stats.tx_errors += outstanding_dropped_pkts;
+ atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+
+ ECM_IPA_INFO("ECM_IPA was disconnected successfully\n");
+
+ ECM_IPA_LOG_EXIT();
+
+ return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_disconnect);
+
+
+/**
+ * ecm_ipa_cleanup() - unregister the network interface driver and free
+ * internal data structs.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ * parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g: when the USB composition does not support ECM.
+ * This function shall be called after the pipes were disconnected.
+ * Detailed description:
+ * - delete the driver dependency defined for IPA resource manager and
+ * destroy the producer resource.
+ * - remove the debugfs entries
+ * - deregister the network interface from Linux network stack
+ * - free all internal data structs
+ */
+void ecm_ipa_cleanup(void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ECM_IPA_DEBUG("priv=0x%p\n", priv);
+
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("ecm_ipa_ctx NULL pointer\n");
+		return;
+	}
+
+	/* validate the tear-down against the driver state machine */
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CLEANUP);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't clean driver without cable disconnect\n");
+		return;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	/* remove IPA header-insertion entries, then the debugfs tree */
+	ecm_ipa_rules_destroy(ecm_ipa_ctx);
+	ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
+
+	unregister_netdev(ecm_ipa_ctx->net);
+	free_netdev(ecm_ipa_ctx->net);
+
+	ECM_IPA_INFO("ECM_IPA was destroyed successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+}
+EXPORT_SYMBOL(ecm_ipa_cleanup);
+
+/* Kick the data path: notify USB that the device is ready (when a
+ * callback was supplied) and open the Tx queue towards the stack.
+ */
+static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	if (!ecm_ipa_ctx->device_ready_notify) {
+		ECM_IPA_DEBUG("device_ready_notify() not supplied\n");
+	} else {
+		ecm_ipa_ctx->device_ready_notify();
+		ECM_IPA_DEBUG("USB device_ready_notify() was called\n");
+	}
+
+	netif_start_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("queue started\n");
+}
+
+/**
+ * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties
+ * Headers will be committed to HW
+ * @ecm_ipa_ctx: main driver context parameters
+ * @dst_mac: destination MAC address
+ * @src_mac: source MAC address
+ *
+ * Returns negative errno, or zero on success
+ */
+static int ecm_ipa_rules_cfg(struct ecm_ipa_dev *ecm_ipa_ctx,
+	const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	struct ethhdr *eth_ipv4;
+	struct ethhdr *eth_ipv6;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+	/* one allocation: the ioc struct followed by two hdr descriptors */
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		result = -ENOMEM;
+		goto out;
+	}
+	/* Ethernet-II template header prepended to IPv4 frames */
+	ipv4_hdr = &hdrs->hdr[0];
+	eth_ipv4 = (struct ethhdr *)ipv4_hdr->hdr;
+	ipv6_hdr = &hdrs->hdr[1];
+	eth_ipv6 = (struct ethhdr *)ipv6_hdr->hdr;
+	strlcpy(ipv4_hdr->name, ECM_IPA_IPV4_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv4->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_ipv4->h_source, src_mac, ETH_ALEN);
+	eth_ipv4->h_proto = htons(ETH_P_IP);
+	ipv4_hdr->hdr_len = ETH_HLEN;
+	ipv4_hdr->is_partial = 0;
+	ipv4_hdr->is_eth2_ofst_valid = true;
+	ipv4_hdr->eth2_ofst = 0;
+	ipv4_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	/* Ethernet-II template header prepended to IPv6 frames */
+	strlcpy(ipv6_hdr->name, ECM_IPA_IPV6_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv6->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_ipv6->h_source, src_mac, ETH_ALEN);
+	eth_ipv6->h_proto = htons(ETH_P_IPV6);
+	ipv6_hdr->hdr_len = ETH_HLEN;
+	ipv6_hdr->is_partial = 0;
+	ipv6_hdr->is_eth2_ofst_valid = true;
+	ipv6_hdr->eth2_ofst = 0;
+	ipv6_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	/* commit both headers to IPA in a single call */
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto out_free_mem;
+	}
+	if (ipv4_hdr->status) {
+		ECM_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto out_free_mem;
+	}
+	if (ipv6_hdr->status) {
+		ECM_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto out_free_mem;
+	}
+	/* keep the returned handles so the headers can be deleted later */
+	ecm_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	ecm_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+	ECM_IPA_LOG_EXIT();
+out_free_mem:
+	kfree(hdrs);
+out:
+	return result;
+}
+
+/**
+ * ecm_ipa_rules_destroy() - remove the IPA core configuration done for
+ * the driver data path.
+ * @ecm_ipa_ctx: the driver context
+ *
+ * Revert the work done on ecm_ipa_rules_cfg.
+ */
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4_del;
+	struct ipa_hdr_del *ipv6_del;
+	int res;
+
+	/* room for the ioc struct plus two deletion handles */
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4_del) +
+			sizeof(*ipv6_del), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	ipv4_del = &del_hdr->hdl[0];
+	ipv6_del = &del_hdr->hdl[1];
+	ipv4_del->hdl = ecm_ipa_ctx->eth_ipv4_hdr_hdl;
+	ipv6_del->hdl = ecm_ipa_ctx->eth_ipv6_hdr_hdl;
+
+	res = ipa_del_hdr(del_hdr);
+	if (res || ipv4_del->status || ipv6_del->status)
+		ECM_IPA_ERROR("ipa_del_hdr failed\n");
+	kfree(del_hdr);
+}
+
+/* ecm_ipa_register_properties() - set Tx/Rx properties for ipacm
+ *
+ * Register ecm0 interface with 2 Tx properties and 2 Rx properties:
+ * The 2 Tx properties are for data flowing from IPA to USB, they
+ * have Header-Insertion properties both for Ipv4 and Ipv6 Ethernet framing.
+ * The 2 Rx properties are for data flowing from USB to IPA, they have
+ * simple rule which always "hit".
+ *
+ */
+static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+
+	/* Tx (IPA->USB): attach the matching Ethernet header per IP version */
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
+	strlcpy(ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
+	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy(ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	tx_properties.num_props = 2;
+
+	/* Rx (USB->IPA): zero attrib_mask means the rule always matches */
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
+	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
+	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_properties.num_props = 2;
+
+	result = ipa_register_intf("ecm0", &tx_properties, &rx_properties);
+	if (result)
+		ECM_IPA_ERROR("fail on Tx/Rx properties registration\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return result;
+}
+
+/* Remove the "ecm0" Tx/Rx interface properties from the IPA driver */
+static void ecm_ipa_deregister_properties(void)
+{
+	int res;
+
+	ECM_IPA_LOG_ENTRY();
+	res = ipa_deregister_intf("ecm0");
+	if (res)
+		ECM_IPA_DEBUG("Fail on Tx prop deregister\n");
+	ECM_IPA_LOG_EXIT();
+}
+
+/**
+ * ecm_ipa_configure() - make IPA core end-point specific configuration
+ * @usb_to_ipa_hdl: handle of usb_to_ipa end-point for IPA driver
+ * @ipa_to_usb_hdl: handle of ipa_to_usb end-point for IPA driver
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ *
+ * Configure the usb_to_ipa and ipa_to_usb end-point registers
+ * - USB->IPA end-point: disable de-aggregation, enable link layer
+ * header removal (Ethernet removal), source NATing and default routing.
+ * - IPA->USB end-point: disable aggregation, add link layer header (Ethernet)
+ * - allocate Ethernet device
+ * - register to Linux network stack
+ *
+ * Returns negative errno, or zero on success
+ */
+
+
+/* IPA resource-manager event handler: restart the Tx queue once the
+ * producer resource is granted; anything else is only logged.
+ */
+static void ecm_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+		unsigned long data)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = user_data;
+
+	ECM_IPA_LOG_ENTRY();
+	if (event != IPA_RM_RESOURCE_GRANTED ||
+	    !netif_queue_stopped(ecm_ipa_ctx->net)) {
+		ECM_IPA_DEBUG("Resource released\n");
+	} else {
+		ECM_IPA_DEBUG("Resource Granted - starting queue\n");
+		netif_start_queue(ecm_ipa_ctx->net);
+	}
+	ECM_IPA_LOG_EXIT();
+}
+
+/* ndo_get_stats handler - counters are kept inside the netdev itself */
+static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net)
+{
+	struct net_device_stats *stats = &net->stats;
+
+	return stats;
+}
+
+
+/* Create the STD_ECM producer RM resource, attach an inactivity timer
+ * to it and declare the ECM<->USB/APPS dependencies.
+ *
+ * Returns 0 on success, negative errno otherwise. On failure the
+ * partially-created resource is deleted again (the original leaked it).
+ */
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_rm_create_params create_params = {0};
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+	create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
+	create_params.reg_params.user_data = ecm_ipa_ctx;
+	create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
+	result = ipa_rm_create_resource(&create_params);
+	if (result) {
+		ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+		goto fail_rm_create;
+	}
+	ECM_IPA_DEBUG("rm client was created");
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_STD_ECM_PROD, &profile);
+
+	result = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_STD_ECM_PROD,
+			INACTIVITY_MSEC_DELAY);
+	if (result) {
+		ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+		goto fail_it;
+	}
+	ECM_IPA_DEBUG("rm_it client was created");
+
+	/* -EINPROGRESS means the dependency completes asynchronously */
+	result = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_STD_ECM_PROD,
+			ecm_ipa_ctx->ipa_rm_resource_name_cons);
+	if (result && result != -EINPROGRESS)
+		ECM_IPA_ERROR("unable to add ECM/USB dependency (%d)\n",
+				result);
+
+	result = ipa_rm_add_dependency_sync(
+			ecm_ipa_ctx->ipa_rm_resource_name_prod,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (result && result != -EINPROGRESS)
+		ECM_IPA_ERROR("unable to add USB/APPS dependency (%d)\n",
+				result);
+
+	ECM_IPA_DEBUG("rm dependency was set\n");
+
+	ECM_IPA_LOG_EXIT();
+	return 0;
+
+fail_it:
+	/* don't leak the resource created above */
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+fail_rm_create:
+	return result;
+}
+
+/* Tear down everything ecm_ipa_create_rm_resource() set up: drop both
+ * RM dependencies, destroy the inactivity timer and delete the
+ * producer resource.
+ * NOTE: the "destory" misspelling is kept - callers use this name.
+ */
+static void ecm_ipa_destory_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD,
+			ecm_ipa_ctx->ipa_rm_resource_name_cons);
+	ipa_rm_delete_dependency(ecm_ipa_ctx->ipa_rm_resource_name_prod,
+			IPA_RM_RESOURCE_APPS_CONS);
+	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+	if (result)
+		ECM_IPA_ERROR("resource deletion failed\n");
+
+	ECM_IPA_LOG_EXIT();
+}
+
+/* Vote for the ECM producer resource via its inactivity timer */
+static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	int ret;
+
+	ret = ipa_rm_inactivity_timer_request_resource(
+			IPA_RM_RESOURCE_STD_ECM_PROD);
+	return ret;
+}
+
+/* Drop the vote taken in resource_request() */
+static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	ipa_rm_inactivity_timer_release_resource(
+			IPA_RM_RESOURCE_STD_ECM_PROD);
+}
+
+/**
+ * ecm_ipa_tx_complete_notify() - Tx-completion notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it
+ * This function will be called in deferred context in IPA wq.
+ */
+static void ecm_ipa_tx_complete_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+
+	if (!skb) {
+		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
+		return;
+	}
+
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n");
+		return;
+	}
+
+	ECM_IPA_DEBUG("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	/* only IPA_WRITE_DONE is expected on the Tx path */
+	if (evt != IPA_WRITE_DONE) {
+		ECM_IPA_ERROR("unsupported event on Tx callback\n");
+		return;
+	}
+
+	/* cable disconnected meanwhile - free the skb without stats */
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_DEBUG("dropping Tx-complete pkt, state=%s",
+			ecm_ipa_state_string(ecm_ipa_ctx->state));
+		goto out;
+	}
+
+	ecm_ipa_ctx->net->stats.tx_packets++;
+	ecm_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	/* wake the queue once in-flight packets drop below the low mark */
+	atomic_dec(&ecm_ipa_ctx->outstanding_pkts);
+	if (netif_queue_stopped(ecm_ipa_ctx->net) &&
+		netif_carrier_ok(ecm_ipa_ctx->net) &&
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts) <
+					(ecm_ipa_ctx->outstanding_low)) {
+		ECM_IPA_DEBUG("outstanding low (%d) - waking up queue\n",
+				ecm_ipa_ctx->outstanding_low);
+		netif_wake_queue(ecm_ipa_ctx->net);
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+	return;
+}
+
+/* ndo_tx_timeout handler - the watchdog fired while Tx completions
+ * were outstanding; count it as a Tx error.
+ */
+static void ecm_ipa_tx_timeout(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+	int pending = atomic_read(&ecm_ipa_ctx->outstanding_pkts);
+
+	ECM_IPA_ERROR("possible IPA stall was detected, %d outstanding",
+			pending);
+
+	net->stats.tx_errors++;
+}
+
+/* open() for the "outstanding" debugfs file - stash the counter address */
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *ctx = inode->i_private;
+
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &ctx->outstanding_pkts;
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/* read() for the "outstanding" debugfs file - render the atomic counter
+ * as a decimal string. Buffer is char (was u8 - pointer-sign mismatch
+ * with scnprintf()/simple_read_from_buffer()).
+ */
+static ssize_t ecm_ipa_debugfs_atomic_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	char atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+	atomic_t *atomic_var = file->private_data;
+
+	nbytes = scnprintf(atomic_str, sizeof(atomic_str), "%d\n",
+			atomic_read(atomic_var));
+	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Create the "ecm_ipa" debugfs tree (outstanding_high/low knobs and the
+ * read-only outstanding counter). On any failure the whole tree is
+ * removed; the driver works fine without it.
+ */
+static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	/* debugfs files must not be world-writable (was S_IWUGO);
+	 * restrict writes to root
+	 */
+	const mode_t flags_read_write = S_IRUGO | S_IWUSR;
+	const mode_t flags_read_only = S_IRUGO;
+	struct dentry *file;
+
+	ECM_IPA_LOG_ENTRY();
+
+	if (!ecm_ipa_ctx)
+		return;
+
+	ecm_ipa_ctx->directory = debugfs_create_dir("ecm_ipa", NULL);
+	if (!ecm_ipa_ctx->directory) {
+		ECM_IPA_ERROR("could not create debugfs directory entry\n");
+		goto fail_directory;
+	}
+	file = debugfs_create_u8("outstanding_high", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding_high file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_u8("outstanding_low", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_low);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding_low file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_file("outstanding", flags_read_only,
+		ecm_ipa_ctx->directory,
+		ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding file\n");
+		goto fail_file;
+	}
+
+	ECM_IPA_DEBUG("debugfs entries were created\n");
+	ECM_IPA_LOG_EXIT();
+
+	return;
+fail_file:
+	debugfs_remove_recursive(ecm_ipa_ctx->directory);
+fail_directory:
+	return;
+}
+
+/* Remove the whole "ecm_ipa" debugfs tree */
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	debugfs_remove_recursive(ecm_ipa_ctx->directory);
+}
+
+#else /* !CONFIG_DEBUG_FS*/
+
+/* debugfs disabled in this build - entry points compile to no-ops */
+static void ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx) {}
+
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx) {}
+
+#endif /* CONFIG_DEBUG_FS */
+
+/**
+ * ecm_ipa_ep_registers_cfg() - configure the USB endpoints for ECM
+ *
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @ipa_to_usb_hdl: handle received from ipa_connect
+ *
+ * USB to IPA pipe:
+ * - No de-aggregation
+ * - Remove Ethernet header
+ * - SRC NAT
+ * - Default routing(0)
+ * IPA to USB Pipe:
+ * - No aggregation
+ * - Add Ethernet header
+ */
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
+{
+	int result = 0;
+	struct ipa_ep_cfg usb_to_ipa_ep_cfg;
+	struct ipa_ep_cfg ipa_to_usb_ep_cfg;
+	ECM_IPA_LOG_ENTRY();
+	/* USB->IPA: no aggregation, strip the Ethernet header, SRC NAT,
+	 * basic mode routed to the A5 LAN/WAN consumer
+	 */
+	memset(&usb_to_ipa_ep_cfg, 0 , sizeof(struct ipa_ep_cfg));
+	usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0;
+	usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC;
+	result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure USB to IPA point\n");
+		goto out;
+	}
+	/* IPA->USB: no aggregation, prepend the Ethernet header, no NAT */
+	memset(&ipa_to_usb_ep_cfg, 0 , sizeof(struct ipa_ep_cfg));
+	ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure IPA to USB end-point\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+/**
+ * ecm_ipa_set_device_ethernet_addr() - set device etherenet address
+ * @dev_ethaddr: device etherenet address
+ *
+ * Returns 0 for success, negative otherwise
+ */
+/* Copy the (validated) device Ethernet address into @dev_ethaddr.
+ * Returns 0 on success, -EINVAL for an invalid unicast address.
+ */
+static int ecm_ipa_set_device_ethernet_addr(u8 *dev_ethaddr,
+		u8 device_ethaddr[])
+{
+	if (!is_valid_ether_addr(device_ethaddr))
+		return -EINVAL;
+
+	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+	ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr);
+
+	return 0;
+}
+
+/** ecm_ipa_next_state - return the next state of the driver
+ * @current_state: the current state of the driver
+ * @operation: an enum which represent the operation being made on the driver
+ * by its API.
+ *
+ * This function implements the driver internal state machine.
+ * Its decisions are based on the driver current state and the operation
+ * being made.
+ * In case the operation is invalid this state machine will return
+ * the value ECM_IPA_INVALID to inform the caller for a forbidden sequence.
+ */
+static enum ecm_ipa_state ecm_ipa_next_state(enum ecm_ipa_state current_state,
+	enum ecm_ipa_operation operation)
+{
+	int next_state = ECM_IPA_INVALID;
+
+	/* outer switch on the current state, inner on the operation;
+	 * any combination not listed is a forbidden transition
+	 */
+	switch (current_state) {
+	case ECM_IPA_UNLOADED:
+		if (operation == ECM_IPA_INITIALIZE)
+			next_state = ECM_IPA_INITIALIZED;
+		break;
+	case ECM_IPA_INITIALIZED:
+		switch (operation) {
+		case ECM_IPA_CONNECT:
+			next_state = ECM_IPA_CONNECTED;
+			break;
+		case ECM_IPA_OPEN:
+			next_state = ECM_IPA_UP;
+			break;
+		case ECM_IPA_CLEANUP:
+			next_state = ECM_IPA_UNLOADED;
+			break;
+		default:
+			break;
+		}
+		break;
+	case ECM_IPA_CONNECTED:
+		switch (operation) {
+		case ECM_IPA_DISCONNECT:
+			next_state = ECM_IPA_INITIALIZED;
+			break;
+		case ECM_IPA_OPEN:
+			next_state = ECM_IPA_CONNECTED_AND_UP;
+			break;
+		default:
+			break;
+		}
+		break;
+	case ECM_IPA_UP:
+		switch (operation) {
+		case ECM_IPA_STOP:
+			next_state = ECM_IPA_INITIALIZED;
+			break;
+		case ECM_IPA_CONNECT:
+			next_state = ECM_IPA_CONNECTED_AND_UP;
+			break;
+		case ECM_IPA_CLEANUP:
+			next_state = ECM_IPA_UNLOADED;
+			break;
+		default:
+			break;
+		}
+		break;
+	case ECM_IPA_CONNECTED_AND_UP:
+		switch (operation) {
+		case ECM_IPA_STOP:
+			next_state = ECM_IPA_CONNECTED;
+			break;
+		case ECM_IPA_DISCONNECT:
+			next_state = ECM_IPA_UP;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		ECM_IPA_ERROR("State is not supported\n");
+		break;
+	}
+
+	ECM_IPA_DEBUG("state transition ( %s -> %s )- %s\n",
+			ecm_ipa_state_string(current_state),
+			ecm_ipa_state_string(next_state),
+			next_state == ECM_IPA_INVALID ?
+			"Forbidden" : "Allowed");
+
+	return next_state;
+}
+
+/**
+ * ecm_ipa_state_string - return the state string representation
+ * @state: enum which describe the state
+ */
+static const char *ecm_ipa_state_string(enum ecm_ipa_state state)
+{
+	const char *str;
+
+	switch (state) {
+	case ECM_IPA_UNLOADED:
+		str = "ECM_IPA_UNLOADED";
+		break;
+	case ECM_IPA_INITIALIZED:
+		str = "ECM_IPA_INITIALIZED";
+		break;
+	case ECM_IPA_CONNECTED:
+		str = "ECM_IPA_CONNECTED";
+		break;
+	case ECM_IPA_UP:
+		str = "ECM_IPA_UP";
+		break;
+	case ECM_IPA_CONNECTED_AND_UP:
+		str = "ECM_IPA_CONNECTED_AND_UP";
+		break;
+	default:
+		str = "Not supported";
+		break;
+	}
+
+	return str;
+}
+
+/**
+ * ecm_ipa_init_module() - module initialization
+ *
+ */
+static int ecm_ipa_init_module(void)
+{
+	/* no module-wide state to set up; just trace the call */
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/**
+ * ecm_ipa_cleanup_module() - module cleanup
+ *
+ */
+static void ecm_ipa_cleanup_module(void)
+{
+	/* no module-wide state to tear down; just trace the call */
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_LOG_EXIT();
+	return;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ECM IPA network interface");
+
+late_initcall(ecm_ipa_init_module);
+module_exit(ecm_ipa_cleanup_module);
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
new file mode 100644
index 000000000000..a342e39b9f43
--- /dev/null
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -0,0 +1,1397 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * MHI RMNET Network interface
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/msm_rmnet.h>
+#include <linux/if_arp.h>
+#include <linux/dma-mapping.h>
+#include <linux/msm_mhi.h>
+#include <linux/debugfs.h>
+#include <linux/ipc_logging.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of_device.h>
+#include <linux/rtnetlink.h>
+
+#define RMNET_MHI_DRIVER_NAME "rmnet_mhi"
+#define MHI_DEFAULT_MTU 8000
+#define MHI_MAX_MRU 0xFFFF
+#define MHI_NAPI_WEIGHT_VALUE 12
+#define WATCHDOG_TIMEOUT (30 * HZ)
+#define RMNET_IPC_LOG_PAGES (100)
+#define IRQ_MASKED_BIT (0)
+
+/* Message severity levels used by rmnet_log(); a message is emitted
+ * when its level is >= the configured threshold (see struct
+ * debug_params).
+ */
+enum DBG_LVL {
+	MSG_VERBOSE = 0x1,
+	MSG_INFO = 0x2,
+	MSG_DBG = 0x4,
+	MSG_WARNING = 0x8,
+	MSG_ERROR = 0x10,
+	MSG_CRITICAL = 0x20,
+	MSG_reserved = 0x80000000
+};
+
+/* Logging thresholds and data-path debug counters */
+struct debug_params {
+	enum DBG_LVL rmnet_msg_lvl;	/* console (pr_alert) threshold */
+	enum DBG_LVL rmnet_ipc_log_lvl;	/* ipc_log threshold */
+	u64 tx_interrupts_count;
+	u64 rx_interrupts_count;
+	u64 tx_ring_full_count;
+	u64 tx_queued_packets_count;
+	u64 rx_interrupts_in_masked_irq;
+	u64 rx_napi_skb_burst_min;
+	u64 rx_napi_skb_burst_max;
+	u64 tx_cb_skb_free_burst_min;
+	u64 tx_cb_skb_free_burst_max;
+	u64 rx_napi_budget_overflow;
+	u64 rx_fragmentation;
+};
+
+/* Per-skb private data, stored in skb->cb */
+struct __packed mhi_skb_priv {
+	dma_addr_t dma_addr;
+	size_t dma_size;	/* buffer length handed to mhi_queue_xfer() */
+};
+
+/* Emit to the kernel log and/or the IPC log when _msg_lvl passes the
+ * corresponding threshold in rmnet_mhi_ptr->debug.
+ */
+#define rmnet_log(rmnet_mhi_ptr, _msg_lvl, _msg, ...) do { \
+	if ((_msg_lvl) >= rmnet_mhi_ptr->debug.rmnet_msg_lvl) \
+		pr_alert("[%s] " _msg, __func__, ##__VA_ARGS__);\
+	if (rmnet_mhi_ptr->rmnet_ipc_log && \
+		((_msg_lvl) >= rmnet_mhi_ptr->debug.rmnet_ipc_log_lvl)) \
+		ipc_log_string(rmnet_mhi_ptr->rmnet_ipc_log, \
+			"[%s] " _msg, __func__, ##__VA_ARGS__); \
+} while (0)
+
+/* Per-interface driver context; netdev_priv() stores a pointer to it */
+struct rmnet_mhi_private {
+	struct list_head node;
+	u32 dev_id;
+	const char *interface_name;
+	struct mhi_client_handle *tx_client_handle;
+	struct mhi_client_handle *rx_client_handle;
+	enum MHI_CLIENT_CHANNEL tx_channel;
+	enum MHI_CLIENT_CHANNEL rx_channel;
+	struct sk_buff_head tx_buffers;	/* skbs handed to the TX channel */
+	struct sk_buff_head rx_buffers;	/* skbs posted on the RX channel */
+	atomic_t rx_pool_len;
+	u32 mru;
+	u32 max_mru;
+	u32 max_mtu;
+	struct napi_struct napi;
+	gfp_t allocation_flags;
+	uint32_t tx_buffers_max;
+	uint32_t rx_buffers_max;
+	u32 alloc_fail;
+	u32 tx_enabled;
+	u32 rx_enabled;
+	u32 mhi_enabled;	/* checked under pm_lock on every path */
+	struct platform_device *pdev;
+	struct net_device *dev;
+	unsigned long flags;	/* holds IRQ_MASKED_BIT */
+	int wake_count;
+	spinlock_t out_chan_full_lock; /* tx queue lock */
+	struct sk_buff *frag_skb;	/* pending partially-received packet */
+	struct work_struct alloc_work;
+	/* lock to queue hardware and internal queue */
+	spinlock_t alloc_lock;
+	void *rmnet_ipc_log;
+	rwlock_t pm_lock; /* state change lock */
+	struct debug_params debug;
+	struct dentry *dentry;
+};
+
+static LIST_HEAD(rmnet_mhi_ctxt_list);
+static struct platform_driver rmnet_mhi_driver;
+
+/* Reassemble an MHI transfer that spanned several RX buffers and hand
+ * the complete packet to the stack.
+ * @rmnet_mhi_ptr: driver context owning the pending fragment (if any)
+ * @skb: newly received buffer (consumed in all cases)
+ * @frag: non-zero when more fragments of this packet are expected
+ *
+ * Returns 0 on success, -ENOMEM if the merged skb could not be
+ * allocated (both the pending fragment and @skb are released then).
+ */
+static int rmnet_mhi_process_fragment(struct rmnet_mhi_private *rmnet_mhi_ptr,
+		struct sk_buff *skb, int frag)
+{
+	struct sk_buff *temp_skb;
+
+	if (rmnet_mhi_ptr->frag_skb) {
+		/* Merge the new skb into the old fragment */
+		temp_skb = skb_copy_expand(rmnet_mhi_ptr->frag_skb,
+					0,
+					skb->len,
+					GFP_ATOMIC);
+		if (!temp_skb) {
+			/* sk_buffs must be freed via the skb API,
+			 * not kfree(); also release the incoming skb
+			 */
+			dev_kfree_skb_any(rmnet_mhi_ptr->frag_skb);
+			rmnet_mhi_ptr->frag_skb = NULL;
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+		dev_kfree_skb_any(rmnet_mhi_ptr->frag_skb);
+		rmnet_mhi_ptr->frag_skb = temp_skb;
+		memcpy(skb_put(rmnet_mhi_ptr->frag_skb, skb->len),
+			skb->data,
+			skb->len);
+		dev_kfree_skb_any(skb);
+		if (!frag) {
+			/* Last fragmented piece was received, ship it */
+			netif_receive_skb(rmnet_mhi_ptr->frag_skb);
+			rmnet_mhi_ptr->frag_skb = NULL;
+		}
+	} else {
+		if (frag) {
+			/* This is the first fragment */
+			rmnet_mhi_ptr->frag_skb = skb;
+			rmnet_mhi_ptr->debug.rx_fragmentation++;
+		} else {
+			netif_receive_skb(skb);
+		}
+	}
+	return 0;
+}
+/* Drain @queue and free every queued skb. The original dereferenced
+ * skb->cb before its NULL check and kept an unused local; the @dir
+ * parameter is retained for interface compatibility.
+ */
+static void rmnet_mhi_internal_clean_unmap_buffers(struct net_device *dev,
+						struct sk_buff_head *queue,
+						enum dma_data_direction dir)
+{
+	struct sk_buff *skb;
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n");
+	while ((skb = skb_dequeue(queue)) != NULL)
+		kfree_skb(skb);
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited\n");
+}
+
+/* Infer the L3 protocol from the IP version nibble of the first byte;
+ * anything that is not IPv4/IPv6 is treated as QMAP traffic.
+ */
+static __be16 rmnet_mhi_ip_type_trans(struct sk_buff *skb)
+{
+	u8 version = skb->data[0] & 0xf0;
+
+	if (version == 0x40)
+		return htons(ETH_P_IP);
+	if (version == 0x60)
+		return htons(ETH_P_IPV6);
+
+	/* Default is QMAP */
+	return htons(ETH_P_MAP);
+}
+
+/* Refill the RX pool up to rx_buffers_max: allocate MRU-sized skbs and
+ * post their data buffers on the MHI RX channel.
+ * Returns 0 when the pool is full, -ENOMEM on allocation failure,
+ * -EIO when the interface was disabled meanwhile, or the
+ * mhi_queue_xfer() error code.
+ */
+static int rmnet_alloc_rx(struct rmnet_mhi_private *rmnet_mhi_ptr,
+			gfp_t alloc_flags)
+{
+	u32 cur_mru = rmnet_mhi_ptr->mru;
+	struct mhi_skb_priv *skb_priv;
+	int ret;
+	struct sk_buff *skb;
+
+	while (atomic_read(&rmnet_mhi_ptr->rx_pool_len) <
+			rmnet_mhi_ptr->rx_buffers_max) {
+		skb = alloc_skb(cur_mru, alloc_flags);
+		if (!skb) {
+			rmnet_log(rmnet_mhi_ptr,
+				MSG_INFO,
+				"SKB Alloc failed with flags:0x%x\n",
+				alloc_flags);
+			return -ENOMEM;
+		}
+		skb_priv = (struct mhi_skb_priv *)(skb->cb);
+		skb_priv->dma_size = cur_mru;
+		skb_priv->dma_addr = 0;
+
+		/* These steps must be in atomic context */
+		spin_lock_bh(&rmnet_mhi_ptr->alloc_lock);
+
+		/* It's possible by the time alloc_skb (GFP_KERNEL)
+		 * returns we already called rmnet_alloc_rx
+		 * in atomic context and allocated memory using
+		 * GFP_ATOMIC and returned.
+		 */
+		if (unlikely(atomic_read(&rmnet_mhi_ptr->rx_pool_len) >=
+				rmnet_mhi_ptr->rx_buffers_max)) {
+			spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+
+		/* pm_lock guards mhi_enabled against concurrent changes */
+		read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+		if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) {
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				"!interface is disabled\n");
+			dev_kfree_skb_any(skb);
+			read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+			spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
+			return -EIO;
+		}
+
+		ret = mhi_queue_xfer(rmnet_mhi_ptr->rx_client_handle,
+			skb->data,
+			skb_priv->dma_size,
+			MHI_EOT);
+		if (unlikely(ret != 0)) {
+			rmnet_log(rmnet_mhi_ptr,
+				MSG_CRITICAL,
+				"mhi_queue_xfer failed, error %d", ret);
+			read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+			spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
+			dev_kfree_skb_any(skb);
+			return ret;
+		}
+		/* buffer is tracked until the matching RX completion */
+		skb_queue_tail(&rmnet_mhi_ptr->rx_buffers, skb);
+		atomic_inc(&rmnet_mhi_ptr->rx_pool_len);
+		read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+		spin_unlock_bh(&rmnet_mhi_ptr->alloc_lock);
+	}
+
+	return 0;
+}
+
+/* Background worker: keep retrying RX pool replenishment for up to a
+ * minute after a fast-path allocation failure.
+ */
+static void rmnet_mhi_alloc_work(struct work_struct *work)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr = container_of(work,
+			struct rmnet_mhi_private,
+			alloc_work);
+	/* sleep about 1 sec and retry, that should be enough time
+	 * for system to reclaim freed memory back.
+	 */
+	const int sleep_ms = 1000;
+	int retry = 60;
+	int ret;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n");
+	do {
+		ret = rmnet_alloc_rx(rmnet_mhi_ptr,
+				rmnet_mhi_ptr->allocation_flags);
+		if (ret != -ENOMEM)
+			break;
+		msleep(sleep_ms);
+	} while (--retry);
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exit with status:%d retry:%d\n",
+		ret, retry);
+}
+
+/* NAPI poll handler: drain completed RX transfers from the MHI RX
+ * channel, reassemble fragments, push packets to the stack and
+ * replenish the RX pool. Returns the number of packets processed
+ * (at most @budget).
+ */
+static int rmnet_mhi_poll(struct napi_struct *napi, int budget)
+{
+	int received_packets = 0;
+	struct net_device *dev = napi->dev;
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+	int res = 0;
+	bool should_reschedule = true;
+	struct sk_buff *skb;
+	struct mhi_skb_priv *skb_priv;
+	int r;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");
+
+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO, "interface is disabled!\n");
+		read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+		return 0;
+	}
+	while (received_packets < budget) {
+		struct mhi_result *result =
+			mhi_poll(rmnet_mhi_ptr->rx_client_handle);
+		if (result->transaction_status == -ENOTCONN) {
+			rmnet_log(rmnet_mhi_ptr,
+				MSG_INFO,
+				"Transaction status not ready, continuing\n");
+			break;
+		} else if (result->transaction_status != 0 &&
+			result->transaction_status != -EOVERFLOW) {
+			rmnet_log(rmnet_mhi_ptr,
+				MSG_CRITICAL,
+				"mhi_poll failed, error %d\n",
+				result->transaction_status);
+			break;
+		}
+
+		/* Nothing more to read, or out of buffers in MHI layer */
+		if (unlikely(!result->buf_addr || !result->bytes_xferd)) {
+			should_reschedule = false;
+			break;
+		}
+
+		atomic_dec(&rmnet_mhi_ptr->rx_pool_len);
+		skb = skb_dequeue(&(rmnet_mhi_ptr->rx_buffers));
+		if (unlikely(!skb)) {
+			rmnet_log(rmnet_mhi_ptr,
+				MSG_CRITICAL,
+				"No RX buffers to match");
+			break;
+		}
+
+		skb_priv = (struct mhi_skb_priv *)(skb->cb);
+
+		/* Setup the tail to the end of data */
+		skb_put(skb, result->bytes_xferd);
+
+		skb->dev = dev;
+		skb->protocol = rmnet_mhi_ip_type_trans(skb);
+
+		/* -EOVERFLOW marks a partial transfer: more pieces follow */
+		if (result->transaction_status == -EOVERFLOW)
+			r = rmnet_mhi_process_fragment(rmnet_mhi_ptr, skb, 1);
+		else
+			r = rmnet_mhi_process_fragment(rmnet_mhi_ptr, skb, 0);
+		if (r) {
+			rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
+				"Failed to process fragmented packet ret %d",
+				r);
+			BUG();
+		}
+
+		/* Statistics */
+		received_packets++;
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += result->bytes_xferd;
+
+	} /* while (received_packets < budget) or any other error */
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+
+	/* Queue new buffers */
+	res = rmnet_alloc_rx(rmnet_mhi_ptr, GFP_ATOMIC);
+
+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
+		if (res == -ENOMEM) {
+			/* defer replenishing to the background worker */
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				"out of mem, queuing bg worker\n");
+			rmnet_mhi_ptr->alloc_fail++;
+			schedule_work(&rmnet_mhi_ptr->alloc_work);
+		}
+
+		napi_complete(napi);
+
+		/* We got a NULL descriptor back */
+		if (!should_reschedule) {
+			if (test_and_clear_bit(IRQ_MASKED_BIT,
+				&rmnet_mhi_ptr->flags))
+				mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
+			mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, true);
+			rmnet_mhi_ptr->wake_count--;
+		} else {
+			if (received_packets == budget)
+				rmnet_mhi_ptr->debug.rx_napi_budget_overflow++;
+			napi_reschedule(napi);
+		}
+
+		rmnet_mhi_ptr->debug.rx_napi_skb_burst_min =
+			min((u64)received_packets,
+			rmnet_mhi_ptr->debug.rx_napi_skb_burst_min);
+
+		rmnet_mhi_ptr->debug.rx_napi_skb_burst_max =
+			max((u64)received_packets,
+			rmnet_mhi_ptr->debug.rx_napi_skb_burst_max);
+	}
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE,
+		"Exited, polled %d pkts\n", received_packets);
+	return received_packets;
+}
+
+/* Size the TX/RX rings from the MHI channel descriptors and fill the
+ * RX pool for the first time.
+ */
+static int rmnet_mhi_init_inbound(struct rmnet_mhi_private *rmnet_mhi_ptr)
+{
+	int rc;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered\n");
+	rmnet_mhi_ptr->tx_buffers_max =
+		mhi_get_max_desc(rmnet_mhi_ptr->tx_client_handle);
+	rmnet_mhi_ptr->rx_buffers_max =
+		mhi_get_max_desc(rmnet_mhi_ptr->rx_client_handle);
+	atomic_set(&rmnet_mhi_ptr->rx_pool_len, 0);
+	rc = rmnet_alloc_rx(rmnet_mhi_ptr, rmnet_mhi_ptr->allocation_flags);
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited with %d\n", rc);
+	return rc;
+}
+
+/* TX completion callback from MHI: free every skb queued up to (and
+ * including) the one whose data matches the completed buffer address,
+ * update statistics, and re-open the TX queue.
+ */
+static void rmnet_mhi_tx_cb(struct mhi_result *result)
+{
+	struct net_device *dev;
+	struct rmnet_mhi_private *rmnet_mhi_ptr;
+	unsigned long burst_counter = 0;
+	unsigned long flags, pm_flags;
+
+	rmnet_mhi_ptr = result->user_data;
+	dev = rmnet_mhi_ptr->dev;
+	rmnet_mhi_ptr->debug.tx_interrupts_count++;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");
+	if (!result->buf_addr || !result->bytes_xferd)
+		return;
+	/* Free the buffers which are TX'd up to the provided address */
+	while (!skb_queue_empty(&(rmnet_mhi_ptr->tx_buffers))) {
+		struct sk_buff *skb =
+			skb_dequeue(&(rmnet_mhi_ptr->tx_buffers));
+		unsigned int len;
+
+		if (!skb) {
+			rmnet_log(rmnet_mhi_ptr,
+				MSG_CRITICAL,
+				"NULL buffer returned, error");
+			break;
+		}
+		if (skb->data == result->buf_addr) {
+			/* reached the completed transfer - done */
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		/* capture the length BEFORE freeing; reading skb->len
+		 * after dev_kfree_skb_any() is a use-after-free
+		 */
+		len = skb->len;
+		dev_kfree_skb_any(skb);
+		burst_counter++;
+
+		/* Update statistics */
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += len;
+	} /* While TX queue is not empty */
+
+	rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_min =
+		min((u64)burst_counter,
+		rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_min);
+
+	rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_max =
+		max((u64)burst_counter,
+		rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_max);
+
+	/* In case we couldn't write again, now we can! */
+	read_lock_irqsave(&rmnet_mhi_ptr->pm_lock, pm_flags);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
+		spin_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock, flags);
+		rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Waking up queue\n");
+		netif_wake_queue(dev);
+		spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock,
+					flags);
+	}
+	read_unlock_irqrestore(&rmnet_mhi_ptr->pm_lock, pm_flags);
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
+}
+
+/* RX transfer callback (MHI_CB_XFER on the RX channel).
+ *
+ * Under the PM read lock (so SSR teardown cannot flip mhi_enabled
+ * concurrently): if NAPI can be scheduled, mask the RX interrupt,
+ * vote against low power mode (wake_count++) and kick NAPI.
+ * rmnet_mhi_poll() later unmasks the IRQ and drops the LPM vote.
+ */
+static void rmnet_mhi_rx_cb(struct mhi_result *result)
+{
+	struct net_device *dev;
+	struct rmnet_mhi_private *rmnet_mhi_ptr;
+	unsigned long flags;
+
+	rmnet_mhi_ptr = result->user_data;
+	dev = rmnet_mhi_ptr->dev;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");
+	rmnet_mhi_ptr->debug.rx_interrupts_count++;
+	read_lock_irqsave(&rmnet_mhi_ptr->pm_lock, flags);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
+		if (napi_schedule_prep(&rmnet_mhi_ptr->napi)) {
+			/* Mask the IRQ at most once; poll unmasks it */
+			if (!test_and_set_bit(IRQ_MASKED_BIT,
+					      &rmnet_mhi_ptr->flags))
+				mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
+			mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false);
+			rmnet_mhi_ptr->wake_count++;
+			__napi_schedule(&rmnet_mhi_ptr->napi);
+		} else {
+			/* NAPI already scheduled or running */
+			rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++;
+		}
+	}
+	read_unlock_irqrestore(&rmnet_mhi_ptr->pm_lock, flags);
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
+}
+
+/* ndo_open: start (or keep stopped) the TX queue depending on whether
+ * a TX channel exists, then schedule one NAPI pass to drain any packets
+ * that accumulated in the transport while the interface was down.
+ * Always returns 0.
+ */
+static int rmnet_mhi_open(struct net_device *dev)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+		  "Opened net dev interface for MHI chans %d and %d\n",
+		  rmnet_mhi_ptr->tx_channel,
+		  rmnet_mhi_ptr->rx_channel);
+
+	/* tx queue may not necessarily be stopped already
+	 * so stop the queue if tx path is not enabled
+	 */
+	if (!rmnet_mhi_ptr->tx_client_handle)
+		netif_stop_queue(dev);
+	else
+		netif_start_queue(dev);
+
+	/* Poll to check if any buffers are accumulated in the
+	 * transport buffers
+	 */
+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (likely(rmnet_mhi_ptr->mhi_enabled)) {
+		if (napi_schedule_prep(&rmnet_mhi_ptr->napi)) {
+			/* Same mask + LPM-vote handshake as the RX cb;
+			 * rmnet_mhi_poll() undoes both when done.
+			 */
+			if (!test_and_set_bit(IRQ_MASKED_BIT,
+					      &rmnet_mhi_ptr->flags)) {
+				mhi_mask_irq(rmnet_mhi_ptr->rx_client_handle);
+			}
+			mhi_set_lpm(rmnet_mhi_ptr->rx_client_handle, false);
+			rmnet_mhi_ptr->wake_count++;
+			__napi_schedule(&rmnet_mhi_ptr->napi);
+		} else {
+			rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq++;
+		}
+	}
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+	return 0;
+
+}
+
+/* Tear down the RX side after an MHI disable/SSR notification: stop
+ * NAPI, release and unmap all queued RX buffers, and unmask the RX
+ * interrupt if a pending NAPI run had left it masked.
+ * NOTE(review): called from rmnet_mhi_cb() on the rx-channel error
+ * path after mhi_enabled has been cleared — confirm against full file.
+ */
+static int rmnet_mhi_disable(struct rmnet_mhi_private *rmnet_mhi_ptr)
+{
+	napi_disable(&(rmnet_mhi_ptr->napi));
+	rmnet_mhi_ptr->rx_enabled = 0;
+	rmnet_mhi_internal_clean_unmap_buffers(rmnet_mhi_ptr->dev,
+					       &rmnet_mhi_ptr->rx_buffers,
+					       DMA_FROM_DEVICE);
+	if (test_and_clear_bit(IRQ_MASKED_BIT, &rmnet_mhi_ptr->flags))
+		mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
+
+	return 0;
+}
+
+/* ndo_stop: halt the TX queue and, if the RX interrupt was left masked
+ * by an unfinished NAPI cycle, unmask it so the channel stays usable.
+ * Always returns 0.
+ */
+static int rmnet_mhi_stop(struct net_device *dev)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+
+	netif_stop_queue(dev);
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Entered\n");
+	/* A masked IRQ here means NAPI never got to unmask it */
+	if (test_and_clear_bit(IRQ_MASKED_BIT, &rmnet_mhi_ptr->flags)) {
+		mhi_unmask_irq(rmnet_mhi_ptr->rx_client_handle);
+		rmnet_log(rmnet_mhi_ptr, MSG_ERROR,
+			  "IRQ was masked, unmasking...\n");
+	}
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
+	return 0;
+}
+
+/* ndo_change_mtu: accept any MTU in [0, max_mtu], reject the rest. */
+static int rmnet_mhi_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+
+	if (new_mtu >= 0 && new_mtu <= rmnet_mhi_ptr->max_mtu) {
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* ndo_start_xmit: queue one skb on the MHI TX channel.
+ *
+ * Returns NETDEV_TX_OK when the skb was queued (or silently dropped
+ * during SSR, when MHI is disabled), NETDEV_TX_BUSY when the TX ring is
+ * full or the queue attempt failed — in both busy cases the netdev
+ * queue is stopped and rmnet_mhi_tx_cb() wakes it again on completion.
+ */
+static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+	int res = 0;
+	unsigned long flags;
+	struct mhi_skb_priv *tx_priv;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE,
+		  "Entered chan %d\n", rmnet_mhi_ptr->tx_channel);
+
+	/* Stash DMA bookkeeping in the skb control block */
+	tx_priv = (struct mhi_skb_priv *)(skb->cb);
+	tx_priv->dma_size = skb->len;
+	tx_priv->dma_addr = 0;
+	read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+	if (unlikely(!rmnet_mhi_ptr->mhi_enabled)) {
+		/* Only reason interface could be disabled and we get data
+		 * is due to an SSR. We do not want to stop the queue and
+		 * return error. instead we will flush all the uplink packets
+		 * and return successful
+		 */
+		res = NETDEV_TX_OK;
+		dev_kfree_skb_any(skb);
+		goto mhi_xmit_exit;
+	}
+
+	/* No free descriptors: stop the queue until the TX cb wakes it */
+	if (mhi_get_free_desc(rmnet_mhi_ptr->tx_client_handle) <= 0) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_VERBOSE,
+			  "Stopping Queue\n");
+		spin_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock,
+				  flags);
+		rmnet_mhi_ptr->debug.tx_ring_full_count++;
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock,
+				       flags);
+		res = NETDEV_TX_BUSY;
+		goto mhi_xmit_exit;
+	}
+	res = mhi_queue_xfer(rmnet_mhi_ptr->tx_client_handle,
+			     skb->data,
+			     skb->len,
+			     MHI_EOT);
+
+	if (res != 0) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_CRITICAL,
+			  "Failed to queue with reason:%d\n",
+			  res);
+		spin_lock_irqsave(&rmnet_mhi_ptr->out_chan_full_lock,
+				  flags);
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock,
+				       flags);
+		res = NETDEV_TX_BUSY;
+		goto mhi_xmit_exit;
+	}
+	res = NETDEV_TX_OK;
+	/* Remember the skb so the TX completion cb can free it */
+	skb_queue_tail(&(rmnet_mhi_ptr->tx_buffers), skb);
+	dev->trans_start = jiffies;
+	rmnet_mhi_ptr->debug.tx_queued_packets_count++;
+mhi_xmit_exit:
+	read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+	rmnet_log(rmnet_mhi_ptr, MSG_VERBOSE, "Exited\n");
+	return res;
+}
+
+/* Handle RMNET_IOCTL_EXTENDED sub-commands (MRU get/set, EPID query,
+ * driver name, sleep state, ...) for the rmnet interface.
+ *
+ * Bug fix: copy_from_user()/copy_to_user() return the number of bytes
+ * NOT copied. The original propagated that positive count as the ioctl
+ * return value; user-visible ioctl failures must be negative errnos,
+ * so copy failures now return -EFAULT.
+ */
+static int rmnet_mhi_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
+{
+	struct rmnet_ioctl_extended_s ext_cmd;
+	int rc = 0;
+	struct rmnet_mhi_private *rmnet_mhi_ptr =
+		*(struct rmnet_mhi_private **)netdev_priv(dev);
+
+	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
+			    sizeof(struct rmnet_ioctl_extended_s));
+
+	if (rc) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_CRITICAL,
+			  "copy_from_user failed ,error %d",
+			  rc);
+		return -EFAULT;
+	}
+
+	switch (ext_cmd.extended_ioctl) {
+	case RMNET_IOCTL_SET_MRU:
+		/* MRU of 0 or above the DT-configured max is rejected */
+		if (!ext_cmd.u.data ||
+		    ext_cmd.u.data > rmnet_mhi_ptr->max_mru) {
+			rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
+				  "Can't set MRU, value:%u is invalid max:%u\n",
+				  ext_cmd.u.data, rmnet_mhi_ptr->max_mru);
+			return -EINVAL;
+		}
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_INFO,
+			  "MRU change request to 0x%x\n",
+			  ext_cmd.u.data);
+		rmnet_mhi_ptr->mru = ext_cmd.u.data;
+		break;
+	case RMNET_IOCTL_GET_EPID:
+		ext_cmd.u.data =
+			mhi_get_epid(rmnet_mhi_ptr->tx_client_handle);
+		break;
+	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+		ext_cmd.u.data = 0;
+		break;
+	case RMNET_IOCTL_GET_DRIVER_NAME:
+		strlcpy(ext_cmd.u.if_name, rmnet_mhi_ptr->interface_name,
+			sizeof(ext_cmd.u.if_name));
+		break;
+	case RMNET_IOCTL_SET_SLEEP_STATE:
+		read_lock_bh(&rmnet_mhi_ptr->pm_lock);
+		if (rmnet_mhi_ptr->mhi_enabled &&
+		    rmnet_mhi_ptr->tx_client_handle != NULL) {
+			/* data != 0 -> allow LPM (drop a wake vote) */
+			rmnet_mhi_ptr->wake_count += (ext_cmd.u.data) ? -1 : 1;
+			mhi_set_lpm(rmnet_mhi_ptr->tx_client_handle,
+				    ext_cmd.u.data);
+		} else {
+			rmnet_log(rmnet_mhi_ptr, MSG_ERROR,
+				  "Cannot set LPM value, MHI is not up.\n");
+			read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+			return -ENODEV;
+		}
+		read_unlock_bh(&rmnet_mhi_ptr->pm_lock);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+
+	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
+			  sizeof(struct rmnet_ioctl_extended_s));
+
+	if (rc) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_CRITICAL,
+			  "copy_to_user failed, error %d\n",
+			  rc);
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+/* Top-level ndo_do_ioctl dispatcher for rmnet private ioctls.
+ * The interface is fixed raw-IP, so the LLP/opmode queries always
+ * report RMNET_MODE_LLP_IP; unknown commands are deliberately accepted.
+ */
+static int rmnet_mhi_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_ioctl_data_s ioctl_data;
+	int rc = 0;
+
+	switch (cmd) {
+	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
+	case RMNET_IOCTL_GET_OPMODE:	/* Get operation mode */
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+				 sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	case RMNET_IOCTL_SET_QOS_ENABLE:
+		rc = -EINVAL;
+		break;
+	case RMNET_IOCTL_EXTENDED:
+		rc = rmnet_mhi_ioctl_extended(dev, ifr);
+		break;
+	case RMNET_IOCTL_SET_LLP_IP:	/* Set RAWIP protocol */
+	case RMNET_IOCTL_SET_QOS_DISABLE:
+	case RMNET_IOCTL_OPEN:
+	case RMNET_IOCTL_CLOSE:
+	default:
+		/* Accepted and ignored; don't fail any IOCTL right now */
+		rc = 0;
+		break;
+	}
+
+	return rc;
+}
+
+/* Net device operations for the raw-IP rmnet interface.
+ * Fix: the original initialized the unused function-pointer members
+ * with integer 0; NULL is the idiomatic null-pointer constant.
+ */
+static const struct net_device_ops rmnet_mhi_ops_ip = {
+	.ndo_open = rmnet_mhi_open,
+	.ndo_stop = rmnet_mhi_stop,
+	.ndo_start_xmit = rmnet_mhi_xmit,
+	.ndo_do_ioctl = rmnet_mhi_ioctl,
+	.ndo_change_mtu = rmnet_mhi_change_mtu,
+	.ndo_set_mac_address = NULL,
+	.ndo_validate_addr = NULL,
+};
+
+/* alloc_netdev() setup callback: configure the device as a raw-IP
+ * (no link-layer header, no broadcast/multicast) interface on top of
+ * the MHI transport.
+ * Fix: dev->header_ops is a pointer and is now assigned NULL, not 0.
+ */
+static void rmnet_mhi_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &rmnet_mhi_ops_ip;
+	ether_setup(dev);
+
+	/* set this after calling ether_setup */
+	dev->header_ops = NULL; /* No header */
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->mtu = MHI_DEFAULT_MTU;
+	dev->addr_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+	dev->watchdog_timeo = WATCHDOG_TIMEOUT;
+}
+
+/* Bring the interface up once the required MHI channel(s) report
+ * MHI_CB_MHI_ENABLED: open the TX/RX channels, allocate and register
+ * the net_device on first enable, mark MHI usable under the PM write
+ * lock, pre-fill the RX ring and enable NAPI.
+ *
+ * Returns 0 on success or a negative errno; channels opened here are
+ * closed again on the error paths.
+ */
+static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
+{
+	int ret = 0;
+	struct rmnet_mhi_private **rmnet_mhi_ctxt = NULL;
+	int r = 0;
+	char ifalias[IFALIASZ];
+	char ifname[IFNAMSIZ];
+	struct mhi_client_handle *client_handle = NULL;
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Entered.\n");
+
+	/* Reset all debug counters for this (re)enable cycle; the two
+	 * burst "min" trackers are re-seeded with UINT_MAX below so
+	 * min() can latch the first real value.
+	 */
+	rmnet_mhi_ptr->debug.tx_interrupts_count = 0;
+	rmnet_mhi_ptr->debug.rx_interrupts_count = 0;
+	rmnet_mhi_ptr->debug.rx_interrupts_in_masked_irq = 0;
+	rmnet_mhi_ptr->debug.rx_napi_skb_burst_min = 0;
+	rmnet_mhi_ptr->debug.rx_napi_skb_burst_max = 0;
+	rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_min = 0;
+	rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_max = 0;
+	rmnet_mhi_ptr->debug.tx_ring_full_count = 0;
+	rmnet_mhi_ptr->debug.tx_queued_packets_count = 0;
+	rmnet_mhi_ptr->debug.rx_napi_budget_overflow = 0;
+	rmnet_mhi_ptr->debug.rx_napi_skb_burst_min = UINT_MAX;
+	rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_min = UINT_MAX;
+
+	skb_queue_head_init(&(rmnet_mhi_ptr->tx_buffers));
+	skb_queue_head_init(&(rmnet_mhi_ptr->rx_buffers));
+
+	if (rmnet_mhi_ptr->tx_client_handle != NULL) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_INFO,
+			  "Opening TX channel\n");
+		r = mhi_open_channel(rmnet_mhi_ptr->tx_client_handle);
+		if (r != 0) {
+			rmnet_log(rmnet_mhi_ptr,
+				  MSG_CRITICAL,
+				  "Failed to start TX chan ret %d\n",
+				  r);
+			goto mhi_tx_chan_start_fail;
+		}
+
+		client_handle = rmnet_mhi_ptr->tx_client_handle;
+	}
+	if (rmnet_mhi_ptr->rx_client_handle != NULL) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_INFO,
+			  "Opening RX channel\n");
+		r = mhi_open_channel(rmnet_mhi_ptr->rx_client_handle);
+		if (r != 0) {
+			rmnet_log(rmnet_mhi_ptr,
+				  MSG_CRITICAL,
+				  "Failed to start RX chan ret %d\n",
+				  r);
+			goto mhi_rx_chan_start_fail;
+		}
+		/* Both tx & rx client handle contain same device info */
+		client_handle = rmnet_mhi_ptr->rx_client_handle;
+	}
+
+	if (!client_handle) {
+		ret = -EINVAL;
+		goto net_dev_alloc_fail;
+	}
+
+
+	/* First enable: allocate and register the net_device. On later
+	 * SSR re-enables the existing netdev is reused.
+	 */
+	if (!rmnet_mhi_ptr->dev) {
+		snprintf(ifalias, sizeof(ifalias),
+			 "%s_%04x_%02u.%02u.%02u_%u",
+			 rmnet_mhi_ptr->interface_name,
+			 client_handle->dev_id,
+			 client_handle->domain,
+			 client_handle->bus,
+			 client_handle->slot,
+			 rmnet_mhi_ptr->dev_id);
+
+		snprintf(ifname, sizeof(ifname), "%s%%d",
+			 rmnet_mhi_ptr->interface_name);
+
+		rtnl_lock();
+		rmnet_mhi_ptr->dev = alloc_netdev(
+				sizeof(struct rmnet_mhi_private *),
+				ifname, NET_NAME_PREDICTABLE, rmnet_mhi_setup);
+
+		if (!rmnet_mhi_ptr->dev) {
+			rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
+				  "Network device allocation failed\n");
+			ret = -ENOMEM;
+			goto net_dev_alloc_fail;
+		}
+		SET_NETDEV_DEV(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->pdev->dev);
+		dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias));
+		rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev);
+		rtnl_unlock();
+		/* priv area holds a back-pointer to this context */
+		*rmnet_mhi_ctxt = rmnet_mhi_ptr;
+
+		/* Pick GFP_DMA only when the device can't address all
+		 * of memory (dma_set_mask failure).
+		 */
+		ret = dma_set_mask(&rmnet_mhi_ptr->dev->dev, MHI_DMA_MASK);
+		if (ret)
+			rmnet_mhi_ptr->allocation_flags = GFP_KERNEL;
+		else
+			rmnet_mhi_ptr->allocation_flags = GFP_DMA;
+
+		netif_napi_add(rmnet_mhi_ptr->dev, &rmnet_mhi_ptr->napi,
+			       rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE);
+
+		ret = register_netdev(rmnet_mhi_ptr->dev);
+		if (ret) {
+			rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
+				  "Network device registration failed\n");
+			goto net_dev_reg_fail;
+		}
+	}
+
+	write_lock_irq(&rmnet_mhi_ptr->pm_lock);
+	rmnet_mhi_ptr->mhi_enabled = 1;
+	write_unlock_irq(&rmnet_mhi_ptr->pm_lock);
+
+	r = rmnet_mhi_init_inbound(rmnet_mhi_ptr);
+	if (r) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "Failed to init inbound ret %d\n", r);
+	}
+
+	napi_enable(&(rmnet_mhi_ptr->napi));
+
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited.\n");
+
+	return 0;
+
+/* NOTE(review): after free_netdev() below, rmnet_mhi_ptr->dev is reset
+ * to NULL only inside the rx_client_handle branch; with a TX-only
+ * config the pointer would be left dangling — verify intended.
+ */
+net_dev_reg_fail:
+	netif_napi_del(&(rmnet_mhi_ptr->napi));
+	free_netdev(rmnet_mhi_ptr->dev);
+net_dev_alloc_fail:
+	if (rmnet_mhi_ptr->rx_client_handle) {
+		mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
+		rmnet_mhi_ptr->dev = NULL;
+	}
+mhi_rx_chan_start_fail:
+	if (rmnet_mhi_ptr->tx_client_handle)
+		mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
+mhi_tx_chan_start_fail:
+	rmnet_log(rmnet_mhi_ptr, MSG_INFO, "Exited ret %d.\n", ret);
+	return ret;
+}
+
+/* Central MHI event callback, registered per channel in probe().
+ *
+ * Handles three event groups:
+ *  - DISABLED/SHUTDOWN/SYS_ERROR: mark MHI unusable under the PM write
+ *    lock, tear down the affected direction, and drop all LPM wake
+ *    votes once both directions are down.
+ *  - ENABLED: record the direction as up and enable the interface when
+ *    every configured channel is up.
+ *  - XFER: dispatch to the RX or TX completion handler.
+ */
+static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr;
+	struct mhi_result *result;
+	char ifalias[IFALIASZ];
+	int r = 0;
+
+	if (!cb_info || !cb_info->result) {
+		pr_err("%s: Invalid data in MHI callback\n", __func__);
+		return;
+	}
+
+	result = cb_info->result;
+	rmnet_mhi_ptr = result->user_data;
+
+	switch (cb_info->cb_reason) {
+	case MHI_CB_MHI_DISABLED:
+	case MHI_CB_MHI_SHUTDOWN:
+	case MHI_CB_SYS_ERROR:
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "Got MHI_SYS_ERROR notification. Stopping stack\n");
+
+		/* Disable interface on first notification. Long
+		 * as we set mhi_enabled = 0, we gurantee rest of
+		 * driver will not touch any critical data.
+		 */
+		snprintf(ifalias, sizeof(ifalias), "%s", "unidentified_netdev");
+		write_lock_irq(&rmnet_mhi_ptr->pm_lock);
+		rmnet_mhi_ptr->mhi_enabled = 0;
+		write_unlock_irq(&rmnet_mhi_ptr->pm_lock);
+		/* Set unidentified_net_dev string to ifalias
+		 * on error notification
+		 */
+		rtnl_lock();
+		dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias));
+		rtnl_unlock();
+
+		if (cb_info->chan == rmnet_mhi_ptr->rx_channel) {
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				  "Receive MHI_DISABLE notification for rx path\n");
+			if (rmnet_mhi_ptr->dev)
+				rmnet_mhi_disable(rmnet_mhi_ptr);
+		} else {
+			rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+				  "Receive MHI_DISABLE notification for tx path\n");
+			rmnet_mhi_ptr->tx_enabled = 0;
+			if (rmnet_mhi_ptr->dev)
+				rmnet_mhi_internal_clean_unmap_buffers(
+						rmnet_mhi_ptr->dev,
+						&rmnet_mhi_ptr->tx_buffers,
+						DMA_TO_DEVICE);
+		}
+
+		/* Remove all votes disabling low power mode */
+		if (!rmnet_mhi_ptr->tx_enabled && !rmnet_mhi_ptr->rx_enabled) {
+			struct mhi_client_handle *handle =
+				rmnet_mhi_ptr->rx_client_handle;
+
+			if (!handle)
+				handle = rmnet_mhi_ptr->tx_client_handle;
+			while (rmnet_mhi_ptr->wake_count) {
+				mhi_set_lpm(handle, true);
+				rmnet_mhi_ptr->wake_count--;
+			}
+		}
+		break;
+	case MHI_CB_MHI_ENABLED:
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "Got MHI_ENABLED notification. Starting stack\n");
+		if (cb_info->chan == rmnet_mhi_ptr->rx_channel)
+			rmnet_mhi_ptr->rx_enabled = 1;
+		else
+			rmnet_mhi_ptr->tx_enabled = 1;
+
+		/* Enable only when all channels that exist are up:
+		 * both, TX-only, or RX-only configurations.
+		 */
+		if ((rmnet_mhi_ptr->tx_enabled && rmnet_mhi_ptr->rx_enabled) ||
+		    (rmnet_mhi_ptr->tx_enabled &&
+		     !rmnet_mhi_ptr->rx_client_handle) ||
+		    (rmnet_mhi_ptr->rx_enabled &&
+		     !rmnet_mhi_ptr->tx_client_handle)) {
+			rmnet_log(rmnet_mhi_ptr,
+				  MSG_INFO,
+				  "enabling iface.\n");
+			r = rmnet_mhi_enable_iface(rmnet_mhi_ptr);
+			if (r)
+				rmnet_log(rmnet_mhi_ptr,
+					  MSG_CRITICAL,
+					  "Failed to enable iface for chan %d\n",
+					  cb_info->chan);
+			else
+				rmnet_log(rmnet_mhi_ptr,
+					  MSG_INFO,
+					  "Enabled iface for chan %d\n",
+					  cb_info->chan);
+		}
+		break;
+	case MHI_CB_XFER:
+		if (cb_info->chan == rmnet_mhi_ptr->rx_channel)
+			rmnet_mhi_rx_cb(cb_info->result);
+		else
+			rmnet_mhi_tx_cb(cb_info->result);
+		break;
+	default:
+		break;
+	}
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Root debugfs directory shared by all rmnet_mhi instances.
+ * Fix: made static — a non-static global named "dentry" pollutes the
+ * kernel-wide namespace with an extremely collision-prone symbol.
+ */
+static struct dentry *dentry;
+
+/* Create the per-instance debugfs directory with log-level, MRU and
+ * debug-statistics nodes. Silently returns on any debugfs failure.
+ */
+static void rmnet_mhi_create_debugfs(struct rmnet_mhi_private *rmnet_mhi_ptr)
+{
+	char node_name[32];
+	int i;
+	const umode_t mode = (S_IRUSR | S_IWUSR);
+	struct dentry *file;
+	struct mhi_client_handle *client_handle;
+
+	/* Name -> counter mapping for the u64 stats nodes below */
+	const struct {
+		char *name;
+		u64 *ptr;
+	} debugfs_table[] = {
+		{
+			"tx_interrupts_count",
+			&rmnet_mhi_ptr->debug.tx_interrupts_count
+		},
+		{
+			"rx_interrupts_count",
+			&rmnet_mhi_ptr->debug.rx_interrupts_count
+		},
+		{
+			"tx_ring_full_count",
+			&rmnet_mhi_ptr->debug.tx_ring_full_count
+		},
+		{
+			"tx_queued_packets_count",
+			&rmnet_mhi_ptr->debug.tx_queued_packets_count
+		},
+		{
+			"rx_interrupts_in_masked_irq",
+			&rmnet_mhi_ptr->
+			debug.rx_interrupts_in_masked_irq
+		},
+		{
+			"rx_napi_skb_burst_min",
+			&rmnet_mhi_ptr->debug.rx_napi_skb_burst_min
+		},
+		{
+			"rx_napi_skb_burst_max",
+			&rmnet_mhi_ptr->debug.rx_napi_skb_burst_max
+		},
+		{
+			"tx_cb_skb_free_burst_min",
+			&rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_min
+		},
+		{
+			"tx_cb_skb_free_burst_max",
+			&rmnet_mhi_ptr->debug.tx_cb_skb_free_burst_max
+		},
+		{
+			"rx_napi_budget_overflow",
+			&rmnet_mhi_ptr->debug.rx_napi_budget_overflow
+		},
+		{
+			"rx_fragmentation",
+			&rmnet_mhi_ptr->debug.rx_fragmentation
+		},
+		{
+			NULL, NULL
+		},
+	};
+
+	/* Both tx & rx client handle contain same device info */
+	client_handle = rmnet_mhi_ptr->rx_client_handle;
+	if (!client_handle)
+		client_handle = rmnet_mhi_ptr->tx_client_handle;
+
+	snprintf(node_name,
+		 sizeof(node_name),
+		 "%s_%04x_%02u.%02u.%02u_%u",
+		 rmnet_mhi_ptr->interface_name,
+		 client_handle->dev_id,
+		 client_handle->domain,
+		 client_handle->bus,
+		 client_handle->slot,
+		 rmnet_mhi_ptr->dev_id);
+
+	/* Nothing to hang the instance dir on if the root failed */
+	if (IS_ERR_OR_NULL(dentry))
+		return;
+
+	rmnet_mhi_ptr->dentry = debugfs_create_dir(node_name, dentry);
+	if (IS_ERR_OR_NULL(rmnet_mhi_ptr->dentry))
+		return;
+
+	file = debugfs_create_u32("msg_lvl",
+				  mode,
+				  rmnet_mhi_ptr->dentry,
+				  (u32 *)&rmnet_mhi_ptr->debug.rmnet_msg_lvl);
+	if (IS_ERR_OR_NULL(file))
+		return;
+
+	file = debugfs_create_u32("ipc_log_lvl",
+				  mode,
+				  rmnet_mhi_ptr->dentry,
+				  (u32 *)&rmnet_mhi_ptr->
+				  debug.rmnet_ipc_log_lvl);
+	if (IS_ERR_OR_NULL(file))
+		return;
+
+	file = debugfs_create_u32("mru",
+				  mode,
+				  rmnet_mhi_ptr->dentry,
+				  &rmnet_mhi_ptr->mru);
+	if (IS_ERR_OR_NULL(file))
+		return;
+
+	/* Add debug stats table */
+	for (i = 0; debugfs_table[i].name; i++) {
+		file = debugfs_create_u64(debugfs_table[i].name,
+					  mode,
+					  rmnet_mhi_ptr->dentry,
+					  debugfs_table[i].ptr);
+		if (IS_ERR_OR_NULL(file))
+			return;
+	}
+}
+
+/* Create the driver-wide debugfs root (NULL parent = debugfs top level;
+ * the original passed integer 0 for the parent pointer).
+ */
+static void rmnet_mhi_create_debugfs_dir(void)
+{
+	dentry = debugfs_create_dir(RMNET_MHI_DRIVER_NAME, NULL);
+}
+#else
+/* CONFIG_DEBUG_FS disabled: debugfs hooks compile to no-ops */
+static void rmnet_mhi_create_debugfs(struct rmnet_mhi_private *rmnet_mhi_ptr)
+{
+}
+
+static void rmnet_mhi_create_debugfs_dir(void)
+{
+}
+#endif
+
+/* Platform probe: parse the DT node (MRU/MTU limits, cell-index,
+ * interface name, TX/RX channel numbers), register the configured MHI
+ * channels, set up IPC logging and debugfs, and add the context to the
+ * global list. The net_device itself is created later, on the first
+ * MHI_CB_MHI_ENABLED notification.
+ *
+ * Returns 0, -EPROBE_DEFER while MHI is not ready, or a negative errno.
+ *
+ * NOTE(review): on the probe_fail path, channels already registered via
+ * mhi_register_channel() are not deregistered before kfree — looks like
+ * a leak when e.g. RX registration fails after TX succeeded; confirm.
+ */
+static int rmnet_mhi_probe(struct platform_device *pdev)
+{
+	int rc;
+	u32 channel;
+	struct rmnet_mhi_private *rmnet_mhi_ptr;
+	struct mhi_client_handle *client_handle = NULL;
+	char node_name[32];
+	struct mhi_client_info_t client_info;
+
+	if (unlikely(!pdev->dev.of_node))
+		return -ENODEV;
+
+	/* Defer until the MHI core has brought the device up */
+	if (!mhi_is_device_ready(&pdev->dev, "qcom,mhi"))
+		return -EPROBE_DEFER;
+
+	pdev->id = of_alias_get_id(pdev->dev.of_node, "mhi_rmnet");
+	if (unlikely(pdev->id < 0))
+		return -ENODEV;
+
+	rmnet_mhi_ptr = kzalloc(sizeof(*rmnet_mhi_ptr), GFP_KERNEL);
+	if (unlikely(!rmnet_mhi_ptr))
+		return -ENOMEM;
+	rmnet_mhi_ptr->pdev = pdev;
+	spin_lock_init(&rmnet_mhi_ptr->out_chan_full_lock);
+	rwlock_init(&rmnet_mhi_ptr->pm_lock);
+
+	/* qcom,mhi-mru and cell-index are mandatory properties */
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,mhi-mru",
+				  &rmnet_mhi_ptr->mru);
+	if (unlikely(rc)) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_CRITICAL,
+			  "failed to get valid mru\n");
+		goto probe_fail;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "cell-index",
+				  &rmnet_mhi_ptr->dev_id);
+	if (unlikely(rc)) {
+		rmnet_log(rmnet_mhi_ptr,
+			  MSG_CRITICAL,
+			  "failed to get valid 'cell-index'\n");
+		goto probe_fail;
+	}
+
+	/* Optional limits fall back to driver maxima */
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,mhi-max-mru",
+				  &rmnet_mhi_ptr->max_mru);
+	if (likely(rc)) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "max-mru not defined, setting to max %d\n",
+			  MHI_MAX_MRU);
+		rmnet_mhi_ptr->max_mru = MHI_MAX_MRU;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,mhi-max-mtu",
+				  &rmnet_mhi_ptr->max_mtu);
+	if (likely(rc)) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "max-mtu not defined, setting to max %d\n",
+			  MHI_MAX_MTU);
+		rmnet_mhi_ptr->max_mtu = MHI_MAX_MTU;
+	}
+
+	/* NOTE(review): rmnet_mhi_driver is defined later in the file;
+	 * presumably forward-declared above this window — confirm.
+	 */
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,interface-name",
+				     &rmnet_mhi_ptr->interface_name);
+	if (likely(rc)) {
+		rmnet_log(rmnet_mhi_ptr, MSG_INFO,
+			  "interface-name not defined, setting to default name %s\n",
+			  RMNET_MHI_DRIVER_NAME);
+		rmnet_mhi_ptr->interface_name = rmnet_mhi_driver.driver.name;
+	}
+
+	client_info.dev = &pdev->dev;
+	client_info.node_name = "qcom,mhi";
+	client_info.mhi_client_cb = rmnet_mhi_cb;
+	client_info.user_data = rmnet_mhi_ptr;
+
+	/* Either channel (or both) may be configured in DT */
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,mhi-tx-channel",
+				  &channel);
+	if (rc == 0) {
+		rmnet_mhi_ptr->tx_channel = channel;
+		client_info.chan = channel;
+		client_info.max_payload = rmnet_mhi_ptr->max_mtu;
+
+		rc = mhi_register_channel(&rmnet_mhi_ptr->tx_client_handle,
+					  &client_info);
+		if (unlikely(rc)) {
+			rmnet_log(rmnet_mhi_ptr,
+				  MSG_CRITICAL,
+				  "mhi_register_channel failed chan %d ret %d\n",
+				  rmnet_mhi_ptr->tx_channel,
+				  rc);
+			goto probe_fail;
+		}
+		client_handle = rmnet_mhi_ptr->tx_client_handle;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node,
+				  "qcom,mhi-rx-channel",
+				  &channel);
+	if (rc == 0) {
+		rmnet_mhi_ptr->rx_channel = channel;
+		client_info.max_payload = rmnet_mhi_ptr->max_mru;
+		client_info.chan = channel;
+		rc = mhi_register_channel(&rmnet_mhi_ptr->rx_client_handle,
+					  &client_info);
+		if (unlikely(rc)) {
+			rmnet_log(rmnet_mhi_ptr,
+				  MSG_CRITICAL,
+				  "mhi_register_channel failed chan %d ret %d\n",
+				  rmnet_mhi_ptr->rx_channel,
+				  rc);
+			goto probe_fail;
+		}
+		/* overwriting tx_client_handle is ok because dev_id and
+		 * bdf are same for both channels
+		 */
+		client_handle = rmnet_mhi_ptr->rx_client_handle;
+		INIT_WORK(&rmnet_mhi_ptr->alloc_work, rmnet_mhi_alloc_work);
+		spin_lock_init(&rmnet_mhi_ptr->alloc_lock);
+	}
+
+	/* We must've have @ least one valid channel */
+	if (!client_handle) {
+		rmnet_log(rmnet_mhi_ptr, MSG_CRITICAL,
+			  "No registered channels\n");
+		rc = -ENODEV;
+		goto probe_fail;
+	}
+
+	snprintf(node_name,
+		 sizeof(node_name),
+		 "%s_%04x_%02u.%02u.%02u_%u",
+		 rmnet_mhi_ptr->interface_name,
+		 client_handle->dev_id,
+		 client_handle->domain,
+		 client_handle->bus,
+		 client_handle->slot,
+		 rmnet_mhi_ptr->dev_id);
+	rmnet_mhi_ptr->rmnet_ipc_log =
+		ipc_log_context_create(RMNET_IPC_LOG_PAGES,
+				       node_name, 0);
+	rmnet_mhi_ptr->debug.rmnet_msg_lvl = MSG_CRITICAL;
+
+#ifdef CONFIG_MSM_MHI_DEBUG
+	rmnet_mhi_ptr->debug.rmnet_ipc_log_lvl = MSG_VERBOSE;
+#else
+	rmnet_mhi_ptr->debug.rmnet_ipc_log_lvl = MSG_ERROR;
+#endif
+
+	rmnet_mhi_create_debugfs(rmnet_mhi_ptr);
+	list_add_tail(&rmnet_mhi_ptr->node, &rmnet_mhi_ctxt_list);
+	return 0;
+
+probe_fail:
+	kfree(rmnet_mhi_ptr);
+	return rc;
+}
+
+/* Device-tree match table: binds this driver to "qcom,mhi-rmnet" nodes */
+static const struct of_device_id msm_mhi_match_table[] = {
+	{.compatible = "qcom,mhi-rmnet"},
+	{},
+};
+
+/* Platform driver definition.
+ * Fix: dropped the redundant .owner initializer — the
+ * platform_driver_register() macro fills in THIS_MODULE itself.
+ */
+static struct platform_driver rmnet_mhi_driver = {
+	.probe = rmnet_mhi_probe,
+	.driver = {
+		.name = RMNET_MHI_DRIVER_NAME,
+		.of_match_table = msm_mhi_match_table,
+	},
+};
+
+/* Module entry: create the debugfs root first so probe() can populate
+ * it, then register the platform driver.
+ */
+static int __init rmnet_mhi_init(void)
+{
+	int rc;
+
+	rmnet_mhi_create_debugfs_dir();
+	rc = platform_driver_register(&rmnet_mhi_driver);
+
+	return rc;
+}
+
+/* Module exit: unregister the platform driver, then release every MHI
+ * channel that probe() registered.
+ *
+ * Fixes: the original never called platform_driver_unregister(),
+ * leaving a dangling driver registration after module unload; the list
+ * cursor was also pre-initialized with integer 0, which is both
+ * unidiomatic for a pointer and unnecessary (list_for_each_entry()
+ * initializes it).
+ */
+static void __exit rmnet_mhi_exit(void)
+{
+	struct rmnet_mhi_private *rmnet_mhi_ptr;
+
+	platform_driver_unregister(&rmnet_mhi_driver);
+
+	list_for_each_entry(rmnet_mhi_ptr, &rmnet_mhi_ctxt_list, node) {
+		if (rmnet_mhi_ptr->tx_client_handle)
+			mhi_deregister_channel(rmnet_mhi_ptr->tx_client_handle);
+		if (rmnet_mhi_ptr->rx_client_handle)
+			mhi_deregister_channel(rmnet_mhi_ptr->rx_client_handle);
+	}
+}
+
+/* Module hook-up and metadata */
+module_exit(rmnet_mhi_exit);
+module_init(rmnet_mhi_init);
+
+MODULE_DESCRIPTION("MHI RMNET Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/net/ethernet/msm/rndis_ipa.c
new file mode 100644
index 000000000000..1e0ca8a31888
--- /dev/null
+++ b/drivers/net/ethernet/msm/rndis_ipa.c
@@ -0,0 +1,2455 @@
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/in.h>
+#include <linux/stddef.h>
+#include <linux/ip.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/ipa.h>
+#include <linux/random.h>
+#include <linux/rndis_ipa.h>
+#include <linux/workqueue.h>
+
+#define CREATE_TRACE_POINTS
+#include "rndis_ipa_trace.h"
+
+/* Driver, debugfs and netdev naming */
+#define DRV_NAME "RNDIS_IPA"
+#define DEBUGFS_DIR_NAME "rndis_ipa"
+#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
+#define NETDEV_NAME "rndis"
+#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD
+#define IPV4_HDR_NAME "rndis_eth_ipv4"
+#define IPV6_HDR_NAME "rndis_eth_ipv6"
+#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
+#define INACTIVITY_MSEC_DELAY 100
+/* Tx flow-control window: packets sent to IPA without a Tx-complete ack */
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+/* REMOTE_NDIS_PACKET_MSG message-type value */
+#define RNDIS_IPA_PKT_TYPE 0x00000001
+#define RNDIS_IPA_DFLT_RT_HDL 0
+#define FROM_IPA_TO_USB_BAMDMA 4
+#define FROM_USB_TO_IPA_BAMDMA 5
+#define BAM_DMA_MAX_PKT_NUMBER 10
+#define BAM_DMA_DATA_FIFO_SIZE \
+		(BAM_DMA_MAX_PKT_NUMBER* \
+		(ETH_FRAME_LEN + sizeof(struct rndis_pkt_hdr)))
+#define BAM_DMA_DESC_FIFO_SIZE \
+		(BAM_DMA_MAX_PKT_NUMBER*(sizeof(struct sps_iovec)))
+/* netdev watchdog timeout before ndo_tx_timeout fires */
+#define TX_TIMEOUT (5 * HZ)
+#define MIN_TX_ERROR_SLEEP_PERIOD 500
+/* IPA aggregation defaults used by ipa_to_usb_ep_cfg below */
+#define DEFAULT_AGGR_TIME_LIMIT 1
+#define DEFAULT_AGGR_PKT_LIMIT 0
+
+
+#define RNDIS_IPA_ERROR(fmt, args...) \
+ pr_err(DRV_NAME "@%s@%d@ctx:%s: "\
+ fmt, __func__, __LINE__, current->comm, ## args)
+#define RNDIS_IPA_DEBUG(fmt, args...) \
+ pr_debug("ctx: %s, "fmt, current->comm, ## args)
+
+#define NULL_CHECK_RETVAL(ptr) \
+ do { \
+ if (!(ptr)) { \
+ RNDIS_IPA_ERROR("null pointer #ptr\n"); \
+ return -EINVAL; \
+ } \
+ } \
+ while (0)
+
+#define NULL_CHECK_NO_RETVAL(ptr) \
+ do { \
+ if (!(ptr)) {\
+ RNDIS_IPA_ERROR("null pointer #ptr\n"); \
+ return; \
+ } \
+ } \
+ while (0)
+
+#define RNDIS_HDR_OFST(field) offsetof(struct rndis_pkt_hdr, field)
+#define RNDIS_IPA_LOG_ENTRY() RNDIS_IPA_DEBUG("begin\n")
+#define RNDIS_IPA_LOG_EXIT() RNDIS_IPA_DEBUG("end\n")
+
+
+/**
+ * enum rndis_ipa_state - specify the current driver internal state
+ * which is guarded by a state machine.
+ *
+ * The driver internal state changes due to its external API usage.
+ * The driver saves its internal state to guard from caller illegal
+ * call sequence.
+ * states:
+ * UNLOADED is the first state which is the default one and is also the state
+ * after the driver gets unloaded(cleanup).
+ * INITIALIZED is the driver state once it finished registering
+ * the network device and all internal data struct were initialized
+ * CONNECTED is the driver state once the USB pipes were connected to IPA
+ * UP is the driver state after the interface mode was set to UP but the
+ * pipes are not connected yet - this state is meta-stable state.
+ * CONNECTED_AND_UP is the driver state when the pipe were connected and
+ * the interface got UP request from the network stack. this is the driver
+ * idle operation state which allows it to transmit/receive data.
+ * INVALID is a state which is not allowed.
+ */
+enum rndis_ipa_state {
+	RNDIS_IPA_UNLOADED = 0,		/* default / after cleanup */
+	RNDIS_IPA_INITIALIZED = 1,	/* netdev registered, pipes down */
+	RNDIS_IPA_CONNECTED = 2,	/* USB pipes connected to IPA */
+	RNDIS_IPA_UP = 3,		/* iface up, pipes not yet connected */
+	RNDIS_IPA_CONNECTED_AND_UP = 4,	/* idle operational state */
+	RNDIS_IPA_INVALID = 5,		/* illegal-transition marker */
+};
+
+/**
+ * enum rndis_ipa_operation - enumerations used to describe the API operation
+ *
+ * Those enums are used as input for the driver state machine.
+ */
+enum rndis_ipa_operation {
+	RNDIS_IPA_INITIALIZE,
+	RNDIS_IPA_CONNECT,
+	RNDIS_IPA_OPEN,
+	RNDIS_IPA_STOP,
+	RNDIS_IPA_DISCONNECT,
+	RNDIS_IPA_CLEANUP,
+};
+
+/*
+ * Fix: dropped the trailing semicolon from the macro body.  With the
+ * semicolon baked in, every use site expanded to a double statement,
+ * which breaks unbraced if/else constructs.
+ */
+#define RNDIS_IPA_STATE_DEBUG(ctx) \
+	RNDIS_IPA_DEBUG("Driver state: %s\n",\
+		rndis_ipa_state_string(ctx->state))
+
+/**
+ * struct rndis_ipa_dev - main driver context parameters
+ *
+ * @net: network interface struct implemented by this driver
+ * @tx_filter: flag that enable/disable Tx path to continue to IPA
+ * @tx_dropped: number of filtered out Tx packets
+ * @tx_dump_enable: dump all Tx packets
+ * @rx_filter: flag that enable/disable Rx path to continue to IPA
+ * @rx_dropped: number of filtered out Rx packets
+ * @rx_dump_enable: dump all Rx packets
+ * @icmp_filter: allow all ICMP packet to pass through the filters
+ * @rm_enable: flag that enable/disable Resource manager request prior to Tx
+ * @deaggregation_enable: enable/disable IPA HW deaggregation logic
+ * @during_xmit_error: flags that indicate that the driver is in a middle
+ * of error handling in Tx path
+ * @directory: debugfs directory holding the driver's debug switches; kept
+ * so they can be removed on driver unload
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: save handle for IPA pipe operations
+ * @ipa_to_usb_hdl: save handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets which shall cause
+ * to netdev queue start (after stopped due to outstanding_high reached)
+ * @error_msec_sleep_time: number of msec for sleeping in case of Tx error
+ * @state: current state of the driver
+ * @host_ethaddr: holds the tethered PC ethernet address
+ * @device_ethaddr: holds the device ethernet address
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the Netdev internal
+ * state is changed to RNDIS_IPA_CONNECTED_AND_UP
+ * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails
+ * @state_lock: used to protect the state variable.
+ */
+struct rndis_ipa_dev {
+	struct net_device *net;
+	bool tx_filter;
+	u32 tx_dropped;
+	bool tx_dump_enable;
+	bool rx_filter;
+	u32 rx_dropped;
+	bool rx_dump_enable;
+	bool icmp_filter;
+	bool rm_enable;
+	bool deaggregation_enable;
+	bool during_xmit_error;
+	struct dentry *directory;
+	uint32_t eth_ipv4_hdr_hdl;
+	uint32_t eth_ipv6_hdr_hdl;
+	u32 usb_to_ipa_hdl;
+	u32 ipa_to_usb_hdl;
+	atomic_t outstanding_pkts;
+	u32 outstanding_high;
+	u32 outstanding_low;
+	u32 error_msec_sleep_time;
+	enum rndis_ipa_state state;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void (*device_ready_notify)(void);
+	struct delayed_work xmit_error_delayed_work;
+	spinlock_t state_lock; /* Spinlock for the state variable.*/
+};
+
+/**
+ * rndis_pkt_hdr - RNDIS_IPA representation of REMOTE_NDIS_PACKET_MSG
+ * @msg_type: for REMOTE_NDIS_PACKET_MSG this value should be 1
+ * @msg_len: total message length in bytes, including RNDIS header an payload
+ * @data_ofst: offset in bytes from start of the data_ofst to payload
+ * @data_len: payload size in bytes
+ * @zeroes: OOB place holder - not used for RNDIS_IPA.
+ */
+/*
+ * Fix: the original ended with "} __packed__;".  That token is not the
+ * kernel's packing attribute -- it merely declared a stray global
+ * variable named "__packed__" and left the struct unpacked.  Use the
+ * proper __packed annotation from <linux/compiler.h>.
+ */
+struct rndis_pkt_hdr {
+	__le32 msg_type;
+	__le32 msg_len;
+	__le32 data_ofst;
+	__le32 data_len;
+	__le32 zeroes[7];
+} __packed;
+
+/* Forward declarations: netdev ops, IPA callbacks and internal helpers */
+static int rndis_ipa_open(struct net_device *net);
+static void rndis_ipa_packet_receive_notify(void *private,
+		enum ipa_dp_evt_type evt, unsigned long data);
+static void rndis_ipa_tx_complete_notify(void *private,
+		enum ipa_dp_evt_type evt, unsigned long data);
+static void rndis_ipa_tx_timeout(struct net_device *net);
+static int rndis_ipa_stop(struct net_device *net);
+static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb);
+static void rndis_ipa_xmit_error(struct sk_buff *skb);
+static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work);
+static void rndis_ipa_prepare_header_insertion(int eth_type,
+		const char *hdr_name, struct ipa_hdr_add *add_hdr,
+		const void *dst_mac, const void *src_mac);
+static int rndis_ipa_hdrs_cfg(struct rndis_ipa_dev *rndis_ipa_ctx,
+		const void *dst_mac, const void *src_mac);
+static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
+static int rndis_ipa_register_properties(char *netdev_name);
+static int rndis_ipa_deregister_properties(char *netdev_name);
+static void rndis_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+		unsigned long data);
+static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_destory_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
+static bool rx_filter(struct sk_buff *skb);
+static bool tx_filter(struct sk_buff *skb);
+static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx);
+static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx);
+static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
+					struct net_device *net);
+static int rndis_ipa_debugfs_atomic_open(struct inode *inode,
+		struct file *file);
+static int rndis_ipa_debugfs_aggr_open(struct inode *inode,
+		struct file *file);
+static ssize_t rndis_ipa_debugfs_aggr_write(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_atomic_read(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos);
+static void rndis_ipa_dump_skb(struct sk_buff *skb);
+static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx);
+static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl,
+		u32 ipa_to_usb_hdl, u32 max_xfer_size_bytes_to_dev,
+		u32 max_xfer_size_bytes_to_host, u32 mtu,
+		bool deaggr_enable);
+static int rndis_ipa_set_device_ethernet_addr(u8 *dev_ethaddr,
+		u8 device_ethaddr[]);
+static enum rndis_ipa_state rndis_ipa_next_state(
+		enum rndis_ipa_state current_state,
+		enum rndis_ipa_operation operation);
+static const char *rndis_ipa_state_string(enum rndis_ipa_state state);
+static int rndis_ipa_init_module(void);
+static void rndis_ipa_cleanup_module(void);
+
+/* Single global driver context, set by rndis_ipa_init() */
+struct rndis_ipa_dev *rndis_ipa;
+
+/* Netdev callbacks wired into the Linux network stack */
+static const struct net_device_ops rndis_ipa_netdev_ops = {
+	.ndo_open		= rndis_ipa_open,
+	.ndo_stop		= rndis_ipa_stop,
+	.ndo_start_xmit = rndis_ipa_start_xmit,
+	.ndo_tx_timeout = rndis_ipa_tx_timeout,
+	.ndo_get_stats = rndis_ipa_get_stats,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/* debugfs file exposing an atomic counter (read-only) */
+const struct file_operations rndis_ipa_debugfs_atomic_ops = {
+	.open = rndis_ipa_debugfs_atomic_open,
+	.read = rndis_ipa_debugfs_atomic_read,
+};
+
+/* debugfs file controlling aggregation parameters (write-only) */
+const struct file_operations rndis_ipa_aggr_ops = {
+		.open = rndis_ipa_debugfs_aggr_open,
+		.write = rndis_ipa_debugfs_aggr_write,
+};
+
+/* End-point config for the IPA->USB (device Tx) pipe: RNDIS+Ethernet
+ * header insertion with generic aggregation enabled.
+ */
+static struct ipa_ep_cfg ipa_to_usb_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = ETH_HLEN,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3*sizeof(u32),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1*sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_AGGR,
+		.aggr = IPA_GENERIC,
+		.aggr_byte_limit = 4,
+		.aggr_time_limit = DEFAULT_AGGR_TIME_LIMIT,
+		.aggr_pkt_limit = DEFAULT_AGGR_PKT_LIMIT
+	},
+	.deaggr = {
+		.deaggr_hdr_len = 0,
+		.packet_offset_valid = 0,
+		.packet_offset_location = 0,
+		.max_packet_len = 0,
+	},
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_SRC_NAT,
+	},
+};
+
+/* End-point config for USB->IPA (device Rx) when the host sends one
+ * packet per transfer: HW deaggregation bypassed.
+ */
+static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_dis = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = 0,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3*sizeof(u32) +
+			sizeof(struct rndis_pkt_hdr),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1*sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+
+	.aggr = {
+		.aggr_en = IPA_BYPASS_AGGR,
+		.aggr = 0,
+		.aggr_byte_limit = 0,
+		.aggr_time_limit = 0,
+		.aggr_pkt_limit  = 0,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = 0,
+		.packet_offset_valid = false,
+		.packet_offset_location = 0,
+		.max_packet_len = 0,
+	},
+
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_BYPASS_NAT,
+	},
+};
+
+/* End-point config for USB->IPA (device Rx) when the host may pack
+ * multiple RNDIS packets per transfer: HW deaggregation enabled.
+ */
+static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_en = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN,
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = 0,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3*sizeof(u32),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1*sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_DEAGGR,
+		.aggr = IPA_GENERIC,
+		.aggr_byte_limit = 0,
+		.aggr_time_limit = 0,
+		.aggr_pkt_limit = 0,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = sizeof(struct rndis_pkt_hdr),
+		.packet_offset_valid = true,
+		.packet_offset_location = 8,
+		.max_packet_len = 8192, /* Will be overridden*/
+	},
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_BYPASS_NAT,
+	},
+};
+
+
+/**
+ * rndis_template_hdr - RNDIS template structure for RNDIS_IPA SW insertion
+ * @msg_type: set for REMOTE_NDIS_PACKET_MSG (0x00000001)
+ * this value will be used for all data packets
+ * @msg_len: will add the skb length to get final size
+ * @data_ofst: this field value will not be changed
+ * @data_len: set as skb length to get final size
+ * @zeroes: make sure all OOB data is not used
+ */
+/* NOTE(review): host-order constants are assigned to __le32 fields;
+ * correct on little-endian ARM targets but would need cpu_to_le32()
+ * on big-endian -- TODO confirm intended targets before changing.
+ */
+struct rndis_pkt_hdr rndis_template_hdr = {
+	.msg_type = RNDIS_IPA_PKT_TYPE,
+	.msg_len = sizeof(struct rndis_pkt_hdr),
+	.data_ofst = sizeof(struct rndis_pkt_hdr) - RNDIS_HDR_OFST(data_ofst),
+	.data_len = 0,
+	.zeroes = {0},
+};
+
+/* ipa_send_msg() completion callback: frees the message buffer that was
+ * handed to the IPA driver (len/type are unused here).
+ */
+static void rndis_ipa_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/**
+ * rndis_ipa_init() - create network device and initialize internal
+ * data structures
+ * @params: in/out parameters required for initialization,
+ * see "struct ipa_usb_init_params" for more details
+ *
+ * Shall be called prior to pipe connection.
+ * Detailed description:
+ * - allocate the network device
+ * - set default values for driver internal switches and stash them inside
+ * the netdev private field
+ * - set needed headroom for RNDIS header
+ * - create debugfs folder and files
+ * - create IPA resource manager client
+ * - set the ethernet address for the netdev to be added on SW Tx path
+ * - add header insertion rules for IPA driver (based on host/device Ethernet
+ * addresses given in input params and on RNDIS data template struct)
+ * - register tx/rx properties to IPA driver (will be later used
+ * by IPA configuration manager to configure rest of the IPA rules)
+ * - set the carrier state to "off" (until connect is called)
+ * - register the network device
+ * - set the out parameters
+ * - change driver internal state to INITIALIZED
+ *
+ * Returns negative errno, or zero on success
+ */
+int rndis_ipa_init(struct ipa_usb_init_params *params)
+{
+	int result = 0;
+	struct net_device *net;
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+
+	RNDIS_IPA_LOG_ENTRY();
+	RNDIS_IPA_DEBUG("%s initializing\n", DRV_NAME);
+	NULL_CHECK_RETVAL(params);
+
+	RNDIS_IPA_DEBUG("host_ethaddr=%pM, device_ethaddr=%pM\n",
+		params->host_ethaddr,
+		params->device_ethaddr);
+
+	net = alloc_etherdev(sizeof(struct rndis_ipa_dev));
+	if (!net) {
+		result = -ENOMEM;
+		RNDIS_IPA_ERROR("fail to allocate Ethernet device\n");
+		goto fail_alloc_etherdev;
+	}
+	RNDIS_IPA_DEBUG("network device was successfully allocated\n");
+
+	/* NOTE(review): netdev_priv() cannot return NULL for a valid net
+	 * device; this check is purely defensive.
+	 */
+	rndis_ipa_ctx = netdev_priv(net);
+	if (!rndis_ipa_ctx) {
+		result = -ENOMEM;
+		RNDIS_IPA_ERROR("fail to extract netdev priv\n");
+		goto fail_netdev_priv;
+	}
+	/* alloc_etherdev() already zeroes priv; memset kept for clarity */
+	memset(rndis_ipa_ctx, 0, sizeof(*rndis_ipa_ctx));
+	RNDIS_IPA_DEBUG("rndis_ipa_ctx (private)=%p\n", rndis_ipa_ctx);
+
+	spin_lock_init(&rndis_ipa_ctx->state_lock);
+
+	/* default switch values; filters off, RM on, deaggr off */
+	rndis_ipa_ctx->net = net;
+	rndis_ipa_ctx->tx_filter = false;
+	rndis_ipa_ctx->rx_filter = false;
+	rndis_ipa_ctx->icmp_filter = true;
+	rndis_ipa_ctx->rm_enable = true;
+	rndis_ipa_ctx->tx_dropped = 0;
+	rndis_ipa_ctx->rx_dropped = 0;
+	rndis_ipa_ctx->tx_dump_enable = false;
+	rndis_ipa_ctx->rx_dump_enable = false;
+	rndis_ipa_ctx->deaggregation_enable = false;
+	rndis_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	rndis_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+	memcpy(rndis_ipa_ctx->device_ethaddr, params->device_ethaddr,
+		sizeof(rndis_ipa_ctx->device_ethaddr));
+	memcpy(rndis_ipa_ctx->host_ethaddr, params->host_ethaddr,
+		sizeof(rndis_ipa_ctx->host_ethaddr));
+	INIT_DELAYED_WORK(&rndis_ipa_ctx->xmit_error_delayed_work,
+		rndis_ipa_xmit_error_aftercare_wq);
+	rndis_ipa_ctx->error_msec_sleep_time =
+		MIN_TX_ERROR_SLEEP_PERIOD;
+	RNDIS_IPA_DEBUG("internal data structures were set\n");
+
+	if (!params->device_ready_notify)
+		RNDIS_IPA_DEBUG("device_ready_notify() was not supplied\n");
+	rndis_ipa_ctx->device_ready_notify = params->device_ready_notify;
+
+	snprintf(net->name, sizeof(net->name), "%s%%d", NETDEV_NAME);
+	RNDIS_IPA_DEBUG("Setting network interface driver name to: %s\n",
+		net->name);
+
+	net->netdev_ops = &rndis_ipa_netdev_ops;
+	net->watchdog_timeo = TX_TIMEOUT;
+
+	/* reserve room for the SW-inserted RNDIS header on Tx */
+	net->needed_headroom = sizeof(rndis_template_hdr);
+	RNDIS_IPA_DEBUG("Needed headroom for RNDIS header set to %d\n",
+		net->needed_headroom);
+
+	rndis_ipa_debugfs_init(rndis_ipa_ctx);
+
+	result = rndis_ipa_set_device_ethernet_addr(net->dev_addr,
+		rndis_ipa_ctx->device_ethaddr);
+	if (result) {
+		RNDIS_IPA_ERROR("set device MAC failed\n");
+		goto fail_set_device_ethernet;
+	}
+	RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
+
+	result = rndis_ipa_hdrs_cfg(rndis_ipa_ctx,
+		params->host_ethaddr,
+		params->device_ethaddr);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on ipa hdrs set\n");
+		goto fail_hdrs_cfg;
+	}
+	RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n");
+
+	result = rndis_ipa_register_properties(net->name);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on properties set\n");
+		goto fail_register_tx;
+	}
+	RNDIS_IPA_DEBUG("2 TX and 2 RX properties were registered\n");
+
+	netif_carrier_off(net);
+	RNDIS_IPA_DEBUG("set carrier off until pipes are connected\n");
+
+	result = register_netdev(net);
+	if (result) {
+		RNDIS_IPA_ERROR("register_netdev failed: %d\n", result);
+		goto fail_register_netdev;
+	}
+	RNDIS_IPA_DEBUG("netdev:%s registration succeeded, index=%d\n",
+		net->name, net->ifindex);
+
+	/* publish the context and out-params only after full success */
+	rndis_ipa = rndis_ipa_ctx;
+	params->ipa_rx_notify = rndis_ipa_packet_receive_notify;
+	params->ipa_tx_notify = rndis_ipa_tx_complete_notify;
+	params->private = rndis_ipa_ctx;
+	params->skip_ep_cfg = false;
+	rndis_ipa_ctx->state = RNDIS_IPA_INITIALIZED;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+	pr_info("RNDIS_IPA NetDev was initialized");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+/* unwind in reverse acquisition order */
+fail_register_netdev:
+	rndis_ipa_deregister_properties(net->name);
+fail_register_tx:
+	rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
+fail_set_device_ethernet:
+fail_hdrs_cfg:
+	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
+fail_netdev_priv:
+	free_netdev(net);
+fail_alloc_etherdev:
+	return result;
+}
+EXPORT_SYMBOL(rndis_ipa_init);
+
+/**
+ * rndis_ipa_pipe_connect_notify() - notify rndis_ipa Netdev that the USB pipes
+ * were connected
+ * @usb_to_ipa_hdl: handle from IPA driver client for USB->IPA
+ * @ipa_to_usb_hdl: handle from IPA driver client for IPA->USB
+ * @private: same value that was set by init(), this parameter holds the
+ * network device pointer.
+ * @max_transfer_byte_size: RNDIS protocol specific, the maximum size that
+ * the host expect
+ * @max_packet_number: RNDIS protocol specific, the maximum packet number
+ * that the host expects
+ *
+ * Once USB driver finishes the pipe connection between IPA core
+ * and USB core this method shall be called in order to
+ * allow the driver to complete the data path configurations.
+ * Detailed description:
+ * - configure the IPA end-points register
+ * - notify the Linux kernel for "carrier_on"
+ * - change the driver internal state
+ *
+ * After this function is done the driver state changes to "Connected" or
+ * Connected and Up.
+ * This API is expected to be called after initialization() or
+ * after a call to disconnect().
+ *
+ * Returns negative errno, or zero on success
+ */
+int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+		u32 ipa_to_usb_hdl,
+		u32 max_xfer_size_bytes_to_dev,
+		u32 max_packet_number_to_dev,
+		u32 max_xfer_size_bytes_to_host,
+		void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int result;
+	unsigned long flags;
+	struct ipa_ecm_msg *rndis_msg;
+	struct ipa_msg_meta msg_meta;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	NULL_CHECK_RETVAL(private);
+
+	RNDIS_IPA_DEBUG("usb_to_ipa_hdl=%d, ipa_to_usb_hdl=%d, private=0x%p\n",
+		usb_to_ipa_hdl, ipa_to_usb_hdl, private);
+	RNDIS_IPA_DEBUG("max_xfer_sz_to_dev=%d, max_pkt_num_to_dev=%d\n",
+		max_xfer_size_bytes_to_dev,
+		max_packet_number_to_dev);
+	RNDIS_IPA_DEBUG("max_xfer_sz_to_host=%d\n",
+		max_xfer_size_bytes_to_host);
+
+	/* early validation: CONNECT must be legal from the current state */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+		RNDIS_IPA_CONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n");
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	if (usb_to_ipa_hdl >= IPA_CLIENT_MAX) {
+		RNDIS_IPA_ERROR("usb_to_ipa_hdl(%d) - not valid ipa handle\n",
+			usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	if (ipa_to_usb_hdl >= IPA_CLIENT_MAX) {
+		RNDIS_IPA_ERROR("ipa_to_usb_hdl(%d) - not valid ipa handle\n",
+			ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+
+	result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on RM create\n");
+		goto fail_create_rm;
+	}
+	RNDIS_IPA_DEBUG("RM resource was created\n");
+
+	rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
+	rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
+	/* more than one packet per transfer requires HW deaggregation */
+	if (max_packet_number_to_dev > 1)
+		rndis_ipa_ctx->deaggregation_enable = true;
+	else
+		rndis_ipa_ctx->deaggregation_enable = false;
+	result = rndis_ipa_ep_registers_cfg(usb_to_ipa_hdl,
+		ipa_to_usb_hdl,
+		max_xfer_size_bytes_to_dev,
+		max_xfer_size_bytes_to_host,
+		rndis_ipa_ctx->net->mtu,
+		rndis_ipa_ctx->deaggregation_enable);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on ep cfg\n");
+		goto fail;
+	}
+	RNDIS_IPA_DEBUG("end-points configured\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_stop_queue() was called\n");
+
+	netif_carrier_on(rndis_ipa_ctx->net);
+	if (!netif_carrier_ok(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_ERROR("netif_carrier_ok error\n");
+		result = -EBUSY;
+		goto fail;
+	}
+	RNDIS_IPA_DEBUG("netif_carrier_on() was called\n");
+
+	rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
+	if (!rndis_msg) {
+		result = -ENOMEM;
+		goto fail;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_CONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
+
+	/* rndis_msg ownership passes to IPA; freed via the callback */
+	result = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
+	if (result) {
+		RNDIS_IPA_ERROR("fail to send ECM_CONNECT for rndis\n");
+		kfree(rndis_msg);
+		goto fail;
+	}
+
+	/* commit the state transition under the lock */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+		RNDIS_IPA_CONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n");
+		/*
+		 * Fix: the original returned -EPERM here directly, leaking
+		 * the RM resource created earlier in this function.  Route
+		 * through the common failure path instead.
+		 */
+		result = -EPERM;
+		goto fail;
+	}
+	rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	if (next_state == RNDIS_IPA_CONNECTED_AND_UP)
+		rndis_ipa_enable_data_path(rndis_ipa_ctx);
+	else
+		RNDIS_IPA_DEBUG("queue shall be started after open()\n");
+
+	pr_info("RNDIS_IPA NetDev pipes were connected\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail:
+	rndis_ipa_destory_rm_resource(rndis_ipa_ctx);
+fail_create_rm:
+	return result;
+}
+EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify);
+
+/**
+ * rndis_ipa_open() - notify Linux network stack to start sending packets
+ * @net: the network interface supplied by the network stack
+ *
+ * Linux uses this API to notify the driver that the network interface
+ * transitions to the up state.
+ * The driver will instruct the Linux network stack to start
+ * delivering data packets.
+ * The driver internal state shall be changed to Up or Connected and Up
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_open(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	int next_state;
+	unsigned long flags;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	rndis_ipa_ctx = netdev_priv(net);
+
+	/* OPEN transition is validated and committed under the state lock */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_OPEN);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("can't bring driver up before initialize\n");
+		return -EPERM;
+	}
+
+	rndis_ipa_ctx->state = next_state;
+
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+
+	/* data can flow only once both UP and CONNECTED were reached */
+	if (next_state == RNDIS_IPA_CONNECTED_AND_UP)
+		rndis_ipa_enable_data_path(rndis_ipa_ctx);
+	else
+		RNDIS_IPA_DEBUG("queue shall be started after connect()\n");
+
+	pr_info("RNDIS_IPA NetDev was opened\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/**
+ * rndis_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ * using SW path (Tx data path)
+ * Tx path for this Netdev is Apps-processor->IPA->USB
+ * @skb: packet received from Linux network stack destined for tethered PC
+ * @net: the network device being used to send this packet (rndis0)
+ *
+ * Several conditions needed in order to send the packet to IPA:
+ * - Transmit queue for the network driver is currently
+ * in "started" state
+ * - The driver internal state is in Connected and Up state.
+ * - Filters Tx switch are turned off
+ * - The IPA resource manager state for the driver producer client
+ * is "Granted" which implies that all the resources in the dependency
+ * graph are valid for data flow.
+ * - outstanding high boundary was not reached.
+ *
+ * In case the outstanding packets high boundary is reached, the driver will
+ * stop the send queue until enough packets are processed by
+ * the IPA core (based on calls to rndis_ipa_tx_complete_notify).
+ *
+ * In case all of the conditions are met, the network driver shall:
+ * - encapsulate the Ethernet packet with RNDIS header (REMOTE_NDIS_PACKET_MSG)
+ * - send the packet by using IPA Driver SW path (IP_PACKET_INIT)
+ * - Netdev status fields shall be updated based on the current Tx packet
+ *
+ * Returns NETDEV_TX_BUSY if retry should be made later,
+ * or NETDEV_TX_OK on success.
+ */
+static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	int ret;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+
+	net->trans_start = jiffies;
+
+	RNDIS_IPA_DEBUG("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely(netif_queue_stopped(net))) {
+		RNDIS_IPA_ERROR("interface queue is stopped\n");
+		goto out;
+	}
+
+	if (unlikely(rndis_ipa_ctx->tx_dump_enable))
+		rndis_ipa_dump_skb(skb);
+
+	/* NOTE(review): returning NETDEV_TX_BUSY here without stopping the
+	 * queue makes the stack immediately requeue the same skb, which can
+	 * busy-loop while the state is wrong -- consider dropping the packet
+	 * (free skb, return NETDEV_TX_OK) instead; behavioral change, so
+	 * flagged rather than applied.
+	 */
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_ERROR("Missing pipe connected and/or iface up\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(tx_filter(skb))) {
+		dev_kfree_skb_any(skb);
+		RNDIS_IPA_DEBUG("packet got filtered out on Tx path\n");
+		rndis_ipa_ctx->tx_dropped++;
+		status = NETDEV_TX_OK;
+		goto out;
+	}
+
+	ret = resource_request(rndis_ipa_ctx);
+	if (ret) {
+		RNDIS_IPA_DEBUG("Waiting to resource\n");
+		netif_stop_queue(net);
+		goto resource_busy;
+	}
+
+	/* queue is stopped until tx-completes drain below outstanding_low */
+	if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >=
+				rndis_ipa_ctx->outstanding_high) {
+		RNDIS_IPA_DEBUG("Outstanding high boundary reached (%d)\n",
+				rndis_ipa_ctx->outstanding_high);
+		netif_stop_queue(net);
+		RNDIS_IPA_DEBUG("send queue was stopped\n");
+		status = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	/* prepend REMOTE_NDIS_PACKET_MSG header, then hand off to IPA */
+	skb = rndis_encapsulate_skb(skb);
+	trace_rndis_tx_dp(skb->protocol);
+	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
+	if (ret) {
+		RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+		goto fail_tx_packet;
+	}
+
+	atomic_inc(&rndis_ipa_ctx->outstanding_pkts);
+
+	status = NETDEV_TX_OK;
+	goto out;
+
+fail_tx_packet:
+	rndis_ipa_xmit_error(skb);
+out:
+	resource_release(rndis_ipa_ctx);
+resource_busy:
+	RNDIS_IPA_DEBUG("packet Tx done - %s\n",
+		(status == NETDEV_TX_OK) ? "OK" : "FAIL");
+
+	return status;
+}
+
+/**
+ * rndis_ipa_tx_complete_notify() - notification for Netdev that the
+ * last packet was successfully sent
+ * @private: driver context stashed by IPA driver upon pipe connect
+ * @evt: event type (expected to be write-done event)
+ * @data: data provided with event (this is actually the skb that
+ * holds the sent packet)
+ *
+ * This function will be called on interrupt bottom halve deferred context.
+ * outstanding packets counter shall be decremented.
+ * Network stack send queue will be re-started in case low outstanding
+ * boundary is reached and queue was stopped before.
+ * At the end the skb shall be freed.
+ */
+static void rndis_ipa_tx_complete_notify(void *private,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+
+	NULL_CHECK_NO_RETVAL(private);
+
+	trace_rndis_status_rcvd(skb->protocol);
+
+	RNDIS_IPA_DEBUG("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely((evt != IPA_WRITE_DONE))) {
+		RNDIS_IPA_ERROR("unsupported event on TX call-back\n");
+		return;
+	}
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG("dropping Tx-complete pkt, state=%s\n",
+			rndis_ipa_state_string(rndis_ipa_ctx->state));
+		goto out;
+	}
+
+	rndis_ipa_ctx->net->stats.tx_packets++;
+	rndis_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	/* wake the queue once the in-flight count drains below the low mark */
+	atomic_dec(&rndis_ipa_ctx->outstanding_pkts);
+	if (netif_queue_stopped(rndis_ipa_ctx->net) &&
+		netif_carrier_ok(rndis_ipa_ctx->net) &&
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts) <
+					(rndis_ipa_ctx->outstanding_low)) {
+		/* Fix: the original format string ended in a literal 'n' --
+		 * the backslash was missing from the newline escape.
+		 */
+		RNDIS_IPA_DEBUG("outstanding low boundary reached (%d)\n",
+			rndis_ipa_ctx->outstanding_low);
+		netif_wake_queue(rndis_ipa_ctx->net);
+		RNDIS_IPA_DEBUG("send queue was awaken\n");
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+}
+
+static void rndis_ipa_tx_timeout(struct net_device *net)
+{
+ struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+ int outstanding = atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+ RNDIS_IPA_ERROR("possible IPA stall was detected, %d outstanding\n",
+ outstanding);
+
+ net->stats.tx_errors++;
+}
+
+/**
+ * rndis_ipa_rm_notify() - callback supplied to IPA resource manager
+ * for grant/release events
+ * @user_data: the driver context supplied to IPA resource manager during call
+ * to ipa_rm_create_resource().
+ * @event: the event notified to us by IPA resource manager (Release/Grant)
+ * @data: reserved field supplied by IPA resource manager
+ *
+ * This callback shall be called based on resource request/release sent
+ * to the IPA resource manager.
+ * In case the queue was stopped during EINPROGRESS for Tx path and the
+ * event received is Grant then the queue shall be restarted.
+ * In case the event notified is a release notification the netdev discard it.
+ */
+static void rndis_ipa_rm_notify(void *user_data, enum ipa_rm_event event,
+		unsigned long data)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = user_data;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (event == IPA_RM_RESOURCE_RELEASED) {
+		RNDIS_IPA_DEBUG("Resource Released\n");
+		return;
+	}
+
+	if (event != IPA_RM_RESOURCE_GRANTED) {
+		RNDIS_IPA_ERROR("Unexpected event received from RM (%d)\n",
+			event);
+		return;
+	}
+	RNDIS_IPA_DEBUG("Resource Granted\n");
+
+	/* the Tx path stopped the queue while waiting for the grant */
+	if (netif_queue_stopped(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_DEBUG("starting queue\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG("queue already awake\n");
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/**
+ * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from
+ *  tethered PC (USB->IPA).
+ *  is USB->IPA->Apps-processor
+ * @private: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Once IPA driver receives a packet from USB client this callback will be
+ * called from bottom-half interrupt handling context (ipa Rx workqueue).
+ *
+ * Packets that shall be sent to Apps processor may be of two types:
+ * 1) Packets that are destined for Apps (e.g: WEBSERVER running on Apps)
+ * 2) Exception packets that need special handling (based on IPA core
+ *    configuration, e.g: new TCP session or any other packets that IPA core
+ *    can't handle)
+ * If the next conditions are met, the packet shall be sent up to the
+ * Linux network stack:
+ *  - Driver internal state is Connected and Up
+ *  - Notification received from IPA driver meets the expected type
+ *    for Rx packet
+ *  -Filters Rx switch are turned off
+ *
+ * Prior to the sending to the network stack:
+ *  - Netdev struct shall be stashed to the skb as required by the network stack
+ *  - Ethernet header shall be removed (skb->data shall point to the Ethernet
+ *     payload, Ethernet still stashed under MAC header).
+ *  - The skb->pkt_protocol shall be set based on the ethernet destination
+ *     address, Can be Broadcast, Multicast or Other-Host, The later
+ *     pkt-types packets shall be dropped in case the Netdev is not
+ *     in promisc mode.
+ *   - Set the skb protocol field based on the EtherType field
+ *
+ * Netdev status fields shall be updated based on the current Rx packet
+ */
+static void rndis_ipa_packet_receive_notify(void *private,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int result;
+	/* cache the length now: eth_type_trans() below pulls the Ethernet
+	 * header, so skb->len no longer reflects the full received frame
+	 */
+	unsigned int packet_len = skb->len;
+
+	RNDIS_IPA_DEBUG("packet Rx, len=%d\n",
+		skb->len);
+
+	if (unlikely(rndis_ipa_ctx->rx_dump_enable))
+		rndis_ipa_dump_skb(skb);
+
+	/* NOTE(review): on early returns below the skb is not freed here -
+	 * presumably ownership stays with the IPA driver; confirm
+	 */
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG("use connect()/up() before receive()\n");
+		RNDIS_IPA_DEBUG("packet dropped (length=%d)\n",
+				skb->len);
+		return;
+	}
+
+	if (evt != IPA_RECEIVE)	{
+		RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n");
+		return;
+	}
+
+	/* without HW de-aggregation the RNDIS header is still present -
+	 * strip it in SW before handing the frame to the stack
+	 */
+	if (!rndis_ipa_ctx->deaggregation_enable)
+		skb_pull(skb, sizeof(struct rndis_pkt_hdr));
+
+	skb->dev = rndis_ipa_ctx->net;
+	skb->protocol = eth_type_trans(skb, rndis_ipa_ctx->net);
+
+	if (rx_filter(skb)) {
+		RNDIS_IPA_DEBUG("packet got filtered out on RX path\n");
+		rndis_ipa_ctx->rx_dropped++;
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	trace_rndis_netif_ni(skb->protocol);
+	result = netif_rx_ni(skb);
+	if (result)
+		RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
+	/* NOTE(review): rx stats are counted even when netif_rx_ni()
+	 * reports a drop - confirm this is intended
+	 */
+	rndis_ipa_ctx->net->stats.rx_packets++;
+	rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
+
+	return;
+}
+
+/** rndis_ipa_stop() - notify the network interface to stop
+ * sending/receiving data
+ * @net: the network device being stopped.
+ *
+ * Called by the Linux network stack when the interface goes "down".
+ * The internal state machine is advanced with a RNDIS_IPA_STOP event
+ * (under the state lock) and the send queue is stopped.
+ * The Netdev shall be returned to be "Up" after rndis_ipa_open().
+ */
+static int rndis_ipa_stop(struct net_device *net)
+{
+	unsigned long flags;
+	int next_state;
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	/* validate and commit the state transition atomically */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_STOP);
+	if (next_state != RNDIS_IPA_INVALID)
+		rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	if (next_state == RNDIS_IPA_INVALID) {
+		RNDIS_IPA_DEBUG("can't do network interface down without up\n");
+		return -EPERM;
+	}
+
+	netif_stop_queue(net);
+	pr_info("RNDIS_IPA NetDev queue is stopped\n");
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/** rndis_ipa_pipe_disconnect_notify() - notify rndis_ipa Netdev that the
+ *  USB pipes were disconnected
+ * @private: same value that was set by init(), this  parameter holds the
+ *  network device pointer.
+ *
+ * USB shall notify the Netdev after disconnecting the pipe.
+ * - The internal driver state shall returned to its previous
+ *   state (Up or Initialized).
+ * - Linux network stack shall be informed for carrier off to notify
+ *   user space for pipe disconnect
+ * - send queue shall be stopped
+ * During the transition between the pipe disconnection to
+ * the Netdev notification packets
+ * are expected to be dropped by IPA driver or IPA core.
+ */
+int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int outstanding_dropped_pkts;
+	int retval;
+	unsigned long flags;
+	struct ipa_ecm_msg *rndis_msg;
+	struct ipa_msg_meta msg_meta;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	NULL_CHECK_RETVAL(rndis_ipa_ctx);
+	RNDIS_IPA_DEBUG("private=0x%p\n", private);
+
+	/* first pass only VALIDATES that a disconnect is legal from the
+	 * current state; the transition is committed by a second pass at
+	 * the end, after all teardown steps succeeded
+	 */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+		RNDIS_IPA_DISCONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	/* stop any pending Tx-error backoff work before tearing down */
+	if (rndis_ipa_ctx->during_xmit_error) {
+		RNDIS_IPA_DEBUG("canceling xmit-error delayed work\n");
+		cancel_delayed_work_sync(
+			&rndis_ipa_ctx->xmit_error_delayed_work);
+		rndis_ipa_ctx->during_xmit_error = false;
+	}
+
+	netif_carrier_off(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("carrier_off notification was sent\n");
+
+	/* NOTE(review): the ECM message struct/type is reused here for
+	 * RNDIS - presumably user space listens for ECM_DISCONNECT
+	 * regardless of the tethering protocol; confirm
+	 */
+	rndis_msg = kzalloc(sizeof(*rndis_msg), GFP_KERNEL);
+	if (!rndis_msg)
+		return -ENOMEM;
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_DISCONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(rndis_msg->name, rndis_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	rndis_msg->ifindex = rndis_ipa_ctx->net->ifindex;
+
+	/* on success, rndis_msg ownership passes to the IPA msg framework
+	 * which frees it via rndis_ipa_msg_free_cb
+	 */
+	retval = ipa_send_msg(&msg_meta, rndis_msg, rndis_ipa_msg_free_cb);
+	if (retval) {
+		RNDIS_IPA_ERROR("fail to send ECM_DISCONNECT for rndis\n");
+		kfree(rndis_msg);
+		return -EPERM;
+	}
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("queue stopped\n");
+
+	/* packets still in-flight through IPA are accounted as dropped */
+	outstanding_dropped_pkts =
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+	rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
+	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+
+	retval = rndis_ipa_destory_rm_resource(rndis_ipa_ctx);
+	if (retval) {
+		RNDIS_IPA_ERROR("Fail to clean RM\n");
+		return retval;
+	}
+	RNDIS_IPA_DEBUG("RM was successfully destroyed\n");
+
+	/* second pass: commit the state transition validated above */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+		RNDIS_IPA_DISCONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+	rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	pr_info("RNDIS_IPA NetDev pipes disconnected (%d outstanding clr)\n",
+		outstanding_dropped_pkts);
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+EXPORT_SYMBOL(rndis_ipa_pipe_disconnect_notify);
+
+/**
+ * rndis_ipa_cleanup() - unregister the network interface driver and free
+ *  internal data structs.
+ * @private: same value that was set by init(), this
+ *  parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g: when the USB composition does not support it.
+ * This function shall be called after the pipes were disconnected.
+ * Detailed description:
+ *  - remove header-insertion headers from IPA core
+ *  - delete the driver dependency defined for IPA resource manager and
+ *    destroy the producer resource.
+ *  - remove the debugfs entries
+ *  - deregister the network interface from Linux network stack
+ *  - free all internal data structs
+ *
+ * It is assumed that no packets shall be sent through HW bridging
+ * during cleanup to avoid packets trying to add an header that is
+ * removed during cleanup (IPA configuration manager should have
+ * removed them at this point)
+ */
+void rndis_ipa_cleanup(void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int retval;
+	unsigned long flags;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("private=0x%p\n", private);
+
+	if (!rndis_ipa_ctx) {
+		RNDIS_IPA_ERROR("rndis_ipa_ctx NULL pointer\n");
+		return;
+	}
+
+	/* first pass only validates that cleanup is legal from the current
+	 * state; the transition is committed by a second pass below, after
+	 * the teardown steps succeeded
+	 */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+		RNDIS_IPA_CLEANUP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use disconnect()before clean()\n");
+		return;
+	}
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	retval = rndis_ipa_deregister_properties(rndis_ipa_ctx->net->name);
+	if (retval) {
+		RNDIS_IPA_ERROR("Fail to deregister Tx/Rx properties\n");
+		return;
+	}
+	RNDIS_IPA_DEBUG("deregister Tx/Rx properties was successful\n");
+
+	/* header removal failure is non-fatal: cleanup continues anyway */
+	retval = rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
+	if (retval)
+		RNDIS_IPA_ERROR(
+			"Failed removing RNDIS headers from IPA core. Continue anyway\n");
+	else
+		RNDIS_IPA_DEBUG("RNDIS headers were removed from IPA core\n");
+
+	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
+	RNDIS_IPA_DEBUG("debugfs remove was done\n");
+
+	unregister_netdev(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netdev unregistered\n");
+
+	/* second pass: commit the state transition validated above */
+	spin_lock_irqsave(&rndis_ipa_ctx->state_lock, flags);
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state,
+		RNDIS_IPA_CLEANUP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+		RNDIS_IPA_ERROR("use disconnect()before clean()\n");
+		return;
+	}
+	rndis_ipa_ctx->state = next_state;
+	spin_unlock_irqrestore(&rndis_ipa_ctx->state_lock, flags);
+	/* frees the netdev together with the embedded driver context */
+	free_netdev(rndis_ipa_ctx->net);
+	pr_info("RNDIS_IPA NetDev was cleaned\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return;
+}
+EXPORT_SYMBOL(rndis_ipa_cleanup);
+
+
+/* open the data path: inform USB (when a hook was given) and start Tx */
+static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (!rndis_ipa_ctx->device_ready_notify) {
+		RNDIS_IPA_DEBUG("device_ready_notify() not supplied\n");
+	} else {
+		rndis_ipa_ctx->device_ready_notify();
+		RNDIS_IPA_DEBUG("USB device_ready_notify() was called\n");
+	}
+
+	netif_start_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
+}
+
+/*
+ * Tx failed inside IPA: stop the queue, undo the RNDIS encapsulation,
+ * and schedule a randomized-backoff work item to restart the queue.
+ */
+static void rndis_ipa_xmit_error(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+	unsigned long delay_jiffies;
+	u8 rand_delay_msec;
+	bool scheduled;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("starting Tx-queue backoff\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_stop_queue was called\n");
+
+	/* strip the RNDIS header pushed before the failed transmit */
+	skb_pull(skb, sizeof(rndis_template_hdr));
+	rndis_ipa_ctx->net->stats.tx_errors++;
+
+	/* add up to 255 msec of random jitter to the configured backoff */
+	get_random_bytes(&rand_delay_msec, sizeof(rand_delay_msec));
+	delay_jiffies = msecs_to_jiffies(
+		rndis_ipa_ctx->error_msec_sleep_time + rand_delay_msec);
+
+	scheduled = schedule_delayed_work(
+		&rndis_ipa_ctx->xmit_error_delayed_work, delay_jiffies);
+	if (scheduled) {
+		RNDIS_IPA_DEBUG("work scheduled to start Tx-queue in %d msec\n",
+			rndis_ipa_ctx->error_msec_sleep_time + rand_delay_msec);
+		rndis_ipa_ctx->during_xmit_error = true;
+	} else {
+		RNDIS_IPA_ERROR("fail to schedule delayed work\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/*
+ * Delayed-work handler scheduled by rndis_ipa_xmit_error(): clears the
+ * error flag and restarts the Tx queue, unless the driver state changed
+ * (e.g. pipes disconnected) while the backoff was pending.
+ */
+static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	struct delayed_work *delayed_work;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("Starting queue after xmit error\n");
+
+	delayed_work = to_delayed_work(work);
+	rndis_ipa_ctx = container_of(delayed_work, struct rndis_ipa_dev,
+		xmit_error_delayed_work);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_ERROR("error aftercare handling in bad state (%d)\n",
+			rndis_ipa_ctx->state);
+		return;
+	}
+
+	rndis_ipa_ctx->during_xmit_error = false;
+
+	netif_start_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/**
+ * rndis_ipa_prepare_header_insertion() - fill a single header-insertion
+ * request entry for the IPA driver
+ * @eth_type: the Ethernet type for this header-insertion header
+ * @hdr_name: string that shall represent this header in IPA data base
+ * @add_hdr: output entry for the caller, to be used with ipa_add_hdr()
+ * @dst_mac: tethered PC MAC (Ethernet) address to be added to packets
+ *  for IPA->USB pipe
+ * @src_mac: device MAC (Ethernet) address to be added to packets
+ *  for IPA->USB pipe
+ *
+ * Builds a template made of the RNDIS header immediately followed by a
+ * complete Ethernet header. IPA shall prepend this template to packets
+ * destined for the USB client when HW bridging is used; the SW data-path
+ * does not use this header.
+ */
+static void rndis_ipa_prepare_header_insertion(int eth_type,
+		const char *hdr_name, struct ipa_hdr_add *add_hdr,
+		const void *dst_mac, const void *src_mac)
+{
+	struct ethhdr *eth_hdr;
+
+	strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX);
+	add_hdr->is_partial = false;
+
+	/* RNDIS template first, Ethernet header right after it */
+	memcpy(add_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr));
+	eth_hdr = (struct ethhdr *)(add_hdr->hdr + sizeof(rndis_template_hdr));
+	memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
+	eth_hdr->h_proto = htons(eth_type);
+
+	add_hdr->hdr_len = sizeof(rndis_template_hdr) + ETH_HLEN;
+	add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	add_hdr->is_eth2_ofst_valid = true;
+	add_hdr->eth2_ofst = sizeof(rndis_template_hdr);
+}
+
+/**
+ * rndis_ipa_hdrs_cfg() - configure header insertion block in IPA core
+ *  to allow HW bridging
+ * @rndis_ipa_ctx: main driver context
+ * @dst_mac: destination MAC address (tethered PC)
+ * @src_mac: source MAC address (MDM device)
+ *
+ * This function shall add 2 headers.
+ * One header for Ipv4 and one header for Ipv6.
+ * Both headers shall contain Ethernet header and RNDIS header, the only
+ * difference shall be in the EtherTye field.
+ * Headers will be committed to HW
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_hdrs_cfg(struct rndis_ipa_dev *rndis_ipa_ctx,
+		const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	/* one ioc struct with two trailing ipa_hdr_add entries */
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		RNDIS_IPA_ERROR("mem allocation fail for header-insertion\n");
+		result = -ENOMEM;
+		goto fail_mem;
+	}
+
+	ipv4_hdr = &hdrs->hdr[0];
+	ipv6_hdr = &hdrs->hdr[1];
+	rndis_ipa_prepare_header_insertion(ETH_P_IP, IPV4_HDR_NAME,
+		ipv4_hdr, dst_mac, src_mac);
+	rndis_ipa_prepare_header_insertion(ETH_P_IPV6, IPV6_HDR_NAME,
+		ipv6_hdr, dst_mac, src_mac);
+
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto fail_add_hdr;
+	}
+	/* ipa_add_hdr() may succeed while an individual entry failed -
+	 * each entry carries its own status
+	 */
+	if (ipv4_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto fail_add_hdr;
+	}
+	if (ipv6_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto fail_add_hdr;
+	}
+	/* save the handles for later removal in rndis_ipa_hdrs_destroy() */
+	rndis_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	rndis_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+	RNDIS_IPA_LOG_EXIT();
+
+	/* success path intentionally falls through: hdrs is freed in
+	 * both the success and the failure cases
+	 */
+fail_add_hdr:
+	kfree(hdrs);
+fail_mem:
+	return result;
+}
+
+/**
+ * rndis_ipa_hdrs_destroy() - remove the IPA core configuration done for
+ *  the driver data path bridging.
+ * @rndis_ipa_ctx: the driver context
+ *
+ * Revert the work done on rndis_ipa_hdrs_cfg(): delete the two
+ * Ethernet+RNDIS headers (IPv4 and IPv6) using the handles that were
+ * saved at configuration time, and commit the deletion to HW.
+ */
+static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	struct ipa_ioc_del_hdr *del_hdr;
+	int result;
+
+	/* one ioc struct with two trailing ipa_hdr_del entries */
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr) {
+		RNDIS_IPA_ERROR("memory allocation for del_hdr failed\n");
+		return -ENOMEM;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+
+	ipv4 = &del_hdr->hdl[0];
+	ipv6 = &del_hdr->hdl[1];
+	ipv4->hdl = rndis_ipa_ctx->eth_ipv4_hdr_hdl;
+	ipv6->hdl = rndis_ipa_ctx->eth_ipv6_hdr_hdl;
+
+	result = ipa_del_hdr(del_hdr);
+	if (!result && !ipv4->status && !ipv6->status)
+		RNDIS_IPA_DEBUG("hdrs deletion done\n");
+	else
+		RNDIS_IPA_ERROR("ipa_del_hdr failed\n");
+
+	kfree(del_hdr);
+	return result;
+}
+
+/* ndo_get_stats hook: stats are kept directly on the netdev struct */
+static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net)
+{
+	struct net_device_stats *stats = &net->stats;
+
+	return stats;
+}
+
+
+/**
+ * rndis_ipa_register_properties() - set Tx/Rx properties needed
+ *  by IPA configuration manager
+ * @netdev_name: a string with the name of the network interface device
+ *
+ * Register Tx/Rx properties to allow user space configuration (IPA
+ * Configuration Manager):
+ *
+ * - Two Tx properties (IPA->USB): specify the header names and pipe number
+ *   that shall be used by user space for header-addition configuration
+ *   for ipv4/ipv6 packets flowing from IPA to USB for HW bridging data.
+ *   That header-addition header is added by the Netdev and used by user
+ *   space to close the the HW bridge by adding filtering and routing rules
+ *   that point to this header.
+ *
+ * - Two Rx properties (USB->IPA): these properties shall be used by user space
+ *   to configure the IPA core to identify the packets destined
+ *   for Apps-processor by configuring the unicast rules destined for
+ *   the Netdev IP address.
+ *   This rules shall be added based on the attribute mask supplied at
+ *   this function, that is, always hit rule.
+ *
+ * NOTE(review): the @netdev_name parameter is unused - the interface is
+ * registered under the hardcoded name "rndis0", while
+ * rndis_ipa_deregister_properties() is called with net->name. Confirm the
+ * two always match, or register with @netdev_name instead.
+ */
+static int rndis_ipa_register_properties(char *netdev_name)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	/* Tx: one property per IP family, both pointing at the USB pipe
+	 * and at the header-insertion headers added by rndis_ipa_hdrs_cfg()
+	 */
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy(ipv4_property->hdr_name, IPV4_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy(ipv6_property->hdr_name, IPV6_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	tx_properties.num_props = 2;
+
+	/* Rx: attrib_mask of 0 means an always-hit rule */
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_USB_PROD;
+	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_USB_PROD;
+	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_properties.num_props = 2;
+
+	result = ipa_register_intf("rndis0", &tx_properties, &rx_properties);
+	if (result)
+		RNDIS_IPA_ERROR("fail on Tx/Rx properties registration\n");
+	else
+		RNDIS_IPA_DEBUG("Tx/Rx properties registration done\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return result;
+}
+
+/**
+ * rndis_ipa_deregister_properties() - remove the 2 Tx and 2 Rx properties
+ * @netdev_name: a string with the name of the network interface device
+ *
+ * Reverts the work done on rndis_ipa_register_properties() by
+ * deregistering the interface from the IPA driver.
+ */
+static int rndis_ipa_deregister_properties(char *netdev_name)
+{
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	result = ipa_deregister_intf(netdev_name);
+	if (result) {
+		RNDIS_IPA_DEBUG("Fail on Tx prop deregister\n");
+	} else {
+		RNDIS_IPA_LOG_EXIT();
+	}
+
+	return result;
+}
+
+/**
+ * rndis_ipa_create_rm_resource() -creates the resource representing
+ *  this Netdev and supply notification callback for resource event
+ *  such as Grant/Release
+ * @rndis_ipa_ctx: this driver context
+ *
+ * In order make sure all needed resources are available during packet
+ * transmit this Netdev shall use Request/Release mechanism of
+ * the IPA resource manager.
+ * This mechanism shall iterate over a dependency graph and make sure
+ * all dependent entities are ready to for packet Tx
+ * transfer (Apps->IPA->USB).
+ * In this function the resource representing the Netdev is created
+ * in addition to the basic dependency between the Netdev and the USB client.
+ * Hence, USB client, is a dependency for the Netdev and may be notified in
+ * case of packet transmit from this Netdev to tethered Host.
+ * As implied from the "may" in the above sentence there is a scenario where
+ * the USB is not notified. This is done thanks to the IPA resource manager
+ * inactivity timer.
+ * The inactivity timer allow the Release requests to be delayed in order
+ * prevent ping-pong with the USB and other dependencies.
+ *
+ * Returns negative errno on failure, zero otherwise (dependency-add
+ * failures are logged but do not fail the call, matching the original
+ * behavior).
+ */
+static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_rm_create_params create_params = {0};
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	create_params.name = DRV_RESOURCE_ID;
+	create_params.reg_params.user_data = rndis_ipa_ctx;
+	create_params.reg_params.notify_cb = rndis_ipa_rm_notify;
+	result = ipa_rm_create_resource(&create_params);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+		goto fail_rm_create;
+	}
+	RNDIS_IPA_DEBUG("RM client was created\n");
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile);
+
+	result = ipa_rm_inactivity_timer_init(DRV_RESOURCE_ID,
+			INACTIVITY_MSEC_DELAY);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+		goto fail_inactivity_timer;
+	}
+
+	RNDIS_IPA_DEBUG("rm_it client was created\n");
+
+	/* -EINPROGRESS means the dependency will complete asynchronously */
+	result = ipa_rm_add_dependency_sync(DRV_RESOURCE_ID,
+			IPA_RM_RESOURCE_USB_CONS);
+
+	if (result && result != -EINPROGRESS)
+		RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n",
+				result);
+	else
+		RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n");
+
+	result = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (result && result != -EINPROGRESS)
+		RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n",
+				result);
+	else
+		RNDIS_IPA_DEBUG("USB/APPS dependency was set\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_inactivity_timer:
+	/* undo ipa_rm_create_resource() so the resource is not leaked
+	 * and a later retry can re-create it
+	 */
+	ipa_rm_delete_resource(DRV_RESOURCE_ID);
+fail_rm_create:
+	return result;
+}
+
+/**
+ * rndis_ipa_destory_rm_resource() - delete the dependency and destroy
+ *  the resource done on rndis_ipa_create_rm_resource()
+ * @rndis_ipa_ctx: this driver context
+ *
+ * This function shall delete the dependency create between
+ * the Netdev to the USB.
+ * In addition the inactivity time shall be destroy and the resource shall
+ * be deleted.
+ * (function name keeps the historical "destory" spelling since it is
+ * referenced elsewhere in this file)
+ */
+static int rndis_ipa_destory_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	result = ipa_rm_delete_dependency(DRV_RESOURCE_ID,
+			IPA_RM_RESOURCE_USB_CONS);
+	if (result && result != -EINPROGRESS) {
+		RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n");
+
+	result = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (result == -EINPROGRESS) {
+		RNDIS_IPA_DEBUG("RM dependency deletion is in progress");
+	} else if (result) {
+		RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n");
+		goto bail;
+	} else {
+		RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n");
+	}
+
+	result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail to destroy inactivity timer\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroy\n");
+
+	result = ipa_rm_delete_resource(DRV_RESOURCE_ID);
+	if (result) {
+		RNDIS_IPA_ERROR("resource deletion failed\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("Netdev RM resource was deleted (resid:%d)\n",
+		DRV_RESOURCE_ID);
+
+
+	RNDIS_IPA_LOG_EXIT();
+
+bail:
+	return result;
+}
+
+/**
+ * resource_request() - request for the Netdev resource
+ * @rndis_ipa_ctx: main driver context
+ *
+ * Ask the IPA resource manager inactivity timer to grant the Netdev
+ * producer resource. When RM is bypassed (rm_enable off) the request is
+ * skipped and 0 is returned. A return of -EINPROGRESS means the grant
+ * will arrive later via rndis_ipa_rm_notify() and the caller should
+ * stop the send queue meanwhile.
+ */
+static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (!rm_enabled(rndis_ipa_ctx))
+		return 0;
+
+	return ipa_rm_inactivity_timer_request_resource(DRV_RESOURCE_ID);
+}
+
+/**
+ * resource_release() - release the Netdev resource
+ * @rndis_ipa_ctx: main driver context
+ *
+ * Start the inactivity timer count down via the IPA resource manager.
+ * The actual release only happens if no new request arrives within
+ * INACTIVITY_MSEC_DELAY. Skipped entirely when RM is bypassed.
+ */
+static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (rm_enabled(rndis_ipa_ctx))
+		ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
+}
+
+/**
+ * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
+ *  an RNDIS header
+ * @skb: packet to be encapsulated with the RNDIS header
+ *
+ * Shall use a template header for RNDIS and update it with the given
+ * skb values.
+ * Ethernet is expected to be already encapsulate the packet.
+ *
+ * Returns the encapsulated skb (possibly a reallocated copy when the
+ * original lacked headroom); on copy failure the ORIGINAL skb is
+ * returned unencapsulated.
+ */
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb)
+{
+	struct rndis_pkt_hdr *rndis_hdr;
+	/* capture the payload length before skb_push() grows skb->len */
+	int payload_byte_len = skb->len;
+
+	/* if there is no room in this skb, allocate a new one */
+	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
+		struct sk_buff *new_skb = skb_copy_expand(skb,
+			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
+		if (!new_skb) {
+			RNDIS_IPA_ERROR("no memory for skb expand\n");
+			/* NOTE(review): caller receives the skb without the
+			 * RNDIS header in this case - confirm downstream
+			 * handling
+			 */
+			return skb;
+		}
+		RNDIS_IPA_DEBUG("skb expanded. old %p new %p\n", skb, new_skb);
+		dev_kfree_skb_any(skb);
+		skb = new_skb;
+	}
+
+	/* make room at the head of the SKB to put the RNDIS header */
+	rndis_hdr = (struct rndis_pkt_hdr *)skb_push(skb,
+					sizeof(rndis_template_hdr));
+
+	/* template carries the base header sizes; add the payload length */
+	memcpy(rndis_hdr, &rndis_template_hdr, sizeof(*rndis_hdr));
+	rndis_hdr->msg_len +=  payload_byte_len;
+	rndis_hdr->data_len +=  payload_byte_len;
+
+	return skb;
+}
+
+/**
+ * rx_filter() - decide if the current Rx skb is to be filtered out
+ * @skb: skb that may be sent up to the network stack
+ *
+ * Netdev-level Rx filtering: when the driver's rx_filter flag is set,
+ * every received packet is dropped.
+ */
+static bool rx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *ctx = netdev_priv(skb->dev);
+
+	return ctx->rx_filter;
+}
+
+/**
+ * tx_filter() - logic that decide if the current skb is to be filtered out
+ * @skb: skb that may be sent to the USB core
+ *
+ * This function shall do Tx packet filtering on the Netdev level.
+ * ICMP filter bypass is possible to allow only ICMP packet to be
+ * sent (pings and etc)
+ *
+ * Returns true when the packet should be dropped.
+ */
+
+static bool tx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+	bool is_icmp;
+
+	/* fast path: filtering disabled, let everything through */
+	if (likely(rndis_ipa_ctx->tx_filter == false))
+		return false;
+
+	is_icmp = (skb->protocol == htons(ETH_P_IP)	&&
+		ip_hdr(skb)->protocol == IPPROTO_ICMP);
+
+	/* NOTE(review): with tx_filter on, icmp_filter==false lets ICMP
+	 * packets bypass the filter (everything else is dropped) -
+	 * confirm the intended polarity of icmp_filter
+	 */
+	if ((rndis_ipa_ctx->icmp_filter == false) && is_icmp)
+		return false;
+
+	return true;
+}
+
+/**
+ * rm_enabled() - allow the use of resource manager Request/Release to
+ *  be bypassed
+ * @rndis_ipa_ctx: main driver context
+ *
+ * When the rm_enable flag is cleared, resource Request/Release calls
+ * are skipped and packets are sent without consulting the IPA resource
+ * manager.
+ */
+static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	bool enabled = rndis_ipa_ctx->rm_enable;
+
+	return enabled;
+}
+
+/**
+ * rndis_ipa_ep_registers_cfg() - configure the USB endpoints
+ * @usb_to_ipa_hdl: handle received from ipa_connect which represents
+ *  the USB to IPA end-point
+ * @ipa_to_usb_hdl: handle received from ipa_connect which represents
+ *  the IPA to USB end-point
+ * @max_xfer_size_bytes_to_dev: the maximum size, in bytes, that the device
+ *  expects to receive from the host. supplied on REMOTE_NDIS_INITIALIZE_CMPLT.
+ * @max_xfer_size_bytes_to_host: the maximum size, in bytes, that the host
+ *  expects to receive from the device. supplied on REMOTE_NDIS_INITIALIZE_MSG.
+ * @mtu: the netdev MTU size, in bytes
+ * @deaggr_enable: select the pre-built EP config with HW de-aggregation
+ *  on or off for the USB->IPA direction
+ *
+ * USB to IPA pipe:
+ *  - de-aggregation
+ *  - Remove Ethernet header
+ *  - Remove RNDIS header
+ *  - SRC NAT
+ *  - Default routing(0)
+ * IPA to USB Pipe:
+ *  - aggregation
+ *  - Add Ethernet header
+ *  - Add RNDIS header
+ */
+static int rndis_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl,
+		u32 ipa_to_usb_hdl,
+		u32 max_xfer_size_bytes_to_dev,
+		u32 max_xfer_size_bytes_to_host,
+		u32 mtu,
+		bool deaggr_enable)
+{
+	int result;
+	struct ipa_ep_cfg *usb_to_ipa_ep_cfg;
+
+	if (deaggr_enable) {
+		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_en;
+		RNDIS_IPA_DEBUG("deaggregation enabled\n");
+	} else {
+		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_dis;
+		RNDIS_IPA_DEBUG("deaggregation disabled\n");
+	}
+
+	usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev;
+	result = ipa_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg);
+	if (result) {
+		pr_err("failed to configure USB to IPA point\n");
+		return result;
+	}
+	RNDIS_IPA_DEBUG("IPA<-USB end-point configured\n");
+
+	/* aggr_byte_limit is expressed in KB units, hence the /1024;
+	 * one MTU is reserved so an aggregated frame plus one packet
+	 * still fits within what the host can accept
+	 */
+	ipa_to_usb_ep_cfg.aggr.aggr_byte_limit =
+		(max_xfer_size_bytes_to_host - mtu)/1024;
+
+	/* a zero byte-limit would mean unbounded aggregation; fall back
+	 * to one-packet-per-transfer instead
+	 */
+	if (ipa_to_usb_ep_cfg.aggr.aggr_byte_limit == 0) {
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit = 0;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = 1;
+	} else {
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit =
+			DEFAULT_AGGR_TIME_LIMIT;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit =
+			DEFAULT_AGGR_PKT_LIMIT;
+	}
+
+	RNDIS_IPA_DEBUG("RNDIS aggregation param:"
+			" en=%d"
+			" byte_limit=%d"
+			" time_limit=%d"
+			" pkt_limit=%d\n",
+			ipa_to_usb_ep_cfg.aggr.aggr_en,
+			ipa_to_usb_ep_cfg.aggr.aggr_byte_limit,
+			ipa_to_usb_ep_cfg.aggr.aggr_time_limit,
+			ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit);
+
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		pr_err("failed to configure IPA to USB end-point\n");
+		return result;
+	}
+	RNDIS_IPA_DEBUG("IPA->USB end-point configured\n");
+
+	return 0;
+}
+
/**
 * rndis_ipa_set_device_ethernet_addr() - set device Ethernet address
 * @dev_ethaddr: output buffer (ETH_ALEN bytes) that receives the address
 * @device_ethaddr: the Ethernet address to validate and copy
 *
 * Returns 0 for success, negative otherwise
 */
static int rndis_ipa_set_device_ethernet_addr(u8 *dev_ethaddr,
		u8 device_ethaddr[])
{
	/* reject multicast/zero addresses before committing the copy */
	if (!is_valid_ether_addr(device_ethaddr))
		return -EINVAL;
	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);

	return 0;
}
+
+/** rndis_ipa_next_state - return the next state of the driver
+ * @current_state: the current state of the driver
+ * @operation: an enum which represent the operation being made on the driver
+ * by its API.
+ *
+ * This function implements the driver internal state machine.
+ * Its decisions are based on the driver current state and the operation
+ * being made.
+ * In case the operation is invalid this state machine will return
+ * the value RNDIS_IPA_INVALID to inform the caller for a forbidden sequence.
+ */
+static enum rndis_ipa_state rndis_ipa_next_state(
+ enum rndis_ipa_state current_state,
+ enum rndis_ipa_operation operation)
+{
+ int next_state = RNDIS_IPA_INVALID;
+
+ switch (current_state) {
+ case RNDIS_IPA_UNLOADED:
+ if (operation == RNDIS_IPA_INITIALIZE)
+ next_state = RNDIS_IPA_INITIALIZED;
+ break;
+ case RNDIS_IPA_INITIALIZED:
+ if (operation == RNDIS_IPA_CONNECT)
+ next_state = RNDIS_IPA_CONNECTED;
+ else if (operation == RNDIS_IPA_OPEN)
+ next_state = RNDIS_IPA_UP;
+ else if (operation == RNDIS_IPA_CLEANUP)
+ next_state = RNDIS_IPA_UNLOADED;
+ break;
+ case RNDIS_IPA_CONNECTED:
+ if (operation == RNDIS_IPA_DISCONNECT)
+ next_state = RNDIS_IPA_INITIALIZED;
+ else if (operation == RNDIS_IPA_OPEN)
+ next_state = RNDIS_IPA_CONNECTED_AND_UP;
+ break;
+ case RNDIS_IPA_UP:
+ if (operation == RNDIS_IPA_STOP)
+ next_state = RNDIS_IPA_INITIALIZED;
+ else if (operation == RNDIS_IPA_CONNECT)
+ next_state = RNDIS_IPA_CONNECTED_AND_UP;
+ else if (operation == RNDIS_IPA_CLEANUP)
+ next_state = RNDIS_IPA_UNLOADED;
+ break;
+ case RNDIS_IPA_CONNECTED_AND_UP:
+ if (operation == RNDIS_IPA_STOP)
+ next_state = RNDIS_IPA_CONNECTED;
+ else if (operation == RNDIS_IPA_DISCONNECT)
+ next_state = RNDIS_IPA_UP;
+ break;
+ default:
+ RNDIS_IPA_ERROR("State is not supported\n");
+ WARN_ON(true);
+ break;
+ }
+
+ RNDIS_IPA_DEBUG("state transition ( %s -> %s )- %s\n",
+ rndis_ipa_state_string(current_state),
+ rndis_ipa_state_string(next_state) ,
+ next_state == RNDIS_IPA_INVALID ?
+ "Forbidden" : "Allowed");
+
+ return next_state;
+}
+
/**
 * rndis_ipa_state_string - return the state string representation
 * @state: enum which describe the state
 *
 * Return: constant name of the state, or "Not supported" for values
 * outside the state machine (e.g. RNDIS_IPA_INVALID).
 */
static const char *rndis_ipa_state_string(enum rndis_ipa_state state)
{
	switch (state) {
	case RNDIS_IPA_UNLOADED:
		return "RNDIS_IPA_UNLOADED";
	case RNDIS_IPA_INITIALIZED:
		return "RNDIS_IPA_INITIALIZED";
	case RNDIS_IPA_CONNECTED:
		return "RNDIS_IPA_CONNECTED";
	case RNDIS_IPA_UP:
		return "RNDIS_IPA_UP";
	case RNDIS_IPA_CONNECTED_AND_UP:
		return "RNDIS_IPA_CONNECTED_AND_UP";
	default:
		return "Not supported";
	}
}
+
+static void rndis_ipa_dump_skb(struct sk_buff *skb)
+{
+ int i;
+ u32 *cur = (u32 *)skb->data;
+ u8 *byte;
+
+ RNDIS_IPA_DEBUG("packet dump start for skb->len=%d\n",
+ skb->len);
+
+ for (i = 0; i < (skb->len/4); i++) {
+ byte = (u8 *)(cur + i);
+ pr_info("%2d %08x %02x %02x %02x %02x\n",
+ i, *(cur + i),
+ byte[0], byte[1], byte[2], byte[3]);
+ }
+ RNDIS_IPA_DEBUG("packet dump ended for skb->len=%d\n",
+ skb->len);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * Creates the root folder for the driver
+ */
+static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+ const mode_t flags_read_write = S_IRUGO | S_IWUGO;
+ const mode_t flags_read_only = S_IRUGO;
+ const mode_t flags_write_only = S_IWUGO;
+ struct dentry *file;
+ struct dentry *aggr_directory;
+
+ RNDIS_IPA_LOG_ENTRY();
+
+ if (!rndis_ipa_ctx)
+ return;
+
+ rndis_ipa_ctx->directory = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+ if (!rndis_ipa_ctx->directory) {
+ RNDIS_IPA_ERROR("could not create debugfs directory entry\n");
+ goto fail_directory;
+ }
+
+ file = debugfs_create_bool("tx_filter", flags_read_write,
+ rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_filter);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create debugfs tx_filter file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("rx_filter", flags_read_write,
+ rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_filter);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create debugfs rx_filter file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("icmp_filter", flags_read_write,
+ rndis_ipa_ctx->directory, &rndis_ipa_ctx->icmp_filter);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create debugfs icmp_filter file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("rm_enable", flags_read_write,
+ rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create debugfs rm file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("outstanding_high", flags_read_write,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->outstanding_high);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create outstanding_high file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("outstanding_low", flags_read_write,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->outstanding_low);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create outstanding_low file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_file("outstanding", flags_read_only,
+ rndis_ipa_ctx->directory,
+ rndis_ipa_ctx, &rndis_ipa_debugfs_atomic_ops);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create outstanding file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u8("state", flags_read_only,
+ rndis_ipa_ctx->directory, (u8 *)&rndis_ipa_ctx->state);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create state file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("tx_dropped", flags_read_only,
+ rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_dropped);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create tx_dropped file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("rx_dropped", flags_read_only,
+ rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_dropped);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create rx_dropped file\n");
+ goto fail_file;
+ }
+
+ aggr_directory = debugfs_create_dir(DEBUGFS_AGGR_DIR_NAME,
+ rndis_ipa_ctx->directory);
+ if (!aggr_directory) {
+ RNDIS_IPA_ERROR("could not create debugfs aggr entry\n");
+ goto fail_directory;
+ }
+
+ file = debugfs_create_file("aggr_value_set", flags_write_only,
+ aggr_directory,
+ rndis_ipa_ctx, &rndis_ipa_aggr_ops);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create aggr_value_set file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u8("aggr_enable", flags_read_write,
+ aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr_en);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create aggr_enable file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u8("aggr_type", flags_read_write,
+ aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create aggr_type file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("aggr_byte_limit", flags_read_write,
+ aggr_directory,
+ &ipa_to_usb_ep_cfg.aggr.aggr_byte_limit);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create aggr_byte_limit file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("aggr_time_limit", flags_read_write,
+ aggr_directory,
+ &ipa_to_usb_ep_cfg.aggr.aggr_time_limit);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create aggr_time_limit file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("aggr_pkt_limit", flags_read_write,
+ aggr_directory,
+ &ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit);
+ if (!file) {
+ RNDIS_IPA_ERROR("could not create aggr_pkt_limit file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("tx_dump_enable", flags_read_write,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->tx_dump_enable);
+ if (!file) {
+ RNDIS_IPA_ERROR("fail to create tx_dump_enable file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("rx_dump_enable", flags_read_write,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->rx_dump_enable);
+ if (!file) {
+ RNDIS_IPA_ERROR("fail to create rx_dump_enable file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("deaggregation_enable", flags_read_write,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->deaggregation_enable);
+ if (!file) {
+ RNDIS_IPA_ERROR("fail to create deaggregation_enable file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_u32("error_msec_sleep_time", flags_read_write,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->error_msec_sleep_time);
+ if (!file) {
+ RNDIS_IPA_ERROR("fail to create error_msec_sleep_time file\n");
+ goto fail_file;
+ }
+
+ file = debugfs_create_bool("during_xmit_error", flags_read_only,
+ rndis_ipa_ctx->directory,
+ &rndis_ipa_ctx->during_xmit_error);
+ if (!file) {
+ RNDIS_IPA_ERROR("fail to create during_xmit_error file\n");
+ goto fail_file;
+ }
+
+ RNDIS_IPA_DEBUG("debugfs entries were created\n");
+ RNDIS_IPA_LOG_EXIT();
+
+ return;
+fail_file:
+ debugfs_remove_recursive(rndis_ipa_ctx->directory);
+fail_directory:
+ return;
+}
+
/**
 * rndis_ipa_debugfs_destroy() - remove the driver debugfs hierarchy
 * @rndis_ipa_ctx: main driver context
 *
 * debugfs_remove_recursive() tolerates a NULL dentry, so this is safe
 * even if rndis_ipa_debugfs_init() bailed out early.
 */
static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
{
	debugfs_remove_recursive(rndis_ipa_ctx->directory);
}
+
#else /* !CONFIG_DEBUG_FS */

/* debugfs disabled: empty stubs keep the call sites unconditional */
static void rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx) {}

static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx) {}

#endif /* CONFIG_DEBUG_FS*/
+
+static int rndis_ipa_debugfs_aggr_open(struct inode *inode,
+ struct file *file)
+{
+ struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+ file->private_data = rndis_ipa_ctx;
+
+ return 0;
+}
+
+
/**
 * rndis_ipa_debugfs_aggr_write() - apply the debugfs aggregation settings
 * @file: debugfs file; private_data holds the driver context
 * @buf: user buffer (content ignored - any write triggers re-config)
 * @count: number of bytes written, returned as-is on success
 * @ppos: file position (unused)
 *
 * NOTE(review): the IPA->USB aggregation config (ipa_to_usb_ep_cfg) is
 * applied on usb_to_ipa_hdl and the log messages mention the USB->IPA
 * end-point - verify the intended handle/direction against the IPA driver.
 */
static ssize_t rndis_ipa_debugfs_aggr_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{
	struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data;
	int result;

	result = ipa_cfg_ep(rndis_ipa_ctx->usb_to_ipa_hdl, &ipa_to_usb_ep_cfg);
	if (result) {
		pr_err("failed to re-configure USB to IPA point\n");
		return result;
	}
	pr_info("IPA<-USB end-point re-configured\n");

	return count;
}
+
+static int rndis_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+ struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+
+ RNDIS_IPA_LOG_ENTRY();
+
+ file->private_data = &(rndis_ipa_ctx->outstanding_pkts);
+
+ RNDIS_IPA_LOG_EXIT();
+
+ return 0;
+}
+
+static ssize_t rndis_ipa_debugfs_atomic_read(struct file *file,
+ char __user *ubuf, size_t count, loff_t *ppos)
+{
+ int nbytes;
+ u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+ atomic_t *atomic_var = file->private_data;
+
+ RNDIS_IPA_LOG_ENTRY();
+
+ nbytes = scnprintf(atomic_str, sizeof(atomic_str), "%d\n",
+ atomic_read(atomic_var));
+
+ RNDIS_IPA_LOG_EXIT();
+
+ return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
/**
 * rndis_ipa_init_module() - module entry point; just announces the load
 *
 * Return: always 0
 */
static int rndis_ipa_init_module(void)
{
	/* fix: printk messages must be newline terminated */
	pr_info("RNDIS_IPA module is loaded.\n");
	return 0;
}
+
/**
 * rndis_ipa_cleanup_module() - module exit point; just announces the unload
 */
static void rndis_ipa_cleanup_module(void)
{
	/* fix: newline-terminate the printk; redundant return dropped */
	pr_info("RNDIS_IPA module is unloaded.\n");
}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RNDIS_IPA network interface");
+
+late_initcall(rndis_ipa_init_module);
+module_exit(rndis_ipa_cleanup_module);
diff --git a/drivers/net/ethernet/msm/rndis_ipa_trace.h b/drivers/net/ethernet/msm/rndis_ipa_trace.h
new file mode 100644
index 000000000000..c0fc573799f2
--- /dev/null
+++ b/drivers/net/ethernet/msm/rndis_ipa_trace.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rndis_ipa
+#define TRACE_INCLUDE_FILE rndis_ipa_trace
+
+#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RNDIS_IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+ rndis_netif_ni,
+
+ TP_PROTO(unsigned long proto),
+
+ TP_ARGS(proto),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, proto)
+ ),
+
+ TP_fast_assign(
+ __entry->proto = proto;
+ ),
+
+ TP_printk("proto =%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+ rndis_tx_dp,
+
+ TP_PROTO(unsigned long proto),
+
+ TP_ARGS(proto),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, proto)
+ ),
+
+ TP_fast_assign(
+ __entry->proto = proto;
+ ),
+
+ TP_printk("proto =%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+ rndis_status_rcvd,
+
+ TP_PROTO(unsigned long proto),
+
+ TP_ARGS(proto),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, proto)
+ ),
+
+ TP_fast_assign(
+ __entry->proto = proto;
+ ),
+
+ TP_printk("proto =%lu\n", __entry->proto)
+);
+
+#endif /* _RNDIS_IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>