summaryrefslogtreecommitdiff
path: root/drivers/net/can
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/can')
-rw-r--r--drivers/net/can/spi/Kconfig17
-rw-r--r--drivers/net/can/spi/Makefile3
-rw-r--r--drivers/net/can/spi/k61.c964
-rw-r--r--drivers/net/can/spi/qti-can.c1509
-rw-r--r--drivers/net/can/spi/rh850.c1208
5 files changed, 3701 insertions, 0 deletions
diff --git a/drivers/net/can/spi/Kconfig b/drivers/net/can/spi/Kconfig
index 249d2db7d600..618ec96b5adb 100644
--- a/drivers/net/can/spi/Kconfig
+++ b/drivers/net/can/spi/Kconfig
@@ -8,4 +8,21 @@ config CAN_MCP251X
Driver for the Microchip MCP251x and MCP25625 SPI CAN
controllers.
+config CAN_RH850
+	tristate "Renesas RH850 SPI CAN controller"
+	depends on SPI && HAS_DMA
+	---help---
+	  Driver for the Renesas RH850 SPI CAN controller.
+
+config CAN_K61
+	tristate "Freescale K61 SPI CAN controllers"
+	depends on SPI
+	---help---
+	  Driver for the Freescale K61 SPI CAN controllers.
+
+config QTI_CAN
+	tristate "Unified driver for QTI CAN controllers"
+	depends on SPI
+	---help---
+	  Unified driver for QTI CAN controllers.
endmenu
diff --git a/drivers/net/can/spi/Makefile b/drivers/net/can/spi/Makefile
index 0e86040cdd8c..c1951ccc0034 100644
--- a/drivers/net/can/spi/Makefile
+++ b/drivers/net/can/spi/Makefile
@@ -4,3 +4,6 @@
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
+obj-$(CONFIG_CAN_RH850) += rh850.o
+obj-$(CONFIG_CAN_K61) += k61.o
+obj-$(CONFIG_QTI_CAN) += qti-can.o
diff --git a/drivers/net/can/spi/k61.c b/drivers/net/can/spi/k61.c
new file mode 100644
index 000000000000..7830d5badb94
--- /dev/null
+++ b/drivers/net/can/spi/k61.c
@@ -0,0 +1,964 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/pm.h>
+
+#define DEBUG_K61 0
+#if DEBUG_K61 == 1
+#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNI(...) netdev_info(netdev, __VA_ARGS__)
+#else
+#define LOGDI(...)
+#define LOGNI(...)
+#endif
+#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNE(...) netdev_err(netdev, __VA_ARGS__)
+
+#define MAX_TX_BUFFERS 1
+#define XFER_BUFFER_SIZE 64
+#define K61_CLOCK 120000000
+#define K61_MAX_CHANNELS 1
+#define K61_FW_QUERY_RETRY_COUNT 3
+
+/* Driver-level state, one per bound SPI device. */
+struct k61_can {
+	struct net_device *netdev;	/* CAN netdev registered by this driver */
+	struct spi_device *spidev;	/* underlying SPI slave device */
+
+	struct mutex spi_lock; /* SPI device lock */
+
+	struct workqueue_struct *tx_wq;	/* workqueue for deferred TX transfers */
+	char *tx_buf, *rx_buf;	/* fixed-size MOSI/MISO transfer buffers */
+	int xfer_length;	/* bytes valid in the current transfer */
+	atomic_t msg_seq;	/* sequence number stamped into each request */
+
+	atomic_t netif_queue_stop;	/* queue-stop flag (set in probe path) */
+	struct completion response_completion;	/* signalled on wait_cmd reply */
+	int reset;		/* reset GPIO number (DT "reset-gpio") */
+	int wait_cmd;		/* SPI cmd id a waiter expects a reply for */
+	int cmd_result;		/* result posted by the response handler */
+	int bits_per_word;	/* SPI word size (DT, default 16) */
+	int reset_delay_msec;	/* post-reset boot delay (DT, default 1) */
+};
+
+/* Per-netdev private area: standard can_priv plus a pointer back to the
+ * driver-level k61_can instance.
+ */
+struct k61_netdev_privdata {
+	struct can_priv can;
+	struct k61_can *k61_can;
+};
+
+/* One TX request deferred to the tx workqueue. */
+struct k61_tx_work {
+	struct work_struct work;
+	struct sk_buff *skb;
+	struct net_device *netdev;
+};
+
+/* Message definitions */
+struct spi_mosi { /* TLV for MOSI line */
+	u8 cmd;
+	u8 len;
+	u16 seq;
+	u8 data[];
+} __packed;
+
+struct spi_miso { /* TLV for MISO line */
+	u8 cmd;
+	u8 len;
+	u16 seq; /* should match seq field from request, or 0 for unsols */
+	u8 data[];
+} __packed;
+
+#define CMD_GET_FW_VERSION 0x81
+#define CMD_CAN_SEND_FRAME 0x82
+#define CMD_CAN_ADD_FILTER 0x83
+#define CMD_CAN_REMOVE_FILTER 0x84
+#define CMD_CAN_RECEIVE_FRAME 0x85
+#define CMD_CAN_DATA_BUFF_ADD 0x87
+#define CMD_CAN_DATA_BUFF_REMOVE 0x88
+#define CMD_CAN_RELEASE_BUFFER 0x89
+#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A
+
+#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0)
+#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1)
+#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2)
+#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3)
+#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5)
+#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6)
+
+/* Payload of a CMD_GET_FW_VERSION response. */
+struct can_fw_resp {
+	u8 maj;
+	u8 min;
+	u8 ver;
+} __packed;
+
+/* Payload of a CMD_CAN_SEND_FRAME request (host -> controller). */
+struct can_write_req {
+	u32 ts;
+	u32 mid;
+	u8 dlc;
+	u8 data[];
+} __packed;
+
+/* Payload of a CMD_CAN_SEND_FRAME response. */
+struct can_write_resp {
+	u8 err;
+} __packed;
+
+/* Payload of a CMD_CAN_RECEIVE_FRAME notification (controller -> host).
+ * The RX path decodes ts/mid with le32_to_cpu(), i.e. they are
+ * little-endian on the wire; ts is in milliseconds.
+ */
+struct can_receive_frame {
+	u32 ts;
+	u32 mid;
+	u8 dlc;
+	u8 data[];
+} __packed;
+
+/* Payload of CMD_CAN_ADD_FILTER / CMD_CAN_REMOVE_FILTER; also the
+ * layout copied from user space by the filter ioctls.
+ */
+struct can_add_filter_req {
+	u8 can_if;
+	u32 mid;
+	u32 mask;
+	u8 type;
+} __packed;
+
+/* Bit-timing limits advertised to the CAN core for this controller. */
+static struct can_bittiming_const k61_bittiming_const = {
+	.name = "k61",
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 4,
+	.brp_max = 1023,
+	.brp_inc = 1,
+};
+
+/* Payload of CMD_CAN_DATA_BUFF_ADD (enable buffering for a match). */
+struct k61_add_can_buffer {
+	u8 can_if;
+	u32 mid;
+	u32 mask;
+} __packed;
+
+/* Payload of CMD_CAN_DATA_BUFF_REMOVE (disable buffering for a match). */
+struct k61_delete_can_buffer {
+	u8 can_if;
+	u32 mid;
+	u32 mask;
+} __packed;
+
+static int k61_rx_message(struct k61_can *priv_data);
+
+/* Threaded IRQ handler: the controller raises the interrupt line when
+ * it has data for the host, so drain it with a full-duplex SPI read.
+ */
+static irqreturn_t k61_irq(int irq, void *priv)
+{
+	struct k61_can *priv_data = priv;
+
+	LOGDI("k61_irq\n");
+	k61_rx_message(priv_data);
+	return IRQ_HANDLED;
+}
+
+/* Report a malformed frame from the firmware (e.g. DLC > 8) to the
+ * stack as a bus-error / protocol-form error frame.  @frame is unused
+ * beyond having triggered this path.
+ */
+static void k61_frame_error(struct k61_can *priv_data,
+			    struct can_receive_frame *frame)
+{
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct net_device *netdev;
+
+	netdev = priv_data->netdev;
+	skb = alloc_can_err_skb(netdev, &cf);
+	if (!skb) {
+		LOGDE("skb alloc failed\n");
+		return;
+	}
+
+	cf->can_id |= CAN_ERR_BUSERROR;
+	cf->data[2] |= CAN_ERR_PROT_FORM;
+	netdev->stats.rx_errors++;
+	/* NOTE(review): cf is read after netif_rx() hands the skb to the
+	 * stack - confirm this cannot race with skb consumption.
+	 */
+	netif_rx(skb);
+	netdev->stats.rx_packets++;
+	netdev->stats.rx_bytes += cf->can_dlc;
+}
+
+/* Translate one firmware RX TLV into a CAN skb, attach the firmware's
+ * millisecond timestamp as hardware and software timestamp, and feed
+ * the skb to the network stack.
+ *
+ * Fixes vs. original: @msec had a needless 'static' qualifier (it is
+ * always reassigned before use), and rx_bytes was accounted by reading
+ * cf->can_dlc *after* netif_rx(), i.e. after skb ownership had already
+ * passed to the stack; the DLC is now captured beforehand.
+ */
+static void k61_receive_frame(struct k61_can *priv_data,
+			      struct can_receive_frame *frame)
+{
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct skb_shared_hwtstamps *skt;
+	struct timeval tv;
+	int msec;
+	struct net_device *netdev;
+	u8 dlc;
+	int i;
+
+	if (frame->dlc > 8) {
+		LOGDE("can rx frame error\n");
+		k61_frame_error(priv_data, frame);
+		return;
+	}
+
+	netdev = priv_data->netdev;
+	skb = alloc_can_skb(netdev, &cf);
+	if (!skb) {
+		LOGDE("skb alloc failed\n");
+		return;
+	}
+
+	LOGDI("rcv frame %d %x %d %x %x %x %x %x %x %x %x\n",
+	      frame->ts, frame->mid, frame->dlc, frame->data[0],
+	      frame->data[1], frame->data[2], frame->data[3], frame->data[4],
+	      frame->data[5], frame->data[6], frame->data[7]);
+	/* Wire fields are little-endian. */
+	cf->can_id = le32_to_cpu(frame->mid);
+	cf->can_dlc = get_can_dlc(frame->dlc);
+	dlc = cf->can_dlc;
+
+	for (i = 0; i < dlc; i++)
+		cf->data[i] = frame->data[i];
+
+	/* Firmware timestamp is in milliseconds. */
+	msec = le32_to_cpu(frame->ts);
+	tv.tv_sec = msec / 1000;
+	tv.tv_usec = (msec - tv.tv_sec * 1000) * 1000;
+	skt = skb_hwtstamps(skb);
+	skt->hwtstamp = timeval_to_ktime(tv);
+	LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+	skb->tstamp = timeval_to_ktime(tv);
+	netif_rx(skb);
+	netdev->stats.rx_packets++;
+	netdev->stats.rx_bytes += dlc;
+}
+
+/* Dispatch one complete TLV received from the controller.  Unsolicited
+ * CAN frames are forwarded to the network stack, a firmware-version
+ * response is logged, and any response matching the command a thread
+ * is blocked on (wait_cmd) posts cmd_result and completes
+ * response_completion.
+ */
+static void k61_process_response(struct k61_can *priv_data,
+				 struct spi_miso *resp)
+{
+	int ret = 0;
+
+	LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq);
+	if (resp->cmd == CMD_CAN_RECEIVE_FRAME) {
+		struct can_receive_frame *frame =
+			(struct can_receive_frame *)&resp->data;
+		k61_receive_frame(priv_data, frame);
+	} else if (resp->cmd == CMD_GET_FW_VERSION) {
+		struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data;
+
+		dev_info(&priv_data->spidev->dev, "fw %d.%d.%d",
+			 fw_resp->maj, fw_resp->min, fw_resp->ver);
+	}
+
+	if (resp->cmd == priv_data->wait_cmd) {
+		priv_data->cmd_result = ret;
+		complete(&priv_data->response_completion);
+	}
+}
+
+/* Walk the raw MISO buffer and dispatch every complete TLV found.
+ * cmd == 0 bytes are treated as padding and skipped one at a time; a
+ * TLV whose declared length runs past the buffer end is an incomplete
+ * command and terminates the scan.
+ * NOTE(review): a TLV header starting at the final buffer byte makes
+ * the resp->len read go one byte past rx_buf - confirm buffer sizing.
+ */
+static void k61_process_rx(struct k61_can *priv_data, char *rx_buf)
+{
+	struct spi_miso *resp;
+	int length_processed = 0, actual_length = priv_data->xfer_length;
+
+	while (length_processed < actual_length) {
+		int length_left = actual_length - length_processed;
+		int length = 0; /* length of consumed chunk */
+		void *data;
+
+		data = rx_buf + length_processed;
+		resp = (struct spi_miso *)data;
+
+		if (resp->cmd == 0) {
+			/* special case. ignore cmd==0 */
+			length_processed += 1;
+			continue;
+		}
+
+		LOGDI("processing. p %d -> l %d (t %d)\n",
+		      length_processed, length_left, priv_data->xfer_length);
+		length = resp->len + sizeof(*resp);
+
+		if (length <= length_left) {
+			k61_process_response(priv_data, resp);
+			length_processed += length;
+		} else {
+			/* Incomplete command */
+			break;
+		}
+	}
+}
+
+/* Run one fixed-size full-duplex transfer and, on success, parse what
+ * the controller clocked out on MISO.  Caller must hold spi_lock and
+ * have prepared tx_buf/rx_buf/xfer_length.
+ *
+ * Fix vs. original: the spi_message and spi_transfer were
+ * devm-allocated on every call but freed only on success, so each
+ * failed spi_sync() leaked both until the device was unbound.
+ * spi_sync() is a blocking call, so on-stack structures are safe and
+ * remove the per-call allocation entirely.
+ */
+static int k61_do_spi_transaction(struct k61_can *priv_data)
+{
+	struct spi_device *spi = priv_data->spidev;
+	struct spi_transfer xfer = {
+		.tx_buf = priv_data->tx_buf,
+		.rx_buf = priv_data->rx_buf,
+		.len = XFER_BUFFER_SIZE,
+		.bits_per_word = priv_data->bits_per_word,
+	};
+	struct spi_message msg;
+	int ret;
+
+	spi_message_init(&msg);
+	spi_message_add_tail(&xfer, &msg);
+
+	ret = spi_sync(spi, &msg);
+	LOGDI("spi_sync ret %d\n", ret);
+
+	if (ret == 0)
+		k61_process_rx(priv_data, priv_data->rx_buf);
+	return ret;
+}
+
+/* Perform one full-duplex transfer with an all-zero MOSI buffer purely
+ * to clock in whatever the controller has queued for the host.
+ */
+static int k61_rx_message(struct k61_can *priv_data)
+{
+	int ret;
+
+	mutex_lock(&priv_data->spi_lock);
+
+	memset(priv_data->tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(priv_data->rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+	ret = k61_do_spi_transaction(priv_data);
+
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* Send CMD_GET_FW_VERSION and wait briefly for the version response.
+ * Returns 0 once the response handler has run, negative otherwise.
+ *
+ * Fix vs. original: the timeout was written as 0.001 * HZ, which both
+ * uses floating-point arithmetic (not permitted in kernel code) and
+ * truncates to 0 jiffies for any HZ < 1000, turning the wait into a
+ * no-op.  msecs_to_jiffies() preserves the intended 1 ms bound.
+ */
+static int k61_query_firmware_version(struct k61_can *priv_data)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_GET_FW_VERSION;
+	req->len = 0;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	priv_data->wait_cmd = CMD_GET_FW_VERSION;
+	priv_data->cmd_result = -1;
+	reinit_completion(&priv_data->response_completion);
+
+	ret = k61_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0) {
+		wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion,
+				msecs_to_jiffies(1));
+		ret = priv_data->cmd_result;
+	}
+
+	return ret;
+}
+
+/* Build and send a CMD_CAN_SEND_FRAME request for @cf.
+ *
+ * Fix vs. original: TX statistics were bumped even when the SPI
+ * transaction failed; they are now counted only on success, and a
+ * failure increments tx_dropped instead.
+ * NOTE(review): mid is sent without cpu_to_le32() although the RX path
+ * uses le32_to_cpu() - confirm firmware byte order on big-endian hosts.
+ */
+static int k61_can_write(struct k61_can *priv_data, struct can_frame *cf)
+{
+	char *tx_buf, *rx_buf;
+	int ret, i;
+	struct spi_mosi *req;
+	struct can_write_req *req_d;
+	struct net_device *netdev;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_CAN_SEND_FRAME;
+	req->len = sizeof(struct can_write_req) + 8;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	req_d = (struct can_write_req *)req->data;
+	req_d->mid = cf->can_id;
+	req_d->dlc = cf->can_dlc;
+	for (i = 0; i < cf->can_dlc; i++)
+		req_d->data[i] = cf->data[i];
+
+	ret = k61_do_spi_transaction(priv_data);
+	netdev = priv_data->netdev;
+	if (ret == 0) {
+		netdev->stats.tx_packets++;
+		netdev->stats.tx_bytes += cf->can_dlc;
+	} else {
+		netdev->stats.tx_dropped++;
+	}
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* net_device_ops .ndo_open: bring the CAN device up and start the
+ * transmit queue.
+ */
+static int k61_netdev_open(struct net_device *netdev)
+{
+	int ret;
+
+	LOGNI("Open");
+
+	ret = open_candev(netdev);
+	if (ret == 0)
+		netif_start_queue(netdev);
+
+	return ret;
+}
+
+/* net_device_ops .ndo_stop: stop the queue, then shut the CAN device
+ * down.
+ */
+static int k61_netdev_close(struct net_device *netdev)
+{
+	LOGNI("Close");
+	netif_stop_queue(netdev);
+	close_candev(netdev);
+	return 0;
+}
+
+/* Workqueue handler: perform the (sleeping) SPI transfer for one
+ * queued TX skb, then release the skb and the work item.
+ */
+static void k61_send_can_frame(struct work_struct *ws)
+{
+	struct k61_tx_work *tx_work;
+	struct can_frame *cf;
+	struct k61_can *priv_data;
+	struct net_device *netdev;
+	struct k61_netdev_privdata *netdev_priv_data;
+
+	tx_work = container_of(ws, struct k61_tx_work, work);
+	netdev = tx_work->netdev;
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->k61_can;
+	LOGDI("send_can_frame ws %p\n", ws);
+	LOGDI("send_can_frame tx %p\n", tx_work);
+
+	cf = (struct can_frame *)tx_work->skb->data;
+	k61_can_write(priv_data, cf);
+
+	dev_kfree_skb(tx_work->skb);
+	kfree(tx_work);
+}
+
+/* IOCTL_ADD_FRAME_FILTER / IOCTL_REMOVE_FRAME_FILTER handler: copy the
+ * filter spec from user space and forward it to the firmware.
+ *
+ * Fix vs. original: the !ifr, allocation-failure and copy_from_user
+ * error paths all returned while still holding spi_lock, deadlocking
+ * every subsequent SPI transaction.  Validation and allocation now
+ * happen before the lock is taken.
+ */
+static int k61_frame_filter(struct net_device *netdev,
+			    struct ifreq *ifr, int cmd)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+	struct can_add_filter_req *add_filter;
+	struct can_add_filter_req *filter_request;
+	struct k61_can *priv_data;
+	struct k61_netdev_privdata *netdev_priv_data;
+	struct spi_device *spi;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->k61_can;
+	spi = priv_data->spidev;
+
+	if (!ifr)
+		return -EINVAL;
+
+	filter_request =
+		devm_kzalloc(&spi->dev, sizeof(struct can_add_filter_req),
+			     GFP_KERNEL);
+	if (!filter_request)
+		return -ENOMEM;
+
+	if (copy_from_user(filter_request, ifr->ifr_data,
+			   sizeof(struct can_add_filter_req))) {
+		devm_kfree(&spi->dev, filter_request);
+		return -EFAULT;
+	}
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	if (cmd == IOCTL_ADD_FRAME_FILTER)
+		req->cmd = CMD_CAN_ADD_FILTER;
+	else
+		req->cmd = CMD_CAN_REMOVE_FILTER;
+
+	req->len = sizeof(struct can_add_filter_req);
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	add_filter = (struct can_add_filter_req *)req->data;
+	add_filter->can_if = filter_request->can_if;
+	add_filter->mid = filter_request->mid;
+	add_filter->mask = filter_request->mask;
+
+	ret = k61_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+	devm_kfree(&spi->dev, filter_request);
+	return ret;
+}
+
+/* net_device_ops .ndo_start_xmit: defer the actual SPI transfer to the
+ * tx workqueue, since SPI transactions sleep while xmit runs in atomic
+ * context.
+ *
+ * Fix vs. original: when the work-item allocation failed the skb was
+ * neither freed nor accounted, leaking it; it is now dropped and
+ * counted explicitly.
+ */
+static netdev_tx_t k61_netdev_start_xmit(
+	struct sk_buff *skb, struct net_device *netdev)
+{
+	struct k61_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
+	struct k61_can *priv_data = netdev_priv_data->k61_can;
+	struct k61_tx_work *tx_work;
+
+	LOGNI("netdev_start_xmit");
+	if (can_dropped_invalid_skb(netdev, skb)) {
+		LOGNE("Dropping invalid can frame\n");
+		return NETDEV_TX_OK;
+	}
+	tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC);
+	if (!tx_work) {
+		netdev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	INIT_WORK(&tx_work->work, k61_send_can_frame);
+	tx_work->netdev = netdev;
+	tx_work->skb = skb;
+	queue_work(priv_data->tx_wq, &tx_work->work);
+
+	return NETDEV_TX_OK;
+}
+
+/* Ask the firmware to release (flush to the host) its buffered CAN
+ * frames.
+ */
+static int k61_send_release_can_buffer_cmd(struct net_device *netdev)
+{
+	struct k61_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
+	struct k61_can *priv_data = netdev_priv_data->k61_can;
+	struct spi_mosi *req;
+	int ret;
+
+	mutex_lock(&priv_data->spi_lock);
+
+	memset(priv_data->tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(priv_data->rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)priv_data->tx_buf;
+	req->cmd = CMD_CAN_RELEASE_BUFFER;
+	req->len = 0;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	ret = k61_do_spi_transaction(priv_data);
+
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* IOCTL_DISABLE_ALL_BUFFERING handler: ask the firmware to drop every
+ * stored buffering filter, then wait (bounded) for its acknowledgement.
+ * NOTE(review): the wait_for_completion return value is immediately
+ * overwritten by cmd_result, so a timeout is indistinguishable from a
+ * firmware error (-1) - confirm this is intended.
+ */
+static int k61_remove_all_buffering(struct net_device *netdev)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+	struct k61_can *priv_data;
+	struct k61_netdev_privdata *netdev_priv_data;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->k61_can;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL;
+	req->len = 0;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	/* Arm the completion before sending so the response cannot race. */
+	priv_data->wait_cmd = req->cmd;
+	priv_data->cmd_result = -1;
+	reinit_completion(&priv_data->response_completion);
+
+	ret = k61_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0) {
+		LOGDI("k61_do_blocking_ioctl ready to wait for response\n");
+		/* Flash write may take some time. Hence give 2s as
+		 * wait duration in the worst case. This wait time should
+		 * increase if more number of frame IDs are stored in flash.
+		 */
+		ret = wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion, 2 * HZ);
+		ret = priv_data->cmd_result;
+	}
+
+	return ret;
+}
+
+/* Map a buffering ioctl to the corresponding firmware SPI command, or
+ * -EINVAL for anything else.
+ */
+static int k61_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd)
+{
+	switch (ioctl_cmd) {
+	case IOCTL_ENABLE_BUFFERING:
+		return CMD_CAN_DATA_BUFF_ADD;
+	case IOCTL_DISABLE_BUFFERING:
+		return CMD_CAN_DATA_BUFF_REMOVE;
+	default:
+		return -EINVAL;
+	}
+}
+
+/* IOCTL_ENABLE_BUFFERING / IOCTL_DISABLE_BUFFERING handler: copy the
+ * buffering spec from user space, forward it to the firmware, then
+ * wait (bounded) for the firmware's acknowledgement.
+ *
+ * Fixes vs. original: command validation, user-copy and allocation are
+ * now done before taking spi_lock, so the -EINVAL/-ENOMEM/-EFAULT
+ * error paths no longer return with the mutex held (which deadlocked
+ * all later SPI traffic); the wait timeout no longer uses the
+ * floating-point expression 0.4 * HZ (FP is not permitted in kernel
+ * code) and is expressed with msecs_to_jiffies() instead.
+ */
+static int k61_data_buffering(struct net_device *netdev,
+			      struct ifreq *ifr, int cmd)
+{
+	int spi_cmd, ret;
+	char *tx_buf, *rx_buf;
+	struct k61_can *priv_data;
+	struct spi_mosi *req;
+	struct k61_netdev_privdata *netdev_priv_data;
+	struct k61_add_can_buffer *enable_buffering;
+	struct k61_add_can_buffer *add_request;
+	struct spi_device *spi;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->k61_can;
+	spi = priv_data->spidev;
+
+	spi_cmd = k61_convert_ioctl_cmd_to_spi_cmd(cmd);
+	if (spi_cmd < 0) {
+		LOGDE("k61_do_blocking_ioctl wrong command %d\n", cmd);
+		return spi_cmd;
+	}
+
+	if (!ifr)
+		return -EINVAL;
+
+	add_request = devm_kzalloc(&spi->dev, sizeof(struct k61_add_can_buffer),
+				   GFP_KERNEL);
+	if (!add_request)
+		return -ENOMEM;
+
+	if (copy_from_user(add_request, ifr->ifr_data,
+			   sizeof(struct k61_add_can_buffer))) {
+		devm_kfree(&spi->dev, add_request);
+		return -EFAULT;
+	}
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = spi_cmd;
+	req->len = sizeof(struct k61_add_can_buffer);
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	enable_buffering = (struct k61_add_can_buffer *)req->data;
+	enable_buffering->can_if = add_request->can_if;
+	enable_buffering->mid = add_request->mid;
+	enable_buffering->mask = add_request->mask;
+
+	priv_data->wait_cmd = spi_cmd;
+	priv_data->cmd_result = -1;
+	reinit_completion(&priv_data->response_completion);
+
+	ret = k61_do_spi_transaction(priv_data);
+	devm_kfree(&spi->dev, add_request);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0) {
+		LOGDI("k61_do_blocking_ioctl ready to wait for response\n");
+		/* Flash write may take some time. Hence give 400ms as
+		 * wait duration in the worst case.
+		 */
+		wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion,
+				msecs_to_jiffies(400));
+		ret = priv_data->cmd_result;
+	}
+	return ret;
+}
+
+/* net_device_ops .ndo_do_ioctl: dispatch this driver's private ioctls
+ * to their handlers; anything unknown is -EINVAL.
+ */
+static int k61_netdev_do_ioctl(struct net_device *netdev,
+			       struct ifreq *ifr, int cmd)
+{
+	struct k61_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
+	struct k61_can *priv_data = netdev_priv_data->k61_can;
+	int ret;
+
+	LOGDI("k61_netdev_do_ioctl %x\n", cmd);
+
+	switch (cmd) {
+	case IOCTL_ADD_FRAME_FILTER:
+	case IOCTL_REMOVE_FRAME_FILTER:
+		ret = k61_frame_filter(netdev, ifr, cmd);
+		break;
+	case IOCTL_ENABLE_BUFFERING:
+	case IOCTL_DISABLE_BUFFERING:
+		ret = k61_data_buffering(netdev, ifr, cmd);
+		break;
+	case IOCTL_DISABLE_ALL_BUFFERING:
+		ret = k61_remove_all_buffering(netdev);
+		break;
+	case IOCTL_RELEASE_CAN_BUFFER:
+		ret = k61_send_release_can_buffer_cmd(netdev);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/* Standard net_device callbacks wired to this driver's handlers. */
+static const struct net_device_ops k61_netdev_ops = {
+	.ndo_open = k61_netdev_open,
+	.ndo_stop = k61_netdev_close,
+	.ndo_start_xmit = k61_netdev_start_xmit,
+	.ndo_do_ioctl = k61_netdev_do_ioctl,
+};
+
+/* Allocate the candev, link it to the driver state and fill in the CAN
+ * core parameters (ops, supported control modes, bit-timing, clock).
+ * The netdev is released by the probe error path or k61_remove().
+ */
+static int k61_create_netdev(struct spi_device *spi,
+			     struct k61_can *priv_data)
+{
+	struct net_device *netdev;
+	struct k61_netdev_privdata *netdev_priv_data;
+
+	LOGDI("k61_create_netdev\n");
+	netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS);
+	if (!netdev) {
+		LOGDE("Couldn't alloc candev\n");
+		return -ENOMEM;
+	}
+
+	netdev_priv_data = netdev_priv(netdev);
+	netdev_priv_data->k61_can = priv_data;
+
+	priv_data->netdev = netdev;
+
+	netdev->netdev_ops = &k61_netdev_ops;
+	SET_NETDEV_DEV(netdev, &spi->dev);
+	netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+						   CAN_CTRLMODE_LISTENONLY;
+	netdev_priv_data->can.bittiming_const = &k61_bittiming_const;
+	netdev_priv_data->can.clock.freq = K61_CLOCK;
+
+	return 0;
+}
+
+/* Allocate and initialise the driver-level state: devres-managed
+ * k61_can struct and transfer buffers, plus the TX workqueue (which is
+ * destroyed explicitly on failure).  Returns NULL on any allocation
+ * failure.
+ */
+static struct k61_can *k61_create_priv_data(struct spi_device *spi)
+{
+	struct device *dev = &spi->dev;
+	struct k61_can *priv_data;
+
+	priv_data = devm_kzalloc(dev, sizeof(*priv_data), GFP_KERNEL);
+	if (!priv_data) {
+		dev_err(dev, "Couldn't alloc k61_can\n");
+		return NULL;
+	}
+	spi_set_drvdata(spi, priv_data);
+	atomic_set(&priv_data->netif_queue_stop, 0);
+	priv_data->spidev = spi;
+
+	priv_data->tx_wq = alloc_workqueue("k61_tx_wq", 0, 0);
+	if (!priv_data->tx_wq) {
+		dev_err(dev, "Couldn't alloc workqueue\n");
+		return NULL;
+	}
+
+	priv_data->tx_buf = devm_kzalloc(dev, XFER_BUFFER_SIZE, GFP_KERNEL);
+	priv_data->rx_buf = devm_kzalloc(dev, XFER_BUFFER_SIZE, GFP_KERNEL);
+	if (!priv_data->tx_buf || !priv_data->rx_buf) {
+		dev_err(dev, "Couldn't alloc tx or rx buffers\n");
+		destroy_workqueue(priv_data->tx_wq);
+		return NULL;
+	}
+	priv_data->xfer_length = 0;
+
+	mutex_init(&priv_data->spi_lock);
+	atomic_set(&priv_data->msg_seq, 0);
+	init_completion(&priv_data->response_completion);
+
+	return priv_data;
+}
+
+/* SPI .probe: set up the bus, allocate driver state, pulse the optional
+ * reset GPIO, register the candev, hook the interrupt and verify the
+ * firmware responds.
+ *
+ * Fix vs. original: a register_candev() failure jumped to the
+ * unregister_candev label, unregistering a netdev that was never
+ * registered; it now goes straight to cleanup.
+ */
+static int k61_probe(struct spi_device *spi)
+{
+	int err, retry = 0, query_err = -1;
+	struct k61_can *priv_data;
+	struct device *dev;
+
+	dev = &spi->dev;
+	dev_dbg(dev, "k61_probe");
+
+	err = spi_setup(spi);
+	if (err) {
+		dev_err(dev, "spi_setup failed: %d", err);
+		return err;
+	}
+
+	priv_data = k61_create_priv_data(spi);
+	if (!priv_data) {
+		dev_err(dev, "Failed to create k61_can priv_data\n");
+		return -ENOMEM;
+	}
+	dev_dbg(dev, "k61_probe created priv_data");
+
+	/* Optional DT overrides with sane defaults. */
+	err = of_property_read_u32(spi->dev.of_node, "bits-per-word",
+				   &priv_data->bits_per_word);
+	if (err)
+		priv_data->bits_per_word = 16;
+
+	err = of_property_read_u32(spi->dev.of_node, "reset-delay-msec",
+				   &priv_data->reset_delay_msec);
+	if (err)
+		priv_data->reset_delay_msec = 1;
+
+	/* Pulse the (optional) reset line, then let the firmware boot. */
+	priv_data->reset = of_get_named_gpio(spi->dev.of_node, "reset-gpio", 0);
+	if (gpio_is_valid(priv_data->reset)) {
+		err = gpio_request(priv_data->reset, "k61-reset");
+		if (err < 0) {
+			dev_err(&spi->dev,
+				"failed to request gpio %d: %d\n",
+				priv_data->reset, err);
+			goto cleanup_candev;
+		}
+
+		gpio_direction_output(priv_data->reset, 0);
+		udelay(1);
+		gpio_direction_output(priv_data->reset, 1);
+		msleep(priv_data->reset_delay_msec);
+	}
+
+	err = k61_create_netdev(spi, priv_data);
+	if (err) {
+		dev_err(dev, "Failed to create CAN device: %d", err);
+		goto cleanup_candev;
+	}
+
+	err = register_candev(priv_data->netdev);
+	if (err) {
+		dev_err(dev, "Failed to register CAN device: %d", err);
+		/* Never registered: skip unregister_candev. */
+		goto cleanup_candev;
+	}
+
+	err = request_threaded_irq(spi->irq, NULL, k61_irq,
+				   IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+				   "k61", priv_data);
+	if (err) {
+		dev_err(dev, "Failed to request irq: %d", err);
+		goto unregister_candev;
+	}
+	dev_dbg(dev, "Request irq %d ret %d\n", spi->irq, err);
+
+	/* Probe the firmware; it may still be booting, so retry a few times. */
+	while ((query_err != 0) && (retry < K61_FW_QUERY_RETRY_COUNT)) {
+		query_err = k61_query_firmware_version(priv_data);
+		retry++;
+	}
+
+	if (query_err) {
+		dev_info(dev, "K61 probe failed\n");
+		err = -ENODEV;
+		goto free_irq;
+	}
+	return 0;
+
+free_irq:
+	free_irq(spi->irq, priv_data);
+unregister_candev:
+	unregister_candev(priv_data->netdev);
+cleanup_candev:
+	if (priv_data->netdev)
+		free_candev(priv_data->netdev);
+	if (priv_data->tx_wq)
+		destroy_workqueue(priv_data->tx_wq);
+	return err;
+}
+
+/* SPI .remove: release the IRQ first so its handler cannot run against
+ * freed state, then tear down in reverse probe order.
+ *
+ * Fix vs. original: the IRQ requested in probe was never freed,
+ * leaving a live handler referencing driver state (devres-freed right
+ * after remove) on unbind.
+ * NOTE(review): the reset GPIO requested in probe is still never
+ * gpio_free()d - confirm whether that is handled elsewhere.
+ */
+static int k61_remove(struct spi_device *spi)
+{
+	struct k61_can *priv_data = spi_get_drvdata(spi);
+
+	LOGDI("k61_remove\n");
+	free_irq(spi->irq, priv_data);
+	unregister_candev(priv_data->netdev);
+	free_candev(priv_data->netdev);
+	destroy_workqueue(priv_data->tx_wq);
+	return 0;
+}
+
+static const struct of_device_id k61_match_table[] = {
+ { .compatible = "fsl,k61" },
+ { }
+};
+
+#ifdef CONFIG_PM
+/* System suspend: keep the K61 interrupt armed as a wakeup source. */
+static int k61_suspend(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+
+	enable_irq_wake(spi->irq);
+	return 0;
+}
+
+/* System resume: disarm wakeup and drain any frames that arrived while
+ * the host was suspended.
+ */
+static int k61_resume(struct device *dev)
+{
+	struct spi_device *spi = to_spi_device(dev);
+	struct k61_can *priv_data = spi_get_drvdata(spi);
+
+	disable_irq_wake(spi->irq);
+	k61_rx_message(priv_data);
+	return 0;
+}
+
+static const struct dev_pm_ops k61_dev_pm_ops = {
+	.suspend = k61_suspend,
+	.resume = k61_resume,
+};
+#endif
+
+static struct spi_driver k61_driver = {
+ .driver = {
+ .name = "k61",
+ .of_match_table = k61_match_table,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &k61_dev_pm_ops,
+#endif
+ },
+ .probe = k61_probe,
+ .remove = k61_remove,
+};
+module_spi_driver(k61_driver);
+
+MODULE_DESCRIPTION("Freescale K61 SPI-CAN module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/qti-can.c b/drivers/net/can/spi/qti-can.c
new file mode 100644
index 000000000000..f7ba4510d1bc
--- /dev/null
+++ b/drivers/net/can/spi/qti-can.c
@@ -0,0 +1,1509 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/pm.h>
+#include <asm/div64.h>
+
+#define DEBUG_QTI_CAN 0
+#if DEBUG_QTI_CAN == 1
+#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNI(...) netdev_info(netdev, __VA_ARGS__)
+#else
+#define LOGDI(...) dev_dbg(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNI(...) netdev_dbg(netdev, __VA_ARGS__)
+#endif
+#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNE(...) netdev_err(netdev, __VA_ARGS__)
+
+#define MAX_TX_BUFFERS 1
+#define XFER_BUFFER_SIZE 64
+#define RX_ASSEMBLY_BUFFER_SIZE 128
+#define QTI_CAN_FW_QUERY_RETRY_COUNT 3
+#define DRIVER_MODE_RAW_FRAMES 0
+#define DRIVER_MODE_PROPERTIES 1
+#define DRIVER_MODE_AMB 2
+#define QUERY_FIRMWARE_TIMEOUT_MS 100
+
+/* Per-controller driver state, shared by all CAN net devices exposed by
+ * one SPI-attached controller. All SPI traffic serializes on spi_lock and
+ * uses the single tx_buf/rx_buf pair.
+ */
+struct qti_can {
+	struct net_device **netdev;	/* one entry per CAN channel */
+	struct spi_device *spidev;
+	struct mutex spi_lock; /* SPI device lock */
+	struct workqueue_struct *tx_wq;	/* defers TX out of xmit context */
+	char *tx_buf, *rx_buf;	/* fixed XFER_BUFFER_SIZE DMA-safe buffers */
+	int xfer_length;
+	atomic_t msg_seq;	/* sequence number stamped into requests */
+	char *assembly_buffer;	/* reassembly area for split responses */
+	u8 assembly_buffer_size;
+	atomic_t netif_queue_stop;
+	struct completion response_completion;	/* signaled on wait_cmd match */
+	int wait_cmd;	/* response cmd id a blocking caller waits for */
+	int cmd_result;	/* result handed back by the response handler */
+	int driver_mode;	/* DRIVER_MODE_RAW_FRAMES / _PROPERTIES / _AMB */
+	int clk_freq_mhz;
+	int max_can_channels;
+	int bits_per_word;
+	int reset_delay_msec;
+	int reset;	/* reset GPIO number */
+	bool support_can_fd;
+	bool can_fw_cmd_timeout_req;	/* wait for fw ack on buffering cmds */
+	u32 rem_all_buffering_timeout_ms;
+	u32 can_fw_cmd_timeout_ms;
+	s64 time_diff;	/* host-vs-controller clock delta, milliseconds */
+};
+
+struct qti_can_netdev_privdata {
+ struct can_priv can;
+ struct qti_can *qti_can;
+ u8 netdev_index;
+};
+
+struct qti_can_tx_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct net_device *netdev;
+};
+
+/* Message definitions */
+struct spi_mosi { /* TLV for MOSI line */
+ u8 cmd;
+ u8 len;
+ u16 seq;
+ u8 data[];
+} __packed;
+
+struct spi_miso { /* TLV for MISO line */
+ u8 cmd;
+ u8 len;
+ u16 seq; /* should match seq field from request, or 0 for unsols */
+ u8 data[];
+} __packed;
+
+#define CMD_GET_FW_VERSION 0x81
+#define CMD_CAN_SEND_FRAME 0x82
+#define CMD_CAN_ADD_FILTER 0x83
+#define CMD_CAN_REMOVE_FILTER 0x84
+#define CMD_CAN_RECEIVE_FRAME 0x85
+#define CMD_CAN_CONFIG_BIT_TIMING 0x86
+#define CMD_CAN_DATA_BUFF_ADD 0x87
+#define CMD_CAN_DATA_BUFF_REMOVE 0X88
+#define CMD_CAN_RELEASE_BUFFER 0x89
+#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A
+#define CMD_PROPERTY_WRITE 0x8B
+#define CMD_PROPERTY_READ 0x8C
+#define CMD_GET_FW_BR_VERSION 0x95
+#define CMD_BEGIN_FIRMWARE_UPGRADE 0x96
+#define CMD_FIRMWARE_UPGRADE_DATA 0x97
+#define CMD_END_FIRMWARE_UPGRADE 0x98
+#define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99
+#define CMD_BOOT_ROM_UPGRADE_DATA 0x9A
+#define CMD_END_BOOT_ROM_UPGRADE 0x9B
+#define CMD_END_FW_UPDATE_FILE 0x9C
+#define CMD_UPDATE_TIME_INFO 0x9D
+#define CMD_UPDATE_SUSPEND_EVENT 0x9E
+#define CMD_UPDATE_RESUME_EVENT 0x9F
+
+#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0)
+#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1)
+#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2)
+#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3)
+#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5)
+#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6)
+#define IOCTL_GET_FW_BR_VERSION (SIOCDEVPRIVATE + 7)
+#define IOCTL_BEGIN_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 8)
+#define IOCTL_FIRMWARE_UPGRADE_DATA (SIOCDEVPRIVATE + 9)
+#define IOCTL_END_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 10)
+#define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11)
+#define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12)
+#define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13)
+#define IOCTL_END_FW_UPDATE_FILE (SIOCDEVPRIVATE + 14)
+
+#define IFR_DATA_OFFSET 0x100
+struct can_fw_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[48];
+} __packed;
+
+struct can_write_req {
+ u8 can_if;
+ u32 mid;
+ u8 dlc;
+ u8 data[8];
+} __packed;
+
+struct can_write_resp {
+ u8 err;
+} __packed;
+
+struct can_filter_req {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct can_add_filter_resp {
+ u8 err;
+} __packed;
+
+struct can_receive_frame {
+ u8 can_if;
+ __le64 ts;
+ u32 mid;
+ u8 dlc;
+ u8 data[8];
+} __packed;
+
+struct can_config_bit_timing {
+ u8 can_if;
+ u32 prop_seg;
+ u32 phase_seg1;
+ u32 phase_seg2;
+ u32 sjw;
+ u32 brp;
+} __packed;
+
+struct can_time_info {
+ __le64 time;
+} __packed;
+
+static struct can_bittiming_const rh850_bittiming_const = {
+ .name = "qti_can",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const flexcan_bittiming_const = {
+ .name = "qti_can",
+ .tseg1_min = 4,
+ .tseg1_max = 16,
+ .tseg2_min = 2,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 256,
+ .brp_inc = 1,
+};
+
+static struct can_bittiming_const qti_can_bittiming_const;
+
+static struct can_bittiming_const qti_can_data_bittiming_const = {
+ .name = "qti_can",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+struct vehicle_property {
+ int id;
+ u64 ts;
+ int zone;
+ int val_type;
+ u32 data_len;
+ union {
+ u8 bval;
+ int val;
+ int val_arr[4];
+ float f_value;
+ float float_arr[4];
+ u8 str[36];
+ };
+} __packed;
+
+struct qti_can_release_can_buffer {
+ u8 enable;
+} __packed;
+
+struct qti_can_buffer {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct can_fw_br_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[32];
+ u8 br_maj;
+ u8 br_min;
+ u8 curr_exec_mode;
+} __packed;
+
+struct qti_can_ioctl_req {
+ u8 len;
+ u8 data[64];
+} __packed;
+
+static int qti_can_rx_message(struct qti_can *priv_data);
+
+/* Threaded IRQ handler: the controller raised its data-ready line, so run
+ * a full-duplex transfer to pull whatever it has queued.
+ */
+static irqreturn_t qti_can_irq(int irq, void *priv)
+{
+	struct qti_can *priv_data = priv;
+
+	LOGDI("qti_can_irq\n");
+	qti_can_rx_message(priv_data);
+	return IRQ_HANDLED;
+}
+
+/* Deliver one CAN frame received from the controller to the matching
+ * net device, stamping it with a hardware timestamp converted to host
+ * time via priv_data->time_diff (both in milliseconds, per the
+ * CMD_UPDATE_TIME_INFO handler).
+ */
+static void qti_can_receive_frame(struct qti_can *priv_data,
+				  struct can_receive_frame *frame)
+{
+	struct can_frame *cf;
+	struct sk_buff *skb;
+	struct skb_shared_hwtstamps *skt;
+	ktime_t nsec;
+	struct net_device *netdev;
+	int i;
+	struct device *dev;
+
+	dev = &priv_data->spidev->dev;
+	/* Guard against a bogus channel index from the wire. */
+	if (frame->can_if >= priv_data->max_can_channels) {
+		LOGDE("qti_can rcv error. Channel is %d\n", frame->can_if);
+		return;
+	}
+
+	netdev = priv_data->netdev[frame->can_if];
+	skb = alloc_can_skb(netdev, &cf);
+	if (!skb) {
+		LOGDE("skb alloc failed. frame->can_if %d\n", frame->can_if);
+		return;
+	}
+
+	LOGDI("rcv frame %d %llu %x %d %x %x %x %x %x %x %x %x\n",
+	      frame->can_if, frame->ts, frame->mid, frame->dlc,
+	      frame->data[0], frame->data[1], frame->data[2], frame->data[3],
+	      frame->data[4], frame->data[5], frame->data[6], frame->data[7]);
+	/* NOTE(review): mid is declared u32 but converted with le32_to_cpu;
+	 * presumably it is little-endian on the wire — confirm and consider
+	 * declaring it __le32 for sparse cleanliness.
+	 */
+	cf->can_id = le32_to_cpu(frame->mid);
+	cf->can_dlc = get_can_dlc(frame->dlc);
+
+	for (i = 0; i < cf->can_dlc; i++)
+		cf->data[i] = frame->data[i];
+
+	/* ms_to_ktime: controller timestamps are in milliseconds. */
+	nsec = ms_to_ktime(le64_to_cpu(frame->ts) + priv_data->time_diff);
+
+	skt = skb_hwtstamps(skb);
+	skt->hwtstamp = nsec;
+	LOGDI("  hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+	skb->tstamp = nsec;
+	netif_rx(skb);
+	/* NOTE(review): rx_bytes is never accounted, only rx_packets. */
+	netdev->stats.rx_packets++;
+}
+
+/* Deliver a vehicle property (CMD_PROPERTY_READ payload) to userspace as
+ * a CAN-FD frame on channel 0 with can_id 0: the whole vehicle_property
+ * struct is copied raw into the FD payload.
+ */
+static void qti_can_receive_property(struct qti_can *priv_data,
+				     struct vehicle_property *property)
+{
+	struct canfd_frame *cfd;
+	u8 *p;
+	struct sk_buff *skb;
+	struct skb_shared_hwtstamps *skt;
+	ktime_t nsec;
+	struct net_device *netdev;
+	struct device *dev;
+	int i;
+
+	/* can0 as the channel with properties */
+	dev = &priv_data->spidev->dev;
+	netdev = priv_data->netdev[0];
+	skb = alloc_canfd_skb(netdev, &cfd);
+	if (!skb) {
+		LOGDE("skb alloc failed. frame->can_if %d\n", 0);
+		return;
+	}
+
+	LOGDI("rcv property:0x%x data:%2x %2x %2x %2x", property->id,
+	      property->str[0], property->str[1],
+	      property->str[2], property->str[3]);
+	cfd->can_id = 0x00;
+	/* sizeof(struct vehicle_property) is 60, within the 64-byte FD max */
+	cfd->len = sizeof(struct vehicle_property);
+
+	p = (u8 *)property;
+	for (i = 0; i < cfd->len; i++)
+		cfd->data[i] = p[i];
+
+	/* NOTE(review): property timestamps go through ns_to_ktime while
+	 * frame timestamps use ms_to_ktime and no time_diff is applied here
+	 * — confirm the firmware really reports properties in nanoseconds.
+	 */
+	nsec = ns_to_ktime(le64_to_cpu(property->ts));
+	skt = skb_hwtstamps(skb);
+	skt->hwtstamp = nsec;
+	LOGDI("  hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+	skb->tstamp = nsec;
+	netif_rx(skb);
+	netdev->stats.rx_packets++;
+}
+
+/* Dispatch one complete TLV response from the controller.
+ * @length: bytes actually available for this response; if the declared
+ *          payload exceeds it, the partial response is parked in the
+ *          assembly buffer for qti_can_process_rx() to finish later.
+ * Returns a command-specific result (packed version word for
+ * CMD_GET_FW_BR_VERSION, else 0) and completes any blocked ioctl waiting
+ * on this command id.
+ */
+static int qti_can_process_response(struct qti_can *priv_data,
+				    struct spi_miso *resp, int length)
+{
+	int ret = 0;
+	u64 mstime;
+	ktime_t ktime_now;
+
+	LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq);
+	if (resp->cmd == CMD_CAN_RECEIVE_FRAME) {
+		struct can_receive_frame *frame =
+				(struct can_receive_frame *)&resp->data;
+		if (resp->len > length) {
+			/* Truncated frame: stash what we have for reassembly */
+			LOGDE("Error. This should never happen\n");
+			LOGDE("process_response: Saving %d bytes\n", length);
+			memcpy(priv_data->assembly_buffer, (char *)resp,
+			       length);
+			priv_data->assembly_buffer_size = length;
+		} else {
+			qti_can_receive_frame(priv_data, frame);
+		}
+	} else if (resp->cmd == CMD_PROPERTY_READ) {
+		struct vehicle_property *property =
+				(struct vehicle_property *)&resp->data;
+		if (resp->len > length) {
+			/* Truncated property: same reassembly path as above */
+			LOGDE("Error. This should never happen\n");
+			LOGDE("process_response: Saving %d bytes\n", length);
+			memcpy(priv_data->assembly_buffer, (char *)resp,
+			       length);
+			priv_data->assembly_buffer_size = length;
+		} else {
+			qti_can_receive_property(priv_data, property);
+		}
+	} else if (resp->cmd == CMD_GET_FW_VERSION) {
+		struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data;
+
+		dev_info(&priv_data->spidev->dev, "fw %d.%d",
+			 fw_resp->maj, fw_resp->min);
+		dev_info(&priv_data->spidev->dev, "fw string %s",
+			 fw_resp->ver);
+	} else if (resp->cmd == CMD_GET_FW_BR_VERSION) {
+		struct can_fw_br_resp *fw_resp =
+				(struct can_fw_br_resp *)resp->data;
+		dev_info(&priv_data->spidev->dev, "fw_can %d.%d",
+			 fw_resp->maj, fw_resp->min);
+		dev_info(&priv_data->spidev->dev, "fw string %s",
+			 fw_resp->ver);
+		dev_info(&priv_data->spidev->dev, "fw_br %d.%d exec_mode %d",
+			 fw_resp->br_maj, fw_resp->br_min,
+			 fw_resp->curr_exec_mode);
+		/* Pack exec mode + bootrom + fw versions into one int for
+		 * the IOCTL_GET_FW_BR_VERSION caller.
+		 */
+		ret = fw_resp->curr_exec_mode << 28;
+		ret |= (fw_resp->br_maj & 0xF) << 24;
+		ret |= (fw_resp->br_min & 0xFF) << 16;
+		ret |= (fw_resp->maj & 0xF) << 8;
+		ret |= (fw_resp->min & 0xFF);
+	} else if (resp->cmd == CMD_UPDATE_TIME_INFO) {
+		struct can_time_info *time_data =
+			(struct can_time_info *)resp->data;
+		/* Record host-vs-controller clock delta in milliseconds;
+		 * used to rebase RX frame timestamps.
+		 */
+		ktime_now = ktime_get_boottime();
+		mstime = ktime_to_ms(ktime_now);
+		priv_data->time_diff = mstime - (le64_to_cpu(time_data->time));
+	}
+
+	/* Wake any blocking command waiting for this response type. */
+	if (resp->cmd == priv_data->wait_cmd) {
+		priv_data->cmd_result = ret;
+		complete(&priv_data->response_completion);
+	}
+	return ret;
+}
+
+/* Walk the raw MISO buffer and carve it into TLV responses.
+ * Handles three cases: a response continuing from the previous transfer
+ * (assembly_buffer non-empty), padding bytes (cmd 0x00/0xFF, skipped one
+ * byte at a time), and complete in-buffer responses. A trailing partial
+ * response is saved into assembly_buffer for the next transfer.
+ * Returns the result of the last processed response.
+ */
+static int qti_can_process_rx(struct qti_can *priv_data, char *rx_buf)
+{
+	struct spi_miso *resp;
+	struct device *dev;
+	int length_processed = 0, actual_length = priv_data->xfer_length;
+	int ret = 0;
+
+	dev = &priv_data->spidev->dev;
+	while (length_processed < actual_length) {
+		int length_left = actual_length - length_processed;
+		int length = 0; /* length of consumed chunk */
+		void *data;
+
+		if (priv_data->assembly_buffer_size > 0) {
+			LOGDI("callback: Reassembling %d bytes\n",
+			      priv_data->assembly_buffer_size);
+			/* should copy just 1 byte instead, since cmd should */
+			/* already been copied as being first byte */
+			/* NOTE(review): this 2-byte peek is immediately
+			 * overwritten by the full copy below when length > 0;
+			 * it only matters when the header itself was split.
+			 * The reassembly bookkeeping here is fragile —
+			 * verify against firmware that splits responses.
+			 */
+			memcpy(priv_data->assembly_buffer +
+			       priv_data->assembly_buffer_size,
+			       rx_buf, 2);
+			data = priv_data->assembly_buffer;
+			resp = (struct spi_miso *)data;
+			length = resp->len + sizeof(*resp)
+					- priv_data->assembly_buffer_size;
+			if (length > 0)
+				memcpy(priv_data->assembly_buffer +
+				       priv_data->assembly_buffer_size,
+				       rx_buf, length);
+			length_left += priv_data->assembly_buffer_size;
+			priv_data->assembly_buffer_size = 0;
+		} else {
+			data = rx_buf + length_processed;
+			resp = (struct spi_miso *)data;
+			if (resp->cmd == 0x00 || resp->cmd == 0xFF) {
+				/* special case. ignore cmd==0x00, 0xFF */
+				length_processed += 1;
+				continue;
+			}
+			length = resp->len + sizeof(struct spi_miso);
+		}
+		LOGDI("processing. p %d -> l %d (t %d)\n",
+		      length_processed, length_left, priv_data->xfer_length);
+		length_processed += length;
+		if (length_left >= sizeof(*resp) &&
+		    resp->len + sizeof(*resp) <= length_left) {
+			struct spi_miso *resp =
+					(struct spi_miso *)data;
+			ret = qti_can_process_response(priv_data, resp,
+						       length_left);
+		} else if (length_left > 0) {
+			/* Not full message. Store however much we have for */
+			/* later assembly */
+			LOGDI("callback: Storing %d bytes of response\n",
+			      length_left);
+			memcpy(priv_data->assembly_buffer, data, length_left);
+			priv_data->assembly_buffer_size = length_left;
+			break;
+		}
+	}
+	return ret;
+}
+
+/* Run one full-duplex SPI transfer using the tx_buf/rx_buf/xfer_length
+ * prepared by the caller, then parse whatever arrived on MISO.
+ * Caller must hold priv_data->spi_lock.
+ * Returns the spi_sync() result (0 on success).
+ */
+static int qti_can_do_spi_transaction(struct qti_can *priv_data)
+{
+	struct spi_device *spi;
+	struct spi_transfer *xfer;
+	struct spi_message *msg;
+	struct device *dev;
+	int ret;
+
+	spi = priv_data->spidev;
+	dev = &spi->dev;
+	msg = devm_kzalloc(&spi->dev, sizeof(*msg), GFP_KERNEL);
+	xfer = devm_kzalloc(&spi->dev, sizeof(*xfer), GFP_KERNEL);
+	if (!xfer || !msg) {
+		/* Fix: free whichever allocation succeeded; previously the
+		 * survivor stayed pinned to the device (devm) until detach,
+		 * leaking a little memory on every failed transaction.
+		 */
+		if (msg)
+			devm_kfree(&spi->dev, msg);
+		if (xfer)
+			devm_kfree(&spi->dev, xfer);
+		return -ENOMEM;
+	}
+	LOGDI(">%x %2d [%d]\n", priv_data->tx_buf[0],
+	      priv_data->tx_buf[1], priv_data->tx_buf[2]);
+	spi_message_init(msg);
+	spi_message_add_tail(xfer, msg);
+	xfer->tx_buf = priv_data->tx_buf;
+	xfer->rx_buf = priv_data->rx_buf;
+	xfer->len = priv_data->xfer_length;
+	xfer->bits_per_word = priv_data->bits_per_word;
+	ret = spi_sync(spi, msg);
+	LOGDI("spi_sync ret %d data %x %x %x %x %x %x %x %x\n", ret,
+	      priv_data->rx_buf[0], priv_data->rx_buf[1],
+	      priv_data->rx_buf[2], priv_data->rx_buf[3],
+	      priv_data->rx_buf[4], priv_data->rx_buf[5],
+	      priv_data->rx_buf[6], priv_data->rx_buf[7]);
+
+	if (ret == 0)
+		qti_can_process_rx(priv_data, priv_data->rx_buf);
+	devm_kfree(&spi->dev, msg);
+	devm_kfree(&spi->dev, xfer);
+	return ret;
+}
+
+/* Poll the controller: clock out an all-zero MOSI buffer purely to pull
+ * pending RX data on MISO. Called from the IRQ handler and on resume.
+ */
+static int qti_can_rx_message(struct qti_can *priv_data)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* Send CMD_GET_FW_VERSION and block (up to QUERY_FIRMWARE_TIMEOUT_MS)
+ * for the response. Used by probe to confirm the controller is alive.
+ * Returns 0 on success, negative on SPI failure or timeout (cmd_result
+ * stays -1 if no response arrived).
+ */
+static int qti_can_query_firmware_version(struct qti_can *priv_data)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_GET_FW_VERSION;
+	req->len = 0;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	/* Arm the completion before the transfer so a fast response (parsed
+	 * inside qti_can_do_spi_transaction) is not missed.
+	 */
+	priv_data->wait_cmd = CMD_GET_FW_VERSION;
+	priv_data->cmd_result = -1;
+	reinit_completion(&priv_data->response_completion);
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0) {
+		wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion,
+				msecs_to_jiffies(QUERY_FIRMWARE_TIMEOUT_MS));
+		ret = priv_data->cmd_result;
+	}
+
+	return ret;
+}
+
+/* Fire-and-forget notification of a host power transition to the
+ * controller.
+ * @event_type: CMD_UPDATE_SUSPEND_EVENT or CMD_UPDATE_RESUME_EVENT.
+ */
+static int qti_can_notify_power_events(struct qti_can *priv_data, u8 event_type)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = event_type;
+	req->len = 0;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* can_priv.do_set_bittiming hook: push the bit-timing parameters computed
+ * by the CAN core down to the controller for this channel.
+ */
+static int qti_can_set_bitrate(struct net_device *netdev)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+	struct can_config_bit_timing *req_d;
+	struct qti_can *priv_data;
+	struct can_priv *priv = netdev_priv(netdev);
+	struct qti_can_netdev_privdata *qti_can_priv;
+
+	/* can_priv is the leading member of qti_can_netdev_privdata, so both
+	 * netdev_priv() views alias the same allocation.
+	 */
+	qti_can_priv = netdev_priv(netdev);
+	priv_data = qti_can_priv->qti_can;
+
+	netdev_info(netdev, "ch%i, bitrate setting>%i",
+		    qti_can_priv->netdev_index, priv->bittiming.bitrate);
+	LOGNI("sjw>%i brp>%i ph_sg1>%i ph_sg2>%i smpl_pt>%i tq>%i pr_seg>%i",
+	      priv->bittiming.sjw, priv->bittiming.brp,
+	      priv->bittiming.phase_seg1,
+	      priv->bittiming.phase_seg2,
+	      priv->bittiming.sample_point,
+	      priv->bittiming.tq, priv->bittiming.prop_seg);
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_CAN_CONFIG_BIT_TIMING;
+	req->len = sizeof(struct can_config_bit_timing);
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+	req_d = (struct can_config_bit_timing *)req->data;
+	req_d->can_if = qti_can_priv->netdev_index;
+	req_d->prop_seg = priv->bittiming.prop_seg;
+	req_d->phase_seg1 = priv->bittiming.phase_seg1;
+	req_d->phase_seg2 = priv->bittiming.phase_seg2;
+	req_d->sjw = priv->bittiming.sjw;
+	req_d->brp = priv->bittiming.brp;
+	ret = qti_can_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* Transmit one frame to the controller over SPI.
+ * In raw-frames mode the canfd_frame is packed into CMD_CAN_SEND_FRAME;
+ * in properties/AMB mode its payload is sent as CMD_PROPERTY_WRITE.
+ * Returns 0 on success, negative errno on bad arguments or SPI failure.
+ */
+static int qti_can_write(struct qti_can *priv_data,
+			 int can_channel, struct canfd_frame *cf)
+{
+	char *tx_buf, *rx_buf;
+	int ret, i;
+	struct spi_mosi *req;
+	struct can_write_req *req_d;
+	struct net_device *netdev;
+
+	if (can_channel < 0 || can_channel >= priv_data->max_can_channels) {
+		LOGDE("qti_can_write error. Channel is %d\n", can_channel);
+		return -EINVAL;
+	}
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	if (priv_data->driver_mode == DRIVER_MODE_RAW_FRAMES) {
+		req->cmd = CMD_CAN_SEND_FRAME;
+		req->len = sizeof(struct can_write_req) + 8;
+		req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+		req_d = (struct can_write_req *)req->data;
+		req_d->can_if = can_channel;
+		req_d->mid = cf->can_id;
+		req_d->dlc = cf->len;
+
+		for (i = 0; i < cf->len; i++)
+			req_d->data[i] = cf->data[i];
+	} else if (priv_data->driver_mode == DRIVER_MODE_PROPERTIES ||
+		   priv_data->driver_mode == DRIVER_MODE_AMB) {
+		req->cmd = CMD_PROPERTY_WRITE;
+		req->len = sizeof(struct vehicle_property);
+		req->seq = atomic_inc_return(&priv_data->msg_seq);
+		for (i = 0; i < cf->len; i++)
+			req->data[i] = cf->data[i];
+	} else {
+		LOGDE("qti_can_write: wrong driver mode %i",
+		      priv_data->driver_mode);
+		/* Fix: previously an all-zero command was still clocked out
+		 * and counted as a transmitted packet; bail out instead.
+		 */
+		mutex_unlock(&priv_data->spi_lock);
+		return -EINVAL;
+	}
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	netdev = priv_data->netdev[can_channel];
+	/* Fix: only count frames that were actually handed to the bus. */
+	if (ret == 0)
+		netdev->stats.tx_packets++;
+	mutex_unlock(&priv_data->spi_lock);
+
+	return ret;
+}
+
+/* net_device_ops.ndo_open: bring the CAN device up and start the TX
+ * queue. Returns 0 or the open_candev() error.
+ */
+static int qti_can_netdev_open(struct net_device *netdev)
+{
+	int ret;
+
+	LOGNI("Open");
+
+	ret = open_candev(netdev);
+	if (ret != 0)
+		return ret;
+
+	netif_start_queue(netdev);
+	return 0;
+}
+
+/* net_device_ops.ndo_stop: halt the TX queue, then shut the CAN device
+ * down. Always succeeds.
+ */
+static int qti_can_netdev_close(struct net_device *netdev)
+{
+	LOGNI("Close");
+	netif_stop_queue(netdev);
+	close_candev(netdev);
+
+	return 0;
+}
+
+/* Workqueue handler: performs the (sleeping) SPI transmit for one frame
+ * queued by ndo_start_xmit, then releases the skb and the work item.
+ */
+static void qti_can_send_can_frame(struct work_struct *ws)
+{
+	struct qti_can_tx_work *tx_work;
+	struct canfd_frame *cf;
+	struct qti_can *priv_data;
+	struct net_device *netdev;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+	int can_channel;
+
+	tx_work = container_of(ws, struct qti_can_tx_work, work);
+	netdev = tx_work->netdev;
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	can_channel = netdev_priv_data->netdev_index;
+
+	LOGDI("send_can_frame ws %pK\n", ws);
+	LOGDI("send_can_frame tx %pK\n", tx_work);
+
+	/* Safe for classic CAN too: can_frame layout is a prefix of
+	 * canfd_frame (can_dlc aliases len).
+	 */
+	cf = (struct canfd_frame *)tx_work->skb->data;
+	qti_can_write(priv_data, can_channel, cf);
+
+	dev_kfree_skb(tx_work->skb);
+	kfree(tx_work);
+}
+
+/* net_device_ops.ndo_start_xmit: defer the frame to the driver workqueue
+ * (SPI transfers sleep and cannot run in xmit context).
+ * Always returns NETDEV_TX_OK; failures drop the frame.
+ */
+static netdev_tx_t qti_can_netdev_start_xmit(
+		struct sk_buff *skb, struct net_device *netdev)
+{
+	struct qti_can_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
+	struct qti_can *priv_data = netdev_priv_data->qti_can;
+	struct qti_can_tx_work *tx_work;
+
+	LOGNI("netdev_start_xmit");
+	if (can_dropped_invalid_skb(netdev, skb)) {
+		LOGNE("Dropping invalid can frame\n");
+		return NETDEV_TX_OK;
+	}
+	tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC);
+	if (!tx_work) {
+		/* Fix: the skb was previously leaked on allocation failure;
+		 * free it and account the drop.
+		 */
+		netdev->stats.tx_dropped++;
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	INIT_WORK(&tx_work->work, qti_can_send_can_frame);
+	tx_work->netdev = netdev;
+	tx_work->skb = skb;
+	queue_work(priv_data->tx_wq, &tx_work->work);
+
+	return NETDEV_TX_OK;
+}
+
+/* Tell the firmware to release buffered frames for the current driver
+ * mode (CMD_CAN_RELEASE_BUFFER with driver_mode as payload).
+ */
+static int qti_can_send_release_can_buffer_cmd(struct net_device *netdev)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+	struct qti_can *priv_data;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+	int *mode;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_CAN_RELEASE_BUFFER;
+	req->len = sizeof(int);
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+	mode = (int *)req->data;
+	*mode = priv_data->driver_mode;
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+	return ret;
+}
+
+/* Handle IOCTL_ENABLE_BUFFERING / IOCTL_DISABLE_BUFFERING: copy a
+ * qti_can_buffer spec from userspace and send a buffer add/remove command
+ * to the firmware. When can_fw_cmd_timeout_req is set, block for the
+ * firmware's acknowledgment (up to can_fw_cmd_timeout_ms).
+ * Returns 0/firmware result, or negative errno.
+ */
+static int qti_can_data_buffering(struct net_device *netdev,
+				  struct ifreq *ifr, int cmd)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	u32 timeout;
+	struct spi_mosi *req;
+	struct qti_can_buffer *enable_buffering;
+	struct qti_can_buffer *add_request;
+	struct qti_can *priv_data;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+	struct spi_device *spi;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	spi = priv_data->spidev;
+	timeout = priv_data->can_fw_cmd_timeout_ms;
+
+	/* Fix: validate and copy from userspace BEFORE taking spi_lock.
+	 * The old code returned -EINVAL/-ENOMEM/-EFAULT with the mutex
+	 * held, deadlocking every subsequent SPI transaction.
+	 */
+	if (!ifr)
+		return -EINVAL;
+
+	add_request = devm_kzalloc(&spi->dev,
+				   sizeof(struct qti_can_buffer),
+				   GFP_KERNEL);
+	if (!add_request)
+		return -ENOMEM;
+
+	if (copy_from_user(add_request, ifr->ifr_data,
+			   sizeof(struct qti_can_buffer))) {
+		devm_kfree(&spi->dev, add_request);
+		return -EFAULT;
+	}
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	if (cmd == IOCTL_ENABLE_BUFFERING)
+		req->cmd = CMD_CAN_DATA_BUFF_ADD;
+	else
+		req->cmd = CMD_CAN_DATA_BUFF_REMOVE;
+	req->len = sizeof(struct qti_can_buffer);
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	enable_buffering = (struct qti_can_buffer *)req->data;
+	enable_buffering->can_if = add_request->can_if;
+	enable_buffering->mid = add_request->mid;
+	enable_buffering->mask = add_request->mask;
+
+	/* Arm the completion before the transfer so a fast firmware ack
+	 * is not missed.
+	 */
+	if (priv_data->can_fw_cmd_timeout_req) {
+		priv_data->wait_cmd = req->cmd;
+		priv_data->cmd_result = -1;
+		reinit_completion(&priv_data->response_completion);
+	}
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	devm_kfree(&spi->dev, add_request);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0 && priv_data->can_fw_cmd_timeout_req) {
+		LOGDI("qti_can_data_buffering ready to wait for response\n");
+		ret = wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion,
+				msecs_to_jiffies(timeout));
+		/* cmd_result stays -1 when no response arrived in time */
+		ret = priv_data->cmd_result;
+	}
+	return ret;
+}
+
+/* Handle IOCTL_DISABLE_ALL_BUFFERING: ask the firmware to drop every
+ * buffered-frame rule, optionally waiting for its acknowledgment (up to
+ * rem_all_buffering_timeout_ms).
+ */
+static int qti_can_remove_all_buffering(struct net_device *netdev)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	u32 timeout;
+	struct spi_mosi *req;
+	struct qti_can *priv_data;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	timeout = priv_data->rem_all_buffering_timeout_ms;
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL;
+	req->len = 0;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	/* Arm the completion before the transfer to avoid a missed wakeup */
+	if (priv_data->can_fw_cmd_timeout_req) {
+		priv_data->wait_cmd = req->cmd;
+		priv_data->cmd_result = -1;
+		reinit_completion(&priv_data->response_completion);
+	}
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0 && priv_data->can_fw_cmd_timeout_req) {
+		LOGDI("qti_can_remove_all_buffering wait for response\n");
+		ret = wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion,
+				msecs_to_jiffies(timeout));
+		/* cmd_result stays -1 when the firmware never answered */
+		ret = priv_data->cmd_result;
+	}
+
+	return ret;
+}
+
+/* Handle IOCTL_ADD_FRAME_FILTER / IOCTL_REMOVE_FRAME_FILTER: copy a
+ * can_filter_req from userspace and forward it to the firmware.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int qti_can_frame_filter(struct net_device *netdev,
+				struct ifreq *ifr, int cmd)
+{
+	char *tx_buf, *rx_buf;
+	int ret;
+	struct spi_mosi *req;
+	struct can_filter_req *add_filter;
+	struct can_filter_req *filter_request;
+	struct qti_can *priv_data;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+	struct spi_device *spi;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	spi = priv_data->spidev;
+
+	/* Fix: validate and copy from userspace BEFORE taking spi_lock.
+	 * The old code returned -EINVAL/-ENOMEM/-EFAULT with the mutex
+	 * held, deadlocking all later SPI traffic.
+	 */
+	if (!ifr)
+		return -EINVAL;
+
+	filter_request =
+		devm_kzalloc(&spi->dev, sizeof(struct can_filter_req),
+			     GFP_KERNEL);
+	if (!filter_request)
+		return -ENOMEM;
+
+	if (copy_from_user(filter_request, ifr->ifr_data,
+			   sizeof(struct can_filter_req))) {
+		devm_kfree(&spi->dev, filter_request);
+		return -EFAULT;
+	}
+
+	mutex_lock(&priv_data->spi_lock);
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	if (cmd == IOCTL_ADD_FRAME_FILTER)
+		req->cmd = CMD_CAN_ADD_FILTER;
+	else
+		req->cmd = CMD_CAN_REMOVE_FILTER;
+
+	req->len = sizeof(struct can_filter_req);
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	add_filter = (struct can_filter_req *)req->data;
+	add_filter->can_if = filter_request->can_if;
+	add_filter->mid = filter_request->mid;
+	add_filter->mask = filter_request->mask;
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	devm_kfree(&spi->dev, filter_request);
+	mutex_unlock(&priv_data->spi_lock);
+	return ret;
+}
+
+/* Build and send one command TLV. Caller must hold spi_lock.
+ * @cmd:  SPI command id
+ * @len:  payload length in bytes
+ * @data: payload (may be NULL when len == 0)
+ * Returns 0 on success, -EINVAL for an oversized payload, or the
+ * SPI transfer error.
+ */
+static int qti_can_send_spi_locked(struct qti_can *priv_data, int cmd, int len,
+				   u8 *data)
+{
+	char *tx_buf, *rx_buf;
+	struct spi_mosi *req;
+	int ret;
+
+	LOGDI("qti_can_send_spi_locked\n");
+
+	/* Fix: bound the payload by the space actually available after the
+	 * spi_mosi header. The old check (len > 64) allowed the memcpy
+	 * below to overflow the 64-byte tx buffer by up to
+	 * sizeof(struct spi_mosi) bytes. Validate before touching buffers.
+	 */
+	if (len < 0 || len > XFER_BUFFER_SIZE - (int)sizeof(struct spi_mosi))
+		return -EINVAL;
+
+	tx_buf = priv_data->tx_buf;
+	rx_buf = priv_data->rx_buf;
+	memset(tx_buf, 0, XFER_BUFFER_SIZE);
+	memset(rx_buf, 0, XFER_BUFFER_SIZE);
+	priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+	req = (struct spi_mosi *)tx_buf;
+	req->cmd = cmd;
+	req->len = len;
+	req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+	if (len)
+		memcpy(req->data, data, len);
+
+	ret = qti_can_do_spi_transaction(priv_data);
+	return ret;
+}
+
+/* Map a firmware-upgrade ioctl number onto the matching SPI command id.
+ * Returns the SPI command, or -EINVAL for an unrecognized ioctl.
+ */
+static int qti_can_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd)
+{
+	static const struct {
+		int ioctl_cmd;
+		int spi_cmd;
+	} cmd_map[] = {
+		{ IOCTL_GET_FW_BR_VERSION,	CMD_GET_FW_BR_VERSION },
+		{ IOCTL_BEGIN_FIRMWARE_UPGRADE,	CMD_BEGIN_FIRMWARE_UPGRADE },
+		{ IOCTL_FIRMWARE_UPGRADE_DATA,	CMD_FIRMWARE_UPGRADE_DATA },
+		{ IOCTL_END_FIRMWARE_UPGRADE,	CMD_END_FIRMWARE_UPGRADE },
+		{ IOCTL_BEGIN_BOOT_ROM_UPGRADE,	CMD_BEGIN_BOOT_ROM_UPGRADE },
+		{ IOCTL_BOOT_ROM_UPGRADE_DATA,	CMD_BOOT_ROM_UPGRADE_DATA },
+		{ IOCTL_END_BOOT_ROM_UPGRADE,	CMD_END_BOOT_ROM_UPGRADE },
+		{ IOCTL_END_FW_UPDATE_FILE,	CMD_END_FW_UPDATE_FILE },
+	};
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(cmd_map); i++) {
+		if (cmd_map[i].ioctl_cmd == ioctl_cmd)
+			return cmd_map[i].spi_cmd;
+	}
+	return -EINVAL;
+}
+
+/* Execute a firmware-upgrade ioctl synchronously: translate it to a SPI
+ * command, send it (with an optional userspace payload for the *_DATA
+ * commands), and block up to 5 s for the firmware's response.
+ * Returns the firmware result, or negative errno.
+ */
+static int qti_can_do_blocking_ioctl(struct net_device *netdev,
+				     struct ifreq *ifr, int cmd)
+{
+	int spi_cmd, ret;
+
+	struct qti_can *priv_data;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+	struct qti_can_ioctl_req *ioctl_data = NULL;
+	struct spi_device *spi;
+	int len = 0;
+	u8 *data = NULL;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	spi = priv_data->spidev;
+
+	spi_cmd = qti_can_convert_ioctl_cmd_to_spi_cmd(cmd);
+	LOGDI("qti_can_do_blocking_ioctl spi_cmd %x\n", spi_cmd);
+	if (spi_cmd < 0) {
+		LOGDE("qti_can_do_blocking_ioctl wrong command %d\n", cmd);
+		return spi_cmd;
+	}
+
+	if (!ifr)
+		return -EINVAL;
+
+	/* Fix: allocate and copy from userspace BEFORE taking spi_lock.
+	 * The old code returned -ENOMEM/-EFAULT with the mutex held,
+	 * deadlocking all later SPI traffic. This also drops the bogus
+	 * "(void *)ioctl_data > (void *)0x100" test: ioctl_data is a
+	 * kernel heap pointer here, so that comparison was always true
+	 * (the userspace-token check belongs on ifr->ifr_data, as done
+	 * in qti_can_netdev_do_ioctl()).
+	 */
+	if (spi_cmd == CMD_FIRMWARE_UPGRADE_DATA ||
+	    spi_cmd == CMD_BOOT_ROM_UPGRADE_DATA) {
+		ioctl_data =
+			devm_kzalloc(&spi->dev,
+				     sizeof(struct qti_can_ioctl_req),
+				     GFP_KERNEL);
+		if (!ioctl_data)
+			return -ENOMEM;
+
+		if (copy_from_user(ioctl_data, ifr->ifr_data,
+				   sizeof(struct qti_can_ioctl_req))) {
+			devm_kfree(&spi->dev, ioctl_data);
+			return -EFAULT;
+		}
+
+		len = ioctl_data->len;
+		data = ioctl_data->data;
+	}
+	LOGDI("qti_can_do_blocking_ioctl len %d\n", len);
+
+	mutex_lock(&priv_data->spi_lock);
+	priv_data->wait_cmd = spi_cmd;
+	priv_data->cmd_result = -1;
+	reinit_completion(&priv_data->response_completion);
+
+	ret = qti_can_send_spi_locked(priv_data, spi_cmd, len, data);
+	if (ioctl_data)
+		devm_kfree(&spi->dev, ioctl_data);
+	mutex_unlock(&priv_data->spi_lock);
+
+	if (ret == 0) {
+		LOGDI("qti_can_do_blocking_ioctl ready to wait for response\n");
+		wait_for_completion_interruptible_timeout(
+				&priv_data->response_completion,
+				5 * HZ);
+		/* cmd_result stays -1 when the firmware never answered */
+		ret = priv_data->cmd_result;
+	}
+	return ret;
+}
+
+/* net_device_ops.ndo_do_ioctl: dispatch the driver's private ioctls
+ * (buffering control, frame filters, firmware upgrade).
+ * Returns 0, a firmware result, or negative errno.
+ */
+static int qti_can_netdev_do_ioctl(struct net_device *netdev,
+				   struct ifreq *ifr, int cmd)
+{
+	struct qti_can *priv_data;
+	struct qti_can_netdev_privdata *netdev_priv_data;
+	int *mode;
+	int ret = -EINVAL;
+	struct spi_device *spi;
+
+	netdev_priv_data = netdev_priv(netdev);
+	priv_data = netdev_priv_data->qti_can;
+	spi = priv_data->spidev;
+	LOGDI("qti_can_netdev_do_ioctl %x\n", cmd);
+
+	switch (cmd) {
+	case IOCTL_RELEASE_CAN_BUFFER:
+		if (!ifr)
+			return -EINVAL;
+
+		/* Regular NULL check will fail here as ioctl_data is at
+		 * some offset
+		 */
+		if (ifr->ifr_data > (void __user *)IFR_DATA_OFFSET) {
+			/* Fix: copy from userspace before taking spi_lock;
+			 * the old code returned -ENOMEM/-EFAULT with the
+			 * mutex held, deadlocking all later SPI traffic.
+			 */
+			mode = devm_kzalloc(&spi->dev, sizeof(int),
+					    GFP_KERNEL);
+			if (!mode)
+				return -ENOMEM;
+			if (copy_from_user(mode, ifr->ifr_data,
+					   sizeof(int))) {
+				devm_kfree(&spi->dev, mode);
+				return -EFAULT;
+			}
+			mutex_lock(&priv_data->spi_lock);
+			priv_data->driver_mode = *mode;
+			LOGDE("qti_can_driver_mode %d\n",
+			      priv_data->driver_mode);
+			mutex_unlock(&priv_data->spi_lock);
+			devm_kfree(&spi->dev, mode);
+		}
+		qti_can_send_release_can_buffer_cmd(netdev);
+		ret = 0;
+		break;
+	case IOCTL_ENABLE_BUFFERING:
+	case IOCTL_DISABLE_BUFFERING:
+		qti_can_data_buffering(netdev, ifr, cmd);
+		ret = 0;
+		break;
+	case IOCTL_DISABLE_ALL_BUFFERING:
+		qti_can_remove_all_buffering(netdev);
+		ret = 0;
+		break;
+	case IOCTL_ADD_FRAME_FILTER:
+	case IOCTL_REMOVE_FRAME_FILTER:
+		qti_can_frame_filter(netdev, ifr, cmd);
+		ret = 0;
+		break;
+	case IOCTL_GET_FW_BR_VERSION:
+	case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+	case IOCTL_FIRMWARE_UPGRADE_DATA:
+	case IOCTL_END_FIRMWARE_UPGRADE:
+	case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+	case IOCTL_BOOT_ROM_UPGRADE_DATA:
+	case IOCTL_END_BOOT_ROM_UPGRADE:
+	case IOCTL_END_FW_UPDATE_FILE:
+		ret = qti_can_do_blocking_ioctl(netdev, ifr, cmd);
+		break;
+	}
+	LOGDI("qti_can_netdev_do_ioctl ret %d\n", ret);
+
+	return ret;
+}
+
+/* Standard net_device callbacks; the driver-private control path is
+ * exposed through ndo_do_ioctl (SIOCDEVPRIVATE range).
+ */
+static const struct net_device_ops qti_can_netdev_ops = {
+ .ndo_open = qti_can_netdev_open,
+ .ndo_stop = qti_can_netdev_close,
+ .ndo_start_xmit = qti_can_netdev_start_xmit,
+ .ndo_do_ioctl = qti_can_netdev_do_ioctl,
+};
+
+static int qti_can_create_netdev(struct spi_device *spi,
+ struct qti_can *priv_data, int index)
+{
+ struct net_device *netdev;
+ struct qti_can_netdev_privdata *netdev_priv_data;
+
+ LOGDI("qti_can_create_netdev %d\n", index);
+ if (index < 0 || index >= priv_data->max_can_channels) {
+ LOGDE("qti_can_create_netdev wrong index %d\n", index);
+ return -EINVAL;
+ }
+ netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS);
+ if (!netdev) {
+ LOGDE("Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ netdev->mtu = CANFD_MTU;
+
+ netdev_priv_data = netdev_priv(netdev);
+ netdev_priv_data->qti_can = priv_data;
+ netdev_priv_data->netdev_index = index;
+
+ priv_data->netdev[index] = netdev;
+
+ netdev->netdev_ops = &qti_can_netdev_ops;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
+ if (priv_data->support_can_fd)
+ netdev_priv_data->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
+ netdev_priv_data->can.bittiming_const = &qti_can_bittiming_const;
+ netdev_priv_data->can.data_bittiming_const =
+ &qti_can_data_bittiming_const;
+ netdev_priv_data->can.clock.freq = priv_data->clk_freq_mhz;
+ netdev_priv_data->can.do_set_bittiming = qti_can_set_bitrate;
+
+ return 0;
+}
+
+static struct qti_can *qti_can_create_priv_data(struct spi_device *spi)
+{
+ struct qti_can *priv_data;
+ int err;
+ struct device *dev;
+
+ dev = &spi->dev;
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data) {
+ err = -ENOMEM;
+ return NULL;
+ }
+ spi_set_drvdata(spi, priv_data);
+ atomic_set(&priv_data->netif_queue_stop, 0);
+ priv_data->spidev = spi;
+ priv_data->assembly_buffer = kzalloc(RX_ASSEMBLY_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!priv_data->assembly_buffer) {
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+
+ priv_data->tx_wq = alloc_workqueue("qti_can_tx_wq", 0, 0);
+ if (!priv_data->tx_wq) {
+ LOGDE("Couldn't alloc workqueue\n");
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+
+ priv_data->tx_buf = kzalloc(XFER_BUFFER_SIZE,
+ GFP_KERNEL);
+ priv_data->rx_buf = kzalloc(XFER_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!priv_data->tx_buf || !priv_data->rx_buf) {
+ LOGDE("Couldn't alloc tx or rx buffers\n");
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+ priv_data->xfer_length = 0;
+ priv_data->driver_mode = DRIVER_MODE_RAW_FRAMES;
+
+ mutex_init(&priv_data->spi_lock);
+ atomic_set(&priv_data->msg_seq, 0);
+ init_completion(&priv_data->response_completion);
+ return priv_data;
+
+cleanup_privdata:
+ if (priv_data) {
+ if (priv_data->tx_wq)
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data);
+ }
+ return NULL;
+}
+
+/* Device-tree match table: NXP MPC5746C based external CAN controller */
+static const struct of_device_id qti_can_match_table[] = {
+ { .compatible = "qcom,nxp,mpc5746c" },
+ { }
+};
+
+static int qti_can_probe(struct spi_device *spi)
+{
+ int err, retry = 0, query_err = -1, i;
+ struct qti_can *priv_data = NULL;
+ struct device *dev;
+ u32 irq_type;
+
+ dev = &spi->dev;
+ dev_info(dev, "qti_can_probe");
+
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "spi_setup failed: %d", err);
+ return err;
+ }
+
+ priv_data = qti_can_create_priv_data(spi);
+ if (!priv_data) {
+ dev_err(dev, "Failed to create qti_can priv_data\n");
+ err = -ENOMEM;
+ return err;
+ }
+ dev_info(dev, "qti_can_probe created priv_data");
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,clk-freq-mhz",
+ &priv_data->clk_freq_mhz);
+ if (err) {
+ LOGDE("DT property: qcom,clk-freq-hz not defined\n");
+ return err;
+ }
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,max-can-channels",
+ &priv_data->max_can_channels);
+ if (err) {
+ LOGDE("DT property: qcom,max-can-channels not defined\n");
+ return err;
+ }
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,bits-per-word",
+ &priv_data->bits_per_word);
+ if (err)
+ priv_data->bits_per_word = 16;
+
+ err = of_property_read_u32(spi->dev.of_node, "qcom,reset-delay-msec",
+ &priv_data->reset_delay_msec);
+ if (err)
+ priv_data->reset_delay_msec = 1;
+
+ priv_data->can_fw_cmd_timeout_req =
+ of_property_read_bool(spi->dev.of_node,
+ "qcom,can-fw-cmd-timeout-req");
+
+ err = of_property_read_u32(spi->dev.of_node,
+ "qcom,can-fw-cmd-timeout-ms",
+ &priv_data->can_fw_cmd_timeout_ms);
+ if (err)
+ priv_data->can_fw_cmd_timeout_ms = 0;
+
+ err = of_property_read_u32(spi->dev.of_node,
+ "qcom,rem-all-buffering-timeout-ms",
+ &priv_data->rem_all_buffering_timeout_ms);
+ if (err)
+ priv_data->rem_all_buffering_timeout_ms = 0;
+
+ priv_data->reset = of_get_named_gpio(spi->dev.of_node,
+ "qcom,reset-gpio", 0);
+
+ if (gpio_is_valid(priv_data->reset)) {
+ err = gpio_request(priv_data->reset, "qti-can-reset");
+ if (err < 0) {
+ LOGDE("failed to request gpio %d: %d\n",
+ priv_data->reset, err);
+ return err;
+ }
+
+ gpio_direction_output(priv_data->reset, 0);
+ udelay(1);
+ gpio_direction_output(priv_data->reset, 1);
+ msleep(priv_data->reset_delay_msec);
+ }
+
+ priv_data->support_can_fd = of_property_read_bool(spi->dev.of_node,
+ "qcom,support-can-fd");
+
+ if (of_device_is_compatible(spi->dev.of_node, "qcom,nxp,mpc5746c"))
+ qti_can_bittiming_const = flexcan_bittiming_const;
+ else if (of_device_is_compatible(spi->dev.of_node,
+ "qcom,renesas,rh850"))
+ qti_can_bittiming_const = rh850_bittiming_const;
+
+ priv_data->netdev = kzalloc(sizeof(priv_data->netdev[0]) *
+ priv_data->max_can_channels,
+ GFP_KERNEL);
+ if (!priv_data->netdev) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ for (i = 0; i < priv_data->max_can_channels; i++) {
+ err = qti_can_create_netdev(spi, priv_data, i);
+ if (err) {
+ LOGDE("Failed to create CAN device: %d", err);
+ goto cleanup_candev;
+ }
+
+ err = register_candev(priv_data->netdev[i]);
+ if (err) {
+ LOGDE("Failed to register CAN device: %d", err);
+ goto unregister_candev;
+ }
+ }
+
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (irq_type == IRQ_TYPE_NONE)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
+ err = request_threaded_irq(spi->irq, NULL, qti_can_irq,
+ irq_type | IRQF_ONESHOT,
+ "qti-can", priv_data);
+ if (err) {
+ LOGDE("Failed to request irq: %d", err);
+ goto unregister_candev;
+ }
+ dev_info(dev, "Request irq %d ret %d\n", spi->irq, err);
+
+ while ((query_err != 0) && (retry < QTI_CAN_FW_QUERY_RETRY_COUNT)) {
+ query_err = qti_can_query_firmware_version(priv_data);
+ priv_data->assembly_buffer_size = 0;
+ retry++;
+ }
+
+ if (query_err) {
+ LOGDE("QTI CAN probe failed\n");
+ err = -ENODEV;
+ goto free_irq;
+ }
+ return 0;
+
+free_irq:
+ free_irq(spi->irq, priv_data);
+unregister_candev:
+ for (i = 0; i < priv_data->max_can_channels; i++)
+ unregister_candev(priv_data->netdev[i]);
+cleanup_candev:
+ if (priv_data) {
+ for (i = 0; i < priv_data->max_can_channels; i++) {
+ if (priv_data->netdev[i])
+ free_candev(priv_data->netdev[i]);
+ }
+ if (priv_data->tx_wq)
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data->netdev);
+ kfree(priv_data);
+ }
+ return err;
+}
+
+static int qti_can_remove(struct spi_device *spi)
+{
+ struct qti_can *priv_data = spi_get_drvdata(spi);
+ int i;
+
+ LOGDI("qti_can_remove\n");
+ for (i = 0; i < priv_data->max_can_channels; i++) {
+ unregister_candev(priv_data->netdev[i]);
+ free_candev(priv_data->netdev[i]);
+ }
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data->netdev);
+ kfree(priv_data);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+/* System suspend: notify the firmware we are going down and arm the
+ * firmware IRQ line as a wakeup source.
+ */
+static int qti_can_suspend(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct qti_can *priv_data = spi_get_drvdata(spi);
+ u8 power_event = CMD_UPDATE_SUSPEND_EVENT;
+
+ qti_can_notify_power_events(priv_data, power_event);
+
+ enable_irq_wake(spi->irq);
+ return 0;
+}
+
+/* System resume: disarm the wake IRQ, then notify the firmware */
+static int qti_can_resume(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct qti_can *priv_data = spi_get_drvdata(spi);
+ u8 power_event = CMD_UPDATE_RESUME_EVENT;
+
+ disable_irq_wake(spi->irq);
+ qti_can_notify_power_events(priv_data, power_event);
+ return 0;
+}
+
+static const struct dev_pm_ops qti_can_dev_pm_ops = {
+ .suspend = qti_can_suspend,
+ .resume = qti_can_resume,
+};
+#endif
+
+/* SPI driver glue: binds via qti_can_match_table above */
+static struct spi_driver qti_can_driver = {
+ .driver = {
+ .name = "qti-can",
+ .of_match_table = qti_can_match_table,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &qti_can_dev_pm_ops,
+#endif
+ },
+ .probe = qti_can_probe,
+ .remove = qti_can_remove,
+};
+module_spi_driver(qti_can_driver);
+
+MODULE_DESCRIPTION("QTI CAN controller module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/spi/rh850.c b/drivers/net/can/spi/rh850.c
new file mode 100644
index 000000000000..b32ae2ddd41b
--- /dev/null
+++ b/drivers/net/can/spi/rh850.c
@@ -0,0 +1,1208 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/completion.h>
+#include <linux/irq.h>
+#include <linux/uaccess.h>
+
+#define DEBUG_RH850 0
+#if DEBUG_RH850 == 1
+#define LOGDI(...) dev_info(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNI(...) netdev_info(netdev, __VA_ARGS__)
+#else
+#define LOGDI(...)
+#define LOGNI(...)
+#endif
+#define LOGDE(...) dev_err(&priv_data->spidev->dev, __VA_ARGS__)
+#define LOGNE(...) netdev_err(netdev, __VA_ARGS__)
+
+#define MAX_TX_BUFFERS 1
+#define XFER_BUFFER_SIZE 64
+#define RX_ASSEMBLY_BUFFER_SIZE 128
+#define RH850_CLOCK 16000000
+#define RH850_MAX_CHANNELS 4
+#define DRIVER_MODE_RAW_FRAMES 0
+#define DRIVER_MODE_PROPERTIES 1
+#define DRIVER_MODE_AMB 2
+
+/* Per-controller state shared by all CAN channels on one SPI device */
+struct rh850_can {
+ struct net_device *netdev[RH850_MAX_CHANNELS];
+ struct spi_device *spidev;
+
+ struct mutex spi_lock; /* SPI device lock */
+
+ struct workqueue_struct *tx_wq;
+ char *tx_buf, *rx_buf;
+ int xfer_length;
+ atomic_t msg_seq; /* sequence number stamped into each request */
+
+ /* holds a partial TLV carried over between SPI transfers */
+ char *assembly_buffer;
+ u8 assembly_buffer_size;
+ atomic_t netif_queue_stop;
+ struct completion response_completion;
+ int wait_cmd; /* response cmd that completes response_completion */
+ int cmd_result;
+ int driver_mode; /* DRIVER_MODE_RAW_FRAMES / _PROPERTIES / _AMB */
+};
+
+/* Per-net_device private data; 'can' is first so netdev_priv() can
+ * also be viewed as struct can_priv (see rh850_set_bitrate).
+ */
+struct rh850_netdev_privdata {
+ struct can_priv can;
+ struct rh850_can *rh850_can;
+ u8 netdev_index;
+};
+
+/* One deferred TX frame, queued from ndo_start_xmit onto tx_wq */
+struct rh850_tx_work {
+ struct work_struct work;
+ struct sk_buff *skb;
+ struct net_device *netdev;
+};
+
+/* Message definitions */
+struct spi_mosi { /* TLV for MOSI line */
+ u8 cmd;
+ u8 len;
+ u16 seq;
+ u8 data[];
+} __packed;
+
+struct spi_miso { /* TLV for MISO line */
+ u8 cmd;
+ u8 len;
+ u16 seq; /* should match seq field from request, or 0 for unsols */
+ u8 data[];
+} __packed;
+
+#define CMD_GET_FW_VERSION 0x81
+#define CMD_CAN_SEND_FRAME 0x82
+#define CMD_CAN_ADD_FILTER 0x83
+#define CMD_CAN_REMOVE_FILTER 0x84
+#define CMD_CAN_RECEIVE_FRAME 0x85
+#define CMD_CAN_CONFIG_BIT_TIMING 0x86
+
+#define CMD_CAN_DATA_BUFF_ADD 0x87
+#define CMD_CAN_DATA_BUFF_REMOVE 0X88
+#define CMD_CAN_RELEASE_BUFFER 0x89
+#define CMD_CAN_DATA_BUFF_REMOVE_ALL 0x8A
+#define CMD_PROPERTY_WRITE 0x8B
+#define CMD_PROPERTY_READ 0x8C
+
+#define CMD_GET_FW_BR_VERSION 0x95
+#define CMD_BEGIN_FIRMWARE_UPGRADE 0x96
+#define CMD_FIRMWARE_UPGRADE_DATA 0x97
+#define CMD_END_FIRMWARE_UPGRADE 0x98
+#define CMD_BEGIN_BOOT_ROM_UPGRADE 0x99
+#define CMD_BOOT_ROM_UPGRADE_DATA 0x9A
+#define CMD_END_BOOT_ROM_UPGRADE 0x9B
+
+#define IOCTL_RELEASE_CAN_BUFFER (SIOCDEVPRIVATE + 0)
+#define IOCTL_ENABLE_BUFFERING (SIOCDEVPRIVATE + 1)
+#define IOCTL_ADD_FRAME_FILTER (SIOCDEVPRIVATE + 2)
+#define IOCTL_REMOVE_FRAME_FILTER (SIOCDEVPRIVATE + 3)
+#define IOCTL_DISABLE_BUFFERING (SIOCDEVPRIVATE + 5)
+#define IOCTL_DISABLE_ALL_BUFFERING (SIOCDEVPRIVATE + 6)
+#define IOCTL_GET_FW_BR_VERSION (SIOCDEVPRIVATE + 7)
+#define IOCTL_BEGIN_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 8)
+#define IOCTL_FIRMWARE_UPGRADE_DATA (SIOCDEVPRIVATE + 9)
+#define IOCTL_END_FIRMWARE_UPGRADE (SIOCDEVPRIVATE + 10)
+#define IOCTL_BEGIN_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 11)
+#define IOCTL_BOOT_ROM_UPGRADE_DATA (SIOCDEVPRIVATE + 12)
+#define IOCTL_END_BOOT_ROM_UPGRADE (SIOCDEVPRIVATE + 13)
+
+/* CMD_GET_FW_VERSION response payload */
+struct can_fw_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[32];
+} __packed;
+
+/* CMD_CAN_SEND_FRAME request payload */
+struct can_write_req {
+ u8 can_if;
+ u32 mid;
+ u8 dlc;
+ u8 data[];
+} __packed;
+
+struct can_write_resp {
+ u8 err;
+} __packed;
+
+/* CMD_CAN_ADD_FILTER request payload */
+struct can_add_filter_req {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct can_add_filter_resp {
+ u8 err;
+} __packed;
+
+/* CMD_CAN_REMOVE_FILTER request payload */
+struct can_remove_filter_req {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+/* CMD_CAN_RECEIVE_FRAME payload; 'ts' is a little-endian timestamp
+ * (milliseconds, per rh850_receive_frame's conversion).
+ */
+struct can_receive_frame {
+ u8 can_if;
+ u32 ts;
+ u32 mid;
+ u8 dlc;
+ u8 data[];
+} __packed;
+
+/* CMD_CAN_CONFIG_BIT_TIMING request payload */
+struct can_config_bit_timing {
+ u8 can_if;
+ u32 brp;
+ u32 tseg1;
+ u32 tseg2;
+ u32 sjw;
+} __packed;
+
+/* CMD_PROPERTY_READ/WRITE payload; 'ts' is nanoseconds (see
+ * rh850_receive_property).
+ */
+struct vehicle_property {
+ int id;
+ u64 ts;
+ int zone;
+ int val_type;
+ u32 data_len;
+ union {
+ u8 bval;
+ int val;
+ int val_arr[4];
+ float f_value;
+ float float_arr[4];
+ u8 str[36];
+ };
+} __packed;
+
+/* IOCTL messages */
+struct rh850_release_can_buffer {
+ u8 enable;
+} __packed;
+
+struct rh850_add_can_buffer {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+struct rh850_delete_can_buffer {
+ u8 can_if;
+ u32 mid;
+ u32 mask;
+} __packed;
+
+/* CMD_GET_FW_BR_VERSION response: CAN firmware + boot-ROM versions */
+struct can_fw_br_resp {
+ u8 maj;
+ u8 min;
+ u8 ver[32];
+ u8 br_maj;
+ u8 br_min;
+ u8 curr_exec_mode;
+} __packed;
+
+struct rh850_ioctl_req {
+ u8 len;
+ u8 data[];
+} __packed;
+
+/* Arbitration-phase bit-timing limits advertised to the CAN core */
+static struct can_bittiming_const rh850_bittiming_const = {
+ .name = "rh850",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+/* Data-phase (CAN FD) bit-timing limits */
+static struct can_bittiming_const rh850_data_bittiming_const = {
+ .name = "rh850",
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 16,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 70,
+ .brp_inc = 1,
+};
+
+static int rh850_rx_message(struct rh850_can *priv_data);
+
+/* Threaded IRQ handler: the firmware raised its interrupt line, so
+ * clock out a full transfer buffer and process whatever it sent.
+ */
+static irqreturn_t rh850_irq(int irq, void *priv)
+{
+ struct rh850_can *priv_data = priv;
+
+ LOGDI("rh850_irq\n");
+ rh850_rx_message(priv_data);
+ return IRQ_HANDLED;
+}
+
+static void rh850_receive_frame(struct rh850_can *priv_data,
+ struct can_receive_frame *frame)
+{
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *skt;
+ struct timeval tv;
+ static int msec;
+ struct net_device *netdev;
+ int i;
+ if (frame->can_if >= RH850_MAX_CHANNELS) {
+ LOGDE("rh850 rcv error. Channel is %d\n", frame->can_if);
+ return;
+ }
+ netdev = priv_data->netdev[frame->can_if];
+ skb = alloc_can_skb(netdev, &cf);
+ if (!skb) {
+ LOGDE("skb alloc failed. frame->can_if %d\n", frame->can_if);
+ return;
+ }
+
+ LOGDI("rcv frame %d %d %x %d %x %x %x %x %x %x %x %x\n",
+ frame->can_if, frame->ts, frame->mid, frame->dlc, frame->data[0],
+ frame->data[1], frame->data[2], frame->data[3], frame->data[4],
+ frame->data[5], frame->data[6], frame->data[7]);
+ cf->can_id = le32_to_cpu(frame->mid);
+ cf->can_dlc = get_can_dlc(frame->dlc);
+
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = frame->data[i];
+
+ msec = le32_to_cpu(frame->ts);
+ tv.tv_sec = msec / 1000;
+ tv.tv_usec = (msec - tv.tv_sec * 1000) * 1000;
+ skt = skb_hwtstamps(skb);
+ skt->hwtstamp = timeval_to_ktime(tv);
+ LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+ skb->tstamp = timeval_to_ktime(tv);
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+}
+
+static void rh850_receive_property(struct rh850_can *priv_data,
+ struct vehicle_property *property)
+{
+ struct canfd_frame *cfd;
+ u8 *p;
+ struct sk_buff *skb;
+ struct skb_shared_hwtstamps *skt;
+ struct timeval tv;
+ static u64 nanosec;
+ struct net_device *netdev;
+ int i;
+
+ /* can0 as the channel with properties */
+ netdev = priv_data->netdev[0];
+ skb = alloc_canfd_skb(netdev, &cfd);
+ if (!skb) {
+ LOGDE("skb alloc failed. frame->can_if %d\n", 0);
+ return;
+ }
+
+ LOGDI("rcv property:0x%x data:%2x %2x %2x %2x",
+ property->id, property->str[0], property->str[1],
+ property->str[2], property->str[3]);
+ cfd->can_id = 0x00;
+ cfd->len = sizeof(struct vehicle_property);
+
+ p = (u8 *)property;
+ for (i = 0; i < cfd->len; i++)
+ cfd->data[i] = p[i];
+
+ nanosec = le64_to_cpu(property->ts);
+ tv.tv_sec = (int)(nanosec / 1000000000);
+ tv.tv_usec = (int)(nanosec - (u64)tv.tv_sec * 1000000000) / 1000;
+ skt = skb_hwtstamps(skb);
+ skt->hwtstamp = timeval_to_ktime(tv);
+ LOGDI(" hwtstamp %lld\n", ktime_to_ms(skt->hwtstamp));
+ skb->tstamp = timeval_to_ktime(tv);
+ netif_rx(skb);
+ netdev->stats.rx_packets++;
+}
+
+/* Parse one TLV response from the firmware and dispatch it.
+ * 'length' is the number of bytes available from 'resp' onward; if the
+ * TLV claims more payload than that, the fragment is stashed in the
+ * assembly buffer so rh850_process_rx() can complete it on the next
+ * transfer. Returns 0 for most commands, or the packed version word
+ * for CMD_GET_FW_BR_VERSION; a matching wait_cmd wakes the blocked
+ * ioctl via response_completion.
+ */
+static int rh850_process_response(struct rh850_can *priv_data,
+ struct spi_miso *resp, int length)
+{
+ int ret = 0;
+ LOGDI("<%x %2d [%d]\n", resp->cmd, resp->len, resp->seq);
+ if (resp->cmd == CMD_CAN_RECEIVE_FRAME) {
+ struct can_receive_frame *frame =
+ (struct can_receive_frame *)&resp->data;
+ if (resp->len > length) {
+ /* Frame is truncated: save the fragment for reassembly */
+ LOGDE("Error. This should never happen\n");
+ LOGDE("process_response: Saving %d bytes\n",
+ length);
+ memcpy(priv_data->assembly_buffer, (char *)resp,
+ length);
+ priv_data->assembly_buffer_size = length;
+ } else {
+ rh850_receive_frame(priv_data, frame);
+ }
+ } else if (resp->cmd == CMD_PROPERTY_READ) {
+ struct vehicle_property *property =
+ (struct vehicle_property *)&resp->data;
+ if (resp->len > length) {
+ /* Property is truncated: save the fragment for reassembly */
+ LOGDE("Error. This should never happen\n");
+ LOGDE("process_response: Saving %d bytes\n",
+ length);
+ memcpy(priv_data->assembly_buffer, (char *)resp,
+ length);
+ priv_data->assembly_buffer_size = length;
+ } else {
+ rh850_receive_property(priv_data, property);
+ }
+ } else if (resp->cmd == CMD_GET_FW_VERSION) {
+ struct can_fw_resp *fw_resp = (struct can_fw_resp *)resp->data;
+ dev_info(&priv_data->spidev->dev, "fw %d.%d",
+ fw_resp->maj, fw_resp->min);
+ dev_info(&priv_data->spidev->dev, "fw string %s",
+ fw_resp->ver);
+ } else if (resp->cmd == CMD_GET_FW_BR_VERSION) {
+ struct can_fw_br_resp *fw_resp =
+ (struct can_fw_br_resp *)resp->data;
+
+ dev_info(&priv_data->spidev->dev, "fw_can %d.%d",
+ fw_resp->maj, fw_resp->min);
+ dev_info(&priv_data->spidev->dev, "fw string %s",
+ fw_resp->ver);
+ dev_info(&priv_data->spidev->dev, "fw_br %d.%d exec_mode %d",
+ fw_resp->br_maj, fw_resp->br_min,
+ fw_resp->curr_exec_mode);
+ /* Pack exec mode and both version pairs into one int result */
+ ret = fw_resp->curr_exec_mode << 28;
+ ret |= (fw_resp->br_maj & 0xF) << 24;
+ ret |= (fw_resp->br_min & 0xFF) << 16;
+ ret |= (fw_resp->maj & 0xF) << 8;
+ ret |= (fw_resp->min & 0xFF);
+ }
+
+ if (resp->cmd == priv_data->wait_cmd) {
+ priv_data->cmd_result = ret;
+ complete(&priv_data->response_completion);
+ }
+ return ret;
+}
+
+/* Walk the raw MISO buffer of one SPI transfer and feed every complete
+ * TLV to rh850_process_response(). A TLV that straddles transfer
+ * boundaries is carried across calls in priv_data->assembly_buffer:
+ * a leftover head is completed from the start of this buffer, and a
+ * trailing partial TLV is stored for the next call. cmd==0 bytes are
+ * idle filler and are skipped one byte at a time.
+ * Returns the last value produced by rh850_process_response().
+ */
+static int rh850_process_rx(struct rh850_can *priv_data, char *rx_buf)
+{
+ struct spi_miso *resp;
+ int length_processed = 0, actual_length = priv_data->xfer_length;
+ int ret = 0;
+
+ while (length_processed < actual_length) {
+ int length_left = actual_length - length_processed;
+ int length = 0; /* length of consumed chunk */
+ void *data;
+
+ if (priv_data->assembly_buffer_size > 0) {
+ /* Finish the TLV left over from the previous transfer */
+ LOGDI("callback: Reassembling %d bytes\n",
+ priv_data->assembly_buffer_size);
+ /* should copy just 1 byte instead, since cmd should */
+ /* already been copied as being first byte */
+ memcpy(priv_data->assembly_buffer +
+ priv_data->assembly_buffer_size,
+ rx_buf, 2);
+ data = priv_data->assembly_buffer;
+ resp = (struct spi_miso *)data;
+ length = resp->len + sizeof(*resp)
+ - priv_data->assembly_buffer_size;
+ if (length > 0)
+ memcpy(priv_data->assembly_buffer +
+ priv_data->assembly_buffer_size,
+ rx_buf, length);
+ length_left += priv_data->assembly_buffer_size;
+ priv_data->assembly_buffer_size = 0;
+ } else {
+ data = rx_buf + length_processed;
+ resp = (struct spi_miso *)data;
+ if (resp->cmd == 0) {
+ /* special case. ignore cmd==0 */
+ length_processed += 1;
+ continue;
+ }
+ length = resp->len + sizeof(struct spi_miso);
+ }
+ LOGDI("processing. p %d -> l %d (t %d)\n",
+ length_processed, length_left, priv_data->xfer_length);
+ length_processed += length;
+ if (length_left >= sizeof(*resp) &&
+ resp->len + sizeof(*resp) <= length_left) {
+ struct spi_miso *resp =
+ (struct spi_miso *)data;
+ ret = rh850_process_response(priv_data, resp,
+ length_left);
+ } else if (length_left > 0) {
+ /* Not full message. Store however much we have for */
+ /* later assembly */
+ LOGDI("callback: Storing %d bytes of response\n",
+ length_left);
+ memcpy(priv_data->assembly_buffer, data, length_left);
+ priv_data->assembly_buffer_size = length_left;
+ break;
+ }
+ }
+ return ret;
+}
+
+static int rh850_do_spi_transaction(struct rh850_can *priv_data)
+{
+ struct spi_device *spi;
+ struct spi_transfer *xfer;
+ struct spi_message *msg;
+ int ret;
+
+ spi = priv_data->spidev;
+ xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (xfer == 0 || msg == 0)
+ return -ENOMEM;
+ LOGDI(">%x %2d [%d]\n", priv_data->tx_buf[0],
+ priv_data->tx_buf[1], priv_data->tx_buf[2]);
+ spi_message_init(msg);
+ spi_message_add_tail(xfer, msg);
+ xfer->tx_buf = priv_data->tx_buf;
+ xfer->rx_buf = priv_data->rx_buf;
+ xfer->len = priv_data->xfer_length;
+ ret = spi_sync(spi, msg);
+ LOGDI("spi_sync ret %d data %x %x %x %x %x %x %x %x\n", ret,
+ priv_data->rx_buf[0], priv_data->rx_buf[1], priv_data->rx_buf[2],
+ priv_data->rx_buf[3], priv_data->rx_buf[4], priv_data->rx_buf[5],
+ priv_data->rx_buf[6], priv_data->rx_buf[7]);
+ if (ret == 0)
+ ret = rh850_process_rx(priv_data, priv_data->rx_buf);
+ kfree(msg);
+ kfree(xfer);
+ return ret;
+}
+
+static int rh850_rx_message(struct rh850_can *priv_data)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_query_firmware_version(struct rh850_can *priv_data)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_GET_FW_VERSION;
+ req->len = 0;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_set_bitrate(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct can_config_bit_timing *req_d;
+ struct rh850_can *priv_data;
+ struct can_priv *priv = netdev_priv(netdev);
+ struct rh850_netdev_privdata *rh850_priv;
+
+ rh850_priv = netdev_priv(netdev);
+ priv_data = rh850_priv->rh850_can;
+
+ netdev_info(netdev, "ch%i, bitrate setting>%i",
+ rh850_priv->netdev_index, priv->bittiming.bitrate);
+ LOGNI("sjw>%i brp>%i ph_sg1>%i ph_sg2>%i smpl_pt>%i tq>%i pr_seg>%i",
+ priv->bittiming.sjw, priv->bittiming.brp,
+ priv->bittiming.phase_seg1,
+ priv->bittiming.phase_seg2,
+ priv->bittiming.sample_point,
+ priv->bittiming.tq, priv->bittiming.prop_seg);
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_CONFIG_BIT_TIMING;
+ req->len = sizeof(struct can_config_bit_timing);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ req_d = (struct can_config_bit_timing *)req->data;
+ req_d->can_if = rh850_priv->netdev_index;
+ req_d->brp = priv->bittiming.brp;
+ req_d->tseg1 = priv->bittiming.phase_seg1 + priv->bittiming.prop_seg;
+ req_d->tseg2 = priv->bittiming.phase_seg2;
+ req_d->sjw = priv->bittiming.sjw;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_can_write(struct rh850_can *priv_data,
+ int can_channel, struct canfd_frame *cf)
+{
+ char *tx_buf, *rx_buf;
+ int ret, i;
+ struct spi_mosi *req;
+ struct can_write_req *req_d;
+ struct net_device *netdev;
+
+ if (can_channel < 0 || can_channel >= RH850_MAX_CHANNELS) {
+ LOGDE("rh850_can_write error. Channel is %d\n", can_channel);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ if (priv_data->driver_mode == DRIVER_MODE_RAW_FRAMES) {
+ req->cmd = CMD_CAN_SEND_FRAME;
+ req->len = sizeof(struct can_write_req) + 8;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ req_d = (struct can_write_req *)req->data;
+ req_d->can_if = can_channel;
+ req_d->mid = cf->can_id;
+ req_d->dlc = cf->len;
+
+ for (i = 0; i < cf->len; i++)
+ req_d->data[i] = cf->data[i];
+ } else if (priv_data->driver_mode == DRIVER_MODE_PROPERTIES ||
+ priv_data->driver_mode == DRIVER_MODE_AMB) {
+ req->cmd = CMD_PROPERTY_WRITE;
+ req->len = sizeof(struct vehicle_property);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ for (i = 0; i < cf->len; i++)
+ req->data[i] = cf->data[i];
+ } else {
+ LOGDE("rh850_can_write: wrong driver mode %i",
+ priv_data->driver_mode);
+ }
+
+ ret = rh850_do_spi_transaction(priv_data);
+ netdev = priv_data->netdev[can_channel];
+ netdev->stats.tx_packets++;
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
/* ndo_open: bring the candev up and start the TX queue */
static int rh850_netdev_open(struct net_device *netdev)
{
	int ret;

	LOGNI("Open");
	ret = open_candev(netdev);
	if (!ret)
		netif_start_queue(netdev);
	return ret;
}
+
/* ndo_stop: stop the TX queue, then shut the candev down */
static int rh850_netdev_close(struct net_device *netdev)
{
	LOGNI("Close");
	netif_stop_queue(netdev);
	close_candev(netdev);
	return 0;
}
+
+static void rh850_send_can_frame(struct work_struct *ws)
+{
+ struct rh850_tx_work *tx_work;
+ struct canfd_frame *cf;
+ struct rh850_can *priv_data;
+ struct net_device *netdev;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ int can_channel;
+
+ tx_work = container_of(ws, struct rh850_tx_work, work);
+ netdev = tx_work->netdev;
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+ can_channel = netdev_priv_data->netdev_index;
+ LOGDI("send_can_frame ws %p\n", ws);
+ LOGDI("send_can_frame tx %p\n", tx_work);
+
+ cf = (struct canfd_frame *)tx_work->skb->data;
+ rh850_can_write(priv_data, can_channel, cf);
+
+ dev_kfree_skb(tx_work->skb);
+ kfree(tx_work);
+}
+
+static netdev_tx_t rh850_netdev_start_xmit(
+ struct sk_buff *skb, struct net_device *netdev)
+{
+ struct rh850_netdev_privdata *netdev_priv_data = netdev_priv(netdev);
+ struct rh850_can *priv_data = netdev_priv_data->rh850_can;
+ struct rh850_tx_work *tx_work;
+
+ LOGNI("netdev_start_xmit");
+ if (can_dropped_invalid_skb(netdev, skb)) {
+ LOGNE("Dropping invalid can frame\n");
+ return NETDEV_TX_OK;
+ }
+ tx_work = kzalloc(sizeof(*tx_work), GFP_ATOMIC);
+ if (tx_work == 0)
+ return NETDEV_TX_OK;
+ INIT_WORK(&tx_work->work, rh850_send_can_frame);
+ tx_work->netdev = netdev;
+ tx_work->skb = skb;
+ queue_work(priv_data->tx_wq, &tx_work->work);
+
+ return NETDEV_TX_OK;
+}
+
+static int rh850_send_release_can_buffer_cmd(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ int *mode;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_RELEASE_BUFFER;
+ req->len = sizeof(int);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+ mode = (int *)req->data;
+ *mode = priv_data->driver_mode;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_data_buffering(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct rh850_add_can_buffer *enable_buffering;
+ struct rh850_add_can_buffer *add_request;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ add_request = ifr->ifr_data;
+ req = (struct spi_mosi *)tx_buf;
+
+ if (cmd == IOCTL_ENABLE_BUFFERING)
+ req->cmd = CMD_CAN_DATA_BUFF_ADD;
+ else
+ req->cmd = CMD_CAN_DATA_BUFF_REMOVE;
+
+ req->len = sizeof(struct rh850_add_can_buffer);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ enable_buffering = (struct rh850_add_can_buffer *)req->data;
+ enable_buffering->can_if = add_request->can_if;
+ enable_buffering->mid = add_request->mid;
+ enable_buffering->mask = add_request->mask;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_remove_all_buffering(struct net_device *netdev)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = CMD_CAN_DATA_BUFF_REMOVE_ALL;
+ req->len = 0;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_frame_filter(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ char *tx_buf, *rx_buf;
+ int ret;
+ struct spi_mosi *req;
+ struct can_add_filter_req *add_filter;
+ struct can_add_filter_req *filter_request;
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ mutex_lock(&priv_data->spi_lock);
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ filter_request = ifr->ifr_data;
+ req = (struct spi_mosi *)tx_buf;
+
+ if (cmd == IOCTL_ADD_FRAME_FILTER)
+ req->cmd = CMD_CAN_ADD_FILTER;
+ else
+ req->cmd = CMD_CAN_REMOVE_FILTER;
+
+ req->len = sizeof(struct can_add_filter_req);
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ add_filter = (struct can_add_filter_req *)req->data;
+ add_filter->can_if = filter_request->can_if;
+ add_filter->mid = filter_request->mid;
+ add_filter->mask = filter_request->mask;
+
+ ret = rh850_do_spi_transaction(priv_data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ return ret;
+}
+
+static int rh850_send_spi_locked(struct rh850_can *priv_data, int cmd, int len,
+ u8 *data)
+{
+ char *tx_buf, *rx_buf;
+ struct spi_mosi *req;
+ int ret;
+
+ LOGDI("rh850_send_spi_locked\n");
+
+ tx_buf = priv_data->tx_buf;
+ rx_buf = priv_data->rx_buf;
+ memset(tx_buf, 0, XFER_BUFFER_SIZE);
+ memset(rx_buf, 0, XFER_BUFFER_SIZE);
+ priv_data->xfer_length = XFER_BUFFER_SIZE;
+
+ req = (struct spi_mosi *)tx_buf;
+ req->cmd = cmd;
+ req->len = len;
+ req->seq = atomic_inc_return(&priv_data->msg_seq);
+
+ if (unlikely(len > 64))
+ return -EINVAL;
+ memcpy(req->data, data, len);
+
+ ret = rh850_do_spi_transaction(priv_data);
+ return ret;
+}
+
+static int rh850_convert_ioctl_cmd_to_spi_cmd(int ioctl_cmd)
+{
+ switch (ioctl_cmd) {
+ case IOCTL_GET_FW_BR_VERSION:
+ return CMD_GET_FW_BR_VERSION;
+ case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+ return CMD_BEGIN_FIRMWARE_UPGRADE;
+ case IOCTL_FIRMWARE_UPGRADE_DATA:
+ return CMD_FIRMWARE_UPGRADE_DATA;
+ case IOCTL_END_FIRMWARE_UPGRADE:
+ return CMD_END_FIRMWARE_UPGRADE;
+ case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+ return CMD_BEGIN_BOOT_ROM_UPGRADE;
+ case IOCTL_BOOT_ROM_UPGRADE_DATA:
+ return CMD_BOOT_ROM_UPGRADE_DATA;
+ case IOCTL_END_BOOT_ROM_UPGRADE:
+ return CMD_END_BOOT_ROM_UPGRADE;
+ }
+ return -EINVAL;
+}
+
+static int rh850_do_blocking_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ int spi_cmd, ret;
+
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ struct rh850_ioctl_req *ioctl_data;
+ int len = 0;
+ u8 *data = NULL;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+
+ spi_cmd = rh850_convert_ioctl_cmd_to_spi_cmd(cmd);
+ LOGDI("rh850_do_blocking_ioctl spi_cmd %x\n", spi_cmd);
+ if (spi_cmd < 0) {
+ LOGDE("rh850_do_blocking_ioctl wrong command %d\n", cmd);
+ return spi_cmd;
+ }
+ if (!ifr)
+ return -EINVAL;
+ ioctl_data = ifr->ifr_data;
+ /* Regular NULL check fails here as ioctl_data is at some offset */
+ if ((void *)ioctl_data > (void *)0x100) {
+ len = ioctl_data->len;
+ data = ioctl_data->data;
+ }
+ LOGDI("rh850_do_blocking_ioctl len %d\n", len);
+ mutex_lock(&priv_data->spi_lock);
+
+ priv_data->wait_cmd = spi_cmd;
+ priv_data->cmd_result = -1;
+ reinit_completion(&priv_data->response_completion);
+
+ ret = rh850_send_spi_locked(priv_data, spi_cmd, len, data);
+ mutex_unlock(&priv_data->spi_lock);
+
+ if (ret == 0) {
+ LOGDI("rh850_do_blocking_ioctl ready to wait for response\n");
+ wait_for_completion_interruptible_timeout(
+ &priv_data->response_completion, 5 * HZ);
+ ret = priv_data->cmd_result;
+ }
+ return ret;
+}
+
+static int rh850_netdev_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct rh850_can *priv_data;
+ struct rh850_netdev_privdata *netdev_priv_data;
+ int *mode;
+ int ret = -EINVAL;
+
+ netdev_priv_data = netdev_priv(netdev);
+ priv_data = netdev_priv_data->rh850_can;
+ LOGDI("rh850_netdev_do_ioctl %x\n", cmd);
+
+ switch (cmd) {
+ case IOCTL_RELEASE_CAN_BUFFER:
+ if (ifr->ifr_data > (void *)0x100) {
+ mode = ifr->ifr_data;
+ priv_data->driver_mode = *mode;
+ }
+ LOGDE("rh850_driver_mode %d\n", priv_data->driver_mode);
+ rh850_send_release_can_buffer_cmd(netdev);
+ ret = 0;
+ break;
+ case IOCTL_ENABLE_BUFFERING:
+ case IOCTL_DISABLE_BUFFERING:
+ rh850_data_buffering(netdev, ifr, cmd);
+ ret = 0;
+ break;
+ case IOCTL_DISABLE_ALL_BUFFERING:
+ rh850_remove_all_buffering(netdev);
+ ret = 0;
+ break;
+ case IOCTL_ADD_FRAME_FILTER:
+ case IOCTL_REMOVE_FRAME_FILTER:
+ rh850_frame_filter(netdev, ifr, cmd);
+ ret = 0;
+ break;
+ case IOCTL_GET_FW_BR_VERSION:
+ case IOCTL_BEGIN_FIRMWARE_UPGRADE:
+ case IOCTL_FIRMWARE_UPGRADE_DATA:
+ case IOCTL_END_FIRMWARE_UPGRADE:
+ case IOCTL_BEGIN_BOOT_ROM_UPGRADE:
+ case IOCTL_BOOT_ROM_UPGRADE_DATA:
+ case IOCTL_END_BOOT_ROM_UPGRADE:
+ ret = rh850_do_blocking_ioctl(netdev, ifr, cmd);
+ break;
+ }
+ LOGDI("rh850_netdev_do_ioctl ret %d\n", ret);
+
+ return ret;
+}
+
/* net_device callbacks shared by every RH850 CAN channel. */
static const struct net_device_ops rh850_netdev_ops = {
	.ndo_open = rh850_netdev_open,
	.ndo_stop = rh850_netdev_close,
	.ndo_start_xmit = rh850_netdev_start_xmit,
	.ndo_do_ioctl = rh850_netdev_do_ioctl,
};
+
+static int rh850_create_netdev(struct spi_device *spi,
+ struct rh850_can *priv_data, int index)
+{
+ struct net_device *netdev;
+ struct rh850_netdev_privdata *netdev_priv_data;
+
+ LOGDI("rh850_create_netdev %d\n", index);
+ if (index < 0 || index >= RH850_MAX_CHANNELS) {
+ LOGDE("rh850_create_netdev wrong index %d\n", index);
+ return -EINVAL;
+ }
+ netdev = alloc_candev(sizeof(*netdev_priv_data), MAX_TX_BUFFERS);
+ if (!netdev) {
+ LOGDE("Couldn't alloc candev\n");
+ return -ENOMEM;
+ }
+
+ netdev->mtu = CANFD_MTU;
+
+ netdev_priv_data = netdev_priv(netdev);
+ netdev_priv_data->rh850_can = priv_data;
+ netdev_priv_data->netdev_index = index;
+
+ priv_data->netdev[index] = netdev;
+
+ netdev->netdev_ops = &rh850_netdev_ops;
+ SET_NETDEV_DEV(netdev, &spi->dev);
+ netdev_priv_data->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY |
+ CAN_CTRLMODE_FD;
+ netdev_priv_data->can.bittiming_const = &rh850_bittiming_const;
+ netdev_priv_data->can.data_bittiming_const =
+ &rh850_data_bittiming_const;
+ netdev_priv_data->can.clock.freq = RH850_CLOCK;
+ netdev_priv_data->can.do_set_bittiming = rh850_set_bitrate;
+
+ return 0;
+}
+
+static struct rh850_can *rh850_create_priv_data(struct spi_device *spi)
+{
+ struct rh850_can *priv_data;
+ int err;
+ struct device *dev;
+
+ dev = &spi->dev;
+ priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
+ if (!priv_data) {
+ dev_err(dev, "Couldn't alloc rh850_can\n");
+ return 0;
+ }
+ spi_set_drvdata(spi, priv_data);
+ atomic_set(&priv_data->netif_queue_stop, 0);
+ priv_data->spidev = spi;
+ priv_data->assembly_buffer = kzalloc(RX_ASSEMBLY_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!priv_data->assembly_buffer) {
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+
+ priv_data->tx_wq = alloc_workqueue("rh850_tx_wq", 0, 0);
+ if (!priv_data->tx_wq) {
+ dev_err(dev, "Couldn't alloc workqueue\n");
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+
+ priv_data->tx_buf = kzalloc(XFER_BUFFER_SIZE, GFP_KERNEL);
+ priv_data->rx_buf = kzalloc(XFER_BUFFER_SIZE, GFP_KERNEL);
+ if (!priv_data->tx_buf || !priv_data->rx_buf) {
+ dev_err(dev, "Couldn't alloc tx or rx buffers\n");
+ err = -ENOMEM;
+ goto cleanup_privdata;
+ }
+ priv_data->xfer_length = 0;
+ priv_data->driver_mode = DRIVER_MODE_RAW_FRAMES;
+
+ mutex_init(&priv_data->spi_lock);
+ atomic_set(&priv_data->msg_seq, 0);
+ init_completion(&priv_data->response_completion);
+ return priv_data;
+
+cleanup_privdata:
+ if (priv_data) {
+ if (priv_data->tx_wq)
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data);
+ }
+ return 0;
+}
+
+static int rh850_probe(struct spi_device *spi)
+{
+ int err, i;
+ struct rh850_can *priv_data;
+ struct device *dev;
+ u32 irq_type;
+
+ dev = &spi->dev;
+ dev_info(dev, "rh850_probe");
+
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(dev, "spi_setup failed: %d", err);
+ return err;
+ }
+
+ priv_data = rh850_create_priv_data(spi);
+ if (!priv_data) {
+ dev_err(dev, "Failed to create rh850_can priv_data\n");
+ err = -ENOMEM;
+ return err;
+ }
+ dev_info(dev, "rh850_probe created priv_data");
+ for (i = 0; i < RH850_MAX_CHANNELS; i++) {
+ err = rh850_create_netdev(spi, priv_data, i);
+ if (err) {
+ dev_err(dev, "Failed to create CAN device: %d", err);
+ goto cleanup_candev;
+ }
+
+ err = register_candev(priv_data->netdev[i]);
+ if (err) {
+ dev_err(dev, "Failed to register CAN device: %d", err);
+ goto unregister_candev;
+ }
+ }
+
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (irq_type == IRQ_TYPE_NONE)
+ irq_type = IRQ_TYPE_EDGE_FALLING;
+ err = request_threaded_irq(spi->irq, NULL, rh850_irq,
+ irq_type | IRQF_ONESHOT,
+ "rh850", priv_data);
+ if (err) {
+ dev_err(dev, "Failed to request irq: %d", err);
+ goto unregister_candev;
+ }
+ dev_info(dev, "Request irq %d ret %d\n", spi->irq, err);
+
+ rh850_query_firmware_version(priv_data);
+ return 0;
+
+unregister_candev:
+ for (i = 0; i < RH850_MAX_CHANNELS; i++)
+ unregister_candev(priv_data->netdev[i]);
+cleanup_candev:
+ if (priv_data) {
+ for (i = 0; i < RH850_MAX_CHANNELS; i++) {
+ if (priv_data->netdev[i])
+ free_candev(priv_data->netdev[i]);
+ }
+ if (priv_data->tx_wq)
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data);
+ }
+ return err;
+}
+
+static int rh850_remove(struct spi_device *spi)
+{
+ struct rh850_can *priv_data = spi_get_drvdata(spi);
+ int i;
+
+ LOGDI("rh850_remove\n");
+ for (i = 0; i < RH850_MAX_CHANNELS; i++) {
+ unregister_candev(priv_data->netdev[i]);
+ free_candev(priv_data->netdev[i]);
+ }
+ destroy_workqueue(priv_data->tx_wq);
+ kfree(priv_data->assembly_buffer);
+ kfree(priv_data->rx_buf);
+ kfree(priv_data->tx_buf);
+ kfree(priv_data);
+ return 0;
+}
+
+static const struct of_device_id rh850_match_table[] = {
+ { .compatible = "renesas,rh850" },
+ { }
+};
+
/* SPI driver registration; module init/exit are generated by
 * module_spi_driver().
 */
static struct spi_driver rh850_driver = {
	.driver = {
		.name = "rh850",
		.of_match_table = rh850_match_table,
		.owner = THIS_MODULE,
	},
	.probe = rh850_probe,
	.remove = rh850_remove,
};
module_spi_driver(rh850_driver);
+
+MODULE_DESCRIPTION("RH850 SPI-CAN module");
+MODULE_LICENSE("GPL v2");