Diffstat (limited to 'drivers')
44 files changed, 1686 insertions, 257 deletions
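Two of the mechanisms in this batch are worth sketching up front. The ath3k hunks below throttle the firmware download to work around an xHCI host controller issue by sleeping 50-100 us between 4 KiB bulk chunks. A minimal sketch of the resulting loop, with a hypothetical ath3k_send_chunks() standing in for the two download loops the patch touches:

/* Illustrative only -- not the driver code itself.  BULK_SIZE and the
 * TIMEGAP_USEC_* values match the patch; the 3000 ms timeout mirrors
 * what the driver uses for its bulk transfers.
 */
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define BULK_SIZE		4096
#define TIMEGAP_USEC_MIN	50
#define TIMEGAP_USEC_MAX	100

static int ath3k_send_chunks(struct usb_device *udev, const u8 *data,
			     size_t count)
{
	unsigned int pipe = usb_sndbulkpipe(udev, 0x02);
	size_t sent = 0;
	u8 *send_buf;
	int err = 0;

	send_buf = kmalloc(BULK_SIZE, GFP_KERNEL);
	if (!send_buf)
		return -ENOMEM;

	while (count) {
		int size = min_t(size_t, count, BULK_SIZE);
		int len = 0;

		/* Give the xHCI controller time between bulk transfers. */
		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);

		memcpy(send_buf, data + sent, size);
		err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000);
		if (err || len != size) {
			err = err ? err : -EIO;
			break;
		}

		sent += size;
		count -= size;
	}

	kfree(send_buf);
	return err;
}

The bulk of the btusb changes implement the new Intel bootloader flow: the .sfi image carries a 128-byte CSS header, a 256-byte public key and a 256-byte signature, followed by plain HCI commands from offset 644, and every piece is pushed with the vendor command 0xfc09 as fragments of one type byte (0x00 Init, 0x03 PKey, 0x02 Sign, 0x01 Data) plus at most 252 payload bytes. A sketch of the send sequence, with a hypothetical secure_send() wrapping btusb_intel_secure_send() from the diff:

/* Illustrative download sequence for btusb_setup_intel_new(); the
 * offsets and fragment types match the diff below.
 */
#include <linux/firmware.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int secure_send(struct hci_dev *hdev, u8 type, u32 plen,
		       const void *param);	/* hypothetical wrapper */

static int download_intel_fw(struct hci_dev *hdev, const struct firmware *fw)
{
	const u8 *fw_ptr;
	int err;

	if (fw->size < 644)
		return -EBADF;

	err = secure_send(hdev, 0x00, 128, fw->data);	/* CSS header */
	if (err)
		return err;

	err = secure_send(hdev, 0x03, 256, fw->data + 128); /* public key */
	if (err)
		return err;

	err = secure_send(hdev, 0x02, 256, fw->data + 388); /* signature */
	if (err)
		return err;

	/* The remainder is a stream of HCI commands; each goes out as
	 * its own Data fragment and must be acked with a command
	 * complete event (hence the limited_cce check in the diff).
	 */
	fw_ptr = fw->data + 644;
	while (fw_ptr - fw->data < fw->size) {
		const struct hci_command_hdr *cmd = (const void *)fw_ptr;
		u8 cmd_len = sizeof(*cmd) + cmd->plen;

		err = secure_send(hdev, 0x01, cmd_len, fw_ptr);
		if (err)
			return err;

		fw_ptr += cmd_len;
	}

	return 0;
}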
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 1ee27ac18de0..de4c8499cbac 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -108,6 +108,7 @@ static const struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x13d3, 0x3393) },
 	{ USB_DEVICE(0x13d3, 0x3402) },
 	{ USB_DEVICE(0x13d3, 0x3408) },
+	{ USB_DEVICE(0x13d3, 0x3423) },
 	{ USB_DEVICE(0x13d3, 0x3432) },

 	/* Atheros AR5BBU12 with sflash firmware */
@@ -162,6 +163,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },

 	/* Atheros AR5BBU22 with sflash firmware */
@@ -174,6 +176,8 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 #define USB_REQ_DFU_DNLOAD	1
 #define BULK_SIZE		4096
 #define FW_HDR_SIZE		20
+#define TIMEGAP_USEC_MIN	50
+#define TIMEGAP_USEC_MAX	100

 static int ath3k_load_firmware(struct usb_device *udev,
 				const struct firmware *firmware)
@@ -205,6 +209,9 @@ static int ath3k_load_firmware(struct usb_device *udev,
 	pipe = usb_sndbulkpipe(udev, 0x02);

 	while (count) {
+		/* workaround the compatibility issue with xHCI controller*/
+		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
 		size = min_t(uint, count, BULK_SIZE);
 		memcpy(send_buf, firmware->data + sent, size);
@@ -302,6 +309,9 @@ static int ath3k_load_fwfile(struct usb_device *udev,
 	pipe = usb_sndbulkpipe(udev, 0x02);

 	while (count) {
+		/* workaround the compatibility issue with xHCI controller*/
+		usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX);
+
 		size = min_t(uint, count, BULK_SIZE);
 		memcpy(send_buf, firmware->data + sent, size);

diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 4a6495ab9726..b87688881143 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -28,7 +28,7 @@
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>

-#define VERSION "0.6"
+#define VERSION "0.7"

 static bool disable_scofix;
 static bool force_scofix;
@@ -50,11 +50,16 @@ static struct usb_driver btusb_driver;
 #define BTUSB_BCM_PATCHRAM	0x400
 #define BTUSB_MARVELL		0x800
 #define BTUSB_SWAVE		0x1000
+#define BTUSB_INTEL_NEW		0x2000
+#define BTUSB_AMP		0x4000

 static const struct usb_device_id btusb_table[] = {
 	/* Generic Bluetooth USB device */
 	{ USB_DEVICE_INFO(0xe0, 0x01, 0x01) },

+	/* Generic Bluetooth AMP device */
+	{ USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP },
+
 	/* Apple-specific (Broadcom) devices */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) },

@@ -110,16 +115,24 @@ static const struct usb_device_id btusb_table[] = {
 	{ USB_DEVICE(0x13d3, 0x3404),
 	  .driver_info = BTUSB_BCM_PATCHRAM },

+	/* Broadcom BCM20702B0 (Dynex/Insignia) */
+	{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
+
 	/* Foxconn - Hon Hai */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
 	  .driver_info = BTUSB_BCM_PATCHRAM },

+	/* Lite-On Technology - Broadcom based */
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01),
+	  .driver_info = BTUSB_BCM_PATCHRAM },
+
 	/* Broadcom devices with vendor specific id */
 	{ USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01),
 	  .driver_info = BTUSB_BCM_PATCHRAM },

 	/* ASUSTek Computer - Broadcom based */
-	{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) },
+	{ USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff,
0x01, 0x01), + .driver_info = BTUSB_BCM_PATCHRAM }, /* Belkin F8065bf - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, @@ -189,6 +202,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ @@ -253,13 +267,18 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC }, - /* Intel Bluetooth device */ + /* Marvell Bluetooth devices */ + { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL }, + { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, + + /* Intel Bluetooth devices */ { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, + { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, - /* Marvell device */ - { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL }, - { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, + /* Other Intel Bluetooth devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), + .driver_info = BTUSB_IGNORE }, { } /* Terminating entry */ }; @@ -271,6 +290,11 @@ static const struct usb_device_id blacklist_table[] = { #define BTUSB_ISOC_RUNNING 2 #define BTUSB_SUSPENDING 3 #define BTUSB_DID_ISO_RESUME 4 +#define BTUSB_BOOTLOADER 5 +#define BTUSB_DOWNLOADING 6 +#define BTUSB_FIRMWARE_LOADED 7 +#define BTUSB_FIRMWARE_FAILED 8 +#define BTUSB_BOOTING 9 struct btusb_data { struct hci_dev *hdev; @@ -304,6 +328,7 @@ struct btusb_data { struct usb_endpoint_descriptor *isoc_rx_ep; __u8 cmdreq_type; + __u8 cmdreq; unsigned int sco_num; int isoc_altsetting; @@ -313,6 +338,16 @@ struct btusb_data { int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); }; +static int btusb_wait_on_bit_timeout(void *word, int bit, unsigned long timeout, + unsigned mode) +{ + might_sleep(); + if (!test_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit_timeout(word, bit, bit_wait_timeout, + mode, timeout); +} + static inline void btusb_free_frags(struct btusb_data *data) { unsigned long flags; @@ -957,7 +992,7 @@ static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb) } dr->bRequestType = data->cmdreq_type; - dr->bRequest = 0; + dr->bRequest = data->cmdreq; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); @@ -1295,6 +1330,26 @@ struct intel_version { u8 fw_patch_num; } __packed; +struct intel_boot_params { + __u8 status; + __u8 otp_format; + __u8 otp_content; + __u8 otp_patch; + __le16 dev_revid; + __u8 secure_boot; + __u8 key_from_hdr; + __u8 key_type; + __u8 otp_lock; + __u8 api_lock; + __u8 debug_lock; + bdaddr_t otp_bdaddr; + __u8 min_fw_build_nn; + __u8 min_fw_build_cw; + __u8 min_fw_build_yy; + __u8 limited_cce; + __u8 unlocked_state; +} __packed; + static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev, struct intel_version *ver) { @@ -1703,6 +1758,562 @@ exit_mfg_deactivate: return 0; } +static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) +{ + struct sk_buff *skb; + struct hci_event_hdr *hdr; + struct hci_ev_cmd_complete *evt; + + skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_ATOMIC); + if (!skb) + 
return -ENOMEM; + + hdr = (struct hci_event_hdr *)skb_put(skb, sizeof(*hdr)); + hdr->evt = HCI_EV_CMD_COMPLETE; + hdr->plen = sizeof(*evt) + 1; + + evt = (struct hci_ev_cmd_complete *)skb_put(skb, sizeof(*evt)); + evt->ncmd = 0x01; + evt->opcode = cpu_to_le16(opcode); + + *skb_put(skb, 1) = 0x00; + + bt_cb(skb)->pkt_type = HCI_EVENT_PKT; + + return hci_recv_frame(hdev, skb); +} + +static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer, + int count) +{ + /* When the device is in bootloader mode, then it can send + * events via the bulk endpoint. These events are treated the + * same way as the ones received from the interrupt endpoint. + */ + if (test_bit(BTUSB_BOOTLOADER, &data->flags)) + return btusb_recv_intr(data, buffer, count); + + return btusb_recv_bulk(data, buffer, count); +} + +static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + + if (test_bit(BTUSB_BOOTLOADER, &data->flags)) { + struct hci_event_hdr *hdr = (void *)skb->data; + + /* When the firmware loading completes the device sends + * out a vendor specific event indicating the result of + * the firmware loading. + */ + if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 && + skb->data[2] == 0x06) { + if (skb->data[3] != 0x00) + test_bit(BTUSB_FIRMWARE_FAILED, &data->flags); + + if (test_and_clear_bit(BTUSB_DOWNLOADING, + &data->flags) && + test_bit(BTUSB_FIRMWARE_LOADED, &data->flags)) { + smp_mb__after_atomic(); + wake_up_bit(&data->flags, BTUSB_DOWNLOADING); + } + } + + /* When switching to the operational firmware the device + * sends a vendor specific event indicating that the bootup + * completed. + */ + if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 && + skb->data[2] == 0x02) { + if (test_and_clear_bit(BTUSB_BOOTING, &data->flags)) { + smp_mb__after_atomic(); + wake_up_bit(&data->flags, BTUSB_BOOTING); + } + } + } + + return hci_recv_frame(hdev, skb); +} + +static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btusb_data *data = hci_get_drvdata(hdev); + struct urb *urb; + + BT_DBG("%s", hdev->name); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -EBUSY; + + switch (bt_cb(skb)->pkt_type) { + case HCI_COMMAND_PKT: + if (test_bit(BTUSB_BOOTLOADER, &data->flags)) { + struct hci_command_hdr *cmd = (void *)skb->data; + __u16 opcode = le16_to_cpu(cmd->opcode); + + /* When in bootloader mode and the command 0xfc09 + * is received, it needs to be send down the + * bulk endpoint. So allocate a bulk URB instead. + */ + if (opcode == 0xfc09) + urb = alloc_bulk_urb(hdev, skb); + else + urb = alloc_ctrl_urb(hdev, skb); + + /* When the 0xfc01 command is issued to boot into + * the operational firmware, it will actually not + * send a command complete event. To keep the flow + * control working inject that event here. 
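+ * The injected event echoes the opcode with status 0x00, so the __hci_cmd_sync() call in btusb_setup_intel_new() sees the boot command as answered.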
+ */ + if (opcode == 0xfc01) + inject_cmd_complete(hdev, opcode); + } else { + urb = alloc_ctrl_urb(hdev, skb); + } + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.cmd_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_ACLDATA_PKT: + urb = alloc_bulk_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.acl_tx++; + return submit_or_queue_tx_urb(hdev, urb); + + case HCI_SCODATA_PKT: + if (hci_conn_num(hdev, SCO_LINK) < 1) + return -ENODEV; + + urb = alloc_isoc_urb(hdev, skb); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + hdev->stat.sco_tx++; + return submit_tx_urb(hdev, urb); + } + + return -EILSEQ; +} + +static int btusb_intel_secure_send(struct hci_dev *hdev, u8 fragment_type, + u32 plen, const void *param) +{ + while (plen > 0) { + struct sk_buff *skb; + u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen; + + cmd_param[0] = fragment_type; + memcpy(cmd_param + 1, param, fragment_len); + + skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1, + cmd_param, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + plen -= fragment_len; + param += fragment_len; + } + + return 0; +} + +static void btusb_intel_version_info(struct hci_dev *hdev, + struct intel_version *ver) +{ + const char *variant; + + switch (ver->fw_variant) { + case 0x06: + variant = "Bootloader"; + break; + case 0x23: + variant = "Firmware"; + break; + default: + return; + } + + BT_INFO("%s: %s revision %u.%u build %u week %u %u", hdev->name, + variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, + ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); +} + +static int btusb_setup_intel_new(struct hci_dev *hdev) +{ + static const u8 reset_param[] = { 0x00, 0x01, 0x00, 0x01, + 0x00, 0x08, 0x04, 0x00 }; + struct btusb_data *data = hci_get_drvdata(hdev); + struct sk_buff *skb; + struct intel_version *ver; + struct intel_boot_params *params; + const struct firmware *fw; + const u8 *fw_ptr; + char fwname[64]; + ktime_t calltime, delta, rettime; + unsigned long long duration; + int err; + + BT_DBG("%s", hdev->name); + + calltime = ktime_get(); + + /* Read the Intel version information to determine if the device + * is in bootloader mode or if it already has operational firmware + * loaded. + */ + skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading Intel version information failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*ver)) { + BT_ERR("%s: Intel version event size mismatch", hdev->name); + kfree_skb(skb); + return -EILSEQ; + } + + ver = (struct intel_version *)skb->data; + if (ver->status) { + BT_ERR("%s: Intel version command failure (%02x)", + hdev->name, ver->status); + err = -bt_to_errno(ver->status); + kfree_skb(skb); + return err; + } + + /* The hardware platform number has a fixed value of 0x37 and + * for now only accept this single value. + */ + if (ver->hw_platform != 0x37) { + BT_ERR("%s: Unsupported Intel hardware platform (%u)", + hdev->name, ver->hw_platform); + kfree_skb(skb); + return -EINVAL; + } + + /* At the moment only the hardware variant iBT 3.0 (LnP/SfP) is + * supported by this firmware loading method. This check has been + * put in place to ensure correct forward compatibility options + * when newer hardware variants come along. 
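+ * Any other hardware variant is rejected with -EINVAL below.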
+ */ + if (ver->hw_variant != 0x0b) { + BT_ERR("%s: Unsupported Intel hardware variant (%u)", + hdev->name, ver->hw_variant); + kfree_skb(skb); + return -EINVAL; + } + + btusb_intel_version_info(hdev, ver); + + /* The firmware variant determines if the device is in bootloader + * mode or is running operational firmware. The value 0x06 identifies + * the bootloader and the value 0x23 identifies the operational + * firmware. + * + * When the operational firmware is already present, then only + * the check for valid Bluetooth device address is needed. This + * determines if the device will be added as configured or + * unconfigured controller. + * + * It is not possible to use the Secure Boot Parameters in this + * case since that command is only available in bootloader mode. + */ + if (ver->fw_variant == 0x23) { + kfree_skb(skb); + clear_bit(BTUSB_BOOTLOADER, &data->flags); + btusb_check_bdaddr_intel(hdev); + return 0; + } + + /* If the device is not in bootloader mode, then the only possible + * choice is to return an error and abort the device initialization. + */ + if (ver->fw_variant != 0x06) { + BT_ERR("%s: Unsupported Intel firmware variant (%u)", + hdev->name, ver->fw_variant); + kfree_skb(skb); + return -ENODEV; + } + + kfree_skb(skb); + + /* Read the secure boot parameters to identify the operating + * details of the bootloader. + */ + skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reading Intel boot parameters failed (%ld)", + hdev->name, PTR_ERR(skb)); + return PTR_ERR(skb); + } + + if (skb->len != sizeof(*params)) { + BT_ERR("%s: Intel boot parameters size mismatch", hdev->name); + kfree_skb(skb); + return -EILSEQ; + } + + params = (struct intel_boot_params *)skb->data; + if (params->status) { + BT_ERR("%s: Intel boot parameters command failure (%02x)", + hdev->name, params->status); + err = -bt_to_errno(params->status); + kfree_skb(skb); + return err; + } + + BT_INFO("%s: Device revision is %u", hdev->name, + le16_to_cpu(params->dev_revid)); + + BT_INFO("%s: Secure boot is %s", hdev->name, + params->secure_boot ? "enabled" : "disabled"); + + BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name, + params->min_fw_build_nn, params->min_fw_build_cw, + 2000 + params->min_fw_build_yy); + + /* It is required that every single firmware fragment is acknowledged + * with a command complete event. If the boot parameters indicate + * that this bootloader does not send them, then abort the setup. + */ + if (params->limited_cce != 0x00) { + BT_ERR("%s: Unsupported Intel firmware loading method (%u)", + hdev->name, params->limited_cce); + kfree_skb(skb); + return -EINVAL; + } + + /* If the OTP has no valid Bluetooth device address, then there will + * also be no valid address for the operational firmware. + */ + if (!bacmp(¶ms->otp_bdaddr, BDADDR_ANY)) { + BT_INFO("%s: No device address configured", hdev->name); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } + + /* With this Intel bootloader only the hardware variant and device + * revision information are used to select the right firmware. + * + * Currently this bootloader support is limited to hardware variant + * iBT 3.0 (LnP/SfP) which is identified by the value 11 (0x0b). 
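+ * The firmware file name built below therefore has the form intel/ibt-11-<dev_revid>.sfi.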
+ */ + snprintf(fwname, sizeof(fwname), "intel/ibt-11-%u.sfi", + le16_to_cpu(params->dev_revid)); + + err = request_firmware(&fw, fwname, &hdev->dev); + if (err < 0) { + BT_ERR("%s: Failed to load Intel firmware file (%d)", + hdev->name, err); + kfree_skb(skb); + return err; + } + + BT_INFO("%s: Found device firmware: %s", hdev->name, fwname); + + kfree_skb(skb); + + if (fw->size < 644) { + BT_ERR("%s: Invalid size of firmware file (%zu)", + hdev->name, fw->size); + err = -EBADF; + goto done; + } + + set_bit(BTUSB_DOWNLOADING, &data->flags); + + /* Start the firmware download transaction with the Init fragment + * represented by the 128 bytes of CSS header. + */ + err = btusb_intel_secure_send(hdev, 0x00, 128, fw->data); + if (err < 0) { + BT_ERR("%s: Failed to send firmware header (%d)", + hdev->name, err); + goto done; + } + + /* Send the 256 bytes of public key information from the firmware + * as the PKey fragment. + */ + err = btusb_intel_secure_send(hdev, 0x03, 256, fw->data + 128); + if (err < 0) { + BT_ERR("%s: Failed to send firmware public key (%d)", + hdev->name, err); + goto done; + } + + /* Send the 256 bytes of signature information from the firmware + * as the Sign fragment. + */ + err = btusb_intel_secure_send(hdev, 0x02, 256, fw->data + 388); + if (err < 0) { + BT_ERR("%s: Failed to send firmware signature (%d)", + hdev->name, err); + goto done; + } + + fw_ptr = fw->data + 644; + + while (fw_ptr - fw->data < fw->size) { + struct hci_command_hdr *cmd = (void *)fw_ptr; + u8 cmd_len; + + cmd_len = sizeof(*cmd) + cmd->plen; + + /* Send each command from the firmware data buffer as + * a single Data fragment. + */ + err = btusb_intel_secure_send(hdev, 0x01, cmd_len, fw_ptr); + if (err < 0) { + BT_ERR("%s: Failed to send firmware data (%d)", + hdev->name, err); + goto done; + } + + fw_ptr += cmd_len; + } + + set_bit(BTUSB_FIRMWARE_LOADED, &data->flags); + + BT_INFO("%s: Waiting for firmware download to complete", hdev->name); + + /* Before switching the device into operational mode and with that + * booting the loaded firmware, wait for the bootloader notification + * that all fragments have been successfully received. + * + * When the event processing receives the notification, then the + * BTUSB_DOWNLOADING flag will be cleared. + * + * The firmware loading should not take longer than 5 seconds + * and thus just timeout if that happens and fail the setup + * of this device. + */ + err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_DOWNLOADING, + msecs_to_jiffies(5000), + TASK_INTERRUPTIBLE); + if (err == 1) { + BT_ERR("%s: Firmware loading interrupted", hdev->name); + err = -EINTR; + goto done; + } + + if (err) { + BT_ERR("%s: Firmware loading timeout", hdev->name); + err = -ETIMEDOUT; + goto done; + } + + if (test_bit(BTUSB_FIRMWARE_FAILED, &data->flags)) { + BT_ERR("%s: Firmware loading failed", hdev->name); + err = -ENOEXEC; + goto done; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("%s: Firmware loaded in %llu usecs", hdev->name, duration); + +done: + release_firmware(fw); + + if (err < 0) + return err; + + calltime = ktime_get(); + + set_bit(BTUSB_BOOTING, &data->flags); + + skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(reset_param), reset_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + /* The bootloader will not indicate when the device is ready. This + * is done by the operational firmware sending bootup notification. 
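+ * That notification is the vendor specific event handled in btusb_recv_event_intel(), which clears BTUSB_BOOTING and wakes this wait.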
+ * + * Booting into operational firmware should not take longer than + * 1 second. However if that happens, then just fail the setup + * since something went wrong. + */ + BT_INFO("%s: Waiting for device to boot", hdev->name); + + err = btusb_wait_on_bit_timeout(&data->flags, BTUSB_BOOTING, + msecs_to_jiffies(1000), + TASK_INTERRUPTIBLE); + + if (err == 1) { + BT_ERR("%s: Device boot interrupted", hdev->name); + return -EINTR; + } + + if (err) { + BT_ERR("%s: Device boot timeout", hdev->name); + return -ETIMEDOUT; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("%s: Device booted in %llu usecs", hdev->name, duration); + + clear_bit(BTUSB_BOOTLOADER, &data->flags); + + return 0; +} + +static void btusb_hw_error_intel(struct hci_dev *hdev, u8 code) +{ + struct sk_buff *skb; + u8 type = 0x00; + + BT_ERR("%s: Hardware error 0x%2.2x", hdev->name, code); + + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Reset after hardware error failed (%ld)", + hdev->name, PTR_ERR(skb)); + return; + } + kfree_skb(skb); + + skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + BT_ERR("%s: Retrieving Intel exception info failed (%ld)", + hdev->name, PTR_ERR(skb)); + return; + } + + if (skb->len != 13) { + BT_ERR("%s: Exception info size mismatch", hdev->name); + kfree_skb(skb); + return; + } + + if (skb->data[0] != 0x00) { + BT_ERR("%s: Exception info command failure (%02x)", + hdev->name, skb->data[0]); + kfree_skb(skb); + return; + } + + BT_ERR("%s: Exception info %s", hdev->name, (char *)(skb->data + 1)); + + kfree_skb(skb); +} + static int btusb_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; @@ -2033,7 +2644,13 @@ static int btusb_probe(struct usb_interface *intf, if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) return -ENODEV; - data->cmdreq_type = USB_TYPE_CLASS; + if (id->driver_info & BTUSB_AMP) { + data->cmdreq_type = USB_TYPE_CLASS | 0x01; + data->cmdreq = 0x2b; + } else { + data->cmdreq_type = USB_TYPE_CLASS; + data->cmdreq = 0x00; + } data->udev = interface_to_usbdev(intf); data->intf = intf; @@ -2049,8 +2666,14 @@ static int btusb_probe(struct usb_interface *intf, init_usb_anchor(&data->isoc_anchor); spin_lock_init(&data->rxlock); - data->recv_event = hci_recv_frame; - data->recv_bulk = btusb_recv_bulk; + if (id->driver_info & BTUSB_INTEL_NEW) { + data->recv_event = btusb_recv_event_intel; + data->recv_bulk = btusb_recv_bulk_intel; + set_bit(BTUSB_BOOTLOADER, &data->flags); + } else { + data->recv_event = hci_recv_frame; + data->recv_bulk = btusb_recv_bulk; + } hdev = hci_alloc_dev(); if (!hdev) @@ -2059,6 +2682,11 @@ static int btusb_probe(struct usb_interface *intf, hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); + if (id->driver_info & BTUSB_AMP) + hdev->dev_type = HCI_AMP; + else + hdev->dev_type = HCI_BREDR; + data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); @@ -2081,6 +2709,15 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_INTEL) { hdev->setup = btusb_setup_intel; hdev->set_bdaddr = btusb_set_bdaddr_intel; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + } + + if (id->driver_info & BTUSB_INTEL_NEW) { + hdev->send = btusb_send_frame_intel; + hdev->setup = btusb_setup_intel_new; + hdev->hw_error = btusb_hw_error_intel; + hdev->set_bdaddr = btusb_set_bdaddr_intel; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, 
&hdev->quirks); } if (id->driver_info & BTUSB_MARVELL) @@ -2094,11 +2731,18 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_INTEL_BOOT) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); - if (id->driver_info & BTUSB_ATH3012) + if (id->driver_info & BTUSB_ATH3012) { hdev->set_bdaddr = btusb_set_bdaddr_ath3012; + set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); + } - /* Interface numbers are hardcoded in the specification */ - data->isoc = usb_ifnum_to_if(data->udev, 1); + if (id->driver_info & BTUSB_AMP) { + /* AMP controllers do not support SCO packets */ + data->isoc = NULL; + } else { + /* Interface numbers are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, 1); + } if (!reset) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); @@ -2192,7 +2836,6 @@ static void btusb_disconnect(struct usb_interface *intf) else if (data->isoc) usb_driver_release_interface(&btusb_driver, data->isoc); - btusb_free_frags(data); hci_free_dev(hdev); } diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 2d8c3397774f..f50a546224ad 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c @@ -36,6 +36,7 @@ #include <linux/slab.h> #include <linux/inet.h> #include <linux/string.h> +#include <linux/mlx4/driver.h> #include "mlx4_ib.h" diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 9db258f7c804..2ed5b996b2f4 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -351,6 +351,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, enum ib_mtu tmp; struct mlx4_cmd_mailbox *mailbox; int err = 0; + int is_bonded = mlx4_is_bonded(mdev->dev); mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); if (IS_ERR(mailbox)) @@ -374,8 +375,12 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->state = IB_PORT_DOWN; props->phys_state = state_to_phys_state(props->state); props->active_mtu = IB_MTU_256; + if (is_bonded) + rtnl_lock(); /* required to get upper dev */ spin_lock_bh(&iboe->lock); ndev = iboe->netdevs[port - 1]; + if (ndev && is_bonded) + ndev = netdev_master_upper_dev_get(ndev); if (!ndev) goto out_unlock; @@ -387,6 +392,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, props->phys_state = state_to_phys_state(props->state); out_unlock: spin_unlock_bh(&iboe->lock); + if (is_bonded) + rtnl_unlock(); out: mlx4_free_cmd_mailbox(mdev->dev, mailbox); return err; @@ -844,7 +851,7 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, struct mlx4_ib_steering { struct list_head list; - u64 reg_id; + struct mlx4_flow_reg_id reg_id; union ib_gid gid; }; @@ -1135,9 +1142,11 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, int domain) { - int err = 0, i = 0; + int err = 0, i = 0, j = 0; struct mlx4_ib_flow *mflow; enum mlx4_net_trans_promisc_mode type[2]; + struct mlx4_dev *dev = (to_mdev(qp->device))->dev; + int is_bonded = mlx4_is_bonded(dev); memset(type, 0, sizeof(type)); @@ -1172,26 +1181,55 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, while (i < ARRAY_SIZE(type) && type[i]) { err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], - &mflow->reg_id[i]); + &mflow->reg_id[i].id); if (err) goto err_create_flow; i++; + if (is_bonded) { + flow_attr->port = 2; + err = __mlx4_ib_create_flow(qp, flow_attr, + domain, type[j], + &mflow->reg_id[j].mirror); + flow_attr->port = 1; + if (err) + goto 
err_create_flow; + j++; + } + } if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { - err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); + err = mlx4_ib_tunnel_steer_add(qp, flow_attr, + &mflow->reg_id[i].id); if (err) goto err_create_flow; i++; + if (is_bonded) { + flow_attr->port = 2; + err = mlx4_ib_tunnel_steer_add(qp, flow_attr, + &mflow->reg_id[j].mirror); + flow_attr->port = 1; + if (err) + goto err_create_flow; + j++; + } + /* function to create mirror rule */ } return &mflow->ibflow; err_create_flow: while (i) { - (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]); + (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, + mflow->reg_id[i].id); i--; } + + while (j) { + (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, + mflow->reg_id[j].mirror); + j--; + } err_free: kfree(mflow); return ERR_PTR(err); @@ -1204,10 +1242,16 @@ static int mlx4_ib_destroy_flow(struct ib_flow *flow_id) struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device); struct mlx4_ib_flow *mflow = to_mflow(flow_id); - while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) { - err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]); + while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) { + err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id); if (err) ret = err; + if (mflow->reg_id[i].mirror) { + err = __mlx4_ib_destroy_flow(mdev->dev, + mflow->reg_id[i].mirror); + if (err) + ret = err; + } i++; } @@ -1219,11 +1263,12 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); + struct mlx4_dev *dev = mdev->dev; struct mlx4_ib_qp *mqp = to_mqp(ibqp); - u64 reg_id; struct mlx4_ib_steering *ib_steering = NULL; enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; + struct mlx4_flow_reg_id reg_id; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { @@ -1235,10 +1280,20 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), - prot, ®_id); + prot, ®_id.id); if (err) goto err_malloc; + reg_id.mirror = 0; + if (mlx4_is_bonded(dev)) { + err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, 2, + !!(mqp->flags & + MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), + prot, ®_id.mirror); + if (err) + goto err_add; + } + err = add_gid_entry(ibqp, gid); if (err) goto err_add; @@ -1254,7 +1309,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) err_add: mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, - prot, reg_id); + prot, reg_id.id); + if (reg_id.mirror) + mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, + prot, reg_id.mirror); err_malloc: kfree(ib_steering); @@ -1281,10 +1339,12 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); + struct mlx4_dev *dev = mdev->dev; struct mlx4_ib_qp *mqp = to_mqp(ibqp); struct net_device *ndev; struct mlx4_ib_gid_entry *ge; - u64 reg_id = 0; + struct mlx4_flow_reg_id reg_id = {0, 0}; + enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? 
MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; @@ -1309,10 +1369,17 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) } err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, - prot, reg_id); + prot, reg_id.id); if (err) return err; + if (mlx4_is_bonded(dev)) { + err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, + prot, reg_id.mirror); + if (err) + return err; + } + mutex_lock(&mqp->mutex); ge = find_gid_entry(mqp, gid->raw); if (ge) { @@ -1440,6 +1507,7 @@ static void update_gids_task(struct work_struct *work) union ib_gid *gids; int err; struct mlx4_dev *dev = gw->dev->dev; + int is_bonded = mlx4_is_bonded(dev); if (!gw->dev->ib_active) return; @@ -1459,7 +1527,10 @@ static void update_gids_task(struct work_struct *work) if (err) pr_warn("set port command failed\n"); else - mlx4_ib_dispatch_event(gw->dev, gw->port, IB_EVENT_GID_CHANGE); + if ((gw->port == 1) || !is_bonded) + mlx4_ib_dispatch_event(gw->dev, + is_bonded ? 1 : gw->port, + IB_EVENT_GID_CHANGE); mlx4_free_cmd_mailbox(dev, mailbox); kfree(gw); @@ -1875,7 +1946,8 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev, * don't want the bond IP based gids in the table since * flows that select port by gid may get the down port. */ - if (port_state == IB_PORT_DOWN) { + if (port_state == IB_PORT_DOWN && + !mlx4_is_bonded(ibdev->dev)) { reset_gid_table(ibdev, port); mlx4_ib_set_default_gid(ibdev, curr_netdev, @@ -2047,6 +2119,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) int err; struct mlx4_ib_iboe *iboe; int ib_num_ports = 0; + int num_req_counters; pr_info_once("%s", mlx4_ib_version); @@ -2080,13 +2153,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); ibdev->dev = dev; + ibdev->bond_next_port = 0; strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX); ibdev->ib_dev.owner = THIS_MODULE; ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; ibdev->num_ports = num_ports; - ibdev->ib_dev.phys_port_cnt = ibdev->num_ports; + ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? + 1 : ibdev->num_ports; ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; ibdev->ib_dev.dma_device = &dev->persist->pdev->dev; @@ -2207,7 +2282,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (init_node_data(ibdev)) goto err_map; - for (i = 0; i < ibdev->num_ports; ++i) { + num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; + for (i = 0; i < num_req_counters; ++i) { mutex_init(&ibdev->qp1_proxy_lock[i]); if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) == IB_LINK_LAYER_ETHERNET) { @@ -2218,6 +2294,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->counters[i] = -1; } } + if (mlx4_is_bonded(dev)) + for (i = 1; i < ibdev->num_ports ; ++i) + ibdev->counters[i] = ibdev->counters[0]; + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) ib_num_ports++; @@ -2538,6 +2618,38 @@ out: return; } +static void handle_bonded_port_state_event(struct work_struct *work) +{ + struct ib_event_work *ew = + container_of(work, struct ib_event_work, work); + struct mlx4_ib_dev *ibdev = ew->ib_dev; + enum ib_port_state bonded_port_state = IB_PORT_NOP; + int i; + struct ib_event ibev; + + kfree(ew); + spin_lock_bh(&ibdev->iboe.lock); + for (i = 0; i < MLX4_MAX_PORTS; ++i) { + struct net_device *curr_netdev = ibdev->iboe.netdevs[i]; + + enum ib_port_state curr_port_state = + (netif_running(curr_netdev) && + netif_carrier_ok(curr_netdev)) ? 
+ IB_PORT_ACTIVE : IB_PORT_DOWN; + + bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ? + curr_port_state : IB_PORT_ACTIVE; + } + spin_unlock_bh(&ibdev->iboe.lock); + + ibev.device = &ibdev->ib_dev; + ibev.element.port_num = 1; + ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ? + IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + + ib_dispatch_event(&ibev); +} + static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, enum mlx4_dev_event event, unsigned long param) { @@ -2547,6 +2659,18 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, struct ib_event_work *ew; int p = 0; + if (mlx4_is_bonded(dev) && + ((event == MLX4_DEV_EVENT_PORT_UP) || + (event == MLX4_DEV_EVENT_PORT_DOWN))) { + ew = kmalloc(sizeof(*ew), GFP_ATOMIC); + if (!ew) + return; + INIT_WORK(&ew->work, handle_bonded_port_state_event); + ew->ib_dev = ibdev; + queue_work(wq, &ew->work); + return; + } + if (event == MLX4_DEV_EVENT_PORT_MGMT_CHANGE) eqe = (struct mlx4_eqe *)param; else @@ -2607,7 +2731,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, } ibev.device = ibdev_ptr; - ibev.element.port_num = (u8) p; + ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p; ib_dispatch_event(&ibev); } @@ -2616,7 +2740,8 @@ static struct mlx4_interface mlx4_ib_interface = { .add = mlx4_ib_add, .remove = mlx4_ib_remove, .event = mlx4_ib_event, - .protocol = MLX4_PROT_IB_IPV6 + .protocol = MLX4_PROT_IB_IPV6, + .flags = MLX4_INTFF_BONDING }; static int __init mlx4_ib_init(void) diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 6eb743f65f6f..721540c9163d 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -134,10 +134,17 @@ struct mlx4_ib_fmr { struct mlx4_fmr mfmr; }; +#define MAX_REGS_PER_FLOW 2 + +struct mlx4_flow_reg_id { + u64 id; + u64 mirror; +}; + struct mlx4_ib_flow { struct ib_flow ibflow; /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */ - u64 reg_id[2]; + struct mlx4_flow_reg_id reg_id[MAX_REGS_PER_FLOW]; }; struct mlx4_ib_wq { @@ -527,6 +534,7 @@ struct mlx4_ib_dev { struct mlx4_ib_qp *qp1_proxy[MLX4_MAX_PORTS]; /* lock when destroying qp1_proxy and getting netdev events */ struct mutex qp1_proxy_lock[MLX4_MAX_PORTS]; + u8 bond_next_port; }; struct ib_event_work { @@ -622,6 +630,13 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah) return container_of(ibah, struct mlx4_ib_ah, ibah); } +static inline u8 mlx4_ib_bond_next_port(struct mlx4_ib_dev *dev) +{ + dev->bond_next_port = (dev->bond_next_port + 1) % dev->num_ports; + + return dev->bond_next_port + 1; +} + int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev); void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index cf000b7ad64f..792f9dc86ada 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -40,6 +40,7 @@ #include <rdma/ib_addr.h> #include <rdma/ib_mad.h> +#include <linux/mlx4/driver.h> #include <linux/mlx4/qp.h> #include "mlx4_ib.h" @@ -93,17 +94,6 @@ enum { #ifndef ETH_ALEN #define ETH_ALEN 6 #endif -static inline u64 mlx4_mac_to_u64(u8 *addr) -{ - u64 mac = 0; - int i; - - for (i = 0; i < ETH_ALEN; i++) { - mac <<= 8; - mac |= addr[i]; - } - return mac; -} static const __be32 mlx4_ib_opcode[] = { [IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND), @@ -1915,6 +1905,22 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, goto out; } + if (mlx4_is_bonded(dev->dev) && (attr_mask 
& IB_QP_PORT)) { + if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) { + if ((ibqp->qp_type == IB_QPT_RC) || + (ibqp->qp_type == IB_QPT_UD) || + (ibqp->qp_type == IB_QPT_UC) || + (ibqp->qp_type == IB_QPT_RAW_PACKET) || + (ibqp->qp_type == IB_QPT_XRC_INI)) { + attr->port_num = mlx4_ib_bond_next_port(dev); + } + } else { + /* no sense in changing port_num + * when ports are bonded */ + attr_mask &= ~IB_QP_PORT; + } + } + if ((attr_mask & IB_QP_PORT) && (attr->port_num == 0 || attr->port_num > dev->num_ports)) { pr_debug("qpn 0x%x: invalid port number (%d) specified " @@ -1965,6 +1971,9 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, err = __mlx4_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state); + if (mlx4_is_bonded(dev->dev) && (attr_mask & IB_QP_PORT)) + attr->port_num = 1; + out: mutex_unlock(&qp->mutex); return err; diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 7aaaf51e1596..35f19a683822 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -370,12 +370,12 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue, to_copy = size - bytes_copied; if (is_iovec) { - struct iovec *iov = (struct iovec *)src; + struct msghdr *msg = (struct msghdr *)src; int err; /* The iovec will track bytes_copied internally. */ - err = memcpy_fromiovec((u8 *)va + page_offset, - iov, to_copy); + err = memcpy_from_msg((u8 *)va + page_offset, + msg, to_copy); if (err != 0) { if (kernel_if->host) kunmap(kernel_if->u.h.page[page_index]); @@ -580,7 +580,7 @@ static int qp_memcpy_from_queue(void *dest, */ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, u64 queue_offset, - const void *src, + const void *msg, size_t src_offset, size_t size) { @@ -588,7 +588,7 @@ static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, * We ignore src_offset because src is really a struct iovec * and will * maintain offset internally. */ - return __qp_memcpy_to_queue(queue, queue_offset, src, size, true); + return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true); } /* @@ -3223,13 +3223,13 @@ EXPORT_SYMBOL_GPL(vmci_qpair_peek); * of bytes enqueued or < 0 on error. 
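* The msg argument is a struct msghdr rather than a raw iovec; memcpy_from_msg() advances its internal iterator as data is copied.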
*/ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, - void *iov, + struct msghdr *msg, size_t iov_size, int buf_type) { ssize_t result; - if (!qpair || !iov) + if (!qpair) return VMCI_ERROR_INVALID_ARGS; qp_lock(qpair); @@ -3238,7 +3238,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair, result = qp_enqueue_locked(qpair->produce_q, qpair->consume_q, qpair->produce_q_size, - iov, iov_size, + msg, iov_size, qp_memcpy_to_queue_iov); if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index c9e519cb9214..679ef00d6b16 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -790,7 +790,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } new_active->delay = 0; - new_active->link = BOND_LINK_UP; + bond_set_slave_link_state(new_active, BOND_LINK_UP); if (BOND_MODE(bond) == BOND_MODE_8023AD) bond_3ad_handle_link_change(new_active, BOND_LINK_UP); @@ -1181,6 +1181,62 @@ static void bond_free_slave(struct slave *slave) kfree(slave); } +static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info) +{ + info->bond_mode = BOND_MODE(bond); + info->miimon = bond->params.miimon; + info->num_slaves = bond->slave_cnt; +} + +static void bond_fill_ifslave(struct slave *slave, struct ifslave *info) +{ + strcpy(info->slave_name, slave->dev->name); + info->link = slave->link; + info->state = bond_slave_state(slave); + info->link_failure_count = slave->link_failure_count; +} + +static void bond_netdev_notify(struct slave *slave, struct net_device *dev) +{ + struct bonding *bond = slave->bond; + struct netdev_bonding_info bonding_info; + + rtnl_lock(); + /* make sure that slave is still valid */ + if (dev->priv_flags & IFF_BONDING) { + bond_fill_ifslave(slave, &bonding_info.slave); + bond_fill_ifbond(bond, &bonding_info.master); + netdev_bonding_info_change(slave->dev, &bonding_info); + } + rtnl_unlock(); +} + +static void bond_netdev_notify_work(struct work_struct *_work) +{ + struct netdev_notify_work *w = + container_of(_work, struct netdev_notify_work, work.work); + + bond_netdev_notify(w->slave, w->dev); + dev_put(w->dev); +} + +void bond_queue_slave_event(struct slave *slave) +{ + struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC); + + if (!nnw) + return; + + INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work); + nnw->slave = slave; + nnw->dev = slave->dev; + + if (queue_delayed_work(slave->bond->wq, &nnw->work, 0)) + dev_hold(slave->dev); + else + kfree(nnw); +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) { @@ -1444,19 +1500,22 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) if (bond->params.miimon) { if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) { if (bond->params.updelay) { - new_slave->link = BOND_LINK_BACK; + bond_set_slave_link_state(new_slave, + BOND_LINK_BACK); new_slave->delay = bond->params.updelay; } else { - new_slave->link = BOND_LINK_UP; + bond_set_slave_link_state(new_slave, + BOND_LINK_UP); } } else { - new_slave->link = BOND_LINK_DOWN; + bond_set_slave_link_state(new_slave, BOND_LINK_DOWN); } } else if (bond->params.arp_interval) { - new_slave->link = (netif_carrier_ok(slave_dev) ? - BOND_LINK_UP : BOND_LINK_DOWN); + bond_set_slave_link_state(new_slave, + (netif_carrier_ok(slave_dev) ? 
+ BOND_LINK_UP : BOND_LINK_DOWN)); } else { - new_slave->link = BOND_LINK_UP; + bond_set_slave_link_state(new_slave, BOND_LINK_UP); } if (new_slave->link != BOND_LINK_DOWN) @@ -1572,6 +1631,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) new_slave->link != BOND_LINK_DOWN ? "an up" : "a down"); /* enslave is successful */ + bond_queue_slave_event(new_slave); return 0; /* Undo stages on error */ @@ -1821,11 +1881,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, static int bond_info_query(struct net_device *bond_dev, struct ifbond *info) { struct bonding *bond = netdev_priv(bond_dev); - - info->bond_mode = BOND_MODE(bond); - info->miimon = bond->params.miimon; - info->num_slaves = bond->slave_cnt; - + bond_fill_ifbond(bond, info); return 0; } @@ -1839,10 +1895,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in bond_for_each_slave(bond, slave, iter) { if (i++ == (int)info->slave_id) { res = 0; - strcpy(info->slave_name, slave->dev->name); - info->link = slave->link; - info->state = bond_slave_state(slave); - info->link_failure_count = slave->link_failure_count; + bond_fill_ifslave(slave, info); break; } } @@ -1872,7 +1925,7 @@ static int bond_miimon_inspect(struct bonding *bond) if (link_state) continue; - slave->link = BOND_LINK_FAIL; + bond_set_slave_link_state(slave, BOND_LINK_FAIL); slave->delay = bond->params.downdelay; if (slave->delay) { netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n", @@ -1887,7 +1940,7 @@ static int bond_miimon_inspect(struct bonding *bond) case BOND_LINK_FAIL: if (link_state) { /* recovered before downdelay expired */ - slave->link = BOND_LINK_UP; + bond_set_slave_link_state(slave, BOND_LINK_UP); slave->last_link_up = jiffies; netdev_info(bond->dev, "link status up again after %d ms for interface %s\n", (bond->params.downdelay - slave->delay) * @@ -1909,7 +1962,7 @@ static int bond_miimon_inspect(struct bonding *bond) if (!link_state) continue; - slave->link = BOND_LINK_BACK; + bond_set_slave_link_state(slave, BOND_LINK_BACK); slave->delay = bond->params.updelay; if (slave->delay) { @@ -1922,7 +1975,8 @@ static int bond_miimon_inspect(struct bonding *bond) /*FALLTHRU*/ case BOND_LINK_BACK: if (!link_state) { - slave->link = BOND_LINK_DOWN; + bond_set_slave_link_state(slave, + BOND_LINK_DOWN); netdev_info(bond->dev, "link status down again after %d ms for interface %s\n", (bond->params.updelay - slave->delay) * bond->params.miimon, @@ -1960,7 +2014,7 @@ static void bond_miimon_commit(struct bonding *bond) continue; case BOND_LINK_UP: - slave->link = BOND_LINK_UP; + bond_set_slave_link_state(slave, BOND_LINK_UP); slave->last_link_up = jiffies; primary = rtnl_dereference(bond->primary_slave); @@ -2000,7 +2054,7 @@ static void bond_miimon_commit(struct bonding *bond) if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; - slave->link = BOND_LINK_DOWN; + bond_set_slave_link_state(slave, BOND_LINK_DOWN); if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP || BOND_MODE(bond) == BOND_MODE_8023AD) @@ -2583,7 +2637,7 @@ static void bond_ab_arp_commit(struct bonding *bond) struct slave *current_arp_slave; current_arp_slave = rtnl_dereference(bond->current_arp_slave); - slave->link = BOND_LINK_UP; + bond_set_slave_link_state(slave, BOND_LINK_UP); if (current_arp_slave) { bond_set_slave_inactive_flags( current_arp_slave, @@ -2606,7 +2660,7 @@ static void bond_ab_arp_commit(struct bonding *bond) if (slave->link_failure_count < UINT_MAX) 
slave->link_failure_count++; - slave->link = BOND_LINK_DOWN; + bond_set_slave_link_state(slave, BOND_LINK_DOWN); bond_set_slave_inactive_flags(slave, BOND_SLAVE_NOTIFY_NOW); @@ -2685,7 +2739,7 @@ static bool bond_ab_arp_probe(struct bonding *bond) * up when it is actually down */ if (!bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) { - slave->link = BOND_LINK_DOWN; + bond_set_slave_link_state(slave, BOND_LINK_DOWN); if (slave->link_failure_count < UINT_MAX) slave->link_failure_count++; @@ -2705,7 +2759,7 @@ static bool bond_ab_arp_probe(struct bonding *bond) if (!new_slave) goto check_state; - new_slave->link = BOND_LINK_BACK; + bond_set_slave_link_state(new_slave, BOND_LINK_BACK); bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER); bond_arp_send_all(bond, new_slave); new_slave->last_link_up = jiffies; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index fb6980a09981..55019c93387d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -476,6 +476,22 @@ struct sge_rspq { /* state for an SGE response queue */ struct adapter *adap; struct net_device *netdev; /* associated net device */ rspq_handler_t handler; +#ifdef CONFIG_NET_RX_BUSY_POLL +#define CXGB_POLL_STATE_IDLE 0 +#define CXGB_POLL_STATE_NAPI BIT(0) /* NAPI owns this poll */ +#define CXGB_POLL_STATE_POLL BIT(1) /* poll owns this poll */ +#define CXGB_POLL_STATE_NAPI_YIELD BIT(2) /* NAPI yielded this poll */ +#define CXGB_POLL_STATE_POLL_YIELD BIT(3) /* poll yielded this poll */ +#define CXGB_POLL_YIELD (CXGB_POLL_STATE_NAPI_YIELD | \ + CXGB_POLL_STATE_POLL_YIELD) +#define CXGB_POLL_LOCKED (CXGB_POLL_STATE_NAPI | \ + CXGB_POLL_STATE_POLL) +#define CXGB_POLL_USER_PEND (CXGB_POLL_STATE_POLL | \ + CXGB_POLL_STATE_POLL_YIELD) + unsigned int bpoll_state; + spinlock_t bpoll_lock; /* lock for busy poll */ +#endif /* CONFIG_NET_RX_BUSY_POLL */ + }; struct sge_eth_stats { /* Ethernet queue statistics */ @@ -880,6 +896,102 @@ static inline struct adapter *netdev2adap(const struct net_device *dev) return netdev2pinfo(dev)->adapter; } +#ifdef CONFIG_NET_RX_BUSY_POLL +static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) +{ + spin_lock_init(&q->bpoll_lock); + q->bpoll_state = CXGB_POLL_STATE_IDLE; +} + +static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) +{ + bool rc = true; + + spin_lock(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_LOCKED) { + q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD; + rc = false; + } else { + q->bpoll_state = CXGB_POLL_STATE_NAPI; + } + spin_unlock(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) +{ + bool rc = false; + + spin_lock(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) + rc = true; + q->bpoll_state = CXGB_POLL_STATE_IDLE; + spin_unlock(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) +{ + bool rc = true; + + spin_lock_bh(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_LOCKED) { + q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD; + rc = false; + } else { + q->bpoll_state |= CXGB_POLL_STATE_POLL; + } + spin_unlock_bh(&q->bpoll_lock); + return rc; +} + +static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) +{ + bool rc = false; + + spin_lock_bh(&q->bpoll_lock); + if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD) + rc = true; + q->bpoll_state = CXGB_POLL_STATE_IDLE; + spin_unlock_bh(&q->bpoll_lock); + return rc; +} + +static inline bool 
cxgb_poll_busy_polling(struct sge_rspq *q) +{ + return q->bpoll_state & CXGB_POLL_USER_PEND; +} +#else +static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) +{ +} + +static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) +{ + return true; +} + +static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q) +{ + return false; +} + +static inline bool cxgb_poll_lock_poll(struct sge_rspq *q) +{ + return false; +} + +static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q) +{ + return false; +} + +static inline bool cxgb_poll_busy_polling(struct sge_rspq *q) +{ + return false; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + void t4_os_portmod_changed(const struct adapter *adap, int port_id); void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat); @@ -908,6 +1020,7 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie); int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); +int cxgb_busy_poll(struct napi_struct *napi); extern int dbfifo_int_thresh; #define for_each_port(adapter, iter) \ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 5bf490a781aa..5db5b4f7b94d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -923,8 +923,14 @@ static void quiesce_rx(struct adapter *adap) for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) { struct sge_rspq *q = adap->sge.ingr_map[i]; - if (q && q->handler) + if (q && q->handler) { napi_disable(&q->napi); + local_bh_disable(); + while (!cxgb_poll_lock_napi(q)) + mdelay(1); + local_bh_enable(); + } + } } @@ -940,8 +946,10 @@ static void enable_rx(struct adapter *adap) if (!q) continue; - if (q->handler) + if (q->handler) { + cxgb_busy_poll_init_lock(q); napi_enable(&q->napi); + } /* 0-increment GTS to start the timer and enable interrupts */ t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), SEINTARM_V(q->intr_params) | @@ -4563,6 +4571,10 @@ static const struct net_device_ops cxgb4_netdev_ops = { #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, #endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = cxgb_busy_poll, +#endif + }; void t4_fatal_err(struct adapter *adap) @@ -5130,8 +5142,7 @@ static int adap_init0(struct adapter *adap) state, &reset); /* Cleaning up */ - if (fw != NULL) - release_firmware(fw); + release_firmware(fw); t4_free_mem(card_fw); if (ret < 0) diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 619156112b21..b4b9f6048fe7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -43,6 +43,9 @@ #include <linux/export.h> #include <net/ipv6.h> #include <net/tcp.h> +#ifdef CONFIG_NET_RX_BUSY_POLL +#include <net/busy_poll.h> +#endif /* CONFIG_NET_RX_BUSY_POLL */ #include "cxgb4.h" #include "t4_regs.h" #include "t4_values.h" @@ -1720,6 +1723,7 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, skb->truesize += skb->data_len; skb->ip_summed = CHECKSUM_UNNECESSARY; skb_record_rx_queue(skb, rxq->rspq.idx); + skb_mark_napi_id(skb, &rxq->rspq.napi); if (rxq->rspq.netdev->features & NETIF_F_RXHASH) skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, PKT_HASH_TYPE_L3); @@ -1763,6 +1767,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, csum_ok = pkt->csum_calc && !pkt->err_vec && (q->netdev->features & NETIF_F_RXCSUM); if ((pkt->l2info & htonl(RXF_TCP_F)) && + 
!(cxgb_poll_busy_polling(q)) && (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { do_gro(rxq, si, pkt); return 0; @@ -1801,6 +1806,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); rxq->stats.vlan_ex++; } + skb_mark_napi_id(skb, &q->napi); netif_receive_skb(skb); return 0; } @@ -1963,6 +1969,38 @@ static int process_responses(struct sge_rspq *q, int budget) return budget - budget_left; } +#ifdef CONFIG_NET_RX_BUSY_POLL +int cxgb_busy_poll(struct napi_struct *napi) +{ + struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); + unsigned int params, work_done; + u32 val; + + if (!cxgb_poll_lock_poll(q)) + return LL_FLUSH_BUSY; + + work_done = process_responses(q, 4); + params = QINTR_TIMER_IDX(TIMERREG_COUNTER0_X) | QINTR_CNT_EN; + q->next_intr_params = params; + val = CIDXINC_V(work_done) | SEINTARM_V(params); + + /* If we don't have access to the new User GTS (T5+), use the old + * doorbell mechanism; otherwise use the new BAR2 mechanism. + */ + if (unlikely(!q->bar2_addr)) + t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), + val | INGRESSQID_V((u32)q->cntxt_id)); + else { + writel(val | INGRESSQID_V(q->bar2_qid), + q->bar2_addr + SGE_UDB_GTS); + wmb(); + } + + cxgb_poll_unlock_poll(q); + return work_done; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + /** * napi_rx_handler - the NAPI handler for Rx processing * @napi: the napi instance @@ -1978,9 +2016,13 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) { unsigned int params; struct sge_rspq *q = container_of(napi, struct sge_rspq, napi); - int work_done = process_responses(q, budget); + int work_done; u32 val; + if (!cxgb_poll_lock_napi(q)) + return budget; + + work_done = process_responses(q, budget); if (likely(work_done < budget)) { int timer_index; @@ -2018,6 +2060,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget) q->bar2_addr + SGE_UDB_GTS); wmb(); } + cxgb_poll_unlock_napi(q); return work_done; } @@ -2341,6 +2384,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, goto err; netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); + napi_hash_add(&iq->napi); iq->cur_desc = iq->desc; iq->cidx = 0; iq->gen = 1; @@ -2598,6 +2642,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, rq->cntxt_id, fl_id, 0xffff); dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, rq->desc, rq->phys_addr); + napi_hash_del(&rq->napi); netif_napi_del(&rq->napi); rq->netdev = NULL; rq->cntxt_id = rq->abs_id = 0; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h index a40484432ebf..997ec87470c7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h @@ -59,6 +59,7 @@ /* GTS register */ #define SGE_TIMERREGS 6 +#define TIMERREG_COUNTER0_X 0 /* T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. 
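* (Ingress queues reach their GTS register through the same BAR2 region at SGE_UDB_GTS, which the new cxgb_busy_poll() uses to re-arm the queue.)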
* The User Doorbells are each 128 bytes in length with a Simple Doorbell at diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 58cabee00abf..9bb6220663b2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -2596,12 +2596,9 @@ static void fec_enet_free_queue(struct net_device *ndev) } for (i = 0; i < fep->num_rx_queues; i++) - if (fep->rx_queue[i]) - kfree(fep->rx_queue[i]); - + kfree(fep->rx_queue[i]); for (i = 0; i < fep->num_tx_queues; i++) - if (fep->tx_queue[i]) - kfree(fep->tx_queue[i]); + kfree(fep->tx_queue[i]); } static int fec_enet_alloc_queue(struct net_device *ndev) diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 9e2bcb807923..a17628769a1f 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -278,14 +278,20 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget) fep->stats.collisions++; /* unmap */ - dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), - skb->len, DMA_TO_DEVICE); + if (fep->mapped_as_page[dirtyidx]) + dma_unmap_page(fep->dev, CBDR_BUFADDR(bdp), + CBDR_DATLEN(bdp), DMA_TO_DEVICE); + else + dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp), + CBDR_DATLEN(bdp), DMA_TO_DEVICE); /* * Free the sk buffer associated with this last transmit. */ - dev_kfree_skb(skb); - fep->tx_skbuff[dirtyidx] = NULL; + if (skb) { + dev_kfree_skb(skb); + fep->tx_skbuff[dirtyidx] = NULL; + } /* * Update pointer to next buffer descriptor to be transmitted. @@ -299,7 +305,7 @@ static int fs_enet_tx_napi(struct napi_struct *napi, int budget) * Since we have freed up a buffer, the ring is no longer * full. */ - if (!fep->tx_free++) + if (++fep->tx_free >= MAX_SKB_FRAGS) do_wake = 1; has_tx_work = 1; } @@ -509,6 +515,9 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) cbd_t __iomem *bdp; int curidx; u16 sc; + int nr_frags = skb_shinfo(skb)->nr_frags; + skb_frag_t *frag; + int len; #ifdef CONFIG_FS_ENET_MPC5121_FEC if (((unsigned long)skb->data) & 0x3) { @@ -530,7 +539,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) */ bdp = fep->cur_tx; - if (!fep->tx_free || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { + if (fep->tx_free <= nr_frags || (CBDR_SC(bdp) & BD_ENET_TX_READY)) { netif_stop_queue(dev); spin_unlock(&fep->tx_lock); @@ -543,35 +552,42 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) } curidx = bdp - fep->tx_bd_base; - /* - * Clear all of the status flags. - */ - CBDC_SC(bdp, BD_ENET_TX_STATS); - - /* - * Save skb pointer. - */ - fep->tx_skbuff[curidx] = skb; - - fep->stats.tx_bytes += skb->len; + len = skb->len; + fep->stats.tx_bytes += len; + if (nr_frags) + len -= skb->data_len; + fep->tx_free -= nr_frags + 1; /* * Push the data cache so the CPM does not get stale memory data. */ CBDW_BUFADDR(bdp, dma_map_single(fep->dev, - skb->data, skb->len, DMA_TO_DEVICE)); - CBDW_DATLEN(bdp, skb->len); + skb->data, len, DMA_TO_DEVICE)); + CBDW_DATLEN(bdp, len); + + fep->mapped_as_page[curidx] = 0; + frag = skb_shinfo(skb)->frags; + while (nr_frags) { + CBDC_SC(bdp, + BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); + CBDS_SC(bdp, BD_ENET_TX_READY); + + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) + bdp++, curidx++; + else + bdp = fep->tx_bd_base, curidx = 0; - /* - * If this was the last BD in the ring, start at the beginning again. 
- */ - if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) - fep->cur_tx++; - else - fep->cur_tx = fep->tx_bd_base; + len = skb_frag_size(frag); + CBDW_BUFADDR(bdp, skb_frag_dma_map(fep->dev, frag, 0, len, + DMA_TO_DEVICE)); + CBDW_DATLEN(bdp, len); - if (!--fep->tx_free) - netif_stop_queue(dev); + fep->tx_skbuff[curidx] = NULL; + fep->mapped_as_page[curidx] = 1; + + frag++; + nr_frags--; + } /* Trigger transmission start */ sc = BD_ENET_TX_READY | BD_ENET_TX_INTR | @@ -582,8 +598,22 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) * yay for hw reuse :) */ if (skb->len <= 60) sc |= BD_ENET_TX_PAD; + CBDC_SC(bdp, BD_ENET_TX_STATS); CBDS_SC(bdp, sc); + /* Save skb pointer. */ + fep->tx_skbuff[curidx] = skb; + + /* If this was the last BD in the ring, start at the beginning again. */ + if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) + bdp++; + else + bdp = fep->tx_bd_base; + fep->cur_tx = bdp; + + if (fep->tx_free < MAX_SKB_FRAGS) + netif_stop_queue(dev); + skb_tx_timestamp(skb); (*fep->ops->tx_kickstart)(dev); @@ -917,7 +947,7 @@ static int fs_enet_probe(struct platform_device *ofdev) } fpi->rx_ring = 32; - fpi->tx_ring = 32; + fpi->tx_ring = 64; fpi->rx_copybreak = 240; fpi->napi_weight = 17; fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0); @@ -955,7 +985,8 @@ static int fs_enet_probe(struct platform_device *ofdev) privsize = sizeof(*fep) + sizeof(struct sk_buff **) * - (fpi->rx_ring + fpi->tx_ring); + (fpi->rx_ring + fpi->tx_ring) + + sizeof(char) * fpi->tx_ring; ndev = alloc_etherdev(privsize); if (!ndev) { @@ -978,6 +1009,8 @@ static int fs_enet_probe(struct platform_device *ofdev) fep->rx_skbuff = (struct sk_buff **)&fep[1]; fep->tx_skbuff = fep->rx_skbuff + fpi->rx_ring; + fep->mapped_as_page = (char *)(fep->rx_skbuff + fpi->rx_ring + + fpi->tx_ring); spin_lock_init(&fep->lock); spin_lock_init(&fep->tx_lock); @@ -1007,6 +1040,8 @@ static int fs_enet_probe(struct platform_device *ofdev) netif_carrier_off(ndev); + ndev->features |= NETIF_F_SG; + ret = register_netdev(ndev); if (ret) goto out_free_bd; diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h index 3a4b49e0e717..f184d8f952e2 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h @@ -134,6 +134,7 @@ struct fs_enet_private { void __iomem *ring_base; struct sk_buff **rx_skbuff; struct sk_buff **tx_skbuff; + char *mapped_as_page; cbd_t __iomem *rx_bd_base; /* Address of Rx and Tx buffers. */ cbd_t __iomem *tx_bd_base; cbd_t __iomem *dirty_tx; /* ring entries to be free()ed. */ diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 9388a83818f2..162762d1a12c 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2367,7 +2367,7 @@ static int emac_wait_deps(struct emac_instance *dev) err = emac_check_deps(dev, deps) ? 
0 : -ENODEV; for (i = 0; i < EMAC_DEP_COUNT; i++) { of_node_put(deps[i].node); - if (err && deps[i].ofdev) + if (err) of_dev_put(deps[i].ofdev); } if (err == 0) { diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 154effbfd8be..a681d7c0bb9f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1583,6 +1583,15 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = mlx4_CMD_EPERM_wrapper }, + { + .opcode = MLX4_CMD_VIRT_PORT_MAP, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_CMD_EPERM_wrapper + }, }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c index c643d2bbb7b9..58d5a07d0ff4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c @@ -214,6 +214,8 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr) iounmap(mdev->uar_map); mlx4_uar_free(dev, &mdev->priv_uar); mlx4_pd_free(dev, mdev->priv_pdn); + if (mdev->nb.notifier_call) + unregister_netdevice_notifier(&mdev->nb); kfree(mdev); } @@ -298,6 +300,12 @@ static void *mlx4_en_add(struct mlx4_dev *dev) if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) mdev->pndev[i] = NULL; } + /* register notifier */ + mdev->nb.notifier_call = mlx4_en_netdev_event; + if (register_netdevice_notifier(&mdev->nb)) { + mdev->nb.notifier_call = NULL; + mlx4_err(mdev, "Failed to create notifier\n"); + } return mdev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index e075ff1f4e80..2a210c4efb89 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2062,6 +2062,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev) /* Detach the netdev so tasks would not attempt to access it */ mutex_lock(&mdev->state_lock); mdev->pndev[priv->port] = NULL; + mdev->upper[priv->port] = NULL; mutex_unlock(&mdev->state_lock); mlx4_en_free_resources(priv); @@ -2201,6 +2202,10 @@ static int mlx4_en_set_features(struct net_device *netdev, return ret; } + if (DEV_FEATURE_CHANGED(netdev, features, NETIF_F_HW_VLAN_CTAG_TX)) + en_info(priv, "Turn %s TX vlan strip offload\n", + (features & NETIF_F_HW_VLAN_CTAG_TX) ? 
"ON" : "OFF"); + if (features & NETIF_F_LOOPBACK) priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); else @@ -2441,6 +2446,180 @@ static const struct net_device_ops mlx4_netdev_ops_master = { #endif }; +struct mlx4_en_bond { + struct work_struct work; + struct mlx4_en_priv *priv; + int is_bonded; + struct mlx4_port_map port_map; +}; + +static void mlx4_en_bond_work(struct work_struct *work) +{ + struct mlx4_en_bond *bond = container_of(work, + struct mlx4_en_bond, + work); + int err = 0; + struct mlx4_dev *dev = bond->priv->mdev->dev; + + if (bond->is_bonded) { + if (!mlx4_is_bonded(dev)) { + err = mlx4_bond(dev); + if (err) + en_err(bond->priv, "Fail to bond device\n"); + } + if (!err) { + err = mlx4_port_map_set(dev, &bond->port_map); + if (err) + en_err(bond->priv, "Fail to set port map [%d][%d]: %d\n", + bond->port_map.port1, + bond->port_map.port2, + err); + } + } else if (mlx4_is_bonded(dev)) { + err = mlx4_unbond(dev); + if (err) + en_err(bond->priv, "Fail to unbond device\n"); + } + dev_put(bond->priv->dev); + kfree(bond); +} + +static int mlx4_en_queue_bond_work(struct mlx4_en_priv *priv, int is_bonded, + u8 v2p_p1, u8 v2p_p2) +{ + struct mlx4_en_bond *bond = NULL; + + bond = kzalloc(sizeof(*bond), GFP_ATOMIC); + if (!bond) + return -ENOMEM; + + INIT_WORK(&bond->work, mlx4_en_bond_work); + bond->priv = priv; + bond->is_bonded = is_bonded; + bond->port_map.port1 = v2p_p1; + bond->port_map.port2 = v2p_p2; + dev_hold(priv->dev); + queue_work(priv->mdev->workqueue, &bond->work); + return 0; +} + +int mlx4_en_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) +{ + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + u8 port = 0; + struct mlx4_en_dev *mdev; + struct mlx4_dev *dev; + int i, num_eth_ports = 0; + bool do_bond = true; + struct mlx4_en_priv *priv; + u8 v2p_port1 = 0; + u8 v2p_port2 = 0; + + if (!net_eq(dev_net(ndev), &init_net)) + return NOTIFY_DONE; + + mdev = container_of(this, struct mlx4_en_dev, nb); + dev = mdev->dev; + + /* Go into this mode only when two network devices set on two ports + * of the same mlx4 device are slaves of the same bonding master + */ + mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { + ++num_eth_ports; + if (!port && (mdev->pndev[i] == ndev)) + port = i; + mdev->upper[i] = mdev->pndev[i] ? + netdev_master_upper_dev_get(mdev->pndev[i]) : NULL; + /* condition not met: network device is a slave */ + if (!mdev->upper[i]) + do_bond = false; + if (num_eth_ports < 2) + continue; + /* condition not met: same master */ + if (mdev->upper[i] != mdev->upper[i-1]) + do_bond = false; + } + /* condition not met: 2 salves */ + do_bond = (num_eth_ports == 2) ? 
do_bond : false; + + /* handle only events that come with enough info */ + if ((do_bond && (event != NETDEV_BONDING_INFO)) || !port) + return NOTIFY_DONE; + + priv = netdev_priv(ndev); + if (do_bond) { + struct netdev_notifier_bonding_info *notifier_info = ptr; + struct netdev_bonding_info *bonding_info = + &notifier_info->bonding_info; + + /* required mode 1, 2 or 4 */ + if ((bonding_info->master.bond_mode != BOND_MODE_ACTIVEBACKUP) && + (bonding_info->master.bond_mode != BOND_MODE_XOR) && + (bonding_info->master.bond_mode != BOND_MODE_8023AD)) + do_bond = false; + + /* require exactly 2 slaves */ + if (bonding_info->master.num_slaves != 2) + do_bond = false; + + /* calc v2p */ + if (do_bond) { + if (bonding_info->master.bond_mode == + BOND_MODE_ACTIVEBACKUP) { + /* in active-backup mode virtual ports are + * mapped to the physical port of the active + * slave */ + if (bonding_info->slave.state == + BOND_STATE_BACKUP) { + if (port == 1) { + v2p_port1 = 2; + v2p_port2 = 2; + } else { + v2p_port1 = 1; + v2p_port2 = 1; + } + } else { /* BOND_STATE_ACTIVE */ + if (port == 1) { + v2p_port1 = 1; + v2p_port2 = 1; + } else { + v2p_port1 = 2; + v2p_port2 = 2; + } + } + } else { /* Active-Active */ + /* in active-active mode a virtual port is + * mapped to the native physical port if and only + * if the physical port is up */ + __s8 link = bonding_info->slave.link; + + if (port == 1) + v2p_port2 = 2; + else + v2p_port1 = 1; + if ((link == BOND_LINK_UP) || + (link == BOND_LINK_FAIL)) { + if (port == 1) + v2p_port1 = 1; + else + v2p_port2 = 2; + } else { /* BOND_LINK_DOWN || BOND_LINK_BACK */ + if (port == 1) + v2p_port1 = 2; + else + v2p_port2 = 1; + } + } + } + } + + mlx4_en_queue_bond_work(priv, do_bond, + v2p_port1, v2p_port2); + + return NOTIFY_DONE; +} + int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { @@ -2623,6 +2802,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, } mdev->pndev[port] = dev; + mdev->upper[port] = NULL; netif_carrier_off(dev); mlx4_en_set_default_moderation(priv); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c index f1a5500ff72d..34f2fdf4fe5d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -50,10 +50,14 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->mtu_msgmax = 0xff; if (!is_tx && !rss) context->rq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); - if (is_tx) + if (is_tx) { context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4); - else + if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP) + context->params2 |= MLX4_QP_BIT_FPP; + + } else { context->sq_size_stride = ilog2(TXBB_SIZE) - 4; + } context->usr_page = cpu_to_be32(mdev->priv_uar.index); context->local_qpn = cpu_to_be32(qpn); context->pri_path.ackto = 1 & 0x07; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 2ba5d368edce..698d60de1255 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -162,6 +162,10 @@ static int mlx4_en_init_allocator(struct mlx4_en_priv *priv, if (mlx4_alloc_pages(priv, &ring->page_alloc[i], frag_info, GFP_KERNEL | __GFP_COLD)) goto out; + + en_dbg(DRV, priv, " frag %d allocator: - size:%d frags:%d\n", + i, ring->page_alloc[i].page_size, + atomic_read(&ring->page_alloc[i].page->_count)); } return 0; @@ -1059,8 +1063,9 @@
void mlx4_en_calc_rx_buf(struct net_device *dev) (eff_mtu > buf_size + frag_sizes[i]) ? frag_sizes[i] : eff_mtu - buf_size; priv->frag_info[i].frag_prefix_size = buf_size; - priv->frag_info[i].frag_stride = ALIGN(frag_sizes[i], - SMP_CACHE_BYTES); + priv->frag_info[i].frag_stride = + ALIGN(priv->frag_info[i].frag_size, + SMP_CACHE_BYTES); buf_size += priv->frag_info[i].frag_size; i++; } diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index dbabfae3a3de..5a21e5dc94cb 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -142,7 +142,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [17] = "Asymmetric EQs support", [18] = "More than 80 VFs support", [19] = "Performance optimized for limited rule configuration flow steering support", - [20] = "Recoverable error events support" + [20] = "Recoverable error events support", + [21] = "Port Remap support" }; int i; @@ -863,6 +864,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_EQE_STRIDE; MLX4_GET(dev_cap->bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + if (dev_cap->bmme_flags & MLX4_FLAG_PORT_REMAP) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_PORT_REMAP; MLX4_GET(field, outbox, QUERY_DEV_CAP_CONFIG_DEV_OFFSET); if (field & 0x20) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_CONFIG_DEV; @@ -1120,9 +1123,10 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, field &= 0x7f; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); - /* For guests, disable mw type 2 */ + /* For guests, disable mw type 2 and port remap*/ MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN; + bmme_flags &= ~MLX4_FLAG_PORT_REMAP; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); /* turn off device-managed steering capability if not enabled */ @@ -2100,13 +2104,16 @@ struct mlx4_config_dev { __be32 rsvd1[3]; __be16 vxlan_udp_dport; __be16 rsvd2; - __be32 rsvd3[27]; - __be16 rsvd4; - u8 rsvd5; + __be32 rsvd3; + __be32 roce_flags; + __be32 rsvd4[25]; + __be16 rsvd5; + u8 rsvd6; u8 rx_checksum_val; }; #define MLX4_VXLAN_UDP_DPORT (1 << 0) +#define MLX4_DISABLE_RX_PORT BIT(18) static int mlx4_CONFIG_DEV_set(struct mlx4_dev *dev, struct mlx4_config_dev *config_dev) { @@ -2162,7 +2169,7 @@ static const u8 config_dev_csum_flags[] = { int mlx4_config_dev_retrieval(struct mlx4_dev *dev, struct mlx4_config_dev_params *params) { - struct mlx4_config_dev config_dev; + struct mlx4_config_dev config_dev = {0}; int err; u8 csum_mask; @@ -2209,6 +2216,45 @@ int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port) } EXPORT_SYMBOL_GPL(mlx4_config_vxlan_port); +#define CONFIG_DISABLE_RX_PORT BIT(15) +int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis) +{ + struct mlx4_config_dev config_dev; + + memset(&config_dev, 0, sizeof(config_dev)); + config_dev.update_flags = cpu_to_be32(MLX4_DISABLE_RX_PORT); + if (dis) + config_dev.roce_flags = + cpu_to_be32(CONFIG_DISABLE_RX_PORT); + + return mlx4_CONFIG_DEV_set(dev, &config_dev); +} + +int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2) +{ + struct mlx4_cmd_mailbox *mailbox; + struct { + __be32 v_port1; + __be32 v_port2; + } *v2p; + int err; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return -ENOMEM; + + v2p = mailbox->buf; + v2p->v_port1 = cpu_to_be32(port1); + v2p->v_port2 = cpu_to_be32(port2); + + err = mlx4_cmd(dev, 
mailbox->dma, 0, + MLX4_SET_PORT_VIRT2PHY, MLX4_CMD_VIRT_PORT_MAP, + MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); + + mlx4_free_cmd_mailbox(dev, mailbox); + return err; +} + int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages) { diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c index 68d2bad325d5..6fce58718837 100644 --- a/drivers/net/ethernet/mellanox/mlx4/intf.c +++ b/drivers/net/ethernet/mellanox/mlx4/intf.c @@ -33,11 +33,13 @@ #include <linux/slab.h> #include <linux/export.h> +#include <linux/errno.h> #include "mlx4.h" struct mlx4_device_context { struct list_head list; + struct list_head bond_list; struct mlx4_interface *intf; void *context; }; @@ -115,6 +117,58 @@ void mlx4_unregister_interface(struct mlx4_interface *intf) } EXPORT_SYMBOL_GPL(mlx4_unregister_interface); +int mlx4_do_bond(struct mlx4_dev *dev, bool enable) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_device_context *dev_ctx = NULL, *temp_dev_ctx; + unsigned long flags; + int ret; + LIST_HEAD(bond_list); + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + ret = mlx4_disable_rx_port_check(dev, enable); + if (ret) { + mlx4_err(dev, "Fail to %s rx port check\n", + enable ? "enable" : "disable"); + return ret; + } + if (enable) { + dev->flags |= MLX4_FLAG_BONDED; + } else { + ret = mlx4_virt2phy_port_map(dev, 1, 2); + if (ret) { + mlx4_err(dev, "Fail to reset port map\n"); + return ret; + } + dev->flags &= ~MLX4_FLAG_BONDED; + } + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_for_each_entry_safe(dev_ctx, temp_dev_ctx, &priv->ctx_list, list) { + if (dev_ctx->intf->flags & MLX4_INTFF_BONDING) { + list_add_tail(&dev_ctx->bond_list, &bond_list); + list_del(&dev_ctx->list); + } + } + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + list_for_each_entry(dev_ctx, &bond_list, bond_list) { + dev_ctx->intf->remove(dev, dev_ctx->context); + dev_ctx->context = dev_ctx->intf->add(dev); + + spin_lock_irqsave(&priv->ctx_lock, flags); + list_add_tail(&dev_ctx->list, &priv->ctx_list); + spin_unlock_irqrestore(&priv->ctx_lock, flags); + + mlx4_dbg(dev, "Interface for protocol %d restarted when bonded mode is %s\n", + dev_ctx->intf->protocol, enable ? + "enabled" : "disabled"); + } + return 0; +} + void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type, unsigned long param) { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index cc9f48439244..7e487223489a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -251,7 +251,8 @@ static void mlx4_enable_cqe_eqe_stride(struct mlx4_dev *dev) if (mlx4_is_master(dev)) dev_cap->function_caps |= MLX4_FUNC_CAP_EQE_CQE_STRIDE; } else { - mlx4_dbg(dev, "Disabling CQE stride cacheLine unsupported\n"); + if (cache_line_size() != 32 && cache_line_size() != 64) + mlx4_dbg(dev, "Disabling CQE stride, cacheLine size unsupported\n"); dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_CQE_STRIDE; dev_cap->flags2 &= ~MLX4_DEV_CAP_FLAG2_EQE_STRIDE; } @@ -1160,6 +1161,91 @@ err_set_port: return err ?
err : count; } +int mlx4_bond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (!mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, true); + else + ret = 0; + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to bond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is bonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_bond); + +int mlx4_unbond(struct mlx4_dev *dev) +{ + int ret = 0; + struct mlx4_priv *priv = mlx4_priv(dev); + + mutex_lock(&priv->bond_mutex); + + if (mlx4_is_bonded(dev)) + ret = mlx4_do_bond(dev, false); + + mutex_unlock(&priv->bond_mutex); + if (ret) + mlx4_err(dev, "Failed to unbond device: %d\n", ret); + else + mlx4_dbg(dev, "Device is unbonded\n"); + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_unbond); + + +int mlx4_port_map_set(struct mlx4_dev *dev, struct mlx4_port_map *v2p) +{ + u8 port1 = v2p->port1; + u8 port2 = v2p->port2; + struct mlx4_priv *priv = mlx4_priv(dev); + int err; + + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PORT_REMAP)) + return -ENOTSUPP; + + mutex_lock(&priv->bond_mutex); + + /* zero means keep current mapping for this port */ + if (port1 == 0) + port1 = priv->v2p.port1; + if (port2 == 0) + port2 = priv->v2p.port2; + + if ((port1 < 1) || (port1 > MLX4_MAX_PORTS) || + (port2 < 1) || (port2 > MLX4_MAX_PORTS) || + (port1 == 2 && port2 == 1)) { + /* besides boundary checks cross mapping makes + * no sense and therefore not allowed */ + err = -EINVAL; + } else if ((port1 == priv->v2p.port1) && + (port2 == priv->v2p.port2)) { + err = 0; + } else { + err = mlx4_virt2phy_port_map(dev, port1, port2); + if (!err) { + mlx4_dbg(dev, "port map changed: [%d][%d]\n", + port1, port2); + priv->v2p.port1 = port1; + priv->v2p.port2 = port2; + } else { + mlx4_err(dev, "Failed to change port map: %d\n", err); + } + } + + mutex_unlock(&priv->bond_mutex); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_port_map_set); + static int mlx4_load_fw(struct mlx4_dev *dev) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -2638,6 +2724,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data, spin_lock_init(&priv->ctx_lock); mutex_init(&priv->port_mutex); + mutex_init(&priv->bond_mutex); INIT_LIST_HEAD(&priv->pgdir_list); mutex_init(&priv->pgdir_mutex); @@ -2934,6 +3021,9 @@ slave_start: goto err_port; } + priv->v2p.port1 = 1; + priv->v2p.port2 = 2; + err = mlx4_register_device(dev); if (err) goto err_port; diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 148dc0945aab..803f17653da7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -885,6 +885,8 @@ struct mlx4_priv { int reserved_mtts; int fs_hash_mode; u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS]; + struct mlx4_port_map v2p; /* cached port mapping configuration */ + struct mutex bond_mutex; /* for bond mode */ __be64 slave_node_guids[MLX4_MFUNC_MAX]; atomic_t opreq_count; @@ -1364,6 +1366,7 @@ int mlx4_get_slave_num_gids(struct mlx4_dev *dev, int slave, int port); /* Returns the VF index of slave */ int mlx4_get_vf_indx(struct mlx4_dev *dev, int slave); int mlx4_config_mad_demux(struct mlx4_dev *dev); +int mlx4_do_bond(struct mlx4_dev *dev, bool enable); enum mlx4_zone_flags { MLX4_ZONE_ALLOW_ALLOC_FROM_LOWER_PRIO = 1UL << 0, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 944a112dff37..2a8268e6be15 100644 ---
a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -390,6 +390,7 @@ struct mlx4_en_dev { struct pci_dev *pdev; struct mutex state_lock; struct net_device *pndev[MLX4_MAX_PORTS + 1]; + struct net_device *upper[MLX4_MAX_PORTS + 1]; u32 port_cnt; bool device_up; struct mlx4_en_profile profile; @@ -410,6 +411,7 @@ struct mlx4_en_dev { unsigned long overflow_period; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_clock_info; + struct notifier_block nb; }; @@ -845,6 +847,9 @@ int mlx4_en_reset_config(struct net_device *dev, struct hwtstamp_config ts_config, netdev_features_t new_features); +int mlx4_en_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr); + /* * Functions for time stamping */ diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d21e884a0838..78f51e103880 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -598,14 +598,11 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, if (err) return err; - mpt_entry->start = cpu_to_be64(mr->iova); - mpt_entry->length = cpu_to_be64(mr->size); - mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); - - mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | - MLX4_MPT_PD_FLAG_EN_INV); - mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | - MLX4_MPT_FLAG_SW_OWNS); + mpt_entry->start = cpu_to_be64(iova); + mpt_entry->length = cpu_to_be64(size); + mpt_entry->entity_size = cpu_to_be32(page_shift); + mpt_entry->flags &= ~(cpu_to_be32(MLX4_MPT_FLAG_FREE | + MLX4_MPT_FLAG_SW_OWNS)); if (mr->mtt.order < 0) { mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL); mpt_entry->mtt_addr = 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 1586ecce13c7..2bb8553bd905 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -882,6 +882,8 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { context->flags &= cpu_to_be32(~(0xf << 28)); context->flags |= cpu_to_be32(states[i + 1] << 28); + if (states[i + 1] != MLX4_QP_STATE_RTR) + context->params2 &= ~MLX4_QP_BIT_FPP; err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], context, 0, 0, qp); if (err) { diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 79feeb6b0d87..486e3d26cd4a 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -2541,7 +2541,7 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave, /* Make sure that the PD bits related to the slave id are zeros. 
*/ pd = mr_get_pd(inbox->buf); pd_slave = (pd >> 17) & 0x7f; - if (pd_slave != 0 && pd_slave != slave) { + if (pd_slave != 0 && --pd_slave != slave) { err = -EPERM; goto ex_abort; } @@ -2944,6 +2944,9 @@ static int verify_qp_parameters(struct mlx4_dev *dev, qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff; optpar = be32_to_cpu(*(__be32 *) inbox->buf); + if (slave != mlx4_master_func_num(dev)) + qp_ctx->params2 &= ~MLX4_QP_BIT_FPP; + switch (qp_type) { case MLX4_QP_ST_RC: case MLX4_QP_ST_XRC: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 3f4525619a07..d6651937d899 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -903,12 +903,12 @@ static void remove_one(struct pci_dev *pdev) } static const struct pci_device_id mlx5_core_pci_table[] = { - { PCI_VDEVICE(MELLANOX, 4113) }, /* Connect-IB */ - { PCI_VDEVICE(MELLANOX, 4114) }, /* Connect-IB VF */ - { PCI_VDEVICE(MELLANOX, 4115) }, /* ConnectX-4 */ - { PCI_VDEVICE(MELLANOX, 4116) }, /* ConnectX-4 VF */ - { PCI_VDEVICE(MELLANOX, 4117) }, /* ConnectX-4LX */ - { PCI_VDEVICE(MELLANOX, 4118) }, /* ConnectX-4LX VF */ + { PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */ + { PCI_VDEVICE(MELLANOX, 0x1012) }, /* Connect-IB VF */ + { PCI_VDEVICE(MELLANOX, 0x1013) }, /* ConnectX-4 */ + { PCI_VDEVICE(MELLANOX, 0x1014) }, /* ConnectX-4 VF */ + { PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */ + { PCI_VDEVICE(MELLANOX, 0x1016) }, /* ConnectX-4LX VF */ { 0, } }; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 71af98bb72cb..1412f5af05ec 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -4226,8 +4226,7 @@ static void myri10ge_remove(struct pci_dev *pdev) mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span); #endif myri10ge_free_slices(mgp); - if (mgp->msix_vectors != NULL) - kfree(mgp->msix_vectors); + kfree(mgp->msix_vectors); dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd), mgp->cmd, mgp->cmd_bus); diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index afb8efb25781..e0c31e3947d1 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -176,9 +176,7 @@ netxen_alloc_sds_rings(struct netxen_recv_context *recv_ctx, int count) static void netxen_free_sds_rings(struct netxen_recv_context *recv_ctx) { - if (recv_ctx->sds_rings != NULL) - kfree(recv_ctx->sds_rings); - + kfree(recv_ctx->sds_rings); recv_ctx->sds_rings = NULL; } diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c index 763ada18ad3d..f59509486113 100644 --- a/drivers/net/ethernet/ti/cpsw-common.c +++ b/drivers/net/ethernet/ti/cpsw-common.c @@ -17,6 +17,8 @@ #include <linux/regmap.h> #include <linux/mfd/syscon.h> +#include "cpsw.h" + #define AM33XX_CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id)) #define AM33XX_CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4) diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 345cd2563772..84f5ce525750 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -2011,12 +2011,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, quit: if (gbe_dev->hw_stats) devm_kfree(dev, gbe_dev->hw_stats); - 
if (gbe_dev->ale) - cpsw_ale_destroy(gbe_dev->ale); + cpsw_ale_destroy(gbe_dev->ale); if (gbe_dev->ss_regs) devm_iounmap(dev, gbe_dev->ss_regs); - if (interfaces) - of_node_put(interfaces); + of_node_put(interfaces); devm_kfree(dev, gbe_dev); return ret; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index f2ff0074aac9..691ec936e88d 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -2540,7 +2540,7 @@ static void tlan_phy_power_down(struct net_device *dev) * This is abitrary. It is intended to make sure the * transceiver settles. */ - tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_PUP); + tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP); } @@ -2561,7 +2561,7 @@ static void tlan_phy_power_up(struct net_device *dev) * transceiver. The TLAN docs say both 50 ms and * 500 ms, so do the longer, just in case. */ - tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_RESET); + tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET); } @@ -2593,7 +2593,7 @@ static void tlan_phy_reset(struct net_device *dev) * I don't remember why I wait this long. * I've changed this to 50ms, as it seems long enough. */ - tlan_set_timer(dev, (HZ/20), TLAN_TIMER_PHY_START_LINK); + tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK); } @@ -2658,7 +2658,7 @@ static void tlan_phy_start_link(struct net_device *dev) data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data); - tlan_set_timer(dev, (40*HZ/1000), TLAN_TIMER_PHY_PDOWN); + tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN); return; } else if (priv->phy_num == 0) { control = 0; @@ -2725,7 +2725,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev) (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) && (priv->phy_num != 0)) { priv->phy_num = 0; - tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN); + tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN); return; } @@ -2744,7 +2744,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev) /* Wait for 100 ms. No reason in partiticular. 
*/ - tlan_set_timer(dev, (HZ/10), TLAN_TIMER_FINISH_RESET); + tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET); } @@ -2796,7 +2796,7 @@ static void tlan_phy_monitor(unsigned long data) /* set to external PHY */ priv->phy_num = 1; /* restart autonegotiation */ - tlan_set_timer(dev, 4 * HZ / 10, + tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN); return; } diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c index a43c8acb7268..181b349b060e 100644 --- a/drivers/net/ieee802154/cc2520.c +++ b/drivers/net/ieee802154/cc2520.c @@ -44,9 +44,9 @@ #define CC2520_FREG_MASK 0x3F /* status byte values */ -#define CC2520_STATUS_XOSC32M_STABLE (1 << 7) -#define CC2520_STATUS_RSSI_VALID (1 << 6) -#define CC2520_STATUS_TX_UNDERFLOW (1 << 3) +#define CC2520_STATUS_XOSC32M_STABLE BIT(7) +#define CC2520_STATUS_RSSI_VALID BIT(6) +#define CC2520_STATUS_TX_UNDERFLOW BIT(3) /* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */ #define CC2520_MINCHANNEL 11 @@ -549,14 +549,14 @@ cc2520_ed(struct ieee802154_hw *hw, u8 *level) u8 rssi; int ret; - ret = cc2520_read_register(priv , CC2520_RSSISTAT, &status); + ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status); if (ret) return ret; if (status != RSSI_VALID) return -EINVAL; - ret = cc2520_read_register(priv , CC2520_RSSI, &rssi); + ret = cc2520_read_register(priv, CC2520_RSSI, &rssi); if (ret) return ret; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 31bac2a21ce3..c184717e8b28 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -558,7 +558,6 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, u32 data) { size_t start, offset, plen; - __wsum delta; if (skb->remcsum_offload) return vh; @@ -580,12 +579,7 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, return NULL; } - delta = remcsum_adjust((void *)vh + hdrlen, - NAPI_GRO_CB(skb)->csum, start, offset); - - /* Adjust skb->csum since we changed the packet */ - skb->csum = csum_add(skb->csum, delta); - NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); + skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset); skb->remcsum_offload = 1; @@ -1159,7 +1153,6 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, size_t hdrlen, u32 data) { size_t start, offset, plen; - __wsum delta; if (skb->remcsum_offload) { /* Already processed in GRO path */ @@ -1179,14 +1172,7 @@ static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, vh = (struct vxlanhdr *)(udp_hdr(skb) + 1); - if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) - __skb_checksum_complete(skb); - - delta = remcsum_adjust((void *)vh + hdrlen, - skb->csum, start, offset); - - /* Adjust skb->csum since we changed the packet */ - skb->csum = csum_add(skb->csum, delta); + skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset); return vh; } diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 08f293411bf0..60a524b73207 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -76,6 +76,9 @@ static int ath10k_send_key(struct ath10k_vif *arvif, if (memcmp(macaddr, arvif->vif->addr, ETH_ALEN)) arg.key_flags = WMI_KEY_PAIRWISE; break; + case WLAN_CIPHER_SUITE_AES_CMAC: + /* this one needs to be done in software */ + return 1; default: ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); return -EOPNOTSUPP; @@ -5035,6 +5038,13 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k 
*ar, u32 vdev_id) int ath10k_mac_register(struct ath10k *ar) { + static const u32 cipher_suites[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_AES_CMAC, + }; struct ieee80211_supported_band *band; struct ieee80211_sta_vht_cap vht_cap; struct ieee80211_sta_ht_cap ht_cap; @@ -5108,7 +5118,8 @@ int ath10k_mac_register(struct ath10k *ar) IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_HAS_RATE_CONTROL | IEEE80211_HW_AP_LINK_PS | - IEEE80211_HW_SPECTRUM_MGMT; + IEEE80211_HW_SPECTRUM_MGMT | + IEEE80211_HW_SW_CRYPTO_CONTROL; ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; @@ -5182,6 +5193,9 @@ int ath10k_mac_register(struct ath10k *ar) goto err_free; } + ar->hw->wiphy->cipher_suites = cipher_suites; + ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); + ret = ieee80211_register_hw(ar->hw); if (ret) { ath10k_err(ar, "failed to register ieee80211: %d\n", ret); diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index c81b06bcf827..2e66f34ebb79 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1372,7 +1372,8 @@ csio_config_device_caps(struct csio_hw *hw) } /* Validate device capabilities */ - if (csio_hw_validate_caps(hw, mbp)) + rv = csio_hw_validate_caps(hw, mbp); + if (rv != 0) goto out; /* Don't config device capabilities if already configured */ @@ -1776,7 +1777,8 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) } /* Validate device capabilities */ - if (csio_hw_validate_caps(hw, mbp)) + rv = csio_hw_validate_caps(hw, mbp); + if (rv != 0) goto bye; /* * Note that we're operating with parameters diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h index a5f624f755df..b56a11d817be 100644 --- a/drivers/scsi/csiostor/csio_hw_chip.h +++ b/drivers/scsi/csiostor/csio_hw_chip.h @@ -45,11 +45,6 @@ #define FW_FNAME_T5 "cxgb4/t5fw.bin" #define FW_CFG_NAME_T5 "cxgb4/t5-config.txt" -#define T5FW_VERSION_MAJOR 0x01 -#define T5FW_VERSION_MINOR 0x0B -#define T5FW_VERSION_MICRO 0x1B -#define T5FW_VERSION_BUILD 0x00 - #define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision)) #define CHELSIO_CHIP_FPGA 0x100 #define CHELSIO_CHIP_VERSION(code) (((code) >> 12) & 0xf) @@ -74,6 +69,7 @@ static inline int csio_is_t5(uint16_t chip) { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) } #include "t4fw_api.h" +#include "t4fw_version.h" #define FW_VERSION(chip) ( \ FW_HDR_FW_VER_MAJOR_G(chip##FW_VERSION_MAJOR) | \ diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 1132c41d99ce..9451787ca7f2 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -327,7 +327,8 @@ csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, } #define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\ - FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG) + FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G |\ + FW_PORT_CAP_ANEG) /* * csio_mb_port- FW PORT command helper diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 6906f76332f4..e022cc40303d 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -84,10 +84,6 @@ struct vhost_net_ubuf_ref { struct vhost_net_virtqueue { struct vhost_virtqueue vq; - /* hdr is used to store the virtio header. - * Since each iovec has >= 1 byte length, we never need more than - * header length entries to store the header. 
*/ - struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)]; size_t vhost_hlen; size_t sock_hlen; /* vhost zerocopy support fields below: */ @@ -235,44 +231,6 @@ static bool vhost_sock_zcopy(struct socket *sock) sock_flag(sock->sk, SOCK_ZEROCOPY); } -/* Pop first len bytes from iovec. Return number of segments used. */ -static int move_iovec_hdr(struct iovec *from, struct iovec *to, - size_t len, int iov_count) -{ - int seg = 0; - size_t size; - - while (len && seg < iov_count) { - size = min(from->iov_len, len); - to->iov_base = from->iov_base; - to->iov_len = size; - from->iov_len -= size; - from->iov_base += size; - len -= size; - ++from; - ++to; - ++seg; - } - return seg; -} -/* Copy iovec entries for len bytes from iovec. */ -static void copy_iovec_hdr(const struct iovec *from, struct iovec *to, - size_t len, int iovcount) -{ - int seg = 0; - size_t size; - - while (len && seg < iovcount) { - size = min(from->iov_len, len); - to->iov_base = from->iov_base; - to->iov_len = size; - len -= size; - ++from; - ++to; - ++seg; - } -} - /* In case of DMA done not in order in lower device driver for some reason. * upend_idx is used to track end of used idx, done_idx is used to track head * of used idx. Once lower device DMA done contiguously, we will signal KVM @@ -336,7 +294,7 @@ static void handle_tx(struct vhost_net *net) { struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX]; struct vhost_virtqueue *vq = &nvq->vq; - unsigned out, in, s; + unsigned out, in; int head; struct msghdr msg = { .msg_name = NULL, @@ -395,16 +353,17 @@ static void handle_tx(struct vhost_net *net) break; } /* Skip header. TODO: support TSO. */ - s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out); len = iov_length(vq->iov, out); iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len); + iov_iter_advance(&msg.msg_iter, hdr_size); /* Sanity check */ - if (!len) { + if (!iov_iter_count(&msg.msg_iter)) { vq_err(vq, "Unexpected header len for TX: " "%zd expected %zd\n", - iov_length(nvq->hdr, s), hdr_size); + len, hdr_size); break; } + len = iov_iter_count(&msg.msg_iter); zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN && (nvq->upend_idx + 1) % UIO_MAXIOV != @@ -569,9 +528,9 @@ static void handle_rx(struct vhost_net *net) .msg_controllen = 0, .msg_flags = MSG_DONTWAIT, }; - struct virtio_net_hdr_mrg_rxbuf hdr = { - .hdr.flags = 0, - .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE + struct virtio_net_hdr hdr = { + .flags = 0, + .gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; int err, mergeable; @@ -579,6 +538,7 @@ static void handle_rx(struct vhost_net *net) size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; struct socket *sock; + struct iov_iter fixup; mutex_lock(&vq->mutex); sock = vq->private_data; @@ -623,14 +583,19 @@ static void handle_rx(struct vhost_net *net) break; } /* We don't need to be notified again. */ - if (unlikely((vhost_hlen))) - /* Skip header. TODO: support TSO. */ - move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in); - else - /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF: - * needed because recvmsg can modify msg_iov. */ - copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in); - iov_iter_init(&msg.msg_iter, READ, vq->iov, in, sock_len); + iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len); + fixup = msg.msg_iter; + if (unlikely((vhost_hlen))) { + /* We will supply the header ourselves + * TODO: support TSO. 
+ */ + iov_iter_advance(&msg.msg_iter, vhost_hlen); + } else { + /* It'll come from socket; we'll need to patch + * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF + */ + iov_iter_advance(&fixup, sizeof(hdr)); + } err = sock->ops->recvmsg(NULL, sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); /* Userspace might have consumed the packet meanwhile: @@ -642,18 +607,18 @@ vhost_discard_vq_desc(vq, headcount); continue; } + /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ if (unlikely(vhost_hlen) && - memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0, - vhost_hlen)) { + copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { vq_err(vq, "Unable to write vnet_hdr at addr %p\n", vq->iov->iov_base); break; } - /* TODO: Should check and handle checksum. */ + /* Supply (or replace) ->num_buffers if VIRTIO_NET_F_MRG_RXBUF + * TODO: Should check and handle checksum. + */ if (likely(mergeable) && - memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount, - offsetof(typeof(hdr), num_buffers), - sizeof hdr.num_buffers)) { + copy_to_iter(&headcount, 2, &fixup) != 2) { vq_err(vq, "Failed num_buffers write"); vhost_discard_vq_desc(vq, headcount); break; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index d695b1673ae5..dc78d87e0fc2 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1079,7 +1079,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) req_size, vq->iov[0].iov_len); break; } - ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); + ret = copy_from_user(req, vq->iov[0].iov_base, req_size); if (unlikely(ret)) { vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); break; } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index cb807d0ea498..2ee28266fd07 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1125,6 +1125,7 @@ static int get_indirect(struct vhost_virtqueue *vq, struct vring_desc desc; unsigned int i = 0, count, found = 0; u32 len = vhost32_to_cpu(vq, indirect->len); + struct iov_iter from; int ret; /* Sanity check */ @@ -1142,6 +1143,7 @@ static int get_indirect(struct vhost_virtqueue *vq, vq_err(vq, "Translation failure %d in indirect.\n", ret); return ret; } + iov_iter_init(&from, READ, vq->indirect, ret, len); /* We will use the result as an address to read from, so most * architectures only need a compiler barrier here. */ @@ -1164,8 +1166,8 @@ i, count); return -EINVAL; } - if (unlikely(memcpy_fromiovec((unsigned char *)&desc, - vq->indirect, sizeof desc))) { + if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) != + sizeof(desc))) { vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n", i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc); return -EINVAL;
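The cxgb4 busy-poll support added above hinges on a small ownership protocol: napi_rx_handler() returns the whole budget when it cannot take the queue lock, and cxgb_busy_poll() returns LL_FLUSH_BUSY so the caller backs off. A minimal userspace sketch of that mutual exclusion, assuming a single C11 atomic state word; the real driver keeps bpoll_state under a spinlock with extra yield/pending bits, so the names and encoding here are illustrative only.

    /* cc -std=c11 -o bpoll bpoll.c */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define POLL_IDLE     0u
    #define POLL_NAPI_RUN 1u   /* NAPI softirq owns the queue */
    #define POLL_USER_RUN 2u   /* busy-polling syscall owns the queue */

    struct rspq { _Atomic unsigned int state; };

    static bool poll_lock_napi(struct rspq *q)
    {
        unsigned int idle = POLL_IDLE;
        /* NAPI may only process the queue when nobody else owns it */
        return atomic_compare_exchange_strong(&q->state, &idle, POLL_NAPI_RUN);
    }

    static bool poll_lock_poll(struct rspq *q)
    {
        unsigned int idle = POLL_IDLE;
        /* busy-poll gives up immediately (LL_FLUSH_BUSY) if NAPI runs */
        return atomic_compare_exchange_strong(&q->state, &idle, POLL_USER_RUN);
    }

    static void poll_unlock(struct rspq *q)
    {
        atomic_store(&q->state, POLL_IDLE);
    }

    int main(void)
    {
        struct rspq q = { POLL_IDLE };

        printf("napi: %d\n", poll_lock_napi(&q)); /* 1 - got the queue */
        printf("user: %d\n", poll_lock_poll(&q)); /* 0 - would be LL_FLUSH_BUSY */
        poll_unlock(&q);
        printf("user: %d\n", poll_lock_poll(&q)); /* 1 - got the queue */
        return 0;
    }

This also shows why quiesce_rx() above must spin until cxgb_poll_lock_napi() succeeds after napi_disable(): a busy-poll caller may still own the queue at that point.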
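The fs_enet scatter-gather conversion above changes the transmit bookkeeping: a frame now consumes nr_frags + 1 buffer descriptors, start-xmit backs off when that many are not free, and the queue is stopped and woken around a MAX_SKB_FRAGS watermark. A compilable sketch of just that budget rule; the ring size and fragment limit are illustrative stand-ins.

    #include <stdbool.h>
    #include <stdio.h>

    #define TX_RING   64
    #define MAX_FRAGS 17            /* stand-in for MAX_SKB_FRAGS */

    struct txq { int tx_free; bool stopped; };

    static bool txq_xmit(struct txq *q, int nr_frags)
    {
        if (q->tx_free <= nr_frags) {   /* not enough BDs for this frame */
            q->stopped = true;          /* netif_stop_queue() equivalent */
            return false;
        }
        q->tx_free -= nr_frags + 1;     /* linear BD plus one per fragment */
        if (q->tx_free < MAX_FRAGS)     /* a worst-case frame may not fit */
            q->stopped = true;
        return true;
    }

    static void txq_clean_one(struct txq *q)
    {
        if (++q->tx_free >= MAX_FRAGS)  /* room for a worst-case frame */
            q->stopped = false;         /* netif_wake_queue() equivalent */
    }

    int main(void)
    {
        struct txq q = { TX_RING, false };
        int sent = 0, cleaned = 0;

        while (txq_xmit(&q, 16))        /* 16-fragment frames */
            sent++;
        while (q.stopped) {
            txq_clean_one(&q);
            cleaned++;
        }
        printf("sent %d frames, woke queue after reclaiming %d BDs\n",
               sent, cleaned);
        return 0;
    }

The same watermark on both sides is what makes the doubled tx_ring (32 to 64) worthwhile: with per-fragment descriptors, a 32-entry ring would stall after a single full-sized SG frame.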
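The mlx4_en_netdev_event() handler added above reduces to a small decision table once its preconditions hold (one bonding master, exactly two slave ports on the same HCA, mode 1, 2 or 4): active-backup maps both virtual ports to the active slave's physical port, while XOR/802.3ad keeps the native mapping and only remaps a virtual port while its physical link is unusable. A standalone sketch of that table; the enums stand in for the BOND_MODE_*/BOND_LINK_* constants and ports are 1-based as in the driver.

    #include <stdbool.h>
    #include <stdio.h>

    enum mode { ACTIVE_BACKUP, ACTIVE_ACTIVE /* XOR or 802.3ad */ };

    struct v2p { int port1, port2; };

    /* port: physical port of the slave the event is about
     * active: that slave is the active one (active-backup only)
     * link_up: that slave's link is usable (active-active only) */
    static struct v2p calc_v2p(enum mode mode, int port, bool active,
                               bool link_up)
    {
        struct v2p m;

        if (mode == ACTIVE_BACKUP) {
            /* both virtual ports follow the active slave */
            int phys = active ? port : (port == 1 ? 2 : 1);
            m.port1 = m.port2 = phys;
        } else {
            /* native mapping while the link is up, else fail over */
            m.port1 = (port == 1) ? (link_up ? 1 : 2) : 1;
            m.port2 = (port == 2) ? (link_up ? 2 : 1) : 2;
        }
        return m;
    }

    int main(void)
    {
        struct v2p m = calc_v2p(ACTIVE_BACKUP, 1, false, true);
        printf("backup slave on port 1 -> map [%d][%d]\n", m.port1, m.port2);
        m = calc_v2p(ACTIVE_ACTIVE, 2, true, false);
        printf("port 2 link down       -> map [%d][%d]\n", m.port1, m.port2);
        return 0;
    }

The result feeds mlx4_port_map_set(), which rejects the one nonsensical combination ([2][1], a pure cross mapping) and caches the accepted map in priv->v2p.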
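The one-line en_rx.c change above matters because the last RX fragment is truncated to whatever is left of the effective MTU; deriving the stride from the nominal frag_sizes[] entry instead of the truncated frag_size over-reserves ring memory. A toy computation with illustrative sizes:

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define SMP_CACHE 64

    int main(void)
    {
        int nominal = 4096;                  /* frag_sizes[i] table entry */
        int eff_mtu = 1600, buf_size = 0;
        int frag_size = eff_mtu - buf_size;  /* truncated to remaining MTU */

        if (frag_size > nominal)
            frag_size = nominal;

        printf("stride from nominal size: %d\n", ALIGN_UP(nominal, SMP_CACHE));
        printf("stride from actual size:  %d\n", ALIGN_UP(frag_size, SMP_CACHE));
        return 0;
    }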
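The tlan conversions above are not purely cosmetic. Open-coded HZ fractions truncate (HZ/20 is 12 jiffies, i.e. 48 ms, at HZ=250) and hide intent: tlan_phy_power_up()'s comment asks for the longer 500 ms settle time, yet the old code armed a 50 ms timer. A userspace stand-in for the round-up conversion msecs_to_jiffies() performs, with a fixed HZ assumed for the demo:

    #include <stdio.h>

    #define HZ 250   /* illustrative; common kernel values are 100/250/300/1000 */

    static unsigned long msecs_to_jiffies_demo(unsigned int ms)
    {
        /* round up so the timer never fires earlier than requested */
        return ((unsigned long)ms * HZ + 999) / 1000;
    }

    int main(void)
    {
        printf("HZ/20 truncates: %d jiffies = %d ms\n",
               HZ / 20, (HZ / 20) * 1000 / HZ);
        printf("msecs_to_jiffies(50)  = %lu jiffies\n", msecs_to_jiffies_demo(50));
        printf("msecs_to_jiffies(500) = %lu jiffies\n", msecs_to_jiffies_demo(500));
        return 0;
    }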
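The vhost-net rewrite above drops the hand-rolled move_iovec_hdr()/copy_iovec_hdr() helpers in favour of iov_iter primitives: the RX path snapshots the iterator as `fixup` before advancing the live one past the header, so the virtio-net header and the mergeable-buffer count can be patched in after recvmsg() without ever mutating the iovec array. A userspace sketch of that cursor idea; iter_advance()/iter_copy_to() are simplified stand-ins for iov_iter_advance()/copy_to_iter(), not the kernel API.

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    struct iter { const struct iovec *iov; int nseg; int seg; size_t off; };

    static void iter_advance(struct iter *it, size_t n)
    {
        while (n && it->seg < it->nseg) {
            size_t left = it->iov[it->seg].iov_len - it->off;
            if (n < left) {
                it->off += n;
                return;
            }
            n -= left;
            it->seg++;
            it->off = 0;
        }
    }

    static size_t iter_copy_to(struct iter *it, const void *src, size_t n)
    {
        const char *p = src;
        size_t done = 0;

        while (done < n && it->seg < it->nseg) {
            size_t left = it->iov[it->seg].iov_len - it->off;
            size_t c = left < n - done ? left : n - done;

            memcpy((char *)it->iov[it->seg].iov_base + it->off, p + done, c);
            done += c;
            iter_advance(it, c);
        }
        return done;
    }

    int main(void)
    {
        char hdr_area[10], payload[6];
        struct iovec iov[2] = { { hdr_area, sizeof(hdr_area) },
                                { payload, sizeof(payload) } };
        struct iter it = { iov, 2, 0, 0 };
        struct iter fixup = it;                /* snapshot before advancing */

        iter_advance(&it, sizeof(hdr_area));   /* skip header, like handle_rx() */
        iter_copy_to(&it, "hello", 6);         /* "recvmsg" fills the body */
        iter_copy_to(&fixup, "VNETHDR##", 10); /* patch header afterwards */
        printf("%s / %s\n", hdr_area, payload);
        return 0;
    }

Because the snapshot is a plain struct copy, the fixup path costs nothing when unused, which is why the per-queue hdr[] scratch array could be deleted.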
