Diffstat (limited to 'drivers')
405 files changed, 7584 insertions, 2507 deletions
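Several of the TPM receive-path hunks below (st33zp24.c, tpm_i2c_infineon.c, tpm_i2c_nuvoton.c, tpm_tis.c) apply the same hardening: the 32-bit big-endian length field read from the response header is now range-checked against both the caller's buffer size and the minimum header size before it is trusted. What follows is a minimal, self-contained sketch of that check, not the kernel code itself; TPM_HEADER_SIZE mirrors the kernel's 10-byte response header constant, ntohl() stands in for be32_to_cpu(), and the function name is illustrative only.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() stands in for be32_to_cpu() */

#define TPM_HEADER_SIZE 10	/* assumed: matches the kernel's TPM response header size */

/* Return the validated response length, or -EIO if the header is implausible. */
int tpm_response_len_ok(const uint8_t *buf, size_t count)
{
	uint32_t expected;

	if (count < TPM_HEADER_SIZE)
		return -EIO;

	/* Bytes 2..5 of the response header carry the big-endian total length. */
	memcpy(&expected, buf + 2, sizeof(expected));
	expected = ntohl(expected);

	/* Reject lengths larger than the buffer or shorter than a bare header. */
	if (expected > count || expected < TPM_HEADER_SIZE)
		return -EIO;

	return (int)expected;
}

In the hunks themselves the local length variable also changes from int to u32, so the comparison is performed on the unsigned value the wire format actually encodes rather than on a possibly negative signed integer.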
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c index 10ce48e16ebf..d830705f8a18 100644 --- a/drivers/acpi/acpica/evxfevnt.c +++ b/drivers/acpi/acpica/evxfevnt.c @@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags) ACPI_FUNCTION_TRACE(acpi_enable_event); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { @@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags) ACPI_FUNCTION_TRACE(acpi_disable_event); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { @@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event) ACPI_FUNCTION_TRACE(acpi_clear_event); + /* If Hardware Reduced flag is set, there are no fixed events */ + + if (acpi_gbl_reduced_hardware) { + return_ACPI_STATUS(AE_OK); + } + /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c index e54bc2aa7a88..a05b3b79b987 100644 --- a/drivers/acpi/acpica/psobject.c +++ b/drivers/acpi/acpica/psobject.c @@ -121,6 +121,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) (u32)(aml_offset + sizeof(struct acpi_table_header))); + ACPI_ERROR((AE_INFO, + "Aborting disassembly, AML byte code is corrupt")); + /* Dump the context surrounding the invalid opcode */ acpi_ut_dump_buffer(((u8 *)walk_state->parser_state. @@ -129,6 +132,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state) sizeof(struct acpi_table_header) - 16)); acpi_os_printf(" */\n"); + + /* + * Just abort the disassembly, cannot continue because the + * parser is essentially lost. The disassembler can then + * randomly fail because an ill-constructed parse tree + * can result. 
+ */ + return_ACPI_STATUS(AE_AML_BAD_OPCODE); #endif } @@ -293,6 +304,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state, if (status == AE_CTRL_PARSE_CONTINUE) { return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE); } + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } /* Create Op structure and append to parent's argument list */ diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c index d176e0ece470..2946e2846573 100644 --- a/drivers/acpi/numa.c +++ b/drivers/acpi/numa.c @@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm) */ int acpi_map_pxm_to_online_node(int pxm) { - int node, n, dist, min_dist; + int node, min_node; node = acpi_map_pxm_to_node(pxm); if (node == NUMA_NO_NODE) node = 0; + min_node = node; if (!node_online(node)) { - min_dist = INT_MAX; + int min_dist = INT_MAX, dist, n; + for_each_online_node(n) { dist = node_distance(node, n); if (dist < min_dist) { min_dist = dist; - node = n; + min_node = n; } } } - return node; + return min_node; } EXPORT_SYMBOL(acpi_map_pxm_to_online_node); diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c index 8a10a7ae6a8a..c8e169e46673 100644 --- a/drivers/acpi/pci_irq.c +++ b/drivers/acpi/pci_irq.c @@ -131,9 +131,6 @@ static void do_prt_fixups(struct acpi_prt_entry *entry, quirk = &prt_quirks[i]; /* All current quirks involve link devices, not GSIs */ - if (!prt->source) - continue; - if (dmi_check_system(quirk->system) && entry->id.segment == quirk->segment && entry->id.bus == quirk->bus && diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c index 6a082d4de12c..24a793957bc0 100644 --- a/drivers/acpi/pmic/intel_pmic_xpower.c +++ b/drivers/acpi/pmic/intel_pmic_xpower.c @@ -28,97 +28,97 @@ static struct pmic_table power_table[] = { .address = 0x00, .reg = 0x13, .bit = 0x05, - }, + }, /* ALD1 */ { .address = 0x04, .reg = 0x13, .bit = 0x06, - }, + }, /* ALD2 */ { .address = 0x08, .reg = 0x13, .bit = 0x07, - }, + }, /* ALD3 */ { .address = 0x0c, .reg = 0x12, .bit = 0x03, - }, + }, /* DLD1 */ { .address = 0x10, .reg = 0x12, .bit = 0x04, - }, + }, /* DLD2 */ { .address = 0x14, .reg = 0x12, .bit = 0x05, - }, + }, /* DLD3 */ { .address = 0x18, .reg = 0x12, .bit = 0x06, - }, + }, /* DLD4 */ { .address = 0x1c, .reg = 0x12, .bit = 0x00, - }, + }, /* ELD1 */ { .address = 0x20, .reg = 0x12, .bit = 0x01, - }, + }, /* ELD2 */ { .address = 0x24, .reg = 0x12, .bit = 0x02, - }, + }, /* ELD3 */ { .address = 0x28, .reg = 0x13, .bit = 0x02, - }, + }, /* FLD1 */ { .address = 0x2c, .reg = 0x13, .bit = 0x03, - }, + }, /* FLD2 */ { .address = 0x30, .reg = 0x13, .bit = 0x04, - }, + }, /* FLD3 */ { - .address = 0x38, + .address = 0x34, .reg = 0x10, .bit = 0x03, - }, + }, /* BUC1 */ { - .address = 0x3c, + .address = 0x38, .reg = 0x10, .bit = 0x06, - }, + }, /* BUC2 */ { - .address = 0x40, + .address = 0x3c, .reg = 0x10, .bit = 0x05, - }, + }, /* BUC3 */ { - .address = 0x44, + .address = 0x40, .reg = 0x10, .bit = 0x04, - }, + }, /* BUC4 */ { - .address = 0x48, + .address = 0x44, .reg = 0x10, .bit = 0x01, - }, + }, /* BUC5 */ { - .address = 0x4c, + .address = 0x48, .reg = 0x10, .bit = 0x00 - }, + }, /* BUC6 */ }; /* TMP0 - TMP5 are the same, all from GPADC */ diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 11154a330f07..c9bf74982688 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -259,6 +259,9 @@ static int __acpi_processor_start(struct acpi_device *device) if (ACPI_SUCCESS(status)) return 0; + result = -ENODEV; + acpi_pss_perf_exit(pr, 
device); + err_power_exit: acpi_processor_power_exit(pr); return result; @@ -267,11 +270,16 @@ err_power_exit: static int acpi_processor_start(struct device *dev) { struct acpi_device *device = ACPI_COMPANION(dev); + int ret; if (!device) return -ENODEV; - return __acpi_processor_start(device); + /* Protect against concurrent CPU hotplug operations */ + get_online_cpus(); + ret = __acpi_processor_start(device); + put_online_cpus(); + return ret; } static int acpi_processor_stop(struct device *dev) diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index c72e64893d03..93d72413d844 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg { #define THROTTLING_POSTCHANGE (2) static int acpi_processor_get_throttling(struct acpi_processor *pr); -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force); +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct); static int acpi_processor_update_tsd_coord(void) { @@ -891,7 +891,8 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Invalid throttling state, reset\n")); state = 0; - ret = acpi_processor_set_throttling(pr, state, true); + ret = __acpi_processor_set_throttling(pr, state, true, + true); if (ret) return ret; } @@ -901,36 +902,31 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr) return 0; } -static int acpi_processor_get_throttling(struct acpi_processor *pr) +static long __acpi_processor_get_throttling(void *data) { - cpumask_var_t saved_mask; - int ret; + struct acpi_processor *pr = data; + + return pr->throttling.acpi_processor_get_throttling(pr); +} +static int acpi_processor_get_throttling(struct acpi_processor *pr) +{ if (!pr) return -EINVAL; if (!pr->flags.throttling) return -ENODEV; - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL)) - return -ENOMEM; - /* - * Migrate task to the cpu pointed by pr. + * This is either called from the CPU hotplug callback of + * processor_driver or via the ACPI probe function. In the latter + * case the CPU is not guaranteed to be online. Both call sites are + * protected against CPU hotplug. */ - cpumask_copy(saved_mask, ¤t->cpus_allowed); - /* FIXME: use work_on_cpu() */ - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { - /* Can't migrate to the target pr->id CPU. 
Exit */ - free_cpumask_var(saved_mask); + if (!cpu_online(pr->id)) return -ENODEV; - } - ret = pr->throttling.acpi_processor_get_throttling(pr); - /* restore the previous state */ - set_cpus_allowed_ptr(current, saved_mask); - free_cpumask_var(saved_mask); - return ret; + return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr); } static int acpi_processor_get_fadt_info(struct acpi_processor *pr) @@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn(void *data) arg->target_state, arg->force); } -int acpi_processor_set_throttling(struct acpi_processor *pr, - int state, bool force) +static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) +{ + if (direct) + return fn(arg); + return work_on_cpu(cpu, fn, arg); +} + +static int __acpi_processor_set_throttling(struct acpi_processor *pr, + int state, bool force, bool direct) { int ret = 0; unsigned int i; @@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, arg.pr = pr; arg.target_state = state; arg.force = force; - ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg); + ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg, + direct); } else { /* * When the T-state coordination is SW_ALL or HW_ALL, @@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, arg.pr = match_pr; arg.target_state = state; arg.force = force; - ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, - &arg); + ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, + &arg, direct); } } /* @@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr, return ret; } +int acpi_processor_set_throttling(struct acpi_processor *pr, int state, + bool force) +{ + return __acpi_processor_set_throttling(pr, state, force, false); +} + int acpi_processor_get_throttling_info(struct acpi_processor *pr) { int result = 0; diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c index f0099360039e..42086ad535c5 100644 --- a/drivers/amba/bus.c +++ b/drivers/amba/bus.c @@ -82,7 +82,8 @@ static ssize_t driver_override_store(struct device *_dev, struct amba_device *dev = to_amba_device(_dev); char *driver_override, *old = dev->driver_override, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 9f62841a8f10..db6a51427b04 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2148,8 +2148,14 @@ static void binder_send_failed_reply(struct binder_transaction *t, &target_thread->reply_error.work); wake_up_interruptible(&target_thread->wait); } else { - WARN(1, "Unexpected reply error: %u\n", - target_thread->reply_error.cmd); + /* + * Cannot get here for normal operation, but + * we can if multiple synchronous transactions + * are sent without blocking for responses. + * Just ignore the 2nd error in this case. 
+ */ + pr_warn("Unexpected reply error: %u\n", + target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 8ddf5d5c94fd..5a6a01135470 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -538,7 +538,9 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), .driver_data = board_ahci_yes_fbs }, - { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */ + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */ .driver_data = board_ahci_yes_fbs }, /* Promise */ diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index aaa761b9081c..cd2eab6aa92e 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c @@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev, irq = platform_get_irq(pdev, 0); if (irq <= 0) { - dev_err(dev, "no irq\n"); - return -EINVAL; + if (irq != -EPROBE_DEFER) + dev_err(dev, "no irq\n"); + return irq; } hpriv->irq = irq; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 69ec1c5d7152..2d677ba46d77 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4224,6 +4224,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, + /* Crucial BX100 SSD 500GB has broken LPM support */ + { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM }, + + /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */ + { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* 512GB MX100 with newer firmware has only LPM issues */ + { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + + /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */ + { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM | + ATA_HORKAGE_NOLPM, }, + /* devices that don't properly handle queued TRIM commands */ { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -4235,7 +4254,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, - { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | ATA_HORKAGE_ZERO_AFTER_TRIM, }, @@ -5077,8 +5098,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc) * We guarantee to LLDs that they will have at least one * non-zero sg if the command is a data command. 
*/ - if (WARN_ON_ONCE(ata_is_data(prot) && - (!qc->sg || !qc->n_elem || !qc->nbytes))) + if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)) goto sys_err; if (ata_is_dma(prot) || (ata_is_pio(prot) && diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 5b2aee83d776..4a267347a6d9 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3472,7 +3472,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { /* relay SCSI command to ATAPI device */ int len = COMMAND_SIZE(scsi_op); - if (unlikely(len > scmd->cmd_len || len > dev->cdb_len)) + if (unlikely(len > scmd->cmd_len || + len > dev->cdb_len || + scmd->cmd_len > ATAPI_CDB_LEN)) goto bad_cdb_len; xlat_func = atapi_xlat; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 1c36de9719e5..1dd16f26e77d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -263,7 +263,7 @@ static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos) struct iov_iter i; ssize_t bw; - iov_iter_bvec(&i, ITER_BVEC, bvec, 1, bvec->bv_len); + iov_iter_bvec(&i, ITER_BVEC | WRITE, bvec, 1, bvec->bv_len); file_start_write(file); bw = vfs_iter_write(file, &i, ppos); @@ -623,6 +623,9 @@ static int loop_switch(struct loop_device *lo, struct file *file) */ static int loop_flush(struct loop_device *lo) { + /* loop not yet configured, no running thread, nothing to flush */ + if (lo->lo_state != Lo_bound) + return 0; return loop_switch(lo, NULL); } diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 54cef3dc0beb..7fca7cfd5b09 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -216,7 +216,6 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, - { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, @@ -247,6 +246,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, /* QCA ROME chipset */ + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME }, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 71325e443e46..8a3bf0a8c31d 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -936,6 +936,9 @@ static int qca_setup(struct hci_uart *hu) if (!ret) { set_bit(STATE_IN_BAND_SLEEP_ENABLED, &qca->flags); qca_debugfs_init(hdev); + } else if (ret == -ENOENT) { + /* No patch/nvm-config found, run with original fw/config */ + ret = 0; } /* Setup bdaddr */ diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c index f364fa4d24eb..f59183018280 100644 --- a/drivers/bus/brcmstb_gisb.c +++ b/drivers/bus/brcmstb_gisb.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014 Broadcom Corporation + * Copyright (C) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -33,8 +33,6 @@ #define ARB_ERR_CAP_CLEAR (1 
<< 0) #define ARB_ERR_CAP_STATUS_TIMEOUT (1 << 12) #define ARB_ERR_CAP_STATUS_TEA (1 << 11) -#define ARB_ERR_CAP_STATUS_BS_SHIFT (1 << 2) -#define ARB_ERR_CAP_STATUS_BS_MASK 0x3c #define ARB_ERR_CAP_STATUS_WRITE (1 << 1) #define ARB_ERR_CAP_STATUS_VALID (1 << 0) @@ -43,7 +41,6 @@ enum { ARB_ERR_CAP_CLR, ARB_ERR_CAP_HI_ADDR, ARB_ERR_CAP_ADDR, - ARB_ERR_CAP_DATA, ARB_ERR_CAP_STATUS, ARB_ERR_CAP_MASTER, }; @@ -53,7 +50,6 @@ static const int gisb_offsets_bcm7038[] = { [ARB_ERR_CAP_CLR] = 0x0c4, [ARB_ERR_CAP_HI_ADDR] = -1, [ARB_ERR_CAP_ADDR] = 0x0c8, - [ARB_ERR_CAP_DATA] = 0x0cc, [ARB_ERR_CAP_STATUS] = 0x0d0, [ARB_ERR_CAP_MASTER] = -1, }; @@ -63,7 +59,6 @@ static const int gisb_offsets_bcm7400[] = { [ARB_ERR_CAP_CLR] = 0x0c8, [ARB_ERR_CAP_HI_ADDR] = -1, [ARB_ERR_CAP_ADDR] = 0x0cc, - [ARB_ERR_CAP_DATA] = 0x0d0, [ARB_ERR_CAP_STATUS] = 0x0d4, [ARB_ERR_CAP_MASTER] = 0x0d8, }; @@ -73,7 +68,6 @@ static const int gisb_offsets_bcm7435[] = { [ARB_ERR_CAP_CLR] = 0x168, [ARB_ERR_CAP_HI_ADDR] = -1, [ARB_ERR_CAP_ADDR] = 0x16c, - [ARB_ERR_CAP_DATA] = 0x170, [ARB_ERR_CAP_STATUS] = 0x174, [ARB_ERR_CAP_MASTER] = 0x178, }; @@ -83,7 +77,6 @@ static const int gisb_offsets_bcm7445[] = { [ARB_ERR_CAP_CLR] = 0x7e4, [ARB_ERR_CAP_HI_ADDR] = 0x7e8, [ARB_ERR_CAP_ADDR] = 0x7ec, - [ARB_ERR_CAP_DATA] = 0x7f0, [ARB_ERR_CAP_STATUS] = 0x7f4, [ARB_ERR_CAP_MASTER] = 0x7f8, }; @@ -105,9 +98,13 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) { int offset = gdev->gisb_offsets[reg]; - /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ - if (offset == -1) - return 1; + if (offset < 0) { + /* return 1 if the hardware doesn't have ARB_ERR_CAP_MASTER */ + if (reg == ARB_ERR_CAP_MASTER) + return 1; + else + return 0; + } if (gdev->big_endian) return ioread32be(gdev->base + offset); @@ -115,6 +112,16 @@ static u32 gisb_read(struct brcmstb_gisb_arb_device *gdev, int reg) return ioread32(gdev->base + offset); } +static u64 gisb_read_address(struct brcmstb_gisb_arb_device *gdev) +{ + u64 value; + + value = gisb_read(gdev, ARB_ERR_CAP_ADDR); + value |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32; + + return value; +} + static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) { int offset = gdev->gisb_offsets[reg]; @@ -123,9 +130,9 @@ static void gisb_write(struct brcmstb_gisb_arb_device *gdev, u32 val, int reg) return; if (gdev->big_endian) - iowrite32be(val, gdev->base + reg); + iowrite32be(val, gdev->base + offset); else - iowrite32(val, gdev->base + reg); + iowrite32(val, gdev->base + offset); } static ssize_t gisb_arb_get_timeout(struct device *dev, @@ -181,7 +188,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, const char *reason) { u32 cap_status; - unsigned long arb_addr; + u64 arb_addr; u32 master; const char *m_name; char m_fmt[11]; @@ -193,10 +200,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, return 1; /* Read the address and master */ - arb_addr = gisb_read(gdev, ARB_ERR_CAP_ADDR) & 0xffffffff; -#if (IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT)) - arb_addr |= (u64)gisb_read(gdev, ARB_ERR_CAP_HI_ADDR) << 32; -#endif + arb_addr = gisb_read_address(gdev); master = gisb_read(gdev, ARB_ERR_CAP_MASTER); m_name = brcmstb_gisb_master_to_str(gdev, master); @@ -205,7 +209,7 @@ static int brcmstb_gisb_arb_decode_addr(struct brcmstb_gisb_arb_device *gdev, m_name = m_fmt; } - pr_crit("%s: %s at 0x%lx [%c %s], core: %s\n", + pr_crit("%s: %s at 0x%llx [%c %s], core: %s\n", __func__, reason, arb_addr, cap_status & 
ARB_ERR_CAP_STATUS_WRITE ? 'W' : 'R', cap_status & ARB_ERR_CAP_STATUS_TIMEOUT ? "timeout" : "", diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 1341a94cc779..76afc841232c 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c @@ -859,6 +859,8 @@ void intel_gtt_insert_sg_entries(struct sg_table *st, } } wmb(); + if (intel_private.driver->chipset_flush) + intel_private.driver->chipset_flush(); } EXPORT_SYMBOL(intel_gtt_insert_sg_entries); diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c index 16ff781cde65..b0b36d00415d 100644 --- a/drivers/char/diag/diag_dci.c +++ b/drivers/char/diag/diag_dci.c @@ -689,7 +689,7 @@ int diag_dci_query_log_mask(struct diag_dci_client_tbl *entry, byte_mask = 0x01 << (item_num % 8); offset = equip_id * 514; - if (offset + byte_index > DCI_LOG_MASK_SIZE) { + if (offset + byte_index >= DCI_LOG_MASK_SIZE) { pr_err("diag: In %s, invalid offset: %d, log_code: %d, byte_index: %d\n", __func__, offset, log_code, byte_index); return 0; @@ -716,7 +716,7 @@ int diag_dci_query_event_mask(struct diag_dci_client_tbl *entry, bit_index = event_id % 8; byte_mask = 0x1 << bit_index; - if (byte_index > DCI_EVENT_MASK_SIZE) { + if (byte_index >= DCI_EVENT_MASK_SIZE) { pr_err("diag: In %s, invalid, event_id: %d, byte_index: %d\n", __func__, event_id, byte_index); return 0; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index f53e8ba2c718..83c206f0fc98 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -409,6 +409,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -431,6 +432,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 40d400fe5bb7..4ada103945f0 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -515,7 +515,7 @@ static void panic_halt_ipmi_heartbeat(void) msg.cmd = IPMI_WDOG_RESET_TIMER; msg.data = NULL; msg.data_len = 0; - atomic_add(2, &panic_done_count); + atomic_add(1, &panic_done_count); rv = ipmi_request_supply_msgs(watchdog_user, (struct ipmi_addr *) &addr, 0, @@ -525,7 +525,7 @@ static void panic_halt_ipmi_heartbeat(void) &panic_halt_heartbeat_recv_msg, 1); if (rv) - atomic_sub(2, &panic_done_count); + atomic_sub(1, &panic_done_count); } static struct ipmi_smi_msg panic_halt_smi_msg = { @@ -549,12 +549,12 @@ static void panic_halt_ipmi_set_timeout(void) /* Wait for the messages to be free. 
*/ while (atomic_read(&panic_done_count) != 0) ipmi_poll_interface(watchdog_user); - atomic_add(2, &panic_done_count); + atomic_add(1, &panic_done_count); rv = i_ipmi_set_timeout(&panic_halt_smi_msg, &panic_halt_recv_msg, &send_heartbeat_now); if (rv) { - atomic_sub(2, &panic_done_count); + atomic_sub(1, &panic_done_count); printk(KERN_WARNING PFX "Unable to extend the watchdog timeout."); } else { diff --git a/drivers/char/random.c b/drivers/char/random.c index 1822472dffab..bd9fc2baa6aa 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -886,12 +886,16 @@ static void add_interrupt_bench(cycles_t start) static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) { __u32 *ptr = (__u32 *) regs; + unsigned int idx; if (regs == NULL) return 0; - if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) - f->reg_idx = 0; - return *(ptr + f->reg_idx++); + idx = READ_ONCE(f->reg_idx); + if (idx >= sizeof(struct pt_regs) / sizeof(__u32)) + idx = 0; + ptr += idx++; + WRITE_ONCE(f->reg_idx, idx); + return *ptr; } void add_interrupt_randomness(int irq, int irq_flags) diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c index 8d626784cd8d..49e4040eeb55 100644 --- a/drivers/char/tpm/st33zp24/st33zp24.c +++ b/drivers/char/tpm/st33zp24/st33zp24.c @@ -485,7 +485,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, size_t count) { int size = 0; - int expected; + u32 expected; if (!chip) return -EBUSY; @@ -502,7 +502,7 @@ static int st33zp24_recv(struct tpm_chip *chip, unsigned char *buf, } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c index aaa5fa95dede..36afc1a21699 100644 --- a/drivers/char/tpm/tpm-interface.c +++ b/drivers/char/tpm/tpm-interface.c @@ -1040,6 +1040,11 @@ int tpm_get_random(u32 chip_num, u8 *out, size_t max) break; recd = be32_to_cpu(tpm_cmd.params.getrandom_out.rng_data_len); + if (recd > num_bytes) { + total = -EFAULT; + break; + } + memcpy(dest, tpm_cmd.params.getrandom_out.rng_data, recd); dest += recd; diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index 286bd090a488..389a009b83f2 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -622,6 +622,11 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, if (!rc) { data_len = be16_to_cpup( (__be16 *) &buf.data[TPM_HEADER_SIZE + 4]); + if (data_len < MIN_KEY_SIZE || data_len > MAX_KEY_SIZE + 1) { + rc = -EFAULT; + goto out; + } + data = &buf.data[TPM_HEADER_SIZE + 6]; memcpy(payload->key, data, data_len - 1); @@ -629,6 +634,7 @@ static int tpm2_unseal_cmd(struct tpm_chip *chip, payload->migratable = data[data_len - 1]; } +out: tpm_buf_destroy(&buf); return rc; } diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c index f2aa99e34b4b..9f12ad74a09b 100644 --- a/drivers/char/tpm/tpm_i2c_infineon.c +++ b/drivers/char/tpm/tpm_i2c_infineon.c @@ -436,7 +436,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -451,7 +452,7 @@ static int tpm_tis_i2c_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *)(buf + 2)); - if ((size_t) expected > count) { + if (((size_t) 
expected > count) || (expected < TPM_HEADER_SIZE)) { size = -EIO; goto out; } diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c index a1e1474dda30..aedf726cbab6 100644 --- a/drivers/char/tpm/tpm_i2c_nuvoton.c +++ b/drivers/char/tpm/tpm_i2c_nuvoton.c @@ -267,7 +267,11 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) struct device *dev = chip->dev.parent; struct i2c_client *client = to_i2c_client(dev); s32 rc; - int expected, status, burst_count, retries, size = 0; + int status; + int burst_count; + int retries; + int size = 0; + u32 expected; if (count < TPM_HEADER_SIZE) { i2c_nuvoton_ready(chip); /* return to idle */ @@ -309,7 +313,7 @@ static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count) * to machine native */ expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < size) { dev_err(dev, "%s() expected > count\n", __func__); size = -EIO; continue; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7f13221aeb30..9dd93a209ef2 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -283,7 +283,8 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count) static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) { int size = 0; - int expected, status; + int status; + u32 expected; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -298,7 +299,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) } expected = be32_to_cpu(*(__be32 *) (buf + 2)); - if (expected > count) { + if (expected > count || expected < TPM_HEADER_SIZE) { size = -EIO; goto out; } diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 35ab89fe9d7b..7c4b1ffe874f 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -912,8 +912,10 @@ static int bcm2835_pll_on(struct clk_hw *hw) ~A2W_PLL_CTRL_PWRDN); /* Take the PLL out of reset. */ + spin_lock(&cprman->regs_lock); cprman_write(cprman, data->cm_ctrl_reg, cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST); + spin_unlock(&cprman->regs_lock); /* Wait for the PLL to lock. */ timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS); @@ -997,9 +999,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, } /* Unmask the reference clock from the oscillator. 
*/ + spin_lock(&cprman->regs_lock); cprman_write(cprman, A2W_XOSC_CTRL, cprman_read(cprman, A2W_XOSC_CTRL) | data->reference_enable_mask); + spin_unlock(&cprman->regs_lock); if (do_ana_setup_first) bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana); diff --git a/drivers/clk/bcm/clk-ns2.c b/drivers/clk/bcm/clk-ns2.c index a564e9248814..adc14145861a 100644 --- a/drivers/clk/bcm/clk-ns2.c +++ b/drivers/clk/bcm/clk-ns2.c @@ -103,7 +103,7 @@ CLK_OF_DECLARE(ns2_genpll_src_clk, "brcm,ns2-genpll-scr", static const struct iproc_pll_ctrl genpll_sw = { .flags = IPROC_CLK_AON | IPROC_CLK_PLL_SPLIT_STAT_CTRL, - .aon = AON_VAL(0x0, 2, 9, 8), + .aon = AON_VAL(0x0, 1, 11, 10), .reset = RESET_VAL(0x4, 2, 1), .dig_filter = DF_VAL(0x0, 9, 3, 5, 4, 2, 3), .ndiv_int = REG_VAL(0x8, 4, 10), diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c index 43a218f35b19..4ad32ce428cf 100644 --- a/drivers/clk/clk-conf.c +++ b/drivers/clk/clk-conf.c @@ -106,7 +106,7 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier) rc = clk_set_rate(clk, rate); if (rc < 0) - pr_err("clk: couldn't set %s clk rate to %d (%d), current rate: %ld\n", + pr_err("clk: couldn't set %s clk rate to %u (%d), current rate: %lu\n", __clk_get_name(clk), rate, rc, clk_get_rate(clk)); clk_put(clk); diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c index cd0f2726f5e0..c40445488d3a 100644 --- a/drivers/clk/clk-scpi.c +++ b/drivers/clk/clk-scpi.c @@ -71,15 +71,15 @@ static const struct clk_ops scpi_clk_ops = { }; /* find closest match to given frequency in OPP table */ -static int __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) +static long __scpi_dvfs_round_rate(struct scpi_clk *clk, unsigned long rate) { int idx; - u32 fmin = 0, fmax = ~0, ftmp; + unsigned long fmin = 0, fmax = ~0, ftmp; const struct scpi_opp *opp = clk->info->opps; for (idx = 0; idx < clk->info->count; idx++, opp++) { ftmp = opp->freq; - if (ftmp >= (u32)rate) { + if (ftmp >= rate) { if (ftmp <= fmax) fmax = ftmp; break; diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index e346b223199d..a01ee9a3ed6d 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -72,7 +72,7 @@ static const char * const si5351_input_names[] = { "xtal", "clkin" }; static const char * const si5351_pll_names[] = { - "plla", "pllb", "vxco" + "si5351_plla", "si5351_pllb", "si5351_vxco" }; static const char * const si5351_msynth_names[] = { "ms0", "ms1", "ms2", "ms3", "ms4", "ms5", "ms6", "ms7" diff --git a/drivers/clk/msm/virtclk-front.c b/drivers/clk/msm/virtclk-front.c index 4018c4922574..2d8a9e8ec61c 100644 --- a/drivers/clk/msm/virtclk-front.c +++ b/drivers/clk/msm/virtclk-front.c @@ -77,7 +77,7 @@ static int virtclk_front_get_id(struct clk *clk) } ret = habmm_socket_recv(handle, &rsp, &rsp_size, - UINT_MAX, 0); + UINT_MAX, HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -132,7 +132,8 @@ static int virtclk_front_prepare(struct clk *clk) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -185,7 +186,8 @@ static void virtclk_front_unprepare(struct clk *clk) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + 
HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -236,7 +238,8 @@ static int virtclk_front_reset(struct clk *clk, enum clk_reset_action action) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -290,7 +293,8 @@ static int virtclk_front_set_rate(struct clk *clk, unsigned long rate) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, ret); @@ -362,7 +366,8 @@ static unsigned long virtclk_front_get_rate(struct clk *clk) goto err_out; } - ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, 0); + ret = habmm_socket_recv(handle, &rsp, &rsp_size, UINT_MAX, + HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); if (ret) { ret = 0; pr_err("%s: habmm socket receive failed (%d)\n", clk->dbg_name, diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index ff0c8327fabe..3c3cf8e04eea 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2013, 2016-2018, The Linux Foundation. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -210,9 +210,11 @@ static unsigned long clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); + const struct freq_tbl *f_curr; u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; - if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) { + if (rcg->enable_safe_config && (!clk_hw_is_prepared(hw) + || !clk_hw_is_enabled(hw))) { if (!rcg->current_freq) rcg->current_freq = cxo_f.freq; return rcg->current_freq; @@ -232,9 +234,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) mode >>= CFG_MODE_SHIFT; } - mask = BIT(rcg->hid_width) - 1; - hid_div = cfg >> CFG_SRC_DIV_SHIFT; - hid_div &= mask; + if (rcg->enable_safe_config) { + f_curr = qcom_find_freq(rcg->freq_tbl, rcg->current_freq); + if (!f_curr) + return -EINVAL; + + hid_div = f_curr->pre_div; + } else { + mask = BIT(rcg->hid_width) - 1; + hid_div = cfg >> CFG_SRC_DIV_SHIFT; + hid_div &= mask; + } return calc_rate(parent_rate, m, n, mode, hid_div); } diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c index 2e7f03d50f4e..95a4dd290f35 100644 --- a/drivers/clk/qcom/gcc-msm8916.c +++ b/drivers/clk/qcom/gcc-msm8916.c @@ -1437,6 +1437,7 @@ static const struct freq_tbl ftbl_codec_clk[] = { static struct clk_rcg2 codec_digcodec_clk_src = { .cmd_rcgr = 0x1c09c, + .mnd_width = 8, .hid_width = 5, .parent_map = gcc_xo_gpll1_emclk_sleep_map, .freq_tbl = ftbl_codec_clk, diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c index a4044955c68f..d388073f5f42 100644 --- a/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c +++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-14nm-util.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -430,8 +430,8 @@ static void mdss_dsi_pll_14nm_input_init(struct mdss_pll_resources *pll, pdb->in.pll_ip_trim = 4; /* 4, reg: 0x0404 */ pdb->in.pll_cpcset_cur = 1; /* 1, reg: 0x04f0, bit 0 - 2 */ pdb->in.pll_cpmset_cur = 1; /* 1, reg: 0x04f0, bit 3 - 5 */ - pdb->in.pll_icpmset = 4; /* 4, reg: 0x04fc, bit 3 - 5 */ - pdb->in.pll_icpcset = 4; /* 4, reg: 0x04fc, bit 0 - 2 */ + pdb->in.pll_icpmset = 7; /* 7, reg: 0x04fc, bit 3 - 5 */ + pdb->in.pll_icpcset = 7; /* 7, reg: 0x04fc, bit 0 - 2 */ pdb->in.pll_icpmset_p = 0; /* 0, reg: 0x04f4, bit 0 - 2 */ pdb->in.pll_icpmset_m = 0; /* 0, reg: 0x04f4, bit 3 - 5 */ pdb->in.pll_icpcset_p = 0; /* 0, reg: 0x04f8, bit 0 - 2 */ diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index 8b2061fca5f0..7ca79714649e 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -46,6 +46,15 @@ config CPU_FREQ_STAT_DETAILS If in doubt, say N. +config CPU_FREQ_TIMES + bool "CPU frequency time-in-state statistics" + default y + help + This driver exports CPU time-in-state information through procfs file + system. + + If in doubt, say N. + choice prompt "Default CPUFreq governor" default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 6d4a7aeb506d..4f9ba8eb0130 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -4,7 +4,10 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o freq_table.o cpufreq_governor_attr_set.o # CPUfreq stats obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o -# CPUfreq governors +# CPUfreq times +obj-$(CONFIG_CPU_FREQ_TIMES) += cpufreq_times.o + +# CPUfreq governors obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 5f0f983ce173..659d2029ac5a 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -19,6 +19,7 @@ #include <linux/cpu.h> #include <linux/cpufreq.h> +#include <linux/cpufreq_times.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/init.h> @@ -447,6 +448,7 @@ static void __cpufreq_notify_transition(struct cpufreq_policy *policy, pr_debug("FREQ: %lu - CPU: %lu\n", (unsigned long)freqs->new, (unsigned long)freqs->cpu); trace_cpu_frequency(freqs->new, freqs->cpu); + cpufreq_times_record_transition(freqs); srcu_notifier_call_chain(&cpufreq_transition_notifier_list, CPUFREQ_POSTCHANGE, freqs); if (likely(policy) && likely(policy->cpu == freqs->cpu)) @@ -1354,6 +1356,7 @@ static int cpufreq_online(unsigned int cpu) goto out_exit_policy; blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_CREATE_POLICY, policy); + cpufreq_times_create_policy(policy); write_lock_irqsave(&cpufreq_driver_lock, flags); list_add(&policy->policy_list, &cpufreq_policy_list); diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c new file mode 100644 index 000000000000..e5df7a47cc16 --- /dev/null +++ b/drivers/cpufreq/cpufreq_times.c @@ -0,0 +1,461 @@ +/* drivers/cpufreq/cpufreq_times.c + * + * Copyright (C) 2018 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/cpufreq.h> +#include <linux/cpufreq_times.h> +#include <linux/cputime.h> +#include <linux/hashtable.h> +#include <linux/init.h> +#include <linux/proc_fs.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/threads.h> + +#define UID_HASH_BITS 10 + +DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS); + +static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */ +static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */ + +struct uid_entry { + uid_t uid; + unsigned int max_state; + struct hlist_node hash; + struct rcu_head rcu; + u64 time_in_state[0]; +}; + +/** + * struct cpu_freqs - per-cpu frequency information + * @offset: start of these freqs' stats in task time_in_state array + * @max_state: number of entries in freq_table + * @last_index: index in freq_table of last frequency switched to + * @freq_table: list of available frequencies + */ +struct cpu_freqs { + unsigned int offset; + unsigned int max_state; + unsigned int last_index; + unsigned int freq_table[0]; +}; + +static struct cpu_freqs *all_freqs[NR_CPUS]; + +static unsigned int next_offset; + + +/* Caller must hold rcu_read_lock() */ +static struct uid_entry *find_uid_entry_rcu(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible_rcu(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_uid_entry_locked(uid_t uid) +{ + struct uid_entry *uid_entry; + + hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) { + if (uid_entry->uid == uid) + return uid_entry; + } + return NULL; +} + +/* Caller must hold uid lock */ +static struct uid_entry *find_or_register_uid_locked(uid_t uid) +{ + struct uid_entry *uid_entry, *temp; + unsigned int max_state = READ_ONCE(next_offset); + size_t alloc_size = sizeof(*uid_entry) + max_state * + sizeof(uid_entry->time_in_state[0]); + + uid_entry = find_uid_entry_locked(uid); + if (uid_entry) { + if (uid_entry->max_state == max_state) + return uid_entry; + /* uid_entry->time_in_state is too small to track all freqs, so + * expand it. 
+ */ + temp = __krealloc(uid_entry, alloc_size, GFP_ATOMIC); + if (!temp) + return uid_entry; + temp->max_state = max_state; + memset(temp->time_in_state + uid_entry->max_state, 0, + (max_state - uid_entry->max_state) * + sizeof(uid_entry->time_in_state[0])); + if (temp != uid_entry) { + hlist_replace_rcu(&uid_entry->hash, &temp->hash); + kfree_rcu(uid_entry, rcu); + } + return temp; + } + + uid_entry = kzalloc(alloc_size, GFP_ATOMIC); + if (!uid_entry) + return NULL; + + uid_entry->uid = uid; + uid_entry->max_state = max_state; + + hash_add_rcu(uid_hash_table, &uid_entry->hash, uid); + + return uid_entry; +} + +static bool freq_index_invalid(unsigned int index) +{ + unsigned int cpu; + struct cpu_freqs *freqs; + + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || index < freqs->offset || + freqs->offset + freqs->max_state <= index) + continue; + return freqs->freq_table[index - freqs->offset] == + CPUFREQ_ENTRY_INVALID; + } + return true; +} + +static int single_uid_time_in_state_show(struct seq_file *m, void *ptr) +{ + struct uid_entry *uid_entry; + unsigned int i; + u64 time; + uid_t uid = from_kuid_munged(current_user_ns(), *(kuid_t *)m->private); + + if (uid == overflowuid) + return -EINVAL; + + rcu_read_lock(); + + uid_entry = find_uid_entry_rcu(uid); + if (!uid_entry) { + rcu_read_unlock(); + return 0; + } + + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + time = cputime_to_clock_t(uid_entry->time_in_state[i]); + seq_write(m, &time, sizeof(time)); + } + + rcu_read_unlock(); + + return 0; +} + +static void *uid_seq_start(struct seq_file *seq, loff_t *pos) +{ + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + + if (*pos >= HASH_SIZE(uid_hash_table)) + return NULL; + + return &uid_hash_table[*pos]; +} + +static void uid_seq_stop(struct seq_file *seq, void *v) { } + +static int uid_time_in_state_seq_show(struct seq_file *m, void *v) +{ + struct uid_entry *uid_entry; + struct cpu_freqs *freqs, *last_freqs = NULL; + int i, cpu; + + if (v == uid_hash_table) { + seq_puts(m, "uid:"); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == + CPUFREQ_ENTRY_INVALID) + continue; + seq_printf(m, " %d", freqs->freq_table[i]); + } + } + seq_putc(m, '\n'); + } + + rcu_read_lock(); + + hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) { + if (uid_entry->max_state) + seq_printf(m, "%d:", uid_entry->uid); + for (i = 0; i < uid_entry->max_state; ++i) { + if (freq_index_invalid(i)) + continue; + seq_printf(m, " %lu", (unsigned long)cputime_to_clock_t( + uid_entry->time_in_state[i])); + } + if (uid_entry->max_state) + seq_putc(m, '\n'); + } + + rcu_read_unlock(); + return 0; +} + +void cpufreq_task_times_init(struct task_struct *p) +{ + void *temp; + unsigned long flags; + unsigned int max_state; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = 0; + + max_state = READ_ONCE(next_offset); + + /* We use one array to avoid multiple allocs per task */ + temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC); + if (!temp) + return; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + p->time_in_state = temp; + 
spin_unlock_irqrestore(&task_time_in_state_lock, flags); + p->max_state = max_state; +} + +/* Caller must hold task_time_in_state_lock */ +static int cpufreq_task_times_realloc_locked(struct task_struct *p) +{ + void *temp; + unsigned int max_state = READ_ONCE(next_offset); + + temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC); + if (!temp) + return -ENOMEM; + p->time_in_state = temp; + memset(p->time_in_state + p->max_state, 0, + (max_state - p->max_state) * sizeof(u64)); + p->max_state = max_state; + return 0; +} + +void cpufreq_task_times_exit(struct task_struct *p) +{ + unsigned long flags; + void *temp; + + if (!p->time_in_state) + return; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + temp = p->time_in_state; + p->time_in_state = NULL; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + kfree(temp); +} + +int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *p) +{ + unsigned int cpu, i; + cputime_t cputime; + unsigned long flags; + struct cpu_freqs *freqs; + struct cpu_freqs *last_freqs = NULL; + + spin_lock_irqsave(&task_time_in_state_lock, flags); + for_each_possible_cpu(cpu) { + freqs = all_freqs[cpu]; + if (!freqs || freqs == last_freqs) + continue; + last_freqs = freqs; + + seq_printf(m, "cpu%u\n", cpu); + for (i = 0; i < freqs->max_state; i++) { + if (freqs->freq_table[i] == CPUFREQ_ENTRY_INVALID) + continue; + cputime = 0; + if (freqs->offset + i < p->max_state && + p->time_in_state) + cputime = p->time_in_state[freqs->offset + i]; + seq_printf(m, "%u %lu\n", freqs->freq_table[i], + (unsigned long)cputime_to_clock_t(cputime)); + } + } + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + return 0; +} + +void cpufreq_acct_update_power(struct task_struct *p, cputime_t cputime) +{ + unsigned long flags; + unsigned int state; + struct uid_entry *uid_entry; + struct cpu_freqs *freqs = all_freqs[task_cpu(p)]; + uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p)); + + if (!freqs || p->flags & PF_EXITING) + return; + + state = freqs->offset + READ_ONCE(freqs->last_index); + + spin_lock_irqsave(&task_time_in_state_lock, flags); + if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) && + p->time_in_state) + p->time_in_state[state] += cputime; + spin_unlock_irqrestore(&task_time_in_state_lock, flags); + + spin_lock_irqsave(&uid_lock, flags); + uid_entry = find_or_register_uid_locked(uid); + if (uid_entry && state < uid_entry->max_state) + uid_entry->time_in_state[state] += cputime; + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_create_policy(struct cpufreq_policy *policy) +{ + int cpu, index; + unsigned int count = 0; + struct cpufreq_frequency_table *pos, *table; + struct cpu_freqs *freqs; + void *tmp; + + if (all_freqs[policy->cpu]) + return; + + table = cpufreq_frequency_get_table(policy->cpu); + if (!table) + return; + + cpufreq_for_each_entry(pos, table) + count++; + + tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count, + GFP_KERNEL); + if (!tmp) + return; + + freqs = tmp; + freqs->max_state = count; + + index = cpufreq_frequency_table_get_index(policy, policy->cur); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_for_each_entry(pos, table) + freqs->freq_table[pos - table] = pos->frequency; + + freqs->offset = next_offset; + WRITE_ONCE(next_offset, freqs->offset + count); + for_each_cpu(cpu, policy->related_cpus) + all_freqs[cpu] = freqs; +} + +void cpufreq_task_times_remove_uids(uid_t 
uid_start, uid_t uid_end) +{ + struct uid_entry *uid_entry; + struct hlist_node *tmp; + unsigned long flags; + + spin_lock_irqsave(&uid_lock, flags); + + for (; uid_start <= uid_end; uid_start++) { + hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp, + hash, uid_start) { + if (uid_start == uid_entry->uid) { + hash_del_rcu(&uid_entry->hash); + kfree_rcu(uid_entry, rcu); + } + } + } + + spin_unlock_irqrestore(&uid_lock, flags); +} + +void cpufreq_times_record_transition(struct cpufreq_freqs *freq) +{ + int index; + struct cpu_freqs *freqs = all_freqs[freq->cpu]; + struct cpufreq_policy *policy; + + if (!freqs) + return; + + policy = cpufreq_cpu_get(freq->cpu); + if (!policy) + return; + + index = cpufreq_frequency_table_get_index(policy, freq->new); + if (index >= 0) + WRITE_ONCE(freqs->last_index, index); + + cpufreq_cpu_put(policy); +} + +static const struct seq_operations uid_time_in_state_seq_ops = { + .start = uid_seq_start, + .next = uid_seq_next, + .stop = uid_seq_stop, + .show = uid_time_in_state_seq_show, +}; + +static int uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &uid_time_in_state_seq_ops); +} + +int single_uid_time_in_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, single_uid_time_in_state_show, + &(inode->i_uid)); +} + +static const struct file_operations uid_time_in_state_fops = { + .open = uid_time_in_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init cpufreq_times_init(void) +{ + proc_create_data("uid_time_in_state", 0444, NULL, + &uid_time_in_state_fops, NULL); + + return 0; +} + +early_initcall(cpufreq_times_init); diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index 68ef8fd9482f..f5c4e009113c 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -364,7 +364,13 @@ struct clk *s3c_cpufreq_clk_get(struct device *dev, const char *name) static int s3c_cpufreq_init(struct cpufreq_policy *policy) { policy->clk = clk_arm; - return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); + + policy->cpuinfo.transition_latency = cpu_cur.info->latency; + + if (ftab) + return cpufreq_table_validate_and_show(policy, ftab); + + return 0; } static int __init s3c_cpufreq_initclks(void) diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c index 86628e22b2a3..719c3d9f07fb 100644 --- a/drivers/cpufreq/sh-cpufreq.c +++ b/drivers/cpufreq/sh-cpufreq.c @@ -30,54 +30,63 @@ static DEFINE_PER_CPU(struct clk, sh_cpuclk); +struct cpufreq_target { + struct cpufreq_policy *policy; + unsigned int freq; +}; + static unsigned int sh_cpufreq_get(unsigned int cpu) { return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000; } -/* - * Here we notify other drivers of the proposed change and the final change. 
- */ -static int sh_cpufreq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) +static long __sh_cpufreq_target(void *arg) { - unsigned int cpu = policy->cpu; + struct cpufreq_target *target = arg; + struct cpufreq_policy *policy = target->policy; + int cpu = policy->cpu; struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu); - cpumask_t cpus_allowed; struct cpufreq_freqs freqs; struct device *dev; long freq; - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - BUG_ON(smp_processor_id() != cpu); + if (smp_processor_id() != cpu) + return -ENODEV; dev = get_cpu_device(cpu); /* Convert target_freq from kHz to Hz */ - freq = clk_round_rate(cpuclk, target_freq * 1000); + freq = clk_round_rate(cpuclk, target->freq * 1000); if (freq < (policy->min * 1000) || freq > (policy->max * 1000)) return -EINVAL; - dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000); + dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000); freqs.old = sh_cpufreq_get(cpu); freqs.new = (freq + 500) / 1000; freqs.flags = 0; - cpufreq_freq_transition_begin(policy, &freqs); - set_cpus_allowed_ptr(current, &cpus_allowed); + cpufreq_freq_transition_begin(target->policy, &freqs); clk_set_rate(cpuclk, freq); - cpufreq_freq_transition_end(policy, &freqs, 0); + cpufreq_freq_transition_end(target->policy, &freqs, 0); dev_dbg(dev, "set frequency %lu Hz\n", freq); - return 0; } +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sh_cpufreq_target(struct cpufreq_policy *policy, + unsigned int target_freq, + unsigned int relation) +{ + struct cpufreq_target data = { .policy = policy, .freq = target_freq }; + + return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data); +} + static int sh_cpufreq_verify(struct cpufreq_policy *policy) { struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu); diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c index a5c111b67f37..ea11a33e7fff 100644 --- a/drivers/cpuidle/dt_idle_states.c +++ b/drivers/cpuidle/dt_idle_states.c @@ -174,8 +174,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv, if (!state_node) break; - if (!of_device_is_available(state_node)) + if (!of_device_is_available(state_node)) { + of_node_put(state_node); continue; + } if (!idle_state_valid(state_node, i, cpumask)) { pr_warn("%s idle state not valid, bailing out\n", diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c index 584a1857624a..324cce5d7354 100644 --- a/drivers/cpuidle/lpm-levels.c +++ b/drivers/cpuidle/lpm-levels.c @@ -1706,7 +1706,8 @@ static int cluster_cpuidle_register(struct lpm_cluster *cl) struct cpuidle_state *st = &cl->drv->states[i]; struct lpm_cpu_level *cpu_level = &cl->cpu->levels[i]; snprintf(st->name, CPUIDLE_NAME_LEN, "C%u\n", i); - snprintf(st->desc, CPUIDLE_DESC_LEN, cpu_level->name); + snprintf(st->desc, CPUIDLE_DESC_LEN, "%s", + cpu_level->name); st->flags = 0; st->exit_latency = cpu_level->pwr.latency_us; st->power_usage = cpu_level->pwr.ss_power; diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 0f6fd42f55ca..48d4dddf4941 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -911,6 +911,21 @@ static int sdma_disable_channel(struct dma_chan *chan) return 0; } +static int sdma_disable_channel_with_delay(struct dma_chan *chan) +{ + sdma_disable_channel(chan); + + /* + * According to NXP R&D team a delay of one BD SDMA cost time + * (maximum is 1ms) should be added after disable of 
the channel + * bit, to ensure SDMA core has really been stopped after SDMA + * clients call .device_terminate_all. + */ + mdelay(1); + + return 0; +} + static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) { struct sdma_engine *sdma = sdmac->sdma; @@ -1707,17 +1722,24 @@ static int sdma_probe(struct platform_device *pdev) if (IS_ERR(sdma->clk_ahb)) return PTR_ERR(sdma->clk_ahb); - clk_prepare(sdma->clk_ipg); - clk_prepare(sdma->clk_ahb); + ret = clk_prepare(sdma->clk_ipg); + if (ret) + return ret; + + ret = clk_prepare(sdma->clk_ahb); + if (ret) + goto err_clk; ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", sdma); if (ret) - return ret; + goto err_irq; sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); - if (!sdma->script_addrs) - return -ENOMEM; + if (!sdma->script_addrs) { + ret = -ENOMEM; + goto err_irq; + } /* initially no scripts available */ saddr_arr = (s32 *)sdma->script_addrs; @@ -1793,7 +1815,7 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel; + sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay; sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); @@ -1832,6 +1854,10 @@ err_register: dma_async_device_unregister(&sdma->dma_device); err_init: kfree(sdma->script_addrs); +err_irq: + clk_unprepare(sdma->clk_ahb); +err_clk: + clk_unprepare(sdma->clk_ipg); return ret; } @@ -1842,6 +1868,8 @@ static int sdma_remove(struct platform_device *pdev) dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); + clk_unprepare(sdma->clk_ahb); + clk_unprepare(sdma->clk_ipg); /* Kill the tasklet */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { struct sdma_channel *sdmac = &sdma->channel[i]; diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 8100ede095d5..c7bd1c5315f4 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -51,7 +51,15 @@ struct ti_am335x_xbar_map { static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val) { - writeb_relaxed(val, iomem + event); + /* + * TPCC_EVT_MUX_60_63 register layout is different than the + * rest, in the sense, that event 63 is mapped to lowest byte + * and event 60 is mapped to highest, handle it separately. + */ + if (event >= 60 && event <= 63) + writeb_relaxed(val, iomem + (63 - event % 4)); + else + writeb_relaxed(val, iomem + event); } static void ti_am335x_xbar_free(struct device *dev, void *route_data) diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c index 0574e1bbe45c..3ce5609b4611 100644 --- a/drivers/edac/mv64x60_edac.c +++ b/drivers/edac/mv64x60_edac.c @@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev) /* Non-ECC RAM? */ printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__); res = -ENODEV; - goto err2; + goto err; } edac_dbg(3, "init mci\n"); diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c index d071e89d3124..99fd598b5069 100644 --- a/drivers/esoc/esoc-mdm-4x.c +++ b/drivers/esoc/esoc-mdm-4x.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. 
+/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -230,10 +230,15 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc) } msleep(100); } - if (status_down) + if (status_down) { dev_dbg(dev, "shutdown successful\n"); - else + esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc); + } else { dev_err(mdm->dev, "graceful poff ipc fail\n"); + graceful_shutdown = false; + goto force_poff; + } + break; force_poff: case ESOC_FORCE_PWR_OFF: if (!graceful_shutdown) { diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c index 4be66a16a3a1..0288082cea00 100644 --- a/drivers/esoc/esoc-mdm-pon.c +++ b/drivers/esoc/esoc-mdm-pon.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2015, 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -55,7 +55,7 @@ static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic) if (!atomic) usleep_range(reset_time_us, reset_time_us + 100000); else - mdelay(mdm->reset_time_ms); + mdelay(DEF_MDM9X55_RESET_TIME); gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET), soft_reset_direction_de_assert); return 0; diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 06d345b087f8..759a39906a52 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -2145,7 +2145,8 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, return desc; } - status = gpiod_request(desc, con_id); + /* If a connection label was passed use that, else use the device name as label */ + status = gpiod_request(desc, con_id ? con_id : dev_name(dev)); if (status < 0) return ERR_PTR(status); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index a142d5ae148d..5c40d6d710af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -585,6 +585,9 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, size_t size; u32 retry = 3; + if (amdgpu_acpi_pcie_notify_device_ready(adev)) + return -EINVAL; + /* Get the device handle */ handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index 930083336968..1f0e6ede120c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. 
- */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) && + amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { + /* Don't start link training before we have the DPCD */ + if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -739,9 +732,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -760,8 +755,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? */ amdgpu_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -862,9 +861,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = amdgpu_connector_best_single_encoder(connector); if (!encoder) @@ -918,8 +919,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -981,9 +984,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1108,8 +1113,10 @@ out: amdgpu_connector_update_scratch_regs(connector, ret); exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1351,9 +1358,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) struct drm_encoder *encoder = 
amdgpu_connector_best_single_encoder(connector); int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1421,8 +1430,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force) amdgpu_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82903ca78529..c555781685ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -560,6 +560,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); } + /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ + if (obj->import_attach) { + DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + return ERR_PTR(-EINVAL); + } + amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL); if (amdgpu_fb == NULL) { drm_gem_object_unreference_unlocked(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index b57fffc2d4af..0a91261b6f5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2104,34 +2104,8 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) case CHIP_KAVERI: adev->gfx.config.max_shader_engines = 1; adev->gfx.config.max_tile_pipes = 4; - if ((adev->pdev->device == 0x1304) || - (adev->pdev->device == 0x1305) || - (adev->pdev->device == 0x130C) || - (adev->pdev->device == 0x130F) || - (adev->pdev->device == 0x1310) || - (adev->pdev->device == 0x1311) || - (adev->pdev->device == 0x131C)) { - adev->gfx.config.max_cu_per_sh = 8; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1309) || - (adev->pdev->device == 0x130A) || - (adev->pdev->device == 0x130D) || - (adev->pdev->device == 0x1313) || - (adev->pdev->device == 0x131D)) { - adev->gfx.config.max_cu_per_sh = 6; - adev->gfx.config.max_backends_per_se = 2; - } else if ((adev->pdev->device == 0x1306) || - (adev->pdev->device == 0x1307) || - (adev->pdev->device == 0x130B) || - (adev->pdev->device == 0x130E) || - (adev->pdev->device == 0x1315) || - (adev->pdev->device == 0x131B)) { - adev->gfx.config.max_cu_per_sh = 4; - adev->gfx.config.max_backends_per_se = 1; - } else { - adev->gfx.config.max_cu_per_sh = 3; - adev->gfx.config.max_backends_per_se = 1; - } + adev->gfx.config.max_cu_per_sh = 8; + adev->gfx.config.max_backends_per_se = 2; adev->gfx.config.max_sh_per_se = 1; adev->gfx.config.max_texture_channel_caches = 4; adev->gfx.config.max_gprs = 256; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 74909e72a009..2acbd43f9a53 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -519,11 +519,17 @@ static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr, return ret; } +static void kfd_topology_kobj_release(struct kobject *kobj) +{ + kfree(kobj); +} + static const struct 
sysfs_ops sysprops_ops = { .show = sysprops_show, }; static struct kobj_type sysprops_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &sysprops_ops, }; @@ -559,6 +565,7 @@ static const struct sysfs_ops iolink_ops = { }; static struct kobj_type iolink_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &iolink_ops, }; @@ -586,6 +593,7 @@ static const struct sysfs_ops mem_ops = { }; static struct kobj_type mem_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &mem_ops, }; @@ -625,6 +633,7 @@ static const struct sysfs_ops cache_ops = { }; static struct kobj_type cache_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &cache_ops, }; @@ -747,6 +756,7 @@ static const struct sysfs_ops node_ops = { }; static struct kobj_type node_type = { + .release = kfd_topology_kobj_release, .sysfs_ops = &node_ops, }; diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 65d4c5d8d94b..fbd717324328 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -3766,8 +3766,7 @@ monitor_name(struct detailed_timing *t, void *data) * @edid: EDID to parse * * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver. The - * Conn_Type, HDCP and Port_ID ELD fields are left for the graphics driver to - * fill in. + * HDCP and Port_ID ELD fields are left for the graphics driver to fill in. */ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) { @@ -3843,6 +3842,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid) } eld[5] |= sad_count << 4; + if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector->connector_type == DRM_MODE_CONNECTOR_eDP) + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_DP; + else + eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= DRM_ELD_CONN_TYPE_HDMI; + eld[DRM_ELD_BASELINE_ELD_LEN] = DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8090989185b2..4ddbc49125cd 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -1271,9 +1271,9 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe) if (atomic_dec_and_test(&vblank->refcount)) { if (drm_vblank_offdelay == 0) return; - else if (dev->vblank_disable_immediate || drm_vblank_offdelay < 0) + else if (drm_vblank_offdelay < 0) vblank_disable_fn((unsigned long)vblank); - else + else if (!dev->vblank_disable_immediate) mod_timer(&vblank->disable_timer, jiffies + ((drm_vblank_offdelay * HZ)/1000)); } @@ -1902,6 +1902,16 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe) wake_up(&vblank->queue); drm_handle_vblank_events(dev, pipe); + /* With instant-off, we defer disabling the interrupt until after + * we finish processing the following vblank. The disable has to + * be last (after drm_handle_vblank_events) so that the timestamp + * is always accurate. + */ + if (dev->vblank_disable_immediate && + drm_vblank_offdelay > 0 && + !atomic_read(&vblank->refcount)) + vblank_disable_fn((unsigned long)vblank); + spin_unlock_irqrestore(&dev->event_lock, irqflags); return true; diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index f8b5fcfa91a2..1fe4b8e6596b 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -412,6 +412,26 @@ out: } /** + * drm_kms_helper_is_poll_worker - is %current task an output poll worker? + * + * Determine if %current task is an output poll worker. 
This can be used + * to select distinct code paths for output polling versus other contexts. + * + * One use case is to avoid a deadlock between the output poll worker and + * the autosuspend worker wherein the latter waits for polling to finish + * upon calling drm_kms_helper_poll_disable(), while the former waits for + * runtime suspend to finish upon calling pm_runtime_get_sync() in a + * connector ->detect hook. + */ +bool drm_kms_helper_is_poll_worker(void) +{ + struct work_struct *work = current_work(); + + return work && work->func == output_poll_execute; +} +EXPORT_SYMBOL(drm_kms_helper_is_poll_worker); + +/** * drm_kms_helper_poll_disable - disable output polling * @dev: drm_device * diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c index 9144dfdf30c9..7887bda23df0 100644 --- a/drivers/gpu/drm/msm/dba_bridge.c +++ b/drivers/gpu/drm/msm/dba_bridge.c @@ -51,6 +51,7 @@ struct dba_bridge { u32 num_of_input_lanes; bool pluggable; u32 panel_count; + bool cont_splash_enabled; }; #define to_dba_bridge(x) container_of((x), struct dba_bridge, base) @@ -324,6 +325,7 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev, bridge->panel_count = data->panel_count; bridge->base.funcs = &_dba_bridge_ops; bridge->base.encoder = encoder; + bridge->cont_splash_enabled = data->cont_splash_enabled; rc = drm_bridge_attach(dev, &bridge->base); if (rc) { @@ -339,7 +341,10 @@ struct drm_bridge *dba_bridge_init(struct drm_device *dev, encoder->bridge = &bridge->base; } - if (!bridge->pluggable) { + /* If early splash has enabled bridge chip in bootloader, + * below call should be skipped. + */ + if (!bridge->pluggable && !bridge->cont_splash_enabled) { if (bridge->ops.power_on) bridge->ops.power_on(bridge->dba_ctx, true, 0); if (bridge->ops.check_hpd) diff --git a/drivers/gpu/drm/msm/dba_bridge.h b/drivers/gpu/drm/msm/dba_bridge.h index 5562d2b2aef9..edc130f92257 100644 --- a/drivers/gpu/drm/msm/dba_bridge.h +++ b/drivers/gpu/drm/msm/dba_bridge.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -44,6 +44,7 @@ struct dba_bridge_init { struct drm_bridge *precede_bridge; bool pluggable; u32 panel_count; + bool cont_splash_enabled; }; /** diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c index 6015cf35e030..7a90c7be4e5c 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c @@ -1598,7 +1598,7 @@ exit: * * Return: error code. 
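The drm_probe_helper hunk above only adds the drm_kms_helper_is_poll_worker() predicate; the sketch below shows the consumer-side pattern that the amdgpu, radeon and nouveau ->detect hunks later in this series follow. example_detect() is a hypothetical connector callback and the usual DRM and runtime-PM headers are assumed; only the guard around the runtime-PM calls is the point.

static enum drm_connector_status
example_detect(struct drm_connector *connector, bool force)
{
	enum drm_connector_status status = connector_status_disconnected;

	/*
	 * Outputs are only polled while the device is runtime active, so the
	 * poll worker needs no runtime-PM reference of its own. Taking one
	 * here could deadlock: the autosuspend worker waits for polling to
	 * finish while this code waits for the resume it triggered.
	 */
	if (!drm_kms_helper_is_poll_worker()) {
		if (pm_runtime_get_sync(connector->dev->dev) < 0)
			return status;
	}

	/* ... hardware probing elided ... */

	if (!drm_kms_helper_is_poll_worker()) {
		pm_runtime_mark_last_busy(connector->dev->dev);
		pm_runtime_put_autosuspend(connector->dev->dev);
	}

	return status;
}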
*/ -int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl) +int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled) { int rc = 0; @@ -1615,37 +1615,40 @@ int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl) goto error; } - dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw, + if (!cont_splash_enabled) { + dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw, &dsi_ctrl->host_config.lane_map); - dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw, + dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw, &dsi_ctrl->host_config.common_config); - if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) { - dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw, + if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) { + dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw, &dsi_ctrl->host_config.common_config, &dsi_ctrl->host_config.u.cmd_engine); - dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw, + dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw, dsi_ctrl->host_config.video_timing.h_active, dsi_ctrl->host_config.video_timing.h_active * 3, dsi_ctrl->host_config.video_timing.v_active, 0x0); - } else { - dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw, + } else { + dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw, &dsi_ctrl->host_config.common_config, &dsi_ctrl->host_config.u.video_engine); - dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, + dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, &dsi_ctrl->host_config.video_timing); + } } - - dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0); dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0); - /* Perform a soft reset before enabling dsi controller */ - dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw); + /* Perform a soft reset before enabling dsi controller + * But skip the reset if dsi is enabled in bootloader. + */ + if (!cont_splash_enabled) + dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw); pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index); dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1); error: @@ -1967,6 +1970,12 @@ error: return rc; } +void dsi_ctrl_update_power_state(struct dsi_ctrl *dsi_ctrl, + enum dsi_power_state state) +{ + dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state); +} + /** * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller * @dsi_ctrl: DSI controller handle. diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h index 993a35cbf84a..c0ba532011b5 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -331,6 +331,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl); /** * dsi_ctrl_host_init() - Initialize DSI host hardware. * @dsi_ctrl: DSI controller handle. + * @cont_splash_enabled: Flag for DSI splash enabled in bootloader. * * Initializes DSI controller hardware with host configuration provided by * dsi_ctrl_update_host_config(). Initialization can be performed only during @@ -339,7 +340,7 @@ int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl); * * Return: error code. */ -int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl); +int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled); /** * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware. 
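The dsi_ctrl_host_init() rework above is the first of several hunks that apply the same continuous-splash rule, so a distilled sketch is given here. Every name in it is a hypothetical stand-in; only the control flow mirrors the patch.

struct example_ctrl;
void example_hw_setup(struct example_ctrl *ctrl);
void example_hw_soft_reset(struct example_ctrl *ctrl);
void example_hw_enable_irqs(struct example_ctrl *ctrl);

static int example_host_init(struct example_ctrl *ctrl, bool cont_splash_enabled)
{
	/*
	 * When the bootloader has already programmed and enabled the
	 * controller, reprogramming it, and above all soft-resetting it,
	 * would glitch the splash image still being scanned out, so both
	 * steps are skipped on that path.
	 */
	if (!cont_splash_enabled) {
		example_hw_setup(ctrl);
		example_hw_soft_reset(ctrl);
	}

	/* Arming error interrupts happens on both paths, as in the hunk above. */
	example_hw_enable_irqs(ctrl);

	return 0;
}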
@@ -404,6 +405,16 @@ int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl, enum dsi_power_state state); /** + * dsi_ctrl_update_power_state() - update power state for dsi controller + * @dsi_ctrl: DSI controller handle. + * @state: Power state. + * + * Update power state for DSI controller. + * + */ +void dsi_ctrl_update_power_state(struct dsi_ctrl *dsi_ctrl, + enum dsi_power_state state); +/** * dsi_ctrl_set_cmd_engine_state() - set command engine state * @dsi_ctrl: DSI Controller handle. * @state: Engine state. diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c index 09ab14cc4746..c468a6f5caa2 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c @@ -174,6 +174,11 @@ static int dsi_display_ctrl_power_on(struct dsi_display *display) int i; struct dsi_display_ctrl *ctrl; + if (display->cont_splash_enabled) { + pr_debug("skip ctrl power on\n"); + return rc; + } + /* Sequence does not matter for split dsi usecases */ for (i = 0; i < display->ctrl_count; i++) { @@ -460,7 +465,8 @@ static int dsi_display_ctrl_init(struct dsi_display *display) for (i = 0 ; i < display->ctrl_count; i++) { ctrl = &display->ctrl[i]; - rc = dsi_ctrl_host_init(ctrl->ctrl); + rc = dsi_ctrl_host_init(ctrl->ctrl, + display->cont_splash_enabled); if (rc) { pr_err("[%s] failed to init host_%d, rc=%d\n", display->name, i, rc); @@ -720,7 +726,7 @@ static int dsi_display_phy_enable(struct dsi_display *display) rc = dsi_phy_enable(m_ctrl->phy, &display->config, m_src, - true); + true, display->cont_splash_enabled); if (rc) { pr_err("[%s] failed to enable DSI PHY, rc=%d\n", display->name, rc); @@ -735,7 +741,7 @@ static int dsi_display_phy_enable(struct dsi_display *display) rc = dsi_phy_enable(ctrl->phy, &display->config, DSI_PLL_SOURCE_NON_NATIVE, - true); + true, display->cont_splash_enabled); if (rc) { pr_err("[%s] failed to enable DSI PHY, rc=%d\n", display->name, rc); @@ -848,6 +854,11 @@ static int dsi_display_phy_sw_reset(struct dsi_display *display) int i; struct dsi_display_ctrl *m_ctrl, *ctrl; + if (display->cont_splash_enabled) { + pr_debug("skip phy sw reset\n"); + return 0; + } + m_ctrl = &display->ctrl[display->cmd_master_idx]; rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl); @@ -1748,6 +1759,45 @@ static int _dsi_display_dev_deinit(struct dsi_display *display) return rc; } +/* + * _dsi_display_config_ctrl_for_splash + * + * Config ctrl engine for DSI display. 
+ * @display: Handle to the display + * Returns: Zero on success + */ +static int _dsi_display_config_ctrl_for_splash(struct dsi_display *display) +{ + int rc = 0; + + if (!display) { + pr_err("Invalid params\n"); + return -EINVAL; + } + + if (display->config.panel_mode == DSI_OP_VIDEO_MODE) { + rc = dsi_display_vid_engine_enable(display); + if (rc) { + pr_err("[%s]failed to enable video engine, rc=%d\n", + display->name, rc); + goto error_out; + } + } else if (display->config.panel_mode == DSI_OP_CMD_MODE) { + rc = dsi_display_cmd_engine_enable(display); + if (rc) { + pr_err("[%s]failed to enable cmd engine, rc=%d\n", + display->name, rc); + goto error_out; + } + } else { + pr_err("[%s] Invalid configuration\n", display->name); + rc = -EINVAL; + } + +error_out: + return rc; +} + /** * dsi_display_bind - bind dsi device with controlling device * @dev: Pointer to base of platform device @@ -2141,6 +2191,8 @@ int dsi_display_drm_bridge_init(struct dsi_display *display, init_data.num_of_input_lanes = num_of_lanes; init_data.precede_bridge = precede_bridge; init_data.panel_count = display->panel_count; + init_data.cont_splash_enabled = + display->cont_splash_enabled; dba_bridge = dba_bridge_init(display->drm_dev, enc, &init_data); if (IS_ERR_OR_NULL(dba_bridge)) { @@ -2451,26 +2503,28 @@ int dsi_display_prepare(struct dsi_display *display) mutex_lock(&display->display_lock); - for (i = 0; i < display->panel_count; i++) { - rc = dsi_panel_pre_prepare(display->panel[i]); - if (rc) { - SDE_ERROR("[%s] panel pre-prepare failed, rc=%d\n", - display->name, rc); - goto error_panel_post_unprep; + if (!display->cont_splash_enabled) { + for (i = 0; i < display->panel_count; i++) { + rc = dsi_panel_pre_prepare(display->panel[i]); + if (rc) { + SDE_ERROR("[%s]pre-prepare failed, rc=%d\n", + display->name, rc); + goto error_panel_post_unprep; + } } } rc = dsi_display_ctrl_power_on(display); if (rc) { pr_err("[%s] failed to power on dsi controllers, rc=%d\n", - display->name, rc); + display->name, rc); goto error_panel_post_unprep; } rc = dsi_display_phy_power_on(display); if (rc) { pr_err("[%s] failed to power on dsi phy, rc = %d\n", - display->name, rc); + display->name, rc); goto error_ctrl_pwr_off; } @@ -2497,21 +2551,21 @@ int dsi_display_prepare(struct dsi_display *display) rc = dsi_display_ctrl_init(display); if (rc) { pr_err("[%s] failed to setup DSI controller, rc=%d\n", - display->name, rc); + display->name, rc); goto error_phy_disable; } rc = dsi_display_ctrl_link_clk_on(display); if (rc) { pr_err("[%s] failed to enable DSI link clocks, rc=%d\n", - display->name, rc); + display->name, rc); goto error_ctrl_deinit; } rc = dsi_display_ctrl_host_enable(display); if (rc) { pr_err("[%s] failed to enable DSI host, rc=%d\n", - display->name, rc); + display->name, rc); goto error_ctrl_link_off; } @@ -2519,11 +2573,10 @@ int dsi_display_prepare(struct dsi_display *display) rc = dsi_panel_prepare(display->panel[j]); if (rc) { SDE_ERROR("[%s] panel prepare failed, rc=%d\n", - display->name, rc); + display->name, rc); goto error_panel_unprep; } } - goto error; error_panel_unprep: @@ -2559,6 +2612,12 @@ int dsi_display_enable(struct dsi_display *display) return -EINVAL; } + if (display->cont_splash_enabled) { + _dsi_display_config_ctrl_for_splash(display); + display->cont_splash_enabled = false; + return 0; + } + mutex_lock(&display->display_lock); for (i = 0; i < display->panel_count; i++) { @@ -2755,6 +2814,46 @@ int dsi_display_unprepare(struct dsi_display *display) return rc; } +int 
dsi_dsiplay_setup_splash_resource(struct dsi_display *display) +{ + int ret = 0, i = 0; + struct dsi_display_ctrl *ctrl; + + if (!display) + return -EINVAL; + + for (i = 0; i < display->ctrl_count; i++) { + ctrl = &display->ctrl[i]; + if (!ctrl) + return -EINVAL; + + dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.host_pwr, true); + dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.digital, true); + dsi_pwr_enable_regulator(&ctrl->phy->pwr_info.phy_pwr, true); + + ret = dsi_clk_enable_core_clks(&ctrl->ctrl->clk_info.core_clks, + true); + if (ret) { + SDE_ERROR("failed to set core clk for dsi, ret = %d\n", + ret); + return -EINVAL; + } + + ret = dsi_clk_enable_link_clks(&ctrl->ctrl->clk_info.link_clks, + true); + if (ret) { + SDE_ERROR("failed to set link clk for dsi, ret = %d\n", + ret); + return -EINVAL; + } + + dsi_ctrl_update_power_state(ctrl->ctrl, + DSI_CTRL_POWER_LINK_CLK_ON); + } + + return ret; +} + static int __init dsi_display_register(void) { dsi_phy_drv_register(); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h index 210b8d00850b..3723f19fd0e7 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -158,6 +158,8 @@ struct dsi_display { /* DEBUG FS */ struct dentry *root; + + bool cont_splash_enabled; }; int dsi_display_dev_probe(struct platform_device *pdev); @@ -338,4 +340,15 @@ int dsi_display_clock_gate(struct dsi_display *display, bool enable); int dsi_dispaly_static_frame(struct dsi_display *display, bool enable); int dsi_display_set_backlight(void *display, u32 bl_lvl); + +/** + * dsi_dsiplay_setup_splash_resource + * @display: Handle to display. + * + * Setup DSI splash resource to avoid reset and glitch if DSI is enabled + * in bootloder. + * + * Return: error code. + */ +int dsi_dsiplay_setup_splash_resource(struct dsi_display *display); #endif /* _DSI_DISPLAY_H_ */ diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c index 309401eb3093..35000d7eb12a 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -439,7 +439,7 @@ int dsi_connector_get_modes(struct drm_connector *connector, rc = dsi_display_get_modes(display, NULL, &count); if (rc) { pr_err("failed to get num of modes, rc=%d\n", rc); - goto error; + goto end; } size = count * sizeof(*modes); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c index 1ccbbe7df573..da3b3b548e5f 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved. 
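dsi_dsiplay_setup_splash_resource() above exists so that the kernel holds its own votes on everything the bootloader left running; otherwise late-init housekeeping that gates unreferenced clocks and supplies (the clk.count == 1 case mentioned for HDMI later in this series, and the regulator core's analogous unused-supply cleanup) could switch the hardware off under the splash image. The condensed sequence below is lifted from that hunk, with the per-controller loop and error handling dropped for brevity.

	/* 1. supplies first */
	dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.host_pwr, true);
	dsi_pwr_enable_regulator(&ctrl->ctrl->pwr_info.digital, true);
	dsi_pwr_enable_regulator(&ctrl->phy->pwr_info.phy_pwr, true);

	/* 2. core clocks, then link clocks */
	dsi_clk_enable_core_clks(&ctrl->ctrl->clk_info.core_clks, true);
	dsi_clk_enable_link_clks(&ctrl->ctrl->clk_info.link_clks, true);

	/* 3. record the new power state in the controller's state machine */
	dsi_ctrl_update_power_state(ctrl->ctrl, DSI_CTRL_POWER_LINK_CLK_ON);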
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -721,9 +721,10 @@ error: * Return: error code. */ int dsi_phy_enable(struct msm_dsi_phy *phy, - struct dsi_host_config *config, - enum dsi_phy_pll_source pll_source, - bool skip_validation) + struct dsi_host_config *config, + enum dsi_phy_pll_source pll_source, + bool skip_validation, + bool cont_splash_enabled) { int rc = 0; @@ -758,7 +759,8 @@ int dsi_phy_enable(struct msm_dsi_phy *phy, goto error_disable_clks; } - dsi_phy_enable_hw(phy); + if (!cont_splash_enabled) + dsi_phy_enable_hw(phy); error_disable_clks: rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false); diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h index 6c31bfa3ea00..aa21d0b347e8 100644 --- a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h +++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -157,9 +157,10 @@ int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable); * Return: error code. */ int dsi_phy_enable(struct msm_dsi_phy *dsi_phy, - struct dsi_host_config *config, - enum dsi_phy_pll_source pll_source, - bool skip_validation); + struct dsi_host_config *config, + enum dsi_phy_pll_source pll_source, + bool skip_validation, + bool cont_splash_enabled); /** * dsi_phy_disable() - disable DSI PHY hardware. diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c index 21b89663a9c3..852ac80410f0 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c @@ -1311,7 +1311,7 @@ static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi) } } - if (!sde_kms->splash_info.handoff) { + if (!sde_hdmi->cont_splash_enabled) { sde_hdmi_set_mode(hdmi, false); _sde_hdmi_phy_reset(hdmi); sde_hdmi_set_mode(hdmi, true); @@ -3186,7 +3186,6 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) struct msm_drm_private *priv = NULL; struct hdmi *hdmi; struct platform_device *pdev; - struct sde_kms *sde_kms; DBG(""); if (!display || !display->drm_dev || !enc) { @@ -3252,8 +3251,7 @@ int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc) * clocks. This can skip the clock disabling operation in * clock_late_init when finding clk.count == 1. 
*/ - sde_kms = to_sde_kms(priv->kms); - if (sde_kms->splash_info.handoff) { + if (display->cont_splash_enabled) { sde_hdmi_bridge_power_on(hdmi->bridge); hdmi->power_on = true; } else { diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h index 607d2cf3c7b7..9cf807e829c7 100644 --- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h +++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h @@ -196,6 +196,8 @@ struct sde_hdmi { struct dss_io_data io[HDMI_TX_MAX_IO]; /* DEBUG FS */ struct dentry *root; + + bool cont_splash_enabled; }; /** diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index 01b6425c6e19..d751625bbfd7 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -193,7 +193,8 @@ static void mdp5_plane_reset(struct drm_plane *plane) kfree(to_mdp5_plane_state(plane->state)); mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL); - + if (!mdp5_state) + return; /* assign default blend parameters */ mdp5_state->alpha = 255; mdp5_state->premultiplied = 0; @@ -686,14 +687,21 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, bool vflip, hflip; unsigned long flags; int ret; + const struct msm_format *msm_fmt; + msm_fmt = msm_framebuffer_format(fb); nplanes = drm_format_num_planes(fb->pixel_format); /* bad formats should already be rejected: */ if (WARN_ON(nplanes > pipe2nclients(pipe))) return -EINVAL; - format = to_mdp_format(msm_framebuffer_format(fb)); + if (!msm_fmt) { + pr_err("invalid format"); + return -EINVAL; + } + + format = to_mdp_format(msm_fmt); pix_format = format->base.pixel_format; /* src values are in Q16 fixed point, convert to integer: */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 2e528b112e1f..af36b95beadb 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -347,6 +347,10 @@ static int submit_reloc(struct msm_gpu *gpu, * to do it page-by-page, w/ kmap() if not vmap()d.. */ ptr = msm_gem_vaddr(&obj->base); + if (!ptr) { + DRM_ERROR("Invalid format"); + return -EINVAL; + } if (IS_ERR(ptr)) { ret = PTR_ERR(ptr); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index b52c4752c5fe..4586b62401fb 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -237,7 +237,8 @@ static struct device *find_context_bank(const char *name) /* Get the parent device */ parent = of_find_device_by_node(node->parent); - + if (!parent) + return ERR_PTR(-ENODEV); /* Populate the sub nodes */ of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev); diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c index ea3f138ee461..cd00ab4b4a81 100644 --- a/drivers/gpu/drm/msm/sde/sde_crtc.c +++ b/drivers/gpu/drm/msm/sde/sde_crtc.c @@ -39,10 +39,10 @@ #include "sde_trace.h" /* default input fence timeout, in ms */ -#define SDE_CRTC_INPUT_FENCE_TIMEOUT 2000 +#define SDE_CRTC_INPUT_FENCE_TIMEOUT 10000 /* - * The default input fence timeout is 2 seconds while max allowed + * The default input fence timeout is 10 seconds while max allowed * range is 10 seconds. Any value above 10 seconds adds glitches beyond * tolerance limit. 
*/ @@ -1625,8 +1625,18 @@ static void sde_crtc_install_properties(struct drm_crtc *crtc, sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion); sde_kms_info_add_keyint(info, "max_linewidth", catalog->max_mixer_width); - sde_kms_info_add_keyint(info, "max_blendstages", - catalog->max_mixer_blendstages); + + /* till now, we can't know which display early RVC will run on. + * Not to impact early RVC's layer, we decrease all lm's blend stage. + * This should be restored after handoff is done. + */ + if (sde_kms->splash_info.handoff) + sde_kms_info_add_keyint(info, "max_blendstages", + catalog->max_mixer_blendstages - 1); + else + sde_kms_info_add_keyint(info, "max_blendstages", + catalog->max_mixer_blendstages); + if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2) sde_kms_info_add_keystr(info, "qseed_type", "qseed2"); if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3) diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c index a94de553c855..b95157b28855 100644 --- a/drivers/gpu/drm/msm/sde/sde_kms.c +++ b/drivers/gpu/drm/msm/sde/sde_kms.c @@ -639,6 +639,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev, continue; } + rc = sde_splash_setup_display_resource(&sde_kms->splash_info, + display, DRM_MODE_CONNECTOR_DSI); + if (rc) { + SDE_ERROR("dsi %d splash resource setup failed %d\n", + i, rc); + sde_encoder_destroy(encoder); + continue; + } + rc = dsi_display_drm_bridge_init(display, encoder); if (rc) { SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc); @@ -731,6 +740,15 @@ static int _sde_kms_setup_displays(struct drm_device *dev, continue; } + rc = sde_splash_setup_display_resource(&sde_kms->splash_info, + display, DRM_MODE_CONNECTOR_HDMIA); + if (rc) { + SDE_ERROR("hdmi %d splash resource setup failed %d\n", + i, rc); + sde_encoder_destroy(encoder); + continue; + } + rc = sde_hdmi_drm_init(display, encoder); if (rc) { SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc); @@ -812,6 +830,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) struct msm_drm_private *priv; struct sde_mdss_cfg *catalog; + struct sde_splash_info *sinfo; int primary_planes_idx, i, ret; int max_crtc_count, max_plane_count; @@ -824,6 +843,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) dev = sde_kms->dev; priv = dev->dev_private; catalog = sde_kms->catalog; + sinfo = &sde_kms->splash_info; ret = sde_core_irq_domain_add(sde_kms); if (ret) @@ -851,7 +871,7 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) primary = false; plane = sde_plane_init(dev, catalog->vp[i].id, - primary, 1UL << crtc_id, true); + primary, 1UL << crtc_id, true, false); if (IS_ERR(plane)) { SDE_ERROR("sde_plane_init failed\n"); ret = PTR_ERR(plane); @@ -869,14 +889,22 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms) for (i = 0; i < max_plane_count; i++) { bool primary = true; + bool resv_plane = false; if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR) || primary_planes_idx >= max_crtc_count) primary = false; + if (sde_splash_query_plane_is_reserved(sinfo, + catalog->sspp[i].id)) { + resv_plane = true; + DRM_INFO("pipe%d is reserved\n", + catalog->sspp[i].id); + } + plane = sde_plane_init(dev, catalog->sspp[i].id, primary, (1UL << max_crtc_count) - 1, - false); + false, resv_plane); if (IS_ERR(plane)) { SDE_ERROR("sde_plane_init failed\n"); ret = PTR_ERR(plane); @@ -1337,12 +1365,17 @@ static int sde_kms_hw_init(struct msm_kms *kms) */ sinfo = &sde_kms->splash_info; if (sinfo->handoff) { - rc = sde_splash_parse_dt(dev); + rc = 
sde_splash_parse_memory_dt(dev); if (rc) { - SDE_ERROR("parse dt for splash info failed: %d\n", rc); + SDE_ERROR("parse memory dt failed: %d\n", rc); goto power_error; } + rc = sde_splash_parse_reserved_plane_dt(sinfo, + sde_kms->catalog); + if (rc) + SDE_ERROR("parse reserved plane dt failed: %d\n", rc); + sde_splash_init(&priv->phandle, kms); } diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c index acd5687f6d11..ceac5a931e7e 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.c +++ b/drivers/gpu/drm/msm/sde/sde_plane.c @@ -1798,7 +1798,7 @@ static void sde_plane_atomic_update(struct drm_plane *plane, /* helper to install properties which are common to planes and crtcs */ static void _sde_plane_install_properties(struct drm_plane *plane, - struct sde_mdss_cfg *catalog) + struct sde_mdss_cfg *catalog, bool plane_reserved) { static const struct drm_prop_enum_list e_blend_op[] = { {SDE_DRM_BLEND_OP_NOT_DEFINED, "not_defined"}, @@ -1994,6 +1994,16 @@ static void _sde_plane_install_properties(struct drm_plane *plane, sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale); sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp); sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp); + + /* When early RVC is enabled in bootloader and doesn't exit, + * user app should not touch the pipe which RVC is on. + * So mark the plane_unavailibility to the special pipe's property, + * user can parse this property of this pipe and stop this pipe's + * allocation after parsing. + * plane_reserved is 1, means the pipe is occupied in bootloader. + * plane_reserved is 0, means it's not used in bootloader. + */ + sde_kms_info_add_keyint(info, "plane_unavailability", plane_reserved); msm_property_set_blob(&psde->property_info, &psde->blob_info, info->data, info->len, PLANE_PROP_INFO); @@ -2731,7 +2741,8 @@ end: /* initialize plane */ struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, bool primary_plane, - unsigned long possible_crtcs, bool vp_enabled) + unsigned long possible_crtcs, + bool vp_enabled, bool plane_reserved) { struct drm_plane *plane = NULL; struct sde_plane *psde; @@ -2856,7 +2867,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev, PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT, sizeof(struct sde_plane_state)); - _sde_plane_install_properties(plane, kms->catalog); + _sde_plane_install_properties(plane, kms->catalog, plane_reserved); /* save user friendly pipe name for later */ snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id); diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h index 7b91822d4cde..8ac582643926 100644 --- a/drivers/gpu/drm/msm/sde/sde_plane.h +++ b/drivers/gpu/drm/msm/sde/sde_plane.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
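The reserved-plane plumbing introduced above is spread across sde_kms, sde_splash and sde_plane, so the path is summarised here in one place. The two statements are condensed from the _sde_kms_drm_obj_init hunk earlier in this series; the comment only restates what those hunks do.

	/*
	 * DT node /qcom,sde-reserved-plane
	 *   -> sde_splash_parse_reserved_plane_dt() fills reserved_pipe_info[]
	 *   -> sde_splash_query_plane_is_reserved() answers per SSPP id
	 *   -> sde_plane_init(..., plane_reserved) publishes the flag to
	 *      userspace as the "plane_unavailability" key of the
	 *      PLANE_PROP_INFO blob, so compositors can leave the pipe to
	 *      early RVC until handoff completes.
	 */
	resv_plane = sde_splash_query_plane_is_reserved(sinfo,
							catalog->sspp[i].id);
	plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
			       (1UL << max_crtc_count) - 1, false, resv_plane);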
* Copyright (C) 2013 Red Hat * Author: Rob Clark <robdclark@gmail.com> * @@ -77,10 +77,12 @@ void sde_plane_flush(struct drm_plane *plane); * @primary_plane: true if this pipe is primary plane for crtc * @possible_crtcs: bitmask of crtc that can be attached to the given pipe * @vp_enabled: Flag indicating if virtual planes enabled + * @plane_reserved: Flag indicating the plane is occupied in bootloader */ struct drm_plane *sde_plane_init(struct drm_device *dev, uint32_t pipe, bool primary_plane, - unsigned long possible_crtcs, bool vp_enabled); + unsigned long possible_crtcs, + bool vp_enabled, bool plane_reserved); /** * sde_plane_wait_input_fence - wait for input fence object diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c index 2789ae053663..f6bd7b040dcb 100644 --- a/drivers/gpu/drm/msm/sde/sde_splash.c +++ b/drivers/gpu/drm/msm/sde/sde_splash.c @@ -23,6 +23,7 @@ #include "sde_hw_intf.h" #include "sde_hw_catalog.h" #include "dsi_display.h" +#include "sde_hdmi.h" #define MDP_SSPP_TOP0_OFF 0x1000 #define DISP_INTF_SEL 0x004 @@ -39,6 +40,10 @@ #define SDE_LK_EXIT_MAX_LOOP 20 +#define INTF_HDMI_SEL (BIT(25) | BIT(24)) +#define INTF_DSI0_SEL BIT(8) +#define INTF_DSI1_SEL BIT(16) + static DEFINE_MUTEX(sde_splash_lock); /* @@ -283,6 +288,44 @@ static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo) sinfo->splash_mem_size = NULL; } +static void _sde_splash_sent_pipe_update_uevent(struct sde_kms *sde_kms) +{ + char *event_string; + char *envp[2]; + struct drm_device *dev; + struct device *kdev; + int i = 0; + + if (!sde_kms || !sde_kms->dev) { + DRM_ERROR("invalid input\n"); + return; + } + + dev = sde_kms->dev; + kdev = dev->primary->kdev; + + event_string = kzalloc(SZ_4K, GFP_KERNEL); + if (!event_string) { + SDE_ERROR("failed to allocate event string\n"); + return; + } + + for (i = 0; i < MAX_BLOCKS; i++) { + if (sde_kms->splash_info.reserved_pipe_info[i] != 0xFFFFFFFF) + snprintf(event_string, SZ_4K, "pipe%d avialable", + sde_kms->splash_info.reserved_pipe_info[i]); + } + + DRM_INFO("generating pipe update event[%s]", event_string); + + envp[0] = event_string; + envp[1] = NULL; + + kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp); + + kfree(event_string); +} + static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo, u32 *hdmi_cnt, u32 *dsi_cnt) { @@ -372,12 +415,12 @@ void sde_splash_destroy(struct sde_splash_info *sinfo, } /* - * sde_splash_parse_dt. + * sde_splash_parse_memory_dt. * In the function, it will parse and reserve two kinds of memory node. * First is to get the reserved memory for display buffers. - * Second is to get the memory node LK's code stack is running on. + * Second is to get the memory node which LK's heap memory is running on. 
*/ -int sde_splash_parse_dt(struct drm_device *dev) +int sde_splash_parse_memory_dt(struct drm_device *dev) { struct msm_drm_private *priv = dev->dev_private; struct sde_kms *sde_kms; @@ -404,6 +447,79 @@ int sde_splash_parse_dt(struct drm_device *dev) return 0; } +static inline u32 _sde_splash_parse_sspp_id(struct sde_mdss_cfg *cfg, + const char *name) +{ + int i; + + for (i = 0; i < cfg->sspp_count; i++) { + if (!strcmp(cfg->sspp[i].name, name)) + return cfg->sspp[i].id; + } + + return 0; +} + +int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info, + struct sde_mdss_cfg *cfg) +{ + struct device_node *parent, *node; + struct property *prop; + const char *cname; + int ret = 0, i = 0; + + if (!splash_info || !cfg) + return -EINVAL; + + parent = of_find_node_by_path("/qcom,sde-reserved-plane"); + if (!parent) + return -EINVAL; + + for (i = 0; i < MAX_BLOCKS; i++) + splash_info->reserved_pipe_info[i] = 0xFFFFFFFF; + + i = 0; + for_each_child_of_node(parent, node) { + if (i >= MAX_BLOCKS) { + SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n", + i, MAX_BLOCKS); + ret = -EINVAL; + goto parent_node_err; + } + + of_property_for_each_string(node, "qcom,plane-name", + prop, cname) + splash_info->reserved_pipe_info[i] = + _sde_splash_parse_sspp_id(cfg, cname); + i++; + } + +parent_node_err: + of_node_put(parent); + + return ret; +} + +bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo, + uint32_t pipe) +{ + int i = 0; + + if (!sinfo) + return false; + + /* early return if no splash is enabled */ + if (!sinfo->handoff) + return false; + + for (i = 0; i < MAX_BLOCKS; i++) { + if (sinfo->reserved_pipe_info[i] == pipe) + return true; + } + + return false; +} + int sde_splash_get_handoff_status(struct msm_kms *kms) { uint32_t intf_sel = 0; @@ -448,14 +564,17 @@ int sde_splash_get_handoff_status(struct msm_kms *kms) * considered as single display. So decrement * 'num_of_display_on' by 1 */ - if (split_display) + if (split_display) { num_of_display_on--; + sinfo->split_is_enabled = true; + } } if (num_of_display_on) { sinfo->handoff = true; sinfo->program_scratch_regs = true; sinfo->lk_is_exited = false; + sinfo->intf_sel_status = intf_sel; } else { sinfo->handoff = false; sinfo->program_scratch_regs = false; @@ -504,6 +623,71 @@ int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu, return ret ? 
0 : -ENOMEM; } +static bool _sde_splash_get_panel_intf_status(struct sde_splash_info *sinfo, + const char *display_name, int connector_type) +{ + bool ret = false; + int intf_status = 0; + + if (sinfo && sinfo->handoff) { + if (connector_type == DRM_MODE_CONNECTOR_DSI) { + if (!strcmp(display_name, "dsi_adv_7533_1")) { + if (sinfo->intf_sel_status & INTF_DSI0_SEL) + ret = true; + } else if (!strcmp(display_name, "dsi_adv_7533_2")) { + if (sinfo->intf_sel_status & INTF_DSI1_SEL) + ret = true; + } else + DRM_INFO("wrong display name %s\n", + display_name); + } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + intf_status = sinfo->intf_sel_status & INTF_HDMI_SEL; + ret = (intf_status == INTF_HDMI_SEL); + } + } + + return ret; +} + +int sde_splash_setup_display_resource(struct sde_splash_info *sinfo, + void *disp, int connector_type) +{ + if (!sinfo || !disp) + return -EINVAL; + + /* early return if splash is not enabled in bootloader */ + if (!sinfo->handoff) + return 0; + + if (connector_type == DRM_MODE_CONNECTOR_DSI) { + struct dsi_display *display = (struct dsi_display *)disp; + + display->cont_splash_enabled = + _sde_splash_get_panel_intf_status(sinfo, + display->name, + connector_type); + + DRM_INFO("DSI splash %s\n", + display->cont_splash_enabled ? "enabled" : "disabled"); + + if (display->cont_splash_enabled) { + if (dsi_dsiplay_setup_splash_resource(display)) + return -EINVAL; + } + } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)disp; + + sde_hdmi->cont_splash_enabled = + _sde_splash_get_panel_intf_status(sinfo, + NULL, connector_type); + + DRM_INFO("HDMI splash %s\n", + sde_hdmi->cont_splash_enabled ? "enabled" : "disabled"); + } + + return 0; +} + void sde_splash_setup_connector_count(struct sde_splash_info *sinfo, int connector_type) { @@ -578,6 +762,8 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms, sde_power_data_bus_bandwidth_ctrl(phandle, sde_kms->core_client, false); + _sde_splash_sent_pipe_update_uevent(sde_kms); + mutex_unlock(&sde_splash_lock); return 0; } diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h index babf88335e49..9eddd87e5e26 100644 --- a/drivers/gpu/drm/msm/sde/sde_splash.h +++ b/drivers/gpu/drm/msm/sde/sde_splash.h @@ -1,5 +1,5 @@ /** - * Copyright (c) 2017 The Linux Foundation. All rights reserved. + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -49,11 +49,20 @@ struct sde_splash_info { /* memory size of lk pool */ size_t lk_pool_size; + /* enabled statue of displays*/ + uint32_t intf_sel_status; + + /* DSI split enabled flag */ + bool split_is_enabled; + /* registered hdmi connector count */ uint32_t hdmi_connector_cnt; /* registered dst connector count */ uint32_t dsi_connector_cnt; + + /* reserved pipe info for early RVC */ + uint32_t reserved_pipe_info[MAX_BLOCKS]; }; /* APIs for early splash handoff functions */ @@ -99,11 +108,27 @@ int sde_splash_clean_up_free_resource(struct msm_kms *kms, int connector_type, void *display); /** - * sde_splash_parse_dt. + * sde_splash_parse_memory_dt. * * Parse reserved memory block from DT for early splash. */ -int sde_splash_parse_dt(struct drm_device *dev); +int sde_splash_parse_memory_dt(struct drm_device *dev); + +/** + * sde_splash_parse_reserved_plane_dt + * + * Parse reserved plane information from DT for early RVC case. 
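_sde_splash_get_panel_intf_status() above is driven entirely by the DISP_INTF_SEL masks added at the top of sde_splash.c, so a compact decoder may make the bit tests easier to follow. The helper below is an editor's illustration, not part of the patch; the masks and display-name strings are the ones the patch uses.

static bool intf_enabled_by_bootloader(u32 intf_sel, int connector_type,
				       const char *dsi_name)
{
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA)
		/* HDMI counts as lit up only if both BIT(24) and BIT(25) are set */
		return (intf_sel & INTF_HDMI_SEL) == INTF_HDMI_SEL;

	if (!strcmp(dsi_name, "dsi_adv_7533_1"))
		return intf_sel & INTF_DSI0_SEL;	/* BIT(8)  */
	if (!strcmp(dsi_name, "dsi_adv_7533_2"))
		return intf_sel & INTF_DSI1_SEL;	/* BIT(16) */

	return false;
}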
+ */ +int sde_splash_parse_reserved_plane_dt(struct sde_splash_info *splash_info, + struct sde_mdss_cfg *cfg); + +/* + * sde_splash_query_plane_is_reserved + * + * Query plane is reserved in dt. + */ +bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo, + uint32_t pipe); /** * sde_splash_smmu_map. @@ -129,4 +154,11 @@ void sde_splash_destroy(struct sde_splash_info *sinfo, */ bool sde_splash_get_lk_complete_status(struct sde_splash_info *sinfo); +/** + * sde_splash_setup_display_resource + * + * Setup display resource based on connector type. + */ +int sde_splash_setup_display_resource(struct sde_splash_info *sinfo, + void *disp, int connector_type); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 2a5ed7460354..ababdaabe870 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -253,9 +253,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force) nv_connector->edid = NULL; } - ret = pm_runtime_get_sync(connector->dev->dev); - if (ret < 0 && ret != -EACCES) - return conn_status; + /* Outputs are only polled while runtime active, so acquiring a + * runtime PM ref here is unnecessary (and would deadlock upon + * runtime suspend because it waits for polling to finish). + */ + if (!drm_kms_helper_is_poll_worker()) { + ret = pm_runtime_get_sync(connector->dev->dev); + if (ret < 0 && ret != -EACCES) + return conn_status; + } nv_encoder = nouveau_connector_ddc_detect(connector); if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) { @@ -323,8 +329,10 @@ detect_analog: out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return conn_status; } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 00de1bf81519..9dfc2471ea09 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -104,7 +104,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos, }; struct nouveau_display *disp = nouveau_display(crtc->dev); struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)]; - int ret, retry = 1; + int ret, retry = 20; do { ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args)); diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c index f516b5891932..083db3f5181f 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c @@ -288,7 +288,12 @@ static int dmm_txn_commit(struct dmm_txn *txn, bool wait) msecs_to_jiffies(100))) { dev_err(dmm->dev, "timed out waiting for done\n"); ret = -ETIMEDOUT; + goto cleanup; } + + /* Check the engine status before continue */ + ret = wait_status(engine, DMM_PATSTATUS_READY | + DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE); } cleanup: diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 7ed08fdc4c42..393e5335e33b 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -158,7 +158,7 @@ static void evict_entry(struct drm_gem_object *obj, size_t size = PAGE_SIZE * n; loff_t off = mmap_offset(obj) + (entry->obj_pgoff << PAGE_SHIFT); - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); + const int m = DIV_ROUND_UP(omap_obj->width << fmt, 
PAGE_SIZE); if (m > 1) { int i; @@ -415,7 +415,7 @@ static int fault_2d(struct drm_gem_object *obj, * into account in some of the math, so figure out virtual stride * in pages */ - const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE); + const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE); /* We don't use vmf->pgoff since that has the fake offset: */ pgoff = ((unsigned long)vmf->virtual_address - diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index c4a552637c93..3ff7689835dc 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -494,9 +494,11 @@ static const struct drm_fb_helper_funcs qxl_fb_helper_funcs = { int qxl_fbdev_init(struct qxl_device *qdev) { + int ret = 0; + +#ifdef CONFIG_DRM_FBDEV_EMULATION struct qxl_fbdev *qfbdev; int bpp_sel = 32; /* TODO: parameter from somewhere? */ - int ret; qfbdev = kzalloc(sizeof(struct qxl_fbdev), GFP_KERNEL); if (!qfbdev) @@ -531,6 +533,8 @@ fini: drm_fb_helper_fini(&qfbdev->helper); free: kfree(qfbdev); +#endif + return ret; } @@ -546,6 +550,9 @@ void qxl_fbdev_fini(struct qxl_device *qdev) void qxl_fbdev_set_suspend(struct qxl_device *qdev, int state) { + if (!qdev->mode_info.qfbdev) + return; + drm_fb_helper_set_suspend(&qdev->mode_info.qfbdev->helper, state); } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 134874cab4c7..80b6d6e4721a 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3599,35 +3599,8 @@ static void cik_gpu_init(struct radeon_device *rdev) case CHIP_KAVERI: rdev->config.cik.max_shader_engines = 1; rdev->config.cik.max_tile_pipes = 4; - if ((rdev->pdev->device == 0x1304) || - (rdev->pdev->device == 0x1305) || - (rdev->pdev->device == 0x130C) || - (rdev->pdev->device == 0x130F) || - (rdev->pdev->device == 0x1310) || - (rdev->pdev->device == 0x1311) || - (rdev->pdev->device == 0x131C)) { - rdev->config.cik.max_cu_per_sh = 8; - rdev->config.cik.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x1309) || - (rdev->pdev->device == 0x130A) || - (rdev->pdev->device == 0x130D) || - (rdev->pdev->device == 0x1313) || - (rdev->pdev->device == 0x131D)) { - rdev->config.cik.max_cu_per_sh = 6; - rdev->config.cik.max_backends_per_se = 2; - } else if ((rdev->pdev->device == 0x1306) || - (rdev->pdev->device == 0x1307) || - (rdev->pdev->device == 0x130B) || - (rdev->pdev->device == 0x130E) || - (rdev->pdev->device == 0x1315) || - (rdev->pdev->device == 0x1318) || - (rdev->pdev->device == 0x131B)) { - rdev->config.cik.max_cu_per_sh = 4; - rdev->config.cik.max_backends_per_se = 1; - } else { - rdev->config.cik.max_cu_per_sh = 3; - rdev->config.cik.max_backends_per_se = 1; - } + rdev->config.cik.max_cu_per_sh = 8; + rdev->config.cik.max_backends_per_se = 2; rdev->config.cik.max_sh_per_se = 1; rdev->config.cik.max_texture_channel_caches = 4; rdev->config.cik.max_gprs = 256; diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 30f00748ed37..1a2a7365d0b5 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -89,25 +89,18 @@ void radeon_connector_hotplug(struct drm_connector *connector) /* don't do anything if sink is not display port, i.e., * passive dp->(dvi|hdmi) adaptor */ - if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { - int saved_dpms = connector->dpms; - /* Only turn off the display if it's physically disconnected */ - if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { - 
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } else if (radeon_dp_needs_link_train(radeon_connector)) { - /* Don't try to start link training before we - * have the dpcd */ - if (!radeon_dp_getdpcd(radeon_connector)) - return; - - /* set it to OFF so that drm_helper_connector_dpms() - * won't return immediately since the current state - * is ON at this point. - */ - connector->dpms = DRM_MODE_DPMS_OFF; - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } - connector->dpms = saved_dpms; + if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT && + radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && + radeon_dp_needs_link_train(radeon_connector)) { + /* Don't start link training before we have the DPCD */ + if (!radeon_dp_getdpcd(radeon_connector)) + return; + + /* Turn the connector off and back on immediately, which + * will trigger link training + */ + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); + drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); } } } @@ -891,9 +884,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); @@ -916,8 +911,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force) /* check acpi lid status ??? */ radeon_connector_update_scratch_regs(connector, ret); - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1020,9 +1019,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; int r; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1089,8 +1090,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force) radeon_connector_update_scratch_regs(connector, ret); out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1153,9 +1156,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (!radeon_connector->dac_load_detect) return ret; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -1167,8 +1172,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force) if (ret == connector_status_connected) ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); radeon_connector_update_scratch_regs(connector, ret); - 
pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } + return ret; } @@ -1230,9 +1239,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) enum drm_connector_status ret = connector_status_disconnected; bool dret = false, broken_edid = false; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (radeon_connector->detected_hpd_without_ddc) { force = true; @@ -1415,8 +1426,10 @@ out: } exit: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } @@ -1666,9 +1679,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force) if (radeon_dig_connector->is_mst) return connector_status_disconnected; - r = pm_runtime_get_sync(connector->dev->dev); - if (r < 0) - return connector_status_disconnected; + if (!drm_kms_helper_is_poll_worker()) { + r = pm_runtime_get_sync(connector->dev->dev); + if (r < 0) + return connector_status_disconnected; + } if (!force && radeon_check_hpd_status_unchanged(connector)) { ret = connector->status; @@ -1755,8 +1770,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force) } out: - pm_runtime_mark_last_busy(connector->dev->dev); - pm_runtime_put_autosuspend(connector->dev->dev); + if (!drm_kms_helper_is_poll_worker()) { + pm_runtime_mark_last_busy(connector->dev->dev); + pm_runtime_put_autosuspend(connector->dev->dev); + } return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3645b223aa37..446d99062306 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1374,6 +1374,12 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOENT); } + /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ + if (obj->import_attach) { + DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n"); + return ERR_PTR(-EINVAL); + } + radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL); if (radeon_fb == NULL) { drm_gem_object_unreference_unlocked(obj); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 73e41a8613da..29bd801f5dad 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -256,10 +256,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { unsigned long start = vma->vm_start; unsigned long size = vma->vm_end - vma->vm_start; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + unsigned long offset; unsigned long page, pos; - if (offset + size > info->fix.smem_len) + if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) + return -EINVAL; + + offset = vma->vm_pgoff << PAGE_SHIFT; + + if (offset > info->fix.smem_len || size > info->fix.smem_len - offset) return -EINVAL; pos = (unsigned long)info->fix.smem_start + offset; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index b40ed6061f05..7f898cfdc746 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -118,7 
+118,7 @@ static const struct file_operations virtio_gpu_driver_fops = { static struct drm_driver driver = { - .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER, + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, .set_busid = drm_virtio_set_busid, .load = virtio_gpu_driver_load, .unload = virtio_gpu_driver_unload, diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index 4a74129c5708..7b6e5c5e7284 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -68,10 +68,17 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, struct virtio_gpu_object *bo; uint32_t handle; - if (plane->fb) { - vgfb = to_virtio_gpu_framebuffer(plane->fb); + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); bo = gem_to_virtio_gpu_obj(vgfb->obj); handle = bo->hw_res_handle; + if (bo->dumb) { + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, handle, 0, + cpu_to_le32(plane->state->crtc_w), + cpu_to_le32(plane->state->crtc_h), + plane->state->crtc_x, plane->state->crtc_y, NULL); + } } else { handle = 0; } @@ -84,6 +91,11 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, plane->state->crtc_h, plane->state->crtc_x, plane->state->crtc_y); + virtio_gpu_cmd_resource_flush(vgdev, handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->crtc_w, + plane->state->crtc_h); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index d2d93959b119..aec6e9eef489 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -433,7 +433,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, set.y = 0; set.mode = NULL; set.fb = NULL; - set.num_connectors = 1; + set.num_connectors = 0; set.connectors = &par->con; ret = drm_mode_set_config_internal(&set); if (ret) { @@ -821,7 +821,9 @@ int vmw_fb_off(struct vmw_private *vmw_priv) flush_delayed_work(&par->local_work); mutex_lock(&par->bo_mutex); + drm_modeset_lock_all(vmw_priv->dev); (void) vmw_fb_kms_detach(par, true, false); + drm_modeset_unlock_all(vmw_priv->dev); mutex_unlock(&par->bo_mutex); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 060e5c6f4446..098e562bd579 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -27,7 +27,6 @@ #include "vmwgfx_kms.h" - /* Might need a hrtimer here? */ #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) @@ -1910,9 +1909,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, * Helper to be used if an error forces the caller to undo the actions of * vmw_kms_helper_resource_prepare. */ -void vmw_kms_helper_resource_revert(struct vmw_resource *res) +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx) { - vmw_kms_helper_buffer_revert(res->backup); + struct vmw_resource *res = ctx->res; + + vmw_kms_helper_buffer_revert(ctx->buf); + vmw_dmabuf_unreference(&ctx->buf); vmw_resource_unreserve(res, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -1929,10 +1931,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res) * interrupted by a signal. 
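
Editorial aside, not part of the patch: the vmw_kms_helper_resource_prepare()/_finish() rework in this hunk makes callers carry a small vmw_validation_ctx across the prepare/finish pair instead of relying on res->backup staying stable. A minimal fragment of the new calling convention, modelled on the SOU/STDU dirty paths later in the patch (srf, out_fence and ret are those callers' own names):

    struct vmw_validation_ctx ctx;
    int ret;

    ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
    if (ret)
            return ret;

    /* ... emit the surface dirty / define commands ... */

    /* a path that must undo prepare() instead would call
     * vmw_kms_helper_resource_revert(&ctx) here */
    vmw_kms_helper_resource_finish(&ctx, out_fence);
    return ret;
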
*/ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible) + bool interruptible, + struct vmw_validation_ctx *ctx) { int ret = 0; + ctx->buf = NULL; + ctx->res = res; + if (interruptible) ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); else @@ -1951,6 +1957,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, res->dev_priv->has_mob); if (ret) goto out_unreserve; + + ctx->buf = vmw_dmabuf_reference(res->backup); } ret = vmw_resource_validate(res); if (ret) @@ -1958,7 +1966,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res, return 0; out_revert: - vmw_kms_helper_buffer_revert(res->backup); + vmw_kms_helper_buffer_revert(ctx->buf); out_unreserve: vmw_resource_unreserve(res, false, NULL, 0); out_unlock: @@ -1974,11 +1982,13 @@ out_unlock: * @out_fence: Optional pointer to a fence pointer. If non-NULL, a * ref-counted fence pointer is returned here. */ -void vmw_kms_helper_resource_finish(struct vmw_resource *res, - struct vmw_fence_obj **out_fence) +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, + struct vmw_fence_obj **out_fence) { - if (res->backup || out_fence) - vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, + struct vmw_resource *res = ctx->res; + + if (ctx->buf || out_fence) + vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf, out_fence, NULL); vmw_resource_unreserve(res, false, NULL, 0); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index edd81503516d..63b05d5ee50a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -180,6 +180,11 @@ struct vmw_display_unit { bool is_implicit; }; +struct vmw_validation_ctx { + struct vmw_resource *res; + struct vmw_dma_buffer *buf; +}; + #define vmw_crtc_to_du(x) \ container_of(x, struct vmw_display_unit, crtc) #define vmw_connector_to_du(x) \ @@ -230,9 +235,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv, struct drm_vmw_fence_rep __user * user_fence_rep); int vmw_kms_helper_resource_prepare(struct vmw_resource *res, - bool interruptible); -void vmw_kms_helper_resource_revert(struct vmw_resource *res); -void vmw_kms_helper_resource_finish(struct vmw_resource *res, + bool interruptible, + struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx); +void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx, struct vmw_fence_obj **out_fence); int vmw_kms_readback(struct vmw_private *dev_priv, struct drm_file *file_priv, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 13926ff192e3..f50fcd213413 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -841,12 +841,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_kms_sou_surface_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -865,7 +866,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv, ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, dest_x, dest_y, num_clips, inc, &sdirty.base); - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index f823fc3efed7..3184a9ae22c1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1003,12 +1003,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, struct vmw_framebuffer_surface *vfbs = container_of(framebuffer, typeof(*vfbs), base); struct vmw_stdu_dirty sdirty; + struct vmw_validation_ctx ctx; int ret; if (!srf) srf = &vfbs->surface->res; - ret = vmw_kms_helper_resource_prepare(srf, true); + ret = vmw_kms_helper_resource_prepare(srf, true, &ctx); if (ret) return ret; @@ -1031,7 +1032,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv, dest_x, dest_y, num_clips, inc, &sdirty.base); out_finish: - vmw_kms_helper_resource_finish(srf, out_fence); + vmw_kms_helper_resource_finish(&ctx, out_fence); return ret; } diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c index 119741f7ce47..4daf1fad6ee1 100644 --- a/drivers/gpu/msm/adreno_a5xx.c +++ b/drivers/gpu/msm/adreno_a5xx.c @@ -1381,13 +1381,13 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev, /* todo double check the reg writes */ while ((cur - opcode) < length) { - if (cur[0] == 1 && ((cur + 4) - opcode) <= length) { + if (cur[0] == 1 && (length - (cur - opcode) >= 4)) { /* Write a 32 bit value to a 64 bit reg */ reg = cur[2]; reg = (reg << 32) | cur[1]; kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]); cur += 4; - } else if (cur[0] == 2 && ((cur + 5) - opcode) <= length) { + } else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) { /* Write a 64 bit value to a 64 bit reg */ reg = cur[2]; reg = (reg << 32) | cur[1]; @@ -1395,7 +1395,7 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev, val = (val << 32) | cur[3]; kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val); cur += 5; - } else if (cur[0] == 3 && ((cur + 2) - opcode) <= length) { + } else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) { /* Delay for X usec */ udelay(cur[1]); cur += 2; diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c index 0cd4f7216239..5eea6fe0d7bd 100644 --- a/drivers/hid/hid-elo.c +++ b/drivers/hid/hid-elo.c @@ -42,6 +42,12 @@ static int elo_input_configured(struct hid_device *hdev, { struct input_dev *input = hidinput->input; + /* + * ELO devices have one Button usage in GenDesk field, which makes + * hid-input map it to BTN_LEFT; that confuses userspace, which then + * considers the device to be a mouse/touchpad instead of touchscreen. + */ + clear_bit(BTN_LEFT, input->keybit); set_bit(BTN_TOUCH, input->keybit); set_bit(ABS_PRESSURE, input->absbit); input_set_abs_params(input, ABS_PRESSURE, 0, 256, 0, 0); diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index 2ba6bf69b7d0..53e54855c366 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -1128,18 +1128,26 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct /* * Ignore out-of-range values as per HID specification, - * section 5.10 and 6.2.25. + * section 5.10 and 6.2.25, when NULL state bit is present. + * When it's not, clamp the value to match Microsoft's input + * driver as mentioned in "Required HID usages for digitizers": + * https://msdn.microsoft.com/en-us/library/windows/hardware/dn672278(v=vs.85).asp * * The logical_minimum < logical_maximum check is done so that we * don't unintentionally discard values sent by devices which * don't specify logical min and max. 
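
Illustration only, not part of the patch: the hid-input change in progress here makes hidinput_hid_event() drop out-of-range values only when the field declares a NULL state, and clamp them otherwise (the new code follows just below). A standalone sketch with a hypothetical field whose logical range is 0..1023:

    /* hypothetical limits, for illustration only */
    #define DEMO_LOGICAL_MIN 0
    #define DEMO_LOGICAL_MAX 1023

    /* returns -1 when the event should be ignored */
    static int demo_filter_value(int value, int has_null_state)
    {
            if (has_null_state &&
                (value < DEMO_LOGICAL_MIN || value > DEMO_LOGICAL_MAX))
                    return -1;

            if (value < DEMO_LOGICAL_MIN)
                    value = DEMO_LOGICAL_MIN;
            if (value > DEMO_LOGICAL_MAX)
                    value = DEMO_LOGICAL_MAX;
            return value;
    }

With these limits, demo_filter_value(1100, 0) clamps to 1023, matching the Microsoft digitizer guidance cited in the comment, while demo_filter_value(1100, 1) still drops the event as before.
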
*/ if ((field->flags & HID_MAIN_ITEM_VARIABLE) && - (field->logical_minimum < field->logical_maximum) && - (value < field->logical_minimum || - value > field->logical_maximum)) { - dbg_hid("Ignoring out-of-range value %x\n", value); - return; + (field->logical_minimum < field->logical_maximum)) { + if (field->flags & HID_MAIN_ITEM_NULL_STATE && + (value < field->logical_minimum || + value > field->logical_maximum)) { + dbg_hid("Ignoring out-of-range value %x\n", value); + return; + } + value = clamp(value, + field->logical_minimum, + field->logical_maximum); } /* diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index a38af68cf326..0a0628d11c0b 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -976,7 +976,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; /* Pad to 32-bits - FIXME: Revisit*/ if ((skb->len & 3) && skb_pad(skb, 4 - (skb->len & 3))) - goto drop; + goto inc_dropped; /* * Modem sends Phonet messages over SSI with its own endianess... @@ -1028,8 +1028,9 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) drop2: hsi_free_msg(msg); drop: - dev->stats.tx_dropped++; dev_kfree_skb(skb); +inc_dropped: + dev->stats.tx_dropped++; return 0; } diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c index b24f1d3045f0..a629f7c130f0 100644 --- a/drivers/hwmon/ina2xx.c +++ b/drivers/hwmon/ina2xx.c @@ -94,18 +94,20 @@ enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { u16 config_default; - int calibration_factor; + int calibration_value; int registers; int shunt_div; int bus_voltage_shift; int bus_voltage_lsb; /* uV */ - int power_lsb; /* uW */ + int power_lsb_factor; }; struct ina2xx_data { const struct ina2xx_config *config; long rshunt; + long current_lsb_uA; + long power_lsb_uW; struct mutex config_lock; struct regmap *regmap; @@ -115,21 +117,21 @@ struct ina2xx_data { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { .config_default = INA219_CONFIG_DEFAULT, - .calibration_factor = 40960000, + .calibration_value = 4096, .registers = INA219_REGISTERS, .shunt_div = 100, .bus_voltage_shift = 3, .bus_voltage_lsb = 4000, - .power_lsb = 20000, + .power_lsb_factor = 20, }, [ina226] = { .config_default = INA226_CONFIG_DEFAULT, - .calibration_factor = 5120000, + .calibration_value = 2048, .registers = INA226_REGISTERS, .shunt_div = 400, .bus_voltage_shift = 0, .bus_voltage_lsb = 1250, - .power_lsb = 25000, + .power_lsb_factor = 25, }, }; @@ -168,12 +170,16 @@ static u16 ina226_interval_to_reg(int interval) return INA226_SHIFT_AVG(avg_bits); } +/* + * Calibration register is set to the best value, which eliminates + * truncation errors on calculating current register in hardware. + * According to datasheet (eq. 3) the best values are 2048 for + * ina226 and 4096 for ina219. They are hardcoded as calibration_value. + */ static int ina2xx_calibrate(struct ina2xx_data *data) { - u16 val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - data->rshunt); - - return regmap_write(data->regmap, INA2XX_CALIBRATION, val); + return regmap_write(data->regmap, INA2XX_CALIBRATION, + data->config->calibration_value); } /* @@ -186,10 +192,6 @@ static int ina2xx_init(struct ina2xx_data *data) if (ret < 0) return ret; - /* - * Set current LSB to 1mA, shunt is in uOhms - * (equation 13 in datasheet). 
- */ return ina2xx_calibrate(data); } @@ -267,15 +269,15 @@ static int ina2xx_get_value(struct ina2xx_data *data, u8 reg, val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_POWER: - val = regval * data->config->power_lsb; + val = regval * data->power_lsb_uW; break; case INA2XX_CURRENT: - /* signed register, LSB=1mA (selected), in mA */ - val = (s16)regval; + /* signed register, result in mA */ + val = regval * data->current_lsb_uA; + val = DIV_ROUND_CLOSEST(val, 1000); break; case INA2XX_CALIBRATION: - val = DIV_ROUND_CLOSEST(data->config->calibration_factor, - regval); + val = regval; break; default: /* programmer goofed */ @@ -303,9 +305,32 @@ static ssize_t ina2xx_show_value(struct device *dev, ina2xx_get_value(data, attr->index, regval)); } -static ssize_t ina2xx_set_shunt(struct device *dev, - struct device_attribute *da, - const char *buf, size_t count) +/* + * In order to keep calibration register value fixed, the product + * of current_lsb and shunt_resistor should also be fixed and equal + * to shunt_voltage_lsb = 1 / shunt_div multiplied by 10^9 in order + * to keep the scale. + */ +static int ina2xx_set_shunt(struct ina2xx_data *data, long val) +{ + unsigned int dividend = DIV_ROUND_CLOSEST(1000000000, + data->config->shunt_div); + if (val <= 0 || val > dividend) + return -EINVAL; + + mutex_lock(&data->config_lock); + data->rshunt = val; + data->current_lsb_uA = DIV_ROUND_CLOSEST(dividend, val); + data->power_lsb_uW = data->config->power_lsb_factor * + data->current_lsb_uA; + mutex_unlock(&data->config_lock); + + return 0; +} + +static ssize_t ina2xx_store_shunt(struct device *dev, + struct device_attribute *da, + const char *buf, size_t count) { unsigned long val; int status; @@ -315,18 +340,9 @@ static ssize_t ina2xx_set_shunt(struct device *dev, if (status < 0) return status; - if (val == 0 || - /* Values greater than the calibration factor make no sense. 
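
Worked example (editorial, not part of the patch): with the calibration register now fixed at the datasheet value, ina2xx_set_shunt() above derives the scaling from the shunt alone: current_lsb_uA = (10^9 / shunt_div) / rshunt_uOhm and power_lsb_uW = power_lsb_factor * current_lsb_uA. A standalone sketch using the ina226 constants from the config table above; the 10 mOhm shunt is only an example value:

    /* mirrors the arithmetic in ina2xx_set_shunt() for the ina226 */
    static int ina226_demo_lsb(long rshunt_uohm,
                               long *current_lsb_ua, long *power_lsb_uw)
    {
            const long shunt_div = 400;        /* 2.5 uV shunt voltage LSB */
            const long power_lsb_factor = 25;
            const long dividend = 1000000000L / shunt_div; /* 2500000 */

            if (rshunt_uohm <= 0 || rshunt_uohm > dividend)
                    return -1;

            /* round-to-nearest division, as DIV_ROUND_CLOSEST() does */
            *current_lsb_ua = (dividend + rshunt_uohm / 2) / rshunt_uohm;
            *power_lsb_uw = power_lsb_factor * *current_lsb_ua;
            return 0;
    }

For a 10 mOhm shunt (10000 uOhm) this gives a 250 uA current LSB and a 6250 uW power LSB, so a raw power reading is scaled by 6250 rather than by the previously fixed 25000 uW per LSB.
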
*/ - val > data->config->calibration_factor) - return -EINVAL; - - mutex_lock(&data->config_lock); - data->rshunt = val; - status = ina2xx_calibrate(data); - mutex_unlock(&data->config_lock); + status = ina2xx_set_shunt(data, val); if (status < 0) return status; - return count; } @@ -386,7 +402,7 @@ static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO, ina2xx_show_value, NULL, /* shunt resistance */ static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR, - ina2xx_show_value, ina2xx_set_shunt, + ina2xx_show_value, ina2xx_store_shunt, INA2XX_CALIBRATION); /* update interval (ina226 only) */ @@ -441,10 +457,7 @@ static int ina2xx_probe(struct i2c_client *client, val = INA2XX_RSHUNT_DEFAULT; } - if (val <= 0 || val > data->config->calibration_factor) - return -ENODEV; - - data->rshunt = val; + ina2xx_set_shunt(data, val); ina2xx_regmap_config.max_register = data->config->registers; diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c index 188af4c89f40..18477dd1e243 100644 --- a/drivers/hwmon/pmbus/adm1275.c +++ b/drivers/hwmon/pmbus/adm1275.c @@ -95,8 +95,8 @@ static const struct coefficients adm1075_coefficients[] = { [0] = { 27169, 0, -1 }, /* voltage */ [1] = { 806, 20475, -1 }, /* current, irange25 */ [2] = { 404, 20475, -1 }, /* current, irange50 */ - [3] = { 0, -1, 8549 }, /* power, irange25 */ - [4] = { 0, -1, 4279 }, /* power, irange50 */ + [3] = { 8549, 0, -1 }, /* power, irange25 */ + [4] = { 4279, 0, -1 }, /* power, irange50 */ }; static const struct coefficients adm1275_coefficients[] = { diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c index 3fd080b94069..0da9adc49574 100644 --- a/drivers/hwtracing/coresight/coresight-tpiu.c +++ b/drivers/hwtracing/coresight/coresight-tpiu.c @@ -45,8 +45,11 @@ #define TPIU_ITATBCTR0 0xef8 /** register definition **/ +/* FFSR - 0x300 */ +#define FFSR_FT_STOPPED BIT(1) /* FFCR - 0x304 */ #define FFCR_FON_MAN BIT(6) +#define FFCR_STOP_FI BIT(12) /** * @base: memory mapped base address for this component. @@ -85,10 +88,14 @@ static void tpiu_disable_hw(struct tpiu_drvdata *drvdata) { CS_UNLOCK(drvdata->base); - /* Clear formatter controle reg. 
*/ - writel_relaxed(0x0, drvdata->base + TPIU_FFCR); + /* Clear formatter and stop on flush */ + writel_relaxed(FFCR_STOP_FI, drvdata->base + TPIU_FFCR); /* Generate manual flush */ - writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + writel_relaxed(FFCR_STOP_FI | FFCR_FON_MAN, drvdata->base + TPIU_FFCR); + /* Wait for flush to complete */ + coresight_timeout(drvdata->base, TPIU_FFCR, FFCR_FON_MAN, 0); + /* Wait for formatter to stop */ + coresight_timeout(drvdata->base, TPIU_FFSR, FFSR_FT_STOPPED, 1); CS_LOCK(drvdata->base); } diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c index 061ddadd1122..1b8199f1b25f 100644 --- a/drivers/hwtracing/coresight/of_coresight.c +++ b/drivers/hwtracing/coresight/of_coresight.c @@ -152,7 +152,7 @@ struct coresight_platform_data *of_get_coresight_platform_data( continue; /* The local out port number */ - pdata->outports[i] = endpoint.id; + pdata->outports[i] = endpoint.port; /* * Get a handle on the remote port and parent diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c index c0d962212720..67261bc10e80 100644 --- a/drivers/i2c/busses/i2c-msm-v2.c +++ b/drivers/i2c/busses/i2c-msm-v2.c @@ -2848,8 +2848,8 @@ static void i2c_msm_pm_rt_init(struct device *dev) {} static const struct dev_pm_ops i2c_msm_pm_ops = { #ifdef CONFIG_PM_SLEEP - .suspend_noirq = i2c_msm_pm_sys_suspend_noirq, - .resume_noirq = i2c_msm_pm_sys_resume_noirq, + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(i2c_msm_pm_sys_suspend_noirq, + i2c_msm_pm_sys_resume_noirq) #endif SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend, i2c_msm_pm_rt_resume, diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index dfc98df7b1b6..7aa7b9cb6203 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -18,6 +18,9 @@ #define ACPI_SMBUS_HC_CLASS "smbus" #define ACPI_SMBUS_HC_DEVICE_NAME "cmi" +/* SMBUS HID definition as supported by Microsoft Windows */ +#define ACPI_SMBUS_MS_HID "SMB0001" + ACPI_MODULE_NAME("smbus_cmi"); struct smbus_methods_t { @@ -51,6 +54,7 @@ static const struct smbus_methods_t ibm_smbus_methods = { static const struct acpi_device_id acpi_smbus_cmi_ids[] = { {"SMBUS01", (kernel_ulong_t)&smbus_methods}, {ACPI_SMBUS_IBM_HID, (kernel_ulong_t)&ibm_smbus_methods}, + {ACPI_SMBUS_MS_HID, (kernel_ulong_t)&smbus_methods}, {"", 0} }; MODULE_DEVICE_TABLE(acpi, acpi_smbus_cmi_ids); diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 197a08b4e2f3..b4136d3bf6b7 100644 --- a/drivers/iio/accel/st_accel_core.c +++ b/drivers/iio/accel/st_accel_core.c @@ -628,6 +628,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = { int st_accel_common_probe(struct iio_dev *indio_dev) { struct st_sensor_data *adata = iio_priv(indio_dev); + struct st_sensors_platform_data *pdata = + (struct st_sensors_platform_data *)adata->dev->platform_data; int irq = adata->get_irq_data_ready(indio_dev); int err; @@ -652,11 +654,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev) &adata->sensor_settings->fs.fs_avl[0]; adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; - if (!adata->dev->platform_data) - adata->dev->platform_data = - (struct st_sensors_platform_data *)&default_accel_pdata; + if (!pdata) + pdata = (struct st_sensors_platform_data *)&default_accel_pdata; - err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) return err; diff --git a/drivers/iio/adc/hi8435.c 
b/drivers/iio/adc/hi8435.c index c73c6c62a6ac..7401f102dff4 100644 --- a/drivers/iio/adc/hi8435.c +++ b/drivers/iio/adc/hi8435.c @@ -121,10 +121,21 @@ static int hi8435_write_event_config(struct iio_dev *idev, enum iio_event_direction dir, int state) { struct hi8435_priv *priv = iio_priv(idev); + int ret; + u32 tmp; + + if (state) { + ret = hi8435_readl(priv, HI8435_SO31_0_REG, &tmp); + if (ret < 0) + return ret; + if (tmp & BIT(chan->channel)) + priv->event_prev_val |= BIT(chan->channel); + else + priv->event_prev_val &= ~BIT(chan->channel); - priv->event_scan_mask &= ~BIT(chan->channel); - if (state) priv->event_scan_mask |= BIT(chan->channel); + } else + priv->event_scan_mask &= ~BIT(chan->channel); return 0; } @@ -442,13 +453,15 @@ static int hi8435_probe(struct spi_device *spi) priv->spi = spi; reset_gpio = devm_gpiod_get(&spi->dev, NULL, GPIOD_OUT_LOW); - if (IS_ERR(reset_gpio)) { - /* chip s/w reset if h/w reset failed */ + if (!IS_ERR(reset_gpio)) { + /* need >=100ns low pulse to reset chip */ + gpiod_set_raw_value_cansleep(reset_gpio, 0); + udelay(1); + gpiod_set_raw_value_cansleep(reset_gpio, 1); + } else { + /* s/w reset chip if h/w reset is not available */ hi8435_writeb(priv, HI8435_CTRL_REG, HI8435_CTRL_SRST); hi8435_writeb(priv, HI8435_CTRL_REG, 0); - } else { - udelay(5); - gpiod_set_value(reset_gpio, 1); } spi_set_drvdata(spi, idev); diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c index 6325e7dc8e03..f3cb4dc05391 100644 --- a/drivers/iio/magnetometer/st_magn_spi.c +++ b/drivers/iio/magnetometer/st_magn_spi.c @@ -48,8 +48,6 @@ static int st_magn_spi_remove(struct spi_device *spi) } static const struct spi_device_id st_magn_id_table[] = { - { LSM303DLHC_MAGN_DEV_NAME }, - { LSM303DLM_MAGN_DEV_NAME }, { LIS3MDL_MAGN_DEV_NAME }, { LSM303AGR_MAGN_DEV_NAME }, {}, diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c index 5056bd68573f..ba282ff3892d 100644 --- a/drivers/iio/pressure/st_pressure_core.c +++ b/drivers/iio/pressure/st_pressure_core.c @@ -436,6 +436,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = { int st_press_common_probe(struct iio_dev *indio_dev) { struct st_sensor_data *press_data = iio_priv(indio_dev); + struct st_sensors_platform_data *pdata = + (struct st_sensors_platform_data *)press_data->dev->platform_data; int irq = press_data->get_irq_data_ready(indio_dev); int err; @@ -464,12 +466,10 @@ int st_press_common_probe(struct iio_dev *indio_dev) press_data->odr = press_data->sensor_settings->odr.odr_avl[0].hz; /* Some devices don't support a data ready pin. */ - if (!press_data->dev->platform_data && - press_data->sensor_settings->drdy_irq.addr) - press_data->dev->platform_data = - (struct st_sensors_platform_data *)&default_press_pdata; + if (!pdata && press_data->sensor_settings->drdy_irq.addr) + pdata = (struct st_sensors_platform_data *)&default_press_pdata; - err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); + err = st_sensors_init_sensor(indio_dev, pdata); if (err < 0) return err; diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index 6a8024d9d742..864a7c8d82d3 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -86,6 +86,22 @@ int rdma_addr_size(struct sockaddr *addr) } EXPORT_SYMBOL(rdma_addr_size); +int rdma_addr_size_in6(struct sockaddr_in6 *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? 
ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_in6); + +int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr) +{ + int ret = rdma_addr_size((struct sockaddr *) addr); + + return ret <= sizeof(*addr) ? ret : 0; +} +EXPORT_SYMBOL(rdma_addr_size_kss); + static struct rdma_addr_client self; void rdma_addr_register_client(struct rdma_addr_client *client) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index b6c9a370a38b..d57a78ec7425 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -3743,6 +3743,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, struct cma_multicast *mc; int ret; + if (!id->device) + return -EINVAL; + id_priv = container_of(id, struct rdma_id_private, id); if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) @@ -4007,7 +4010,7 @@ static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) goto out; if (ibnl_put_attr(skb, nlh, - rdma_addr_size(cma_src_addr(id_priv)), + rdma_addr_size(cma_dst_addr(id_priv)), cma_dst_addr(id_priv), RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) goto out; diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index fb43a242847b..8d7d110d0721 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -663,6 +663,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) } skb_num++; spin_lock_irqsave(&iwpm_mapinfo_lock, flags); + ret = -EINVAL; for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) { hlist_for_each_entry(map_info, &iwpm_hash_bucket[i], hlist_node) { diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 886f61ea6cc7..960fcb613198 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -131,7 +131,7 @@ static inline struct ucma_context *_ucma_find_context(int id, ctx = idr_find(&ctx_idr, id); if (!ctx) ctx = ERR_PTR(-ENOENT); - else if (ctx->file != file) + else if (ctx->file != file || !ctx->cm_id) ctx = ERR_PTR(-EINVAL); return ctx; } @@ -453,6 +453,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, struct rdma_ucm_create_id cmd; struct rdma_ucm_create_id_resp resp; struct ucma_context *ctx; + struct rdma_cm_id *cm_id; enum ib_qp_type qp_type; int ret; @@ -473,10 +474,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, return -ENOMEM; ctx->uid = cmd.uid; - ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, - ucma_event_handler, ctx, cmd.ps, qp_type); - if (IS_ERR(ctx->cm_id)) { - ret = PTR_ERR(ctx->cm_id); + cm_id = rdma_create_id(current->nsproxy->net_ns, + ucma_event_handler, ctx, cmd.ps, qp_type); + if (IS_ERR(cm_id)) { + ret = PTR_ERR(cm_id); goto err1; } @@ -486,14 +487,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, ret = -EFAULT; goto err2; } + + ctx->cm_id = cm_id; return 0; err2: - rdma_destroy_id(ctx->cm_id); + rdma_destroy_id(cm_id); err1: mutex_lock(&mut); idr_remove(&ctx_idr, ctx->id); mutex_unlock(&mut); + mutex_lock(&file->mut); + list_del(&ctx->list); + mutex_unlock(&file->mut); kfree(ctx); return ret; } @@ -623,6 +629,9 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -636,22 +645,21 @@ static ssize_t ucma_bind(struct 
ucma_file *file, const char __user *inbuf, int in_len, int out_len) { struct rdma_ucm_bind cmd; - struct sockaddr *addr; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - addr = (struct sockaddr *) &cmd.addr; - if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr))) + if (cmd.reserved || !cmd.addr_size || + cmd.addr_size != rdma_addr_size_kss(&cmd.addr)) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_bind_addr(ctx->cm_id, addr); + ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_put_ctx(ctx); return ret; } @@ -667,13 +675,16 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_in6(&cmd.src_addr) || + !rdma_addr_size_in6(&cmd.dst_addr)) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, - (struct sockaddr *) &cmd.dst_addr, - cmd.timeout_ms); + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -683,24 +694,23 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file, int in_len, int out_len) { struct rdma_ucm_resolve_addr cmd; - struct sockaddr *src, *dst; struct ucma_context *ctx; int ret; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; - src = (struct sockaddr *) &cmd.src_addr; - dst = (struct sockaddr *) &cmd.dst_addr; - if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) || - !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst))) + if (cmd.reserved || + (cmd.src_size && (cmd.src_size != rdma_addr_size_kss(&cmd.src_addr))) || + !cmd.dst_size || (cmd.dst_size != rdma_addr_size_kss(&cmd.dst_addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); + ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, + (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms); ucma_put_ctx(ctx); return ret; } @@ -1138,10 +1148,18 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (cmd.qp_state > IB_QPS_ERR) + return -EINVAL; + ctx = ucma_get_ctx(file, cmd.id); if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (!ctx->cm_id->device) { + ret = -EINVAL; + goto out; + } + resp.qp_attr_mask = 0; memset(&qp_attr, 0, sizeof qp_attr); qp_attr.qp_state = cmd.qp_state; @@ -1274,6 +1292,9 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); + if (unlikely(cmd.optval > KMALLOC_MAX_SIZE)) + return -EINVAL; + optval = memdup_user((void __user *) (unsigned long) cmd.optval, cmd.optlen); if (IS_ERR(optval)) { @@ -1295,7 +1316,7 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, { struct rdma_ucm_notify cmd; struct ucma_context *ctx; - int ret; + int ret = -EINVAL; if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; @@ -1304,7 +1325,9 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf, if (IS_ERR(ctx)) return PTR_ERR(ctx); - ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); + if (ctx->cm_id->device) + ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event); + ucma_put_ctx(ctx); return ret; } @@ -1322,7 +1345,7 @@ static ssize_t ucma_process_join(struct ucma_file *file, return 
-ENOSPC; addr = (struct sockaddr *) &cmd->addr; - if (cmd->reserved || !cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) + if (cmd->reserved || (cmd->addr_size != rdma_addr_size(addr))) return -EINVAL; ctx = ucma_get_ctx(file, cmd->id); @@ -1381,7 +1404,10 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file, join_cmd.response = cmd.response; join_cmd.uid = cmd.uid; join_cmd.id = cmd.id; - join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); + join_cmd.addr_size = rdma_addr_size_in6(&cmd.addr); + if (!join_cmd.addr_size) + return -EINVAL; + join_cmd.reserved = 0; memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); @@ -1397,6 +1423,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file, if (copy_from_user(&cmd, inbuf, sizeof(cmd))) return -EFAULT; + if (!rdma_addr_size_kss(&cmd.addr)) + return -EINVAL; + return ucma_process_join(file, &cmd, out_len); } diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 0ae337bec4f2..6790ebb366dd 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -354,7 +354,7 @@ int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset, return -EINVAL; } - ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->nmap, dst, length, + ret = sg_pcopy_to_buffer(umem->sg_head.sgl, umem->npages, dst, length, offset + ib_umem_offset(umem)); if (ret < 0) diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b7a73f1a8beb..3eb967521917 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2436,9 +2436,13 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, static void *alloc_wr(size_t wr_size, __u32 num_sge) { + if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) / + sizeof (struct ib_sge)) + return NULL; + return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) + num_sge * sizeof (struct ib_sge), GFP_KERNEL); -}; +} ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, struct ib_device *ib_dev, @@ -2665,6 +2669,13 @@ static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf, goto err; } + if (user_wr->num_sge >= + (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) / + sizeof (struct ib_sge)) { + ret = -EINVAL; + goto err; + } + next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) + user_wr->num_sge * sizeof (struct ib_sge), GFP_KERNEL); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 5a2a0b5db938..67c4c73343d4 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1041,7 +1041,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) /* need to protect from a race on closing the vma as part of * mlx4_ib_vma_close(). 
*/ - down_read(&owning_mm->mmap_sem); + down_write(&owning_mm->mmap_sem); for (i = 0; i < HW_BAR_COUNT; i++) { vma = context->hw_bar_info[i].vma; if (!vma) @@ -1055,11 +1055,13 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) BUG_ON(1); } + context->hw_bar_info[i].vma->vm_flags &= + ~(VM_SHARED | VM_MAYSHARE); /* context going to be destroyed, should not access ops any more */ context->hw_bar_info[i].vma->vm_ops = NULL; } - up_read(&owning_mm->mmap_sem); + up_write(&owning_mm->mmap_sem); mmput(owning_mm); put_task_struct(owning_process); } diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 02c8deab1fff..4a4ab433062f 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -972,7 +972,12 @@ static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, if (ucmd.reserved0 || ucmd.reserved1) return -EINVAL; - umem = ib_umem_get(context, ucmd.buf_addr, entries * ucmd.cqe_size, + /* check multiplication overflow */ + if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1) + return -EINVAL; + + umem = ib_umem_get(context, ucmd.buf_addr, + (size_t)ucmd.cqe_size * entries, IB_ACCESS_LOCAL_WRITE, 1); if (IS_ERR(umem)) { err = PTR_ERR(umem); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 86c303a620c1..748b63b86cbc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -834,7 +834,7 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) dev->reset_stats.type = OCRDMA_RESET_STATS; dev->reset_stats.dev = dev; - if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + if (!debugfs_create_file("reset_stats", 0200, dev->dir, &dev->reset_stats, &ocrdma_dbg_ops)) goto err; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index d3f0a384faad..f6b06729f4ea 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -945,6 +945,19 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv) */ priv->dev->broadcast[8] = priv->pkey >> 8; priv->dev->broadcast[9] = priv->pkey & 0xff; + + /* + * Update the broadcast address in the priv->broadcast object, + * in case it already exists, otherwise no one will do that. + */ + if (priv->broadcast) { + spin_lock_irq(&priv->lock); + memcpy(priv->broadcast->mcmember.mgid.raw, + priv->dev->broadcast + 4, + sizeof(union ib_gid)); + spin_unlock_irq(&priv->lock); + } + return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 5c653669e736..37b42447045d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -724,6 +724,22 @@ static void path_rec_completion(int status, spin_lock_irqsave(&priv->lock, flags); if (!IS_ERR_OR_NULL(ah)) { + /* + * pathrec.dgid is used as the database key from the LLADDR, + * it must remain unchanged even if the SA returns a different + * GID to use in the AH. 
+ */ + if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid))) { + ipoib_dbg( + priv, + "%s got PathRec for gid %pI6 while asked for %pI6\n", + dev->name, pathrec->dgid.raw, + path->pathrec.dgid.raw); + memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw, + sizeof(union ib_gid)); + } + path->pathrec = *pathrec; old_ah = path->ah; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 8bf48165f32c..21e688d55da6 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -473,6 +473,9 @@ static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) return -EINVAL; + init_completion(&mcast->done); + set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); rec.mgid = mcast->mcmember.mgid; @@ -631,8 +634,6 @@ void ipoib_mcast_join_task(struct work_struct *work) if (mcast->backoff == 1 || time_after_eq(jiffies, mcast->delay_until)) { /* Found the next unjoined group */ - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); if (ipoib_mcast_join(dev, mcast)) { spin_unlock_irq(&priv->lock); return; @@ -652,11 +653,9 @@ out: queue_delayed_work(priv->wq, &priv->mcast_task, delay_until - jiffies); } - if (mcast) { - init_completion(&mcast->done); - set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); + if (mcast) ipoib_mcast_join(dev, mcast); - } + spin_unlock_irq(&priv->lock); } diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index a73874508c3a..cb3a8623ff54 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -2974,12 +2974,8 @@ static void srpt_queue_response(struct se_cmd *cmd) } spin_unlock_irqrestore(&ioctx->spinlock, flags); - if (unlikely(transport_check_aborted_status(&ioctx->cmd, false) - || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) { - atomic_inc(&ch->req_lim_delta); - srpt_abort_cmd(ioctx); + if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) return; - } dir = ioctx->cmd.data_direction; diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index ddd8148d51d7..75ff4c965573 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -525,6 +525,16 @@ config KEYBOARD_GOLDFISH_EVENTS To compile this driver as a module, choose M here: the module will be called goldfish-events. +config KEYBOARD_GOLDFISH_ROTARY + depends on GOLDFISH + tristate "Rotary encoder device for Goldfish" + help + Say Y here to get an input event device for the Goldfish virtual + device emulator that acts as a rotary encoder. + + To compile this driver as a module, choose M here: the + module will be called goldfish-rotary. 
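
Editorial note, not part of the patch: the KEYBOARD_GOLDFISH_ROTARY entry above adds a driver (goldfish_rotary.c, later in this patch) that registers an ordinary input device, so userspace consumes it through evdev like any other encoder. A minimal sketch, assuming the emulator advertises the encoder as EV_REL events and that it shows up as /dev/input/event2; both the event type and the node path are assumptions here, not taken from the patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/input.h>

    int main(void)
    {
            /* hypothetical node; find the real one in /proc/bus/input/devices */
            int fd = open("/dev/input/event2", O_RDONLY);
            struct input_event ev;

            if (fd < 0)
                    return 1;

            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    if (ev.type == EV_REL)  /* rotation delta from the encoder */
                            printf("rotary delta: %d\n", ev.value);
            }

            close(fd);
            return 0;
    }
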
+ config KEYBOARD_STOWAWAY tristate "Stowaway keyboard" select SERIO diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index 1d416ddf84e4..a5d43fc8fab6 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_KEYBOARD_CROS_EC) += cros_ec_keyb.o obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o obj-$(CONFIG_KEYBOARD_GOLDFISH_EVENTS) += goldfish_events.o +obj-$(CONFIG_KEYBOARD_GOLDFISH_ROTARY) += goldfish_rotary.o obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o diff --git a/drivers/input/keyboard/goldfish_rotary.c b/drivers/input/keyboard/goldfish_rotary.c new file mode 100644 index 000000000000..485727d44684 --- /dev/null +++ b/drivers/input/keyboard/goldfish_rotary.c @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2007 Google, Inc. + * Copyright (C) 2012 Intel, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/input.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/irq.h> +#include <linux/io.h> +#include <linux/acpi.h> + +enum { + REG_READ = 0x00, + REG_SET_PAGE = 0x00, + REG_LEN = 0x04, + REG_DATA = 0x08, + + PAGE_NAME = 0x00000, + PAGE_EVBITS = 0x10000, + PAGE_ABSDATA = 0x20000 | EV_ABS, +}; + +struct event_dev { + struct input_dev *input; + int irq; + void __iomem *addr; + char name[0]; +}; + +static irqreturn_t rotary_interrupt(int irq, void *dev_id) +{ + struct event_dev *edev = dev_id; + unsigned type, code, value; + + type = __raw_readl(edev->addr + REG_READ); + code = __raw_readl(edev->addr + REG_READ); + value = __raw_readl(edev->addr + REG_READ); + + input_event(edev->input, type, code, value); + return IRQ_HANDLED; +} + +static void rotary_import_bits(struct event_dev *edev, + unsigned long bits[], unsigned type, size_t count) +{ + void __iomem *addr = edev->addr; + int i, j; + size_t size; + uint8_t val; + + __raw_writel(PAGE_EVBITS | type, addr + REG_SET_PAGE); + + size = __raw_readl(addr + REG_LEN) * 8; + if (size < count) + count = size; + + addr += REG_DATA; + for (i = 0; i < count; i += 8) { + val = __raw_readb(addr++); + for (j = 0; j < 8; j++) + if (val & 1 << j) + set_bit(i + j, bits); + } +} + +static void rotary_import_abs_params(struct event_dev *edev) +{ + struct input_dev *input_dev = edev->input; + void __iomem *addr = edev->addr; + u32 val[4]; + int count; + int i, j; + + __raw_writel(PAGE_ABSDATA, addr + REG_SET_PAGE); + + count = __raw_readl(addr + REG_LEN) / sizeof(val); + if (count > ABS_MAX) + count = ABS_MAX; + + for (i = 0; i < count; i++) { + if (!test_bit(i, input_dev->absbit)) + continue; + + for (j = 0; j < ARRAY_SIZE(val); j++) { + int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32); + val[j] = __raw_readl(edev->addr + REG_DATA + offset); + } + + input_set_abs_params(input_dev, i, + val[0], val[1], val[2], val[3]); + } +} + +static int 
rotary_probe(struct platform_device *pdev) +{ + struct input_dev *input_dev; + struct event_dev *edev; + struct resource *res; + unsigned keymapnamelen; + void __iomem *addr; + int irq; + int i; + int error; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + addr = devm_ioremap(&pdev->dev, res->start, 4096); + if (!addr) + return -ENOMEM; + + __raw_writel(PAGE_NAME, addr + REG_SET_PAGE); + keymapnamelen = __raw_readl(addr + REG_LEN); + + edev = devm_kzalloc(&pdev->dev, + sizeof(struct event_dev) + keymapnamelen + 1, + GFP_KERNEL); + if (!edev) + return -ENOMEM; + + input_dev = devm_input_allocate_device(&pdev->dev); + if (!input_dev) + return -ENOMEM; + + edev->input = input_dev; + edev->addr = addr; + edev->irq = irq; + + for (i = 0; i < keymapnamelen; i++) + edev->name[i] = __raw_readb(edev->addr + REG_DATA + i); + + pr_debug("rotary_probe() keymap=%s\n", edev->name); + + input_dev->name = edev->name; + input_dev->id.bustype = BUS_HOST; + rotary_import_bits(edev, input_dev->evbit, EV_SYN, EV_MAX); + rotary_import_bits(edev, input_dev->relbit, EV_REL, REL_MAX); + rotary_import_bits(edev, input_dev->absbit, EV_ABS, ABS_MAX); + + rotary_import_abs_params(edev); + + error = devm_request_irq(&pdev->dev, edev->irq, rotary_interrupt, 0, + "goldfish-rotary", edev); + if (error) + return error; + + error = input_register_device(input_dev); + if (error) + return error; + + return 0; +} + +static const struct of_device_id goldfish_rotary_of_match[] = { + { .compatible = "generic,goldfish-rotary", }, + {}, +}; +MODULE_DEVICE_TABLE(of, goldfish_rotary_of_match); + +static const struct acpi_device_id goldfish_rotary_acpi_match[] = { + { "GFSH0008", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, goldfish_rotary_acpi_match); + +static struct platform_driver rotary_driver = { + .probe = rotary_probe, + .driver = { + .owner = THIS_MODULE, + .name = "goldfish_rotary", + .of_match_table = goldfish_rotary_of_match, + .acpi_match_table = ACPI_PTR(goldfish_rotary_acpi_match), + }, +}; + +module_platform_driver(rotary_driver); + +MODULE_AUTHOR("Nimrod Gileadi"); +MODULE_DESCRIPTION("Goldfish Rotary Encoder Device"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 7f12b6579f82..795fa353de7c 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c @@ -216,8 +216,10 @@ static void matrix_keypad_stop(struct input_dev *dev) { struct matrix_keypad *keypad = input_get_drvdata(dev); + spin_lock_irq(&keypad->lock); keypad->stopped = true; - mb(); + spin_unlock_irq(&keypad->lock); + flush_work(&keypad->work.work); /* * matrix_keypad_scan() will leave IRQs enabled; diff --git a/drivers/input/keyboard/qt1070.c b/drivers/input/keyboard/qt1070.c index 5a5778729e37..76bb51309a78 100644 --- a/drivers/input/keyboard/qt1070.c +++ b/drivers/input/keyboard/qt1070.c @@ -274,9 +274,18 @@ static const struct i2c_device_id qt1070_id[] = { }; MODULE_DEVICE_TABLE(i2c, qt1070_id); +#ifdef CONFIG_OF +static const struct of_device_id qt1070_of_match[] = { + { .compatible = "qt1070", }, + { }, +}; +MODULE_DEVICE_TABLE(of, qt1070_of_match); +#endif + static struct i2c_driver qt1070_driver = { .driver = { .name = "qt1070", + .of_match_table = of_match_ptr(qt1070_of_match), .pm = &qt1070_pm_ops, }, .id_table = qt1070_id, diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index 
3048ef3e3e16..a5e8998047fe 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c @@ -189,8 +189,6 @@ static void tca8418_read_keypad(struct tca8418_keypad *keypad_data) input_event(input, EV_MSC, MSC_SCAN, code); input_report_key(input, keymap[code], state); - /* Read for next loop */ - error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, ®); } while (1); input_sync(input); diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c index 603fc2fadf05..12b20840fb74 100644 --- a/drivers/input/misc/twl4030-pwrbutton.c +++ b/drivers/input/misc/twl4030-pwrbutton.c @@ -70,7 +70,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev) pwr->phys = "twl4030_pwrbutton/input0"; pwr->dev.parent = &pdev->dev; - err = devm_request_threaded_irq(&pwr->dev, irq, NULL, powerbutton_irq, + err = devm_request_threaded_irq(&pdev->dev, irq, NULL, powerbutton_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT, "twl4030_pwrbutton", pwr); diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index c9d491bc85e0..3851d5715772 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -1082,6 +1082,13 @@ static int elan_probe(struct i2c_client *client, return error; } + /* Make sure there is something at this address */ + error = i2c_smbus_read_byte(client); + if (error < 0) { + dev_dbg(&client->dev, "nothing at this address: %d\n", error); + return -ENXIO; + } + /* Initialize the touchpad. */ error = elan_initialize(data); if (error) diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index a679e56c44cd..765879dcaf85 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -557,7 +557,14 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, long ret; int error; int len; - u8 buffer[ETP_I2C_INF_LENGTH]; + u8 buffer[ETP_I2C_REPORT_LEN]; + + len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); + if (len != ETP_I2C_REPORT_LEN) { + error = len < 0 ? len : -EIO; + dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", + error, len); + } reinit_completion(completion); enable_irq(client->irq); diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 51b96e9bf793..06ea28e5d7b4 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1715,6 +1715,17 @@ int elantech_init(struct psmouse *psmouse) etd->samples[0], etd->samples[1], etd->samples[2]); } + if (etd->samples[1] == 0x74 && etd->hw_version == 0x03) { + /* + * This module has a bug which makes absolute mode + * unusable, so let's abort so we'll be using standard + * PS/2 protocol. 
+ */ + psmouse_info(psmouse, + "absolute mode broken, forcing standard PS/2 protocol\n"); + goto init_fail; + } + if (elantech_set_absolute_mode(psmouse)) { psmouse_err(psmouse, "failed to put touchpad into absolute mode.\n"); diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index b604564dec5c..30328e57fdda 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -15,6 +15,7 @@ #define MOUSEDEV_MINORS 31 #define MOUSEDEV_MIX 63 +#include <linux/bitops.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/poll.h> @@ -103,7 +104,7 @@ struct mousedev_client { spinlock_t packet_lock; int pos_x, pos_y; - signed char ps2[6]; + u8 ps2[6]; unsigned char ready, buffer, bufsiz; unsigned char imexseq, impsseq; enum mousedev_emul mode; @@ -291,11 +292,10 @@ static void mousedev_notify_readers(struct mousedev *mousedev, } client->pos_x += packet->dx; - client->pos_x = client->pos_x < 0 ? - 0 : (client->pos_x >= xres ? xres : client->pos_x); + client->pos_x = clamp_val(client->pos_x, 0, xres); + client->pos_y += packet->dy; - client->pos_y = client->pos_y < 0 ? - 0 : (client->pos_y >= yres ? yres : client->pos_y); + client->pos_y = clamp_val(client->pos_y, 0, yres); p->dx += packet->dx; p->dy += packet->dy; @@ -571,44 +571,50 @@ static int mousedev_open(struct inode *inode, struct file *file) return error; } -static inline int mousedev_limit_delta(int delta, int limit) -{ - return delta > limit ? limit : (delta < -limit ? -limit : delta); -} - -static void mousedev_packet(struct mousedev_client *client, - signed char *ps2_data) +static void mousedev_packet(struct mousedev_client *client, u8 *ps2_data) { struct mousedev_motion *p = &client->packets[client->tail]; + s8 dx, dy, dz; + + dx = clamp_val(p->dx, -127, 127); + p->dx -= dx; + + dy = clamp_val(p->dy, -127, 127); + p->dy -= dy; - ps2_data[0] = 0x08 | - ((p->dx < 0) << 4) | ((p->dy < 0) << 5) | (p->buttons & 0x07); - ps2_data[1] = mousedev_limit_delta(p->dx, 127); - ps2_data[2] = mousedev_limit_delta(p->dy, 127); - p->dx -= ps2_data[1]; - p->dy -= ps2_data[2]; + ps2_data[0] = BIT(3); + ps2_data[0] |= ((dx & BIT(7)) >> 3) | ((dy & BIT(7)) >> 2); + ps2_data[0] |= p->buttons & 0x07; + ps2_data[1] = dx; + ps2_data[2] = dy; switch (client->mode) { case MOUSEDEV_EMUL_EXPS: - ps2_data[3] = mousedev_limit_delta(p->dz, 7); - p->dz -= ps2_data[3]; - ps2_data[3] = (ps2_data[3] & 0x0f) | ((p->buttons & 0x18) << 1); + dz = clamp_val(p->dz, -7, 7); + p->dz -= dz; + + ps2_data[3] = (dz & 0x0f) | ((p->buttons & 0x18) << 1); client->bufsiz = 4; break; case MOUSEDEV_EMUL_IMPS: - ps2_data[0] |= - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); - ps2_data[3] = mousedev_limit_delta(p->dz, 127); - p->dz -= ps2_data[3]; + dz = clamp_val(p->dz, -127, 127); + p->dz -= dz; + + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | + ((p->buttons & 0x08) >> 1); + ps2_data[3] = dz; + client->bufsiz = 4; break; case MOUSEDEV_EMUL_PS2: default: - ps2_data[0] |= - ((p->buttons & 0x10) >> 3) | ((p->buttons & 0x08) >> 1); p->dz = 0; + + ps2_data[0] |= ((p->buttons & 0x10) >> 3) | + ((p->buttons & 0x08) >> 1); + client->bufsiz = 3; break; } @@ -714,7 +720,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer, { struct mousedev_client *client = file->private_data; struct mousedev *mousedev = client->mousedev; - signed char data[sizeof(client->ps2)]; + u8 data[sizeof(client->ps2)]; int retval = 0; if (!client->ready && !client->buffer && mousedev->exist && diff --git a/drivers/input/serio/i8042-x86ia64io.h 
b/drivers/input/serio/i8042-x86ia64io.h index d1051e3ce819..e484ea2dc787 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -530,6 +530,20 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { { } }; +static const struct dmi_system_id i8042_dmi_forcemux_table[] __initconst = { + { + /* + * Sony Vaio VGN-CS series require MUX or the touch sensor + * buttons will disturb touchpad operation + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VGN-CS"), + }, + }, + { } +}; + /* * On some Asus laptops, just running self tests cause problems. */ @@ -693,6 +707,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { }, }, { + /* Lenovo ThinkPad L460 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L460"), + }, + }, + { /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), @@ -1223,6 +1244,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_nomux_table)) i8042_nomux = true; + if (dmi_check_system(i8042_dmi_forcemux_table)) + i8042_nomux = false; + if (dmi_check_system(i8042_dmi_notimeout_table)) i8042_notimeout = true; diff --git a/drivers/input/touchscreen/ar1021_i2c.c b/drivers/input/touchscreen/ar1021_i2c.c index 71b5a634cf6d..e7bb155911d0 100644 --- a/drivers/input/touchscreen/ar1021_i2c.c +++ b/drivers/input/touchscreen/ar1021_i2c.c @@ -152,7 +152,7 @@ static int __maybe_unused ar1021_i2c_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ar1021_i2c_pm, ar1021_i2c_suspend, ar1021_i2c_resume); static const struct i2c_device_id ar1021_i2c_id[] = { - { "MICROCHIP_AR1021_I2C", 0 }, + { "ar1021", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, ar1021_i2c_id); diff --git a/drivers/input/touchscreen/tsc2007.c b/drivers/input/touchscreen/tsc2007.c index 5d0cd51c6f41..a4b7b4c3d27b 100644 --- a/drivers/input/touchscreen/tsc2007.c +++ b/drivers/input/touchscreen/tsc2007.c @@ -455,6 +455,14 @@ static int tsc2007_probe(struct i2c_client *client, tsc2007_stop(ts); + /* power down the chip (TSC2007_SETUP does not ACK on I2C) */ + err = tsc2007_xfer(ts, PWRDOWN); + if (err < 0) { + dev_err(&client->dev, + "Failed to setup chip: %d\n", err); + return err; /* usually, chip does not respond */ + } + err = input_register_device(input_dev); if (err) { dev_err(&client->dev, diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index f929879ecae6..a7d516f973dd 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -127,6 +127,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu) pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n", iommu->name); dmar_free_hwirq(irq); + iommu->pr_irq = 0; goto err; } dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL); @@ -142,9 +143,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL); dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL); - free_irq(iommu->pr_irq, iommu); - dmar_free_hwirq(iommu->pr_irq); - iommu->pr_irq = 0; + if (iommu->pr_irq) { + free_irq(iommu->pr_irq, iommu); + dmar_free_hwirq(iommu->pr_irq); + iommu->pr_irq = 0; + } free_pages((unsigned long)iommu->prq, PRQ_ORDER); iommu->prq = NULL; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index fa0adef32bd6..62739766b60b 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -126,7 +126,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain 
*iovad, break; /* found a free slot */ } adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo - 1; + limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; move_left: prev = curr; curr = rb_prev(curr); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 3dc5b65f3990..b98d38f95237 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1295,6 +1295,7 @@ static int __init omap_iommu_init(void) const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignment */ struct device_node *np; + int ret; np = of_find_matching_node(NULL, omap_iommu_of_match); if (!np) @@ -1308,11 +1309,25 @@ static int __init omap_iommu_init(void) return -ENOMEM; iopte_cachep = p; - bus_set_iommu(&platform_bus_type, &omap_iommu_ops); - omap_iommu_debugfs_init(); - return platform_driver_register(&omap_iommu_driver); + ret = platform_driver_register(&omap_iommu_driver); + if (ret) { + pr_err("%s: failed to register driver\n", __func__); + goto fail_driver; + } + + ret = bus_set_iommu(&platform_bus_type, &omap_iommu_ops); + if (ret) + goto fail_bus; + + return 0; + +fail_bus: + platform_driver_unregister(&omap_iommu_driver); +fail_driver: + kmem_cache_destroy(iopte_cachep); + return ret; } subsys_initcall(omap_iommu_init); /* must be ready before omap3isp is probed */ diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index c5f1757ac61d..82e00e3ad0e0 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -663,7 +663,7 @@ static struct irq_chip its_irq_chip = { * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. */ #define IRQS_PER_CHUNK_SHIFT 5 -#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) +#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT) static unsigned long *lpi_bitmap; static u32 lpi_chunks; @@ -1168,11 +1168,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev = kzalloc(sizeof(*dev), GFP_KERNEL); /* - * At least one bit of EventID is being used, hence a minimum - * of two entries. No, the architecture doesn't let you - * express an ITT with a single entry. + * We allocate at least one chunk worth of LPIs per device, + * and thus that many ITEs. The device may require less though. 
*/ - nr_ites = max(2UL, roundup_pow_of_two(nvecs)); + nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; itt = kzalloc(sz, GFP_KERNEL); diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 9cb4b621fbc3..b92a19a594a1 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c @@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) if (sk->sk_state != MISDN_BOUND) continue; if (!cskb) - cskb = skb_copy(skb, GFP_KERNEL); + cskb = skb_copy(skb, GFP_ATOMIC); if (!cskb) { printk(KERN_WARNING "%s no skb\n", __func__); break; diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 92b6798ef5b3..c1c3af089634 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c @@ -149,7 +149,7 @@ void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { - led_stop_software_blink(led_cdev); + del_timer_sync(&led_cdev->blink_timer); led_cdev->flags &= ~LED_BLINK_ONESHOT; led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index e8b1120f486d..eef3e64ca0a8 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -88,21 +88,23 @@ ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr, down_read(&led_cdev->trigger_lock); if (!led_cdev->trigger) - len += sprintf(buf+len, "[none] "); + len += scnprintf(buf+len, PAGE_SIZE - len, "[none] "); else - len += sprintf(buf+len, "none "); + len += scnprintf(buf+len, PAGE_SIZE - len, "none "); list_for_each_entry(trig, &trigger_list, next_trig) { if (led_cdev->trigger && !strcmp(led_cdev->trigger->name, trig->name)) - len += sprintf(buf+len, "[%s] ", trig->name); + len += scnprintf(buf+len, PAGE_SIZE - len, "[%s] ", + trig->name); else - len += sprintf(buf+len, "%s ", trig->name); + len += scnprintf(buf+len, PAGE_SIZE - len, "%s ", + trig->name); } up_read(&led_cdev->trigger_lock); up_read(&triggers_list_lock); - len += sprintf(len+buf, "\n"); + len += scnprintf(len+buf, PAGE_SIZE - len, "\n"); return len; } EXPORT_SYMBOL_GPL(led_trigger_show); diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index b775e1efecd3..b9f71a87b7e1 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c @@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client, "slave address 0x%02x\n", id->name, chip->bits, client->addr); - if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; if (pdata) { diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c index b4ec36fd3cdf..c90633b16fad 100644 --- a/drivers/leds/leds-qpnp-flash-v2.c +++ b/drivers/leds/leds-qpnp-flash-v2.c @@ -319,7 +319,7 @@ static inline int get_current_reg_code(int target_curr_ma, int ires_ua) if (!ires_ua || !target_curr_ma || (target_curr_ma < (ires_ua / 1000))) return 0; - return DIV_ROUND_UP(target_curr_ma * 1000, ires_ua) - 1; + return DIV_ROUND_CLOSEST(target_curr_ma * 1000, ires_ua) - 1; } static int qpnp_flash_led_read(struct qpnp_flash_led *led, u16 addr, u8 *data) @@ -391,7 +391,7 @@ led_brightness qpnp_flash_led_brightness_get(struct led_classdev *led_cdev) static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) { int rc, i, addr_offset; - u8 val = 0, mask; + u8 val = 0, mask, strobe_mask = 0, strobe_ctrl; for (i = 0; i < led->num_fnodes; i++) { 
addr_offset = led->fnode[i].id; @@ -402,6 +402,51 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; val |= 0x1 << led->fnode[i].id; + + if (led->fnode[i].strobe_sel == HW_STROBE) { + if (led->fnode[i].id == LED3) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + else + strobe_mask |= LED1N2_FLASH_ONCE_ONLY_BIT; + } + + if (led->fnode[i].id == LED3 && + led->fnode[i].strobe_sel == LPG_STROBE) + strobe_mask |= LED3_FLASH_ONCE_ONLY_BIT; + /* + * As per the hardware recommendation, to use LED2/LED3 in HW + * strobe mode, LED1 should be set to HW strobe mode as well. + */ + if (led->fnode[i].strobe_sel == HW_STROBE && + (led->fnode[i].id == LED2 || led->fnode[i].id == LED3)) { + mask = FLASH_HW_STROBE_MASK; + addr_offset = led->fnode[LED1].id; + /* + * HW_STROBE: enable, TRIGGER: level, + * POLARITY: active high + */ + strobe_ctrl = BIT(2) | BIT(0); + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_STROBE_CTRL( + led->base + addr_offset), + mask, strobe_ctrl); + if (rc < 0) + return rc; + } + } + + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), + strobe_mask, 0); + if (rc < 0) + return rc; + + if (led->fnode[LED3].strobe_sel == LPG_STROBE) { + rc = qpnp_flash_led_masked_write(led, + FLASH_LED_REG_LPG_INPUT_CTRL(led->base), + LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); + if (rc < 0) + return rc; } rc = qpnp_flash_led_write(led, @@ -595,19 +640,6 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led) return rc; } - if (led->fnode[LED3].strobe_sel == LPG_STROBE) { - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_MULTI_STROBE_CTRL(led->base), - LED3_FLASH_ONCE_ONLY_BIT, 0); - if (rc < 0) - return rc; - - rc = qpnp_flash_led_masked_write(led, - FLASH_LED_REG_LPG_INPUT_CTRL(led->base), - LPG_INPUT_SEL_BIT, LPG_INPUT_SEL_BIT); - if (rc < 0) - return rc; - } return 0; } diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 4d46f2ce606f..aa84fcfd59fc 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -514,15 +514,21 @@ struct open_bucket { /* * We keep multiple buckets open for writes, and try to segregate different - * write streams for better cache utilization: first we look for a bucket where - * the last write to it was sequential with the current write, and failing that - * we look for a bucket that was last used by the same task. + * write streams for better cache utilization: first we try to segregate flash + * only volume write streams from cached devices, secondly we look for a bucket + * where the last write to it was sequential with the current write, and + * failing that we look for a bucket that was last used by the same task. * * The idea is if you've got multiple tasks pulling data into the cache at the * same time, you'll get better cache utilization if you try to segregate their * data and preserve locality. * - * For example, say you've starting Firefox at the same time you're copying a + * For example, dirty sectors of a flash-only volume are not reclaimable; if + * they are mixed with dirty sectors of a cached device, such buckets will be + * marked as dirty and won't be reclaimed, even though the dirty data of the + * cached device has been written back to the backing device. + * + * And say you're starting Firefox at the same time you're copying a * bunch of files. 
Firefox will likely end up being fairly hot and stay in the * cache awhile, but the data you copied might not be; if you wrote all that * data to the same buckets it'd get invalidated at the same time. @@ -539,7 +545,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c, struct open_bucket *ret, *ret_task = NULL; list_for_each_entry_reverse(ret, &c->data_buckets, list) - if (!bkey_cmp(&ret->key, search)) + if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) != + UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)])) + continue; + else if (!bkey_cmp(&ret->key, search)) goto found; else if (ret->last_write_point == write_point) ret_task = ret; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index c2248b75f2da..b9a526271f02 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -890,6 +890,12 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_lock(&bch_register_lock); + cancel_delayed_work_sync(&dc->writeback_rate_update); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { + kthread_stop(dc->writeback_thread); + dc->writeback_thread = NULL; + } + memset(&dc->sb.set_uuid, 0, 16); SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE); @@ -935,6 +941,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) uint32_t rtime = cpu_to_le32(get_seconds()); struct uuid_entry *u; char buf[BDEVNAME_SIZE]; + struct cached_dev *exist_dc, *t; bdevname(dc->bdev, buf); @@ -958,6 +965,16 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c) return -EINVAL; } + /* Check whether already attached */ + list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) { + if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) { + pr_err("Tried to attach %s but duplicate UUID already attached", + buf); + + return -EINVAL; + } + } + u = uuid_find(c, dc->sb.uuid); if (u && diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 81c5e1a1f363..1b84d2890fbf 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -300,6 +300,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, else if (rw & REQ_WRITE_SAME) special_cmd_max_sectors = q->limits.max_write_same_sectors; if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) { + atomic_inc(&io->count); dec_count(io, region, -EOPNOTSUPP); return; } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 7baeeafa059d..065d7cee0d21 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1773,12 +1773,12 @@ static int validate_params(uint cmd, struct dm_ioctl *param) cmd == DM_LIST_VERSIONS_CMD) return 0; - if ((cmd == DM_DEV_CREATE_CMD)) { + if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { DMWARN("name not supplied when creating device"); return -EINVAL; } - } else if ((*param->uuid && *param->name)) { + } else if (*param->uuid && *param->name) { DMWARN("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 494d01d0e92a..a7a561af05c9 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c @@ -945,8 +945,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); lock_comm(cinfo); ret = __sendmsg(cinfo, &cmsg); - if (ret) + if (ret) { + unlock_comm(cinfo); return ret; + } cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; diff --git a/drivers/md/raid10.c 
b/drivers/md/raid10.c index a67e1a36733f..45e7a47e5f7b 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -2698,6 +2698,11 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio) list_add(&r10_bio->retry_list, &conf->bio_end_io_list); conf->nr_queued++; spin_unlock_irq(&conf->device_lock); + /* + * In case freeze_array() is waiting for condition + * nr_pending == nr_queued + extra to be true. + */ + wake_up(&conf->wait_barrier); md_wakeup_thread(conf->mddev->thread); } else { if (test_bit(R10BIO_WriteError, @@ -3633,6 +3638,7 @@ static int run(struct mddev *mddev) if (blk_queue_discard(bdev_get_queue(rdev->bdev))) discard_supported = true; + first = 0; } if (mddev->queue) { @@ -4039,6 +4045,7 @@ static int raid10_start_reshape(struct mddev *mddev) diff = 0; if (first || diff < min_offset_diff) min_offset_diff = diff; + first = 0; } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 77403228e098..9284acea4f7b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -110,8 +110,7 @@ static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) { int i; - local_irq_disable(); - spin_lock(conf->hash_locks); + spin_lock_irq(conf->hash_locks); for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); spin_lock(&conf->device_lock); @@ -121,9 +120,9 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) { int i; spin_unlock(&conf->device_lock); - for (i = NR_STRIPE_HASH_LOCKS; i; i--) - spin_unlock(conf->hash_locks + i - 1); - local_irq_enable(); + for (i = NR_STRIPE_HASH_LOCKS - 1; i; i--) + spin_unlock(conf->hash_locks + i); + spin_unlock_irq(conf->hash_locks); } /* bio's attached to a stripe+device for I/O are linked together in bi_sector @@ -726,12 +725,11 @@ static bool is_full_stripe_write(struct stripe_head *sh) static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) { - local_irq_disable(); if (sh1 > sh2) { - spin_lock(&sh2->stripe_lock); + spin_lock_irq(&sh2->stripe_lock); spin_lock_nested(&sh1->stripe_lock, 1); } else { - spin_lock(&sh1->stripe_lock); + spin_lock_irq(&sh1->stripe_lock); spin_lock_nested(&sh2->stripe_lock, 1); } } @@ -739,8 +737,7 @@ static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) { spin_unlock(&sh1->stripe_lock); - spin_unlock(&sh2->stripe_lock); - local_irq_enable(); + spin_unlock_irq(&sh2->stripe_lock); } /* Only freshly new full stripe normal write stripe can be added to a batch list */ @@ -3372,9 +3369,20 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); BUG_ON(test_bit(R5_Wantread, &dev->flags)); BUG_ON(sh->batch_head); + + /* + * In the raid6 case if the only non-uptodate disk is P + * then we already trusted P to compute the other failed + * drives. It is safe to compute rather than re-read P. + * In other cases we only compute blocks from failed + * devices, otherwise check/repair might fail to detect + * a real inconsistency. 
+ */ + if ((s->uptodate == disks - 1) && + ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || (s->failed && (disk_idx == s->failed_num[0] || - disk_idx == s->failed_num[1]))) { + disk_idx == s->failed_num[1])))) { /* have disk failed, and we're requested to fetch it; * do compute it */ diff --git a/drivers/media/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb-core/dvb_ca_en50221.c index fb66184dc9b6..77cf211e842e 100644 --- a/drivers/media/dvb-core/dvb_ca_en50221.c +++ b/drivers/media/dvb-core/dvb_ca_en50221.c @@ -750,6 +750,29 @@ static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot, u8 * b goto exit; } + /* + * The CAM may need some time to settle down, or there might be a + * race condition between the CAM, our writing of HC and our last + * check for DA. This happens if the CAM asserts DA just after we + * checked DA but before we set HC. In this case it might be a bug + * in the CAM that keeps the FR bit set, the lower layer/HW + * communication might require a longer timeout, or the CAM might + * need more time internally. But this does happen in reality! + * We need to read the status from the HW again and do the same + * as we did for the previous check for DA. + */ + status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS); + if (status < 0) + goto exit; + + if (status & (STATUSREG_DA | STATUSREG_RE)) { + if (status & STATUSREG_DA) + dvb_ca_en50221_thread_wakeup(ca); + + status = -EAGAIN; + goto exit; + } + /* send the amount of data */ if ((status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH, bytes_write >> 8)) != 0) goto exit; diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c index feeeb70d841e..d14d075ab1d6 100644 --- a/drivers/media/dvb-frontends/m88ds3103.c +++ b/drivers/media/dvb-frontends/m88ds3103.c @@ -1281,11 +1281,12 @@ static int m88ds3103_select(struct i2c_adapter *adap, void *mux_priv, u32 chan) * New users must use I2C client binding directly! */ struct dvb_frontend *m88ds3103_attach(const struct m88ds3103_config *cfg, - struct i2c_adapter *i2c, struct i2c_adapter **tuner_i2c_adapter) + struct i2c_adapter *i2c, + struct i2c_adapter **tuner_i2c_adapter) { struct i2c_client *client; struct i2c_board_info board_info; - struct m88ds3103_platform_data pdata; + struct m88ds3103_platform_data pdata = {}; pdata.clk = cfg->clock; pdata.i2c_wr_max = cfg->i2c_wr_max; @@ -1428,6 +1429,8 @@ static int m88ds3103_probe(struct i2c_client *client, case M88DS3103_CHIP_ID: break; default: + ret = -ENODEV; + dev_err(&client->dev, "Unknown device. Chip_id=%02x\n", dev->chip_id); goto err_kfree; } diff --git a/drivers/media/dvb-frontends/si2168.c b/drivers/media/dvb-frontends/si2168.c index 821a8f481507..9d6270591858 100644 --- a/drivers/media/dvb-frontends/si2168.c +++ b/drivers/media/dvb-frontends/si2168.c @@ -14,6 +14,8 @@ * GNU General Public License for more details. 
*/ +#include <linux/delay.h> + #include "si2168_priv.h" static const struct dvb_frontend_ops si2168_ops; @@ -420,6 +422,7 @@ static int si2168_init(struct dvb_frontend *fe) if (ret) goto err; + udelay(100); memcpy(cmd.args, "\x85", 1); cmd.wlen = 1; cmd.rlen = 1; diff --git a/drivers/media/i2c/adv7481.c b/drivers/media/i2c/adv7481.c index 9c8159cc737a..43a5f3da5ac4 100644 --- a/drivers/media/i2c/adv7481.c +++ b/drivers/media/i2c/adv7481.c @@ -55,6 +55,8 @@ #define LOCK_MAX_SLEEP 6000 #define LOCK_NUM_TRIES 200 +#define MAX_DEFAULT_WIDTH 1280 +#define MAX_DEFAULT_HEIGHT 720 #define MAX_DEFAULT_FRAME_RATE 60 #define MAX_DEFAULT_PIX_CLK_HZ 74240000 @@ -1705,7 +1707,8 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state, } else { pr_err("%s(%d): PLL not locked return EBUSY\n", __func__, __LINE__); - return -EBUSY; + ret = -EBUSY; + goto set_default; } /* Check Timing Lock */ @@ -1825,6 +1828,17 @@ static int adv7481_get_hdmi_timings(struct adv7481_state *state, (hdmi_params->pix_rep + 1)); } +set_default: + if (ret) { + pr_debug("%s(%d), error %d resort to default fmt\n", + __func__, __LINE__, ret); + vid_params->act_pix = MAX_DEFAULT_WIDTH; + vid_params->act_lines = MAX_DEFAULT_HEIGHT; + vid_params->fr_rate = MAX_DEFAULT_FRAME_RATE; + vid_params->pix_clk = MAX_DEFAULT_PIX_CLK_HZ; + vid_params->intrlcd = 0; + ret = 0; + } pr_debug("%s(%d), adv7481 TMDS Resolution: %d x %d @ %d fps\n", __func__, __LINE__, diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c index fe6eb78b6914..a47ab1947cc4 100644 --- a/drivers/media/i2c/cx25840/cx25840-core.c +++ b/drivers/media/i2c/cx25840/cx25840-core.c @@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client) INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); - queue_work(q, &state->fw_work); - schedule(); - finish_wait(&state->fw_wait, &wait); - destroy_workqueue(q); + if (q) { + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); + queue_work(q, &state->fw_work); + schedule(); + finish_wait(&state->fw_wait, &wait); + destroy_workqueue(q); + } /* 6. */ cx25840_write(client, 0x115, 0x8c); @@ -631,11 +633,13 @@ static void cx23885_initialize(struct i2c_client *client) INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); - queue_work(q, &state->fw_work); - schedule(); - finish_wait(&state->fw_wait, &wait); - destroy_workqueue(q); + if (q) { + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); + queue_work(q, &state->fw_work); + schedule(); + finish_wait(&state->fw_wait, &wait); + destroy_workqueue(q); + } /* Call the cx23888 specific std setup func, we no longer rely on * the generic cx24840 func. 
@@ -746,11 +750,13 @@ static void cx231xx_initialize(struct i2c_client *client) INIT_WORK(&state->fw_work, cx25840_work_handler); init_waitqueue_head(&state->fw_wait); q = create_singlethread_workqueue("cx25840_fw"); - prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); - queue_work(q, &state->fw_work); - schedule(); - finish_wait(&state->fw_wait, &wait); - destroy_workqueue(q); + if (q) { + prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE); + queue_work(q, &state->fw_work); + schedule(); + finish_wait(&state->fw_wait, &wait); + destroy_workqueue(q); + } cx25840_std_setup(client); diff --git a/drivers/media/i2c/soc_camera/ov6650.c b/drivers/media/i2c/soc_camera/ov6650.c index 1f8af1ee8352..1e4783b51a35 100644 --- a/drivers/media/i2c/soc_camera/ov6650.c +++ b/drivers/media/i2c/soc_camera/ov6650.c @@ -1033,7 +1033,7 @@ static int ov6650_probe(struct i2c_client *client, priv->code = MEDIA_BUS_FMT_YUYV8_2X8; priv->colorspace = V4L2_COLORSPACE_JPEG; - priv->clk = v4l2_clk_get(&client->dev, "mclk"); + priv->clk = v4l2_clk_get(&client->dev, NULL); if (IS_ERR(priv->clk)) { ret = PTR_ERR(priv->clk); goto eclkget; diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index ea2777e1ee10..bc630a719776 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c @@ -226,7 +226,7 @@ static void i2c_wr8(struct v4l2_subdev *sd, u16 reg, u8 val) static void i2c_wr8_and_or(struct v4l2_subdev *sd, u16 reg, u8 mask, u8 val) { - i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 2) & mask) | val, 2); + i2c_wrreg(sd, reg, (i2c_rdreg(sd, reg, 1) & mask) | val, 1); } static u16 i2c_rd16(struct v4l2_subdev *sd, u16 reg) diff --git a/drivers/media/pci/bt8xx/bt878.c b/drivers/media/pci/bt8xx/bt878.c index 8aa726651630..90fcccc05b56 100644 --- a/drivers/media/pci/bt8xx/bt878.c +++ b/drivers/media/pci/bt8xx/bt878.c @@ -422,8 +422,7 @@ static int bt878_probe(struct pci_dev *dev, const struct pci_device_id *pci_id) bt878_num); if (bt878_num >= BT878_MAX) { printk(KERN_ERR "bt878: Too many devices inserted\n"); - result = -ENOMEM; - goto fail0; + return -ENOMEM; } if (pci_enable_device(dev)) return -EIO; diff --git a/drivers/media/pci/solo6x10/solo6x10-v4l2.c b/drivers/media/pci/solo6x10/solo6x10-v4l2.c index f7ce493b1fee..a0b61e88c838 100644 --- a/drivers/media/pci/solo6x10/solo6x10-v4l2.c +++ b/drivers/media/pci/solo6x10/solo6x10-v4l2.c @@ -342,6 +342,17 @@ static void solo_stop_streaming(struct vb2_queue *q) struct solo_dev *solo_dev = vb2_get_drv_priv(q); solo_stop_thread(solo_dev); + + spin_lock(&solo_dev->slock); + while (!list_empty(&solo_dev->vidq_active)) { + struct solo_vb2_buf *buf = list_entry( + solo_dev->vidq_active.next, + struct solo_vb2_buf, list); + + list_del(&buf->list); + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + } + spin_unlock(&solo_dev->slock); INIT_LIST_HEAD(&solo_dev->vidq_active); } diff --git a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c index 76fe7dfa68cb..61200d379a1d 100644 --- a/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c +++ b/drivers/media/platform/msm/ais/jpeg_dma/msm_jpeg_dma_dev.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -754,9 +754,12 @@ static int msm_jpegdma_s_fmt_vid_out(struct file *file, static int msm_jpegdma_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req) { + int ret = 0; struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh); - - return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req); + mutex_lock(&ctx->lock); + ret = v4l2_m2m_reqbufs(file, ctx->m2m_ctx, req); + mutex_unlock(&ctx->lock); + return ret; } /* @@ -833,11 +836,11 @@ static int msm_jpegdma_streamoff(struct file *file, { struct jpegdma_ctx *ctx = msm_jpegdma_ctx_from_fh(fh); int ret; - + mutex_lock(&ctx->lock); ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, buf_type); if (ret < 0) dev_err(ctx->jdma_device->dev, "Stream off fails\n"); - + mutex_unlock(&ctx->lock); return ret; } diff --git a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h index 392d902d3e0c..0486c8aa96d0 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h +++ b/drivers/media/platform/msm/ais/sensor/csid/include/msm_csid_3_5_hwreg.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -56,7 +56,12 @@ static struct csid_reg_parms_t csid_v3_5 = { 0xC, 0x84, 0xA4, - 0x7f010800, + /* + * Default IRQ enabled: + * FIFO overflow, Unbounded frame, Stream underflow, + * Error ECC, Error CRC, Reset done + */ + 0x73000800, 20, 17, 16, diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c index 2b3eefa65606..6d26dff7525d 100644 --- a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c +++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -14,6 +14,12 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/irqreturn.h> +#include <media/v4l2-subdev.h> +#include <media/v4l2-dev.h> +#include <media/v4l2-ioctl.h> +#include <media/v4l2-device.h> +#include <media/v4l2-fh.h> +#include <media/v4l2-event.h> #include "msm_csid.h" #include "msm_sd.h" #include "msm_camera_io_util.h" @@ -51,12 +57,13 @@ #define CSID_VERSION_V40 0x40000000 #define MSM_CSID_DRV_NAME "msm_csid" -#define DBG_CSID 0 +#define DBG_CSID 1 #define SHORT_PKT_CAPTURE 0 #define SHORT_PKT_OFFSET 0x200 #define ENABLE_3P_BIT 1 #define SOF_DEBUG_ENABLE 1 #define SOF_DEBUG_DISABLE 0 +#define MAX_CSID_V4l2_EVENTS 100 #define TRUE 1 #define FALSE 0 @@ -156,6 +163,25 @@ static int msm_csid_stop(struct csid_device *csid_dev, uint32_t cid_mask) return 0; } +static int msm_csid_send_event(struct csid_device *csid_dev, + uint32_t event_type, struct msm_csid_event_data *event_data) +{ + struct v4l2_event csid_event; + struct msm_csid_event_data *d_event_data; + + memset(&csid_event, 0, sizeof(struct v4l2_event)); + csid_event.id = 0; + csid_event.type = event_type; + + d_event_data = + (struct msm_csid_event_data *)(&csid_event.u.data[0]); + d_event_data->csid_id = event_data->csid_id; + d_event_data->error_status = event_data->error_status; + + v4l2_event_queue(csid_dev->msm_sd.sd.devnode, &csid_event); + return 0; +} + #if (DBG_CSID) static void msm_csid_set_debug_reg(struct csid_device *csid_dev, struct msm_camera_csid_params *csid_params) @@ -490,8 +516,9 @@ static irqreturn_t msm_csid_irq(int irq_num, void *data) #else static irqreturn_t msm_csid_irq(int irq_num, void *data) { - uint32_t irq; + uint32_t irq, error_irq, rst_done_irq_mask; struct csid_device *csid_dev = data; + struct msm_csid_event_data csid_event; if (!csid_dev) { pr_err("%s:%d csid_dev NULL\n", __func__, __LINE__); @@ -509,11 +536,26 @@ static irqreturn_t msm_csid_irq(int irq_num, void *data) irq = msm_camera_io_r(csid_dev->base + csid_dev->ctrl_reg->csid_reg.csid_irq_status_addr); + irq &= msm_camera_io_r(csid_dev->base + + csid_dev->ctrl_reg->csid_reg.csid_irq_mask_addr); pr_err_ratelimited("%s CSID%d_IRQ_STATUS_ADDR = 0x%x\n", __func__, csid_dev->pdev->id, irq); - if (irq & (0x1 << - csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift)) + error_irq = irq; + rst_done_irq_mask = + 0x1 << csid_dev->ctrl_reg->csid_reg.csid_rst_done_irq_bitshift; + + if (irq & rst_done_irq_mask) { complete(&csid_dev->reset_complete); + error_irq &= ~rst_done_irq_mask; + } + + if (error_irq) { + csid_event.csid_id = csid_dev->pdev->id; + csid_event.error_status = error_irq; + msm_csid_send_event(csid_dev, CSID_EVENT_SIGNAL_ERROR, + &csid_event); + } + msm_camera_io_w(irq, csid_dev->base + csid_dev->ctrl_reg->csid_reg.csid_irq_clear_cmd_addr); return IRQ_HANDLED; @@ -890,7 +932,6 @@ static long msm_csid_subdev_ioctl(struct v4l2_subdev *sd, return rc; } - #ifdef CONFIG_COMPAT static int32_t msm_csid_cmd32(struct csid_device *csid_dev, void *arg) { @@ -1061,27 +1102,135 @@ static long msm_csid_subdev_ioctl32(struct v4l2_subdev *sd, mutex_unlock(&csid_dev->mutex); return rc; } +#endif -static long msm_csid_subdev_do_ioctl32( +static long msm_csid_subdev_do_ioctl( struct file *file, unsigned int cmd, void *arg) { + int rc = -ENOIOCTLCMD; struct video_device *vdev = video_devdata(file); struct v4l2_subdev *sd = vdev_to_v4l2_subdev(vdev); + struct v4l2_fh *vfh = 
file->private_data; + + switch (cmd) { + case VIDIOC_DQEVENT: + if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS)) + return -ENOIOCTLCMD; + return v4l2_event_dequeue(vfh, arg, + file->f_flags & O_NONBLOCK); + case VIDIOC_SUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, subscribe_event, vfh, arg); + + case VIDIOC_UNSUBSCRIBE_EVENT: + return v4l2_subdev_call(sd, core, unsubscribe_event, vfh, arg); + + case VIDIOC_MSM_CSID_IO_CFG32: +#ifdef CONFIG_COMPAT + rc = msm_csid_subdev_ioctl32(sd, cmd, arg); +#endif + break; - return msm_csid_subdev_ioctl32(sd, cmd, arg); + default: + rc = msm_csid_subdev_ioctl(sd, cmd, arg); + break; + } + + return rc; } -static long msm_csid_subdev_fops_ioctl32(struct file *file, unsigned int cmd, +static long msm_csid_subdev_fops_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl32); + return video_usercopy(file, cmd, arg, msm_csid_subdev_do_ioctl); } -#endif + +static u32 msm_csid_evt_mask_to_csid_event(u32 evt_mask) +{ + u32 evt_id = CSID_EVENT_SUBS_MASK_NONE; + + switch (evt_mask) { + case CSID_EVENT_MASK_INDEX_SIGNAL_ERROR: + evt_id = CSID_EVENT_SIGNAL_ERROR; + break; + default: + evt_id = CSID_EVENT_SUBS_MASK_NONE; + break; + } + + return evt_id; +} + +static int msm_csid_subscribe_event_mask(struct v4l2_fh *fh, + struct v4l2_event_subscription *sub, int evt_mask_index, + u32 evt_id, bool subscribe_flag) +{ + int rc = 0; + + sub->type = evt_id; + + if (subscribe_flag) + rc = v4l2_event_subscribe(fh, sub, + MAX_CSID_V4l2_EVENTS, NULL); + else + rc = v4l2_event_unsubscribe(fh, sub); + if (rc != 0) { + pr_err("%s: Subs event_type =0x%x failed\n", + __func__, sub->type); + return rc; + } + return rc; +} + +static int msm_csid_process_event_subscription(struct v4l2_fh *fh, + struct v4l2_event_subscription *sub, bool subscribe_flag) +{ + int rc = 0, evt_mask_index = 0; + u32 evt_mask = sub->type; + u32 evt_id = 0; + + if (evt_mask == CSID_EVENT_SUBS_MASK_NONE) { + pr_err("%s: Subs event_type is None=0x%x\n", + __func__, evt_mask); + return 0; + } + + evt_mask_index = CSID_EVENT_MASK_INDEX_SIGNAL_ERROR; + if (evt_mask & (1<<evt_mask_index)) { + evt_id = + msm_csid_evt_mask_to_csid_event( + evt_mask_index); + rc = msm_csid_subscribe_event_mask(fh, sub, + evt_mask_index, evt_id, subscribe_flag); + if (rc != 0) { + pr_err("%s: Subs event index:%d failed\n", + __func__, evt_mask_index); + return rc; + } + } + + return rc; +} +static int msm_csid_subscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return msm_csid_process_event_subscription(fh, sub, true); +} + +static int msm_csid_unsubscribe_event(struct v4l2_subdev *sd, + struct v4l2_fh *fh, + struct v4l2_event_subscription *sub) +{ + return msm_csid_process_event_subscription(fh, sub, false); +} + static const struct v4l2_subdev_internal_ops msm_csid_internal_ops; static struct v4l2_subdev_core_ops msm_csid_subdev_core_ops = { .ioctl = &msm_csid_subdev_ioctl, .interrupt_service_routine = msm_csid_irq_routine, + .subscribe_event = msm_csid_subscribe_event, + .unsubscribe_event = msm_csid_unsubscribe_event, }; static const struct v4l2_subdev_ops msm_csid_subdev_ops = { @@ -1175,6 +1324,7 @@ static int csid_probe(struct platform_device *pdev) new_csid_dev->pdev = pdev; new_csid_dev->msm_sd.sd.internal_ops = &msm_csid_internal_ops; new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; + new_csid_dev->msm_sd.sd.flags |= V4L2_SUBDEV_FL_HAS_EVENTS; snprintf(new_csid_dev->msm_sd.sd.name, 
ARRAY_SIZE(new_csid_dev->msm_sd.sd.name), "msm_csid"); media_entity_init(&new_csid_dev->msm_sd.sd.entity, 0, NULL, 0); @@ -1183,11 +1333,12 @@ static int csid_probe(struct platform_device *pdev) new_csid_dev->msm_sd.close_seq = MSM_SD_CLOSE_2ND_CATEGORY | 0x5; msm_sd_register(&new_csid_dev->msm_sd); -#ifdef CONFIG_COMPAT msm_cam_copy_v4l2_subdev_fops(&msm_csid_v4l2_subdev_fops); - msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl32; - new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops; + msm_csid_v4l2_subdev_fops.unlocked_ioctl = msm_csid_subdev_fops_ioctl; +#ifdef CONFIG_COMPAT + msm_csid_v4l2_subdev_fops.compat_ioctl32 = msm_csid_subdev_fops_ioctl; #endif + new_csid_dev->msm_sd.sd.devnode->fops = &msm_csid_v4l2_subdev_fops; rc = msm_camera_register_irq(pdev, new_csid_dev->irq, msm_csid_irq, IRQF_TRIGGER_RISING, "csid", new_csid_dev); diff --git a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c index 5e34016d199c..03ae276d1a6f 100644 --- a/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c +++ b/drivers/media/platform/msm/ais/sensor/msm_sensor_driver.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -639,7 +639,7 @@ static irqreturn_t bridge_irq(int irq, void *dev) { struct msm_sensor_ctrl_t *s_ctrl = dev; - pr_err("msm_sensor_driver: received bridge interrupt:0x%x", + pr_debug("msm_sensor_driver: received bridge interrupt:0x%x\n", s_ctrl->sensordata->slave_info->sensor_slave_addr); schedule_delayed_work(&s_ctrl->irq_delayed_work, msecs_to_jiffies(0)); @@ -682,7 +682,7 @@ static void bridge_irq_delay_work(struct work_struct *work) &sensor_event); mutex_unlock(s_ctrl->msm_sensor_mutex); exit_queue: - pr_err("Work IRQ exit"); + pr_debug("Work IRQ exit\n"); } /* static function definition */ @@ -947,8 +947,6 @@ CSID_TG: goto free_camera_info; } - pr_err("%s probe succeeded", slave_info->sensor_name); - /* * Update the subdevice id of flash-src based on availability in kernel. */ @@ -1009,8 +1007,6 @@ CSID_TG: pr_err("%s: Failed gpio_direction irq %d", __func__, rc); goto cancel_work; - } else { - pr_err("sensor probe IRQ direction succeeded"); } } @@ -1035,7 +1031,7 @@ CSID_TG: } /* Keep irq enabled */ - pr_err("msm_sensor_driver.c irq number = %d", s_ctrl->irq); + pr_debug("msm_sensor_driver.c irq number = %d\n", s_ctrl->irq); } /* diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c index eab56b70e646..20a38925aa10 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -26,6 +26,7 @@ #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include <media/videobuf2-core.h> +#include <media/msmb_generic_buf_mgr.h> #include "msm.h" #include "msm_buf_mgr.h" diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c index 4dc471b9c1c6..6db3c3c527a3 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c @@ -3095,12 +3095,18 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, return -EINVAL; msm_isp_get_timestamp(×tamp, vfe_dev_ioctl); - + mutex_lock(&vfe_dev_ioctl->buf_mgr->lock); for (i = 0; i < stream_cfg_cmd->num_streams; i++) { if (stream_cfg_cmd->stream_handle[i] == 0) continue; stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl, HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i])); + + if (!stream_info) { + pr_err("%s: stream_info is NULL", __func__); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); + return -EINVAL; + } if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX) src_state = axi_data->src_info[ SRC_TO_INTF(stream_info->stream_src)].active; @@ -3108,6 +3114,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, else { ISP_DBG("%s: invalid src info index\n", __func__); rc = -EINVAL; + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } spin_lock_irqsave(&stream_info->lock, flags); @@ -3119,6 +3126,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, } if (rc) { spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } @@ -3141,6 +3149,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, HANDLE_TO_IDX( stream_cfg_cmd->stream_handle[i])); spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } for (k = 0; k < stream_info->num_isp; k++) { @@ -3199,6 +3208,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl, spin_unlock_irqrestore(&stream_info->lock, flags); streams[num_streams++] = stream_info; } + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); for (i = 0; i < MAX_VFE; i++) { vfe_dev = update_vfes[i]; diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c index 6c1d9ddc4232..3e8220005f77 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c @@ -1102,6 +1102,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, struct vfe_device *vfe_dev; msm_isp_get_timestamp(×tamp, vfe_dev_ioctl); + mutex_lock(&vfe_dev_ioctl->buf_mgr->lock); num_stats_comp_mask = vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask; @@ -1120,6 +1121,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, } if (rc) { spin_unlock_irqrestore(&stream_info->lock, flags); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); goto error; } rc = msm_isp_init_stats_ping_pong_reg( @@ -1127,6 +1129,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, if (rc < 0) { spin_unlock_irqrestore(&stream_info->lock, flags); pr_err("%s: No buffer for stream%d\n", __func__, idx); + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); return rc; } 
init_completion(&stream_info->active_comp); @@ -1161,6 +1164,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl, stats_data->num_active_stream); streams[num_stream++] = stream_info; } + mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock); for (k = 0; k < MAX_VFE; k++) { if (!update_vfes[k] || num_active_streams[k]) diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c index c132947b3790..684b331d9ac4 100644 --- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c +++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c @@ -897,9 +897,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, case VIDIOC_MSM_ISP_CFG_STREAM: mutex_lock(&vfe_dev->core_mutex); MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev); - mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_axi_stream(vfe_dev, arg); - mutex_unlock(&vfe_dev->buf_mgr->lock); MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev); mutex_unlock(&vfe_dev->core_mutex); break; @@ -1016,9 +1014,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd, case VIDIOC_MSM_ISP_CFG_STATS_STREAM: mutex_lock(&vfe_dev->core_mutex); MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev); - mutex_lock(&vfe_dev->buf_mgr->lock); rc = msm_isp_cfg_stats_stream(vfe_dev, arg); - mutex_unlock(&vfe_dev->buf_mgr->lock); MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev); mutex_unlock(&vfe_dev->core_mutex); break; diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c index f732f5180e81..58bfdb77a492 100644 --- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c +++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -166,6 +166,33 @@ static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev, return ret; } +static int32_t msm_buf_mngr_buf_error(struct msm_buf_mngr_device *buf_mngr_dev, + struct msm_buf_mngr_info *buf_info) +{ + unsigned long flags; + struct msm_get_bufs *bufs, *save; + int32_t ret = -EINVAL; + + spin_lock_irqsave(&buf_mngr_dev->buf_q_spinlock, flags); + list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) { + if ((bufs->session_id == buf_info->session_id) && + (bufs->stream_id == buf_info->stream_id) && + (bufs->index == buf_info->index)) { + ret = buf_mngr_dev->vb2_ops.buf_error + (bufs->vb2_v4l2_buf, + buf_info->session_id, + buf_info->stream_id, + buf_info->frame_id, + &buf_info->timestamp, + buf_info->reserved); + list_del_init(&bufs->entry); + kfree(bufs); + break; + } + } + spin_unlock_irqrestore(&buf_mngr_dev->buf_q_spinlock, flags); + return ret; +} static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev, struct msm_buf_mngr_info *buf_info) @@ -473,6 +500,9 @@ int msm_cam_buf_mgr_ops(unsigned int cmd, void *argp) case VIDIOC_MSM_BUF_MNGR_BUF_DONE: rc = msm_buf_mngr_buf_done(msm_buf_mngr_dev, argp); break; + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: + rc = msm_buf_mngr_buf_error(msm_buf_mngr_dev, argp); + break; case VIDIOC_MSM_BUF_MNGR_PUT_BUF: rc = msm_buf_mngr_put_buf(msm_buf_mngr_dev, argp); break; @@ -571,6 +601,7 @@ static long msm_buf_mngr_subdev_ioctl(struct v4l2_subdev *sd, case VIDIOC_MSM_BUF_MNGR_GET_BUF: case VIDIOC_MSM_BUF_MNGR_BUF_DONE: case VIDIOC_MSM_BUF_MNGR_PUT_BUF: + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: rc = msm_cam_buf_mgr_ops(cmd, argp); break; case VIDIOC_MSM_BUF_MNGR_INIT: @@ -719,6 +750,9 @@ static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file, case VIDIOC_MSM_BUF_MNGR_BUF_DONE32: cmd = VIDIOC_MSM_BUF_MNGR_BUF_DONE; break; + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR32: + cmd = VIDIOC_MSM_BUF_MNGR_BUF_ERROR; + break; case VIDIOC_MSM_BUF_MNGR_PUT_BUF32: cmd = VIDIOC_MSM_BUF_MNGR_PUT_BUF; break; @@ -737,6 +771,7 @@ static long msm_bmgr_subdev_fops_compat_ioctl(struct file *file, switch (cmd) { case VIDIOC_MSM_BUF_MNGR_GET_BUF: case VIDIOC_MSM_BUF_MNGR_BUF_DONE: + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: case VIDIOC_MSM_BUF_MNGR_FLUSH: case VIDIOC_MSM_BUF_MNGR_PUT_BUF: { struct msm_buf_mngr_info32_t buf_info32; diff --git a/drivers/media/platform/msm/camera_v2/msm_sd.h b/drivers/media/platform/msm/camera_v2/msm_sd.h index d893d9fc07e3..3d5d3e03632e 100644 --- a/drivers/media/platform/msm/camera_v2/msm_sd.h +++ b/drivers/media/platform/msm/camera_v2/msm_sd.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2016, 2018 The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -81,6 +81,9 @@ struct msm_sd_req_vb2_q { unsigned int stream_id, uint32_t sequence, struct timeval *ts, uint32_t reserved); int (*flush_buf)(int session_id, unsigned int stream_id); + int (*buf_error)(struct vb2_v4l2_buffer *vb2_v4l2_buf, int session_id, + unsigned int stream_id, uint32_t sequence, struct timeval *ts, + uint32_t reserved); }; #define MSM_SD_NOTIFY_GET_SD 0x00000001 diff --git a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c index e271c7fcd1b6..f2b048e37319 100644 --- a/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c +++ b/drivers/media/platform/msm/camera_v2/msm_vb2/msm_vb2.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -457,6 +457,67 @@ static int msm_vb2_buf_done(struct vb2_v4l2_buffer *vb, int session_id, return rc; } +static int msm_vb2_buf_error(struct vb2_v4l2_buffer *vb, int session_id, + unsigned int stream_id, uint32_t sequence, + struct timeval *ts, uint32_t buf_type) +{ + unsigned long flags, rl_flags; + struct msm_vb2_buffer *msm_vb2; + struct msm_stream *stream; + struct msm_session *session; + struct vb2_v4l2_buffer *vb2_v4l2_buf = NULL; + int rc = 0; + + session = msm_get_session(session_id); + if (IS_ERR_OR_NULL(session)) + return -EINVAL; + + read_lock_irqsave(&session->stream_rwlock, rl_flags); + + stream = msm_get_stream(session, stream_id); + if (IS_ERR_OR_NULL(stream)) { + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return -EINVAL; + } + + spin_lock_irqsave(&stream->stream_lock, flags); + if (vb) { + list_for_each_entry(msm_vb2, &(stream->queued_list), list) { + vb2_v4l2_buf = &(msm_vb2->vb2_v4l2_buf); + if (vb2_v4l2_buf == vb) + break; + } + if (vb2_v4l2_buf != vb) { + pr_err("VB buffer is INVALID ses_id=%d, str_id=%d, vb=%pK\n", + session_id, stream_id, vb); + spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, + rl_flags); + return -EINVAL; + } + msm_vb2 = + container_of(vb2_v4l2_buf, struct msm_vb2_buffer, + vb2_v4l2_buf); + /* put buf before buf done */ + if (msm_vb2->in_freeq) { + vb2_v4l2_buf->sequence = sequence; + vb2_v4l2_buf->timestamp = *ts; + vb2_buffer_done(&vb2_v4l2_buf->vb2_buf, + VB2_BUF_STATE_ERROR); + msm_vb2->in_freeq = 0; + rc = 0; + } else + rc = -EINVAL; + } else { + pr_err(" VB buffer is NULL for ses_id=%d, str_id=%d\n", + session_id, stream_id); + rc = -EINVAL; + } + spin_unlock_irqrestore(&stream->stream_lock, flags); + read_unlock_irqrestore(&session->stream_rwlock, rl_flags); + return rc; +} + long msm_vb2_return_buf_by_idx(int session_id, unsigned int stream_id, uint32_t index) { @@ -555,6 +616,7 @@ int msm_vb2_request_cb(struct msm_sd_req_vb2_q *req) req->put_buf = msm_vb2_put_buf; req->buf_done = msm_vb2_buf_done; req->flush_buf = msm_vb2_flush_buf; + req->buf_error = msm_vb2_buf_error; return 0; } diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c index 25fc34b26bc1..53a01aff4bdd 100644 --- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c +++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c 
@@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1624,6 +1624,7 @@ static int msm_cpp_buffer_ops(struct cpp_device *cpp_dev, case VIDIOC_MSM_BUF_MNGR_PUT_BUF: case VIDIOC_MSM_BUF_MNGR_BUF_DONE: case VIDIOC_MSM_BUF_MNGR_GET_BUF: + case VIDIOC_MSM_BUF_MNGR_BUF_ERROR: default: { struct msm_buf_mngr_info *buff_mgr_info = (struct msm_buf_mngr_info *)arg; @@ -3617,7 +3618,7 @@ STREAM_BUFF_END: break; } buff_mgr_info.frame_id = frame_info.frame_id; - rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_DONE, + rc = msm_cpp_buffer_ops(cpp_dev, VIDIOC_MSM_BUF_MNGR_BUF_ERROR, 0x0, &buff_mgr_info); if (rc < 0) { pr_err("error in buf done\n"); diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h index 6ed5c5c7dbce..323f99f25147 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_3_5_hwreg.h @@ -14,7 +14,6 @@ #define MSM_CSIPHY_3_5_HWREG_H #define ULPM_WAKE_UP_TIMER_MODE 2 -#define GLITCH_ELIMINATION_NUM 0x12 /* bit [6:4] */ #include <sensor/csiphy/msm_csiphy.h> @@ -47,7 +46,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v3_5_3ph = { {0x138, 0x0}, {0x13C, 0x10}, {0x140, 0x1}, - {0x144, GLITCH_ELIMINATION_NUM}, + {0x144, 0x32}, {0x148, 0xFE}, {0x14C, 0x1}, {0x154, 0x0}, diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c index a8d7c1f8b489..4f7a62716810 100644 --- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c +++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -52,6 +52,7 @@ #define MAX_DPHY_DATA_LN 4 #define CLOCK_OFFSET 0x700 #define CSIPHY_SOF_DEBUG_COUNT 2 +#define GBPS 1000000000 #undef CDBG #define CDBG(fmt, args...) pr_debug(fmt, ##args) @@ -134,8 +135,10 @@ static int msm_csiphy_3phase_lane_config( uint8_t i = 0; uint16_t lane_mask = 0, lane_enable = 0, temp; void __iomem *csiphybase; + uint64_t two_gbps = 0; csiphybase = csiphy_dev->base; + two_gbps = 2 * (uint64_t)csiphy_params->lane_cnt * GBPS; lane_mask = csiphy_params->lane_mask & 0x7; while (lane_mask != 0) { temp = (i << 1)+1; @@ -281,11 +284,20 @@ static int msm_csiphy_3phase_lane_config( csiphy_3ph_reg.mipi_csiphy_3ph_lnn_ctrl51.addr + 0x200*i); } - msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_lnn_ctrl25.data, - csiphybase + csiphy_dev->ctrl_reg->csiphy_3ph_reg. - mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + if ((csiphy_dev->hw_version == CSIPHY_VERSION_V35) && + (csiphy_params->data_rate > two_gbps)) { + msm_camera_io_w(0x40, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + } else { + msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_3ph_reg. + mipi_csiphy_3ph_lnn_ctrl25.data, + csiphybase + + csiphy_dev->ctrl_reg->csiphy_3ph_reg. 
+ mipi_csiphy_3ph_lnn_ctrl25.addr + 0x200*i); + } lane_mask >>= 1; i++; } @@ -797,10 +809,10 @@ static int msm_csiphy_lane_config(struct csiphy_device *csiphy_dev, ratio = csiphy_dev->csiphy_max_clk/clk_rate; csiphy_params->settle_cnt = csiphy_params->settle_cnt/ratio; } - CDBG("%s csiphy_params, mask = 0x%x cnt = %d\n", + CDBG("%s csiphy_params, mask = 0x%x cnt = %d, data rate = %llu\n", __func__, csiphy_params->lane_mask, - csiphy_params->lane_cnt); + csiphy_params->lane_cnt, csiphy_params->data_rate); CDBG("%s csiphy_params, settle cnt = 0x%x csid %d\n", __func__, csiphy_params->settle_cnt, csiphy_params->csid_core); diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c index 8490a65ae1c6..a43404cad3e3 100644 --- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c +++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c @@ -83,7 +83,7 @@ static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei) static void channel_swdemux_tsklet(unsigned long data) { struct channel_info *channel = (struct channel_info *)data; - struct c8sectpfei *fei = channel->fei; + struct c8sectpfei *fei; unsigned long wp, rp; int pos, num_packets, n, size; u8 *buf; @@ -91,6 +91,8 @@ static void channel_swdemux_tsklet(unsigned long data) if (unlikely(!channel || !channel->irec)) return; + fei = channel->fei; + wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0)); rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0)); diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index f838d9c7ed12..0fba4a2c1602 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf, goto rc_dev_fail; /* wire up inbound data handler */ - usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, - mceusb_dev_recv, ir, ep_in->bInterval); + if (usb_endpoint_xfer_int(ep_in)) + usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, + mceusb_dev_recv, ir, ep_in->bInterval); + else + usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp, + mceusb_dev_recv, ir); + ir->urb_in->transfer_dma = ir->dma_in; ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c index 9caea8344547..d793c630f1dd 100644 --- a/drivers/media/usb/cpia2/cpia2_v4l.c +++ b/drivers/media/usb/cpia2/cpia2_v4l.c @@ -812,7 +812,7 @@ static int cpia2_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct camera_data *cam = video_drvdata(file); if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; buf->m.offset = cam->buffers[buf->index].data - cam->frame_buffer; @@ -863,7 +863,7 @@ static int cpia2_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if(buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP || - buf->index > cam->num_frames) + buf->index >= cam->num_frames) return -EINVAL; DBG("QBUF #%d\n", buf->index); diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c index 3bbc77aa6a33..483457d4904f 100644 --- a/drivers/media/usb/usbtv/usbtv-core.c +++ b/drivers/media/usb/usbtv/usbtv-core.c @@ -95,6 +95,8 @@ static int usbtv_probe(struct usb_interface *intf, return 0; usbtv_audio_fail: + /* we must not free at this point */ + usb_get_dev(usbtv->udev); usbtv_video_free(usbtv); usbtv_video_fail: diff --git a/drivers/media/v4l2-core/videobuf2-core.c 
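/*
 * Editor's sketch (not part of the patch): the channel_swdemux_tsklet()
 * hunk above moves the 'fei = channel->fei' load below the NULL check so
 * the pointer is never dereferenced before it has been validated.  The
 * same ordering in a generic, self-contained form (hypothetical types):
 */
struct demux_ctx;                       /* opaque, details irrelevant here */

struct demux_chan {
	struct demux_ctx *ctx;
	void *irec;
};

static void demux_work(struct demux_chan *chan)
{
	struct demux_ctx *ctx;          /* declared, but NOT loaded yet */

	if (!chan || !chan->irec)       /* validate first ... */
		return;

	ctx = chan->ctx;                /* ... dereference only afterwards */
	(void)ctx;                      /* real work would start here */
}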
b/drivers/media/v4l2-core/videobuf2-core.c index 3dc9ed2e0774..bb1e19f7ed5a 100644 --- a/drivers/media/v4l2-core/videobuf2-core.c +++ b/drivers/media/v4l2-core/videobuf2-core.c @@ -205,6 +205,10 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory, struct vb2_buffer *vb; int ret; + /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */ + num_buffers = min_t(unsigned int, num_buffers, + VB2_MAX_FRAME - q->num_buffers); + for (buffer = 0; buffer < num_buffers; ++buffer) { /* Allocate videobuf buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c index bf23234d957e..b44d0e755675 100644 --- a/drivers/media/v4l2-core/videobuf2-v4l2.c +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c @@ -133,23 +133,6 @@ static int __set_timestamp(struct vb2_buffer *vb, const void *pb) return 0; }; -static void vb2_warn_zero_bytesused(struct vb2_buffer *vb) -{ - static bool check_once; - - if (check_once) - return; - - check_once = true; - WARN_ON(1); - - pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n"); - if (vb->vb2_queue->allow_zero_bytesused) - pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n"); - else - pr_warn("use the actual size instead.\n"); -} - static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b, const char *opname) { @@ -357,9 +340,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *pdst = &planes[plane]; struct v4l2_plane *psrc = &b->m.planes[plane]; - if (psrc->bytesused == 0) - vb2_warn_zero_bytesused(vb); - if (vb->vb2_queue->allow_zero_bytesused) pdst->bytesused = psrc->bytesused; else @@ -394,9 +374,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, } if (V4L2_TYPE_IS_OUTPUT(b->type)) { - if (b->bytesused == 0) - vb2_warn_zero_bytesused(vb); - if (vb->vb2_queue->allow_zero_bytesused) planes[0].bytesused = b->bytesused; else diff --git a/drivers/mfd/palmas.c b/drivers/mfd/palmas.c index 8f8bacb67a15..a6b5259ffbdd 100644 --- a/drivers/mfd/palmas.c +++ b/drivers/mfd/palmas.c @@ -430,6 +430,20 @@ static void palmas_power_off(void) { unsigned int addr; int ret, slave; + struct device_node *np = palmas_dev->dev->of_node; + + if (of_property_read_bool(np, "ti,palmas-override-powerhold")) { + addr = PALMAS_BASE_TO_REG(PALMAS_PU_PD_OD_BASE, + PALMAS_PRIMARY_SECONDARY_PAD2); + slave = PALMAS_BASE_TO_SLAVE(PALMAS_PU_PD_OD_BASE); + + ret = regmap_update_bits(palmas_dev->regmap[slave], addr, + PALMAS_PRIMARY_SECONDARY_PAD2_GPIO_7_MASK, 0); + if (ret) + dev_err(palmas_dev->dev, + "Unable to write PRIMARY_SECONDARY_PAD2 %d\n", + ret); + } if (!palmas_dev) return; diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c index cc91f7b3d90c..eb29113e0bac 100644 --- a/drivers/misc/enclosure.c +++ b/drivers/misc/enclosure.c @@ -148,7 +148,7 @@ enclosure_register(struct device *dev, const char *name, int components, for (i = 0; i < components; i++) { edev->component[i].number = -1; edev->component[i].slot = -1; - edev->component[i].power_status = 1; + edev->component[i].power_status = -1; } mutex_lock(&container_list_lock); @@ -600,6 +600,11 @@ static ssize_t get_component_power_status(struct device *cdev, if (edev->cb->get_power_status) edev->cb->get_power_status(edev, ecomp); + + /* If still uninitialized, the callback failed or does not exist. */ + if (ecomp->power_status == -1) + return (edev->cb->get_power_status) ? 
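/*
 * Editor's sketch (not part of the patch): the __vb2_queue_alloc() hunk
 * above clamps the requested buffer count so that the existing count plus
 * the new allocation can never exceed VB2_MAX_FRAME.  A self-contained
 * equivalent, with MAX_FRAMES as a stand-in constant:
 */
#include <stdio.h>

#define MAX_FRAMES 32u

static unsigned int clamp_new_buffers(unsigned int existing, unsigned int requested)
{
	unsigned int room = (existing < MAX_FRAMES) ? MAX_FRAMES - existing : 0;

	/* same effect as min_t(unsigned int, requested, MAX_FRAMES - existing) */
	return (requested < room) ? requested : room;
}

int main(void)
{
	printf("%u\n", clamp_new_buffers(30, 8));   /* prints 2: only two slots left */
	return 0;
}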
-EIO : -ENOTTY; + return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off"); } diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 4ef189a7a2fb..8c04e342e30a 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c @@ -571,7 +571,6 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data) break; default: - dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd); rets = -ENOIOCTLCMD; } diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c index 0e6ab4e7c686..c52c8ccc90b7 100644 --- a/drivers/misc/uid_sys_stats.c +++ b/drivers/misc/uid_sys_stats.c @@ -14,6 +14,7 @@ */ #include <linux/atomic.h> +#include <linux/cpufreq_times.h> #include <linux/err.h> #include <linux/hashtable.h> #include <linux/init.h> @@ -344,13 +345,13 @@ static int uid_cputime_show(struct seq_file *m, void *v) uid_entry->active_utime = 0; } - read_lock(&tasklist_lock); + rcu_read_lock(); do_each_thread(temp, task) { uid = from_kuid_munged(user_ns, task_uid(task)); if (!uid_entry || uid_entry->uid != uid) uid_entry = find_or_register_uid(uid); if (!uid_entry) { - read_unlock(&tasklist_lock); + rcu_read_unlock(); rt_mutex_unlock(&uid_lock); pr_err("%s: failed to find the uid_entry for uid %d\n", __func__, uid); @@ -360,7 +361,7 @@ static int uid_cputime_show(struct seq_file *m, void *v) uid_entry->active_utime += utime; uid_entry->active_stime += stime; } while_each_thread(temp, task); - read_unlock(&tasklist_lock); + rcu_read_unlock(); hash_for_each(hash_table, bkt, uid_entry, hash) { cputime_t total_utime = uid_entry->utime + @@ -421,6 +422,10 @@ static ssize_t uid_remove_write(struct file *file, kstrtol(end_uid, 10, &uid_end) != 0) { return -EINVAL; } + + /* Also remove uids from /proc/uid_time_in_state */ + cpufreq_task_times_remove_uids(uid_start, uid_end); + rt_mutex_lock(&uid_lock); for (; uid_start <= uid_end; uid_start++) { diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index f42d9c4e4561..cc277f7849b0 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -298,8 +298,11 @@ static void *qp_alloc_queue(u64 size, u32 flags) size_t pas_size; size_t vas_size; size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); - const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + u64 num_pages; + if (size > SIZE_MAX - PAGE_SIZE) + return NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; if (num_pages > (SIZE_MAX - queue_size) / (sizeof(*queue->kernel_if->u.g.pas) + @@ -624,9 +627,12 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) { struct vmci_queue *queue; size_t queue_page_size; - const u64 num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; + u64 num_pages; const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); + if (size > SIZE_MAX - PAGE_SIZE) + return NULL; + num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; if (num_pages > (SIZE_MAX - queue_size) / sizeof(*queue->kernel_if->u.h.page)) return NULL; diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 7d2ceda7f80e..de7def1f4f1c 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -1048,6 +1048,12 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev, goto idata_free; } + /* + * Ensure rpmb_req_pending flag is synchronized between multiple + * entities which may use rpmb ioclts with a lock. 
+ */ + mutex_lock(&card->host->rpmb_req_mutex); + atomic_set(&card->host->rpmb_req_pending, 1); mmc_get_card(card); if (mmc_card_doing_bkops(card)) { @@ -1163,6 +1169,9 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev, cmd_rel_host: mmc_put_card(card); + atomic_set(&card->host->rpmb_req_pending, 0); + mutex_unlock(&card->host->rpmb_req_mutex); + idata_free: for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) { @@ -3173,11 +3182,11 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep( static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq, struct request *req) { - struct mmc_card *card = mq->card; - struct mmc_host *host = card->host; + struct request_queue *q = req->q; - blk_requeue_request(req->q, req); - mmc_put_card(host->card); + spin_lock_irq(q->queue_lock); + blk_requeue_request(q, req); + spin_unlock_irq(q->queue_lock); } static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req) @@ -4065,9 +4074,16 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) * If issuing of the request fails with eitehr EBUSY or * EAGAIN error, re-queue the request. * This case would occur with ICE calls. + * For request which gets completed successfully or + * errored out, we release host lock in completion or + * error handling softirq context. But here the request + * is neither completed nor erred-out, so release the + * host lock explicitly. */ - if (ret == -EBUSY || ret == -EAGAIN) + if (ret == -EBUSY || ret == -EAGAIN) { mmc_blk_cmdq_requeue_rw_rq(mq, req); + mmc_put_card(host->card); + } } } diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index ccf22eb5bdc0..397bbd09034d 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -95,7 +95,9 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host, * be any other direct command active. * 3. cmdq state should be unhalted. * 4. cmdq state shouldn't be in error state. - * 5. free tag available to process the new request. + * 5. There is no outstanding RPMB request pending. + * 6. free tag available to process the new request. + * (This must be the last condtion to check) */ wait_event(ctx->wait, kthread_should_stop() || (mmc_peek_request(mq) && @@ -106,6 +108,7 @@ static inline void mmc_cmdq_ready_wait(struct mmc_host *host, && !(!host->card->part_curr && mmc_host_cq_disable(host) && !mmc_card_suspended(host->card)) && !test_bit(CMDQ_STATE_ERR, &ctx->curr_state) + && !atomic_read(&host->rpmb_req_pending) && !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked))); } diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 548a9e8b72ae..0b527a708bd7 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -373,12 +373,13 @@ int mmc_add_card(struct mmc_card *card) mmc_card_ddr52(card) ? "DDR " : "", type); } else { - pr_info("%s: new %s%s%s%s%s card at address %04x\n", + pr_info("%s: new %s%s%s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_card_uhs(card) ? "ultra high speed " : (mmc_card_hs(card) ? "high speed " : ""), mmc_card_hs400(card) ? "HS400 " : (mmc_card_hs200(card) ? "HS200 " : ""), + mmc_card_hs400es(card) ? "Enhanced strobe " : "", mmc_card_ddr52(card) ? 
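/*
 * Editor's sketch (not part of the patch): the vmci_queue_pair.c hunks
 * further up add a guard so that rounding a caller-supplied size up to
 * whole pages cannot overflow.  A userspace rendering of the same check
 * (the 4096-byte page size here is illustrative):
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_BYTES   4096ull
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Returns 0 and stores the page count, or -1 if 'size' is too large. */
static int bytes_to_pages(uint64_t size, uint64_t *pages)
{
	if (size > UINT64_MAX - PAGE_BYTES)          /* '(n) + (d) - 1' must not wrap */
		return -1;

	*pages = DIV_ROUND_UP(size, PAGE_BYTES) + 1; /* +1 extra page, mirroring the driver's '+ 1' */
	return 0;
}

int main(void)
{
	uint64_t pages;

	printf("%d\n", bytes_to_pages(UINT64_MAX, &pages));  /* prints -1 */
	printf("%d\n", bytes_to_pages(8192, &pages));        /* prints 0, pages == 3 */
	return 0;
}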
"DDR " : "", uhs_bus_speed_mode, type, card->rca); } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index d1d045f04368..2af0e819d0cb 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1170,6 +1170,46 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) return 0; } +static int mmc_cmdq_check_retune(struct mmc_host *host) +{ + bool cmdq_mode; + int err = 0; + + if (!host->need_retune || host->doing_retune || !host->card || + mmc_card_hs400es(host->card) || + (host->ios.clock <= MMC_HIGH_DDR_MAX_DTR)) + return 0; + + cmdq_mode = mmc_card_cmdq(host->card); + if (cmdq_mode) { + err = mmc_cmdq_halt(host, true); + if (err) { + pr_err("%s: %s: failed halting queue (%d)\n", + mmc_hostname(host), __func__, err); + host->cmdq_ops->dumpstate(host); + goto halt_failed; + } + } + + mmc_retune_hold(host); + err = mmc_retune(host); + mmc_retune_release(host); + + if (cmdq_mode) { + if (mmc_cmdq_halt(host, false)) { + pr_err("%s: %s: cmdq unhalt failed\n", + mmc_hostname(host), __func__); + host->cmdq_ops->dumpstate(host); + } + } + +halt_failed: + pr_debug("%s: %s: Retuning done err: %d\n", + mmc_hostname(host), __func__, err); + + return err; +} + static int mmc_start_cmdq_request(struct mmc_host *host, struct mmc_request *mrq) { @@ -1196,6 +1236,7 @@ static int mmc_start_cmdq_request(struct mmc_host *host, } mmc_host_clk_hold(host); + mmc_cmdq_check_retune(host); if (likely(host->cmdq_ops->request)) { ret = host->cmdq_ops->request(host, mrq); } else { @@ -1558,7 +1599,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host, mmc_card_removed(host->card)) { if (cmd->error && !cmd->retries && cmd->opcode != MMC_SEND_STATUS && - cmd->opcode != MMC_SEND_TUNING_BLOCK) + cmd->opcode != MMC_SEND_TUNING_BLOCK && + cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) mmc_recovery_fallback_lower_speed(host); break; } @@ -4493,6 +4535,14 @@ int mmc_pm_notify(struct notifier_block *notify_block, if (!err) break; + if (!mmc_card_is_removable(host)) { + dev_warn(mmc_dev(host), + "pre_suspend failed for non-removable host: " + "%d\n", err); + /* Avoid removing non-removable hosts */ + break; + } + /* Calling bus_ops->remove() with a claimed host can deadlock */ host->bus_ops->remove(host); mmc_claim_host(host); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 88699f852aa2..b3b9d78f789a 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -388,7 +388,8 @@ int mmc_retune(struct mmc_host *host) else return 0; - if (!host->need_retune || host->doing_retune || !host->card) + if (!host->need_retune || host->doing_retune || !host->card || + mmc_card_hs400es(host->card)) return 0; host->need_retune = 0; @@ -635,6 +636,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) #endif setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host); + mutex_init(&host->rpmb_req_mutex); + /* * By default, hosts do not support SGIO or large requests. * They have to set these according to their abilities. 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 6f4f81a370d8..c8f85b31e2ac 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -1275,6 +1275,8 @@ static int mmc_select_hs400(struct mmc_card *card) if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) { mmc_host_clk_hold(host); err = host->ops->enhanced_strobe(host); + if (!err) + host->ios.enhanced_strobe = true; mmc_host_clk_release(host); } else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) && host->ops->execute_tuning) { diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index fb204ee6ff89..581f5d0271f4 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -619,6 +619,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) (sizeof(struct idmac_desc_64addr) * (i + 1))) >> 32; /* Initialize reserved and buffer size fields to "0" */ + p->des0 = 0; p->des1 = 0; p->des2 = 0; p->des3 = 0; @@ -640,6 +641,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) i++, p++) { p->des3 = cpu_to_le32(host->sg_dma + (sizeof(struct idmac_desc) * (i + 1))); + p->des0 = 0; p->des1 = 0; } @@ -2807,8 +2809,8 @@ static bool dw_mci_reset(struct dw_mci *host) } if (host->use_dma == TRANS_MODE_IDMAC) - /* It is also recommended that we reset and reprogram idmac */ - dw_mci_idmac_reset(host); + /* It is also required that we reinit idmac */ + dw_mci_idmac_init(host); ret = true; diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 7fb0753abe30..6b814d7d6560 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1776,8 +1776,8 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) */ if (host->pdata->controller_flags & OMAP_HSMMC_SWAKEUP_MISSING) { struct pinctrl *p = devm_pinctrl_get(host->dev); - if (!p) { - ret = -ENODEV; + if (IS_ERR(p)) { + ret = PTR_ERR(p); goto err_free_irq; } if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 83b1226471c1..ac66c61d9433 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -418,6 +418,20 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) if (esdhc->vendor_ver < VENDOR_V_23) pre_div = 2; + /* + * Limit SD clock to 167MHz for ls1046a according to its datasheet + */ + if (clock > 167000000 && + of_find_compatible_node(NULL, NULL, "fsl,ls1046a-esdhc")) + clock = 167000000; + + /* + * Limit SD clock to 125MHz for ls1012a according to its datasheet + */ + if (clock > 125000000 && + of_find_compatible_node(NULL, NULL, "fsl,ls1012a-esdhc")) + clock = 125000000; + /* Workaround to reduce the clock frequency for p1010 esdhc */ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { if (clock > 20000000) diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index 7c0b27d132b1..b479bd81120b 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c @@ -1889,6 +1889,8 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base, do { uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi); mask = (1 << (cfi->device_type * 8)) - 1; + if (ofs >= map->size) + return 0; result = map_read(map, base + ofs); bank++; } while ((result.x[0] & mask) == CFI_MFR_CONTINUATION); diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index 7f4ac8c19001..5e3fa5861039 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ 
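/*
 * Editor's sketch (not part of the patch): the omap_hsmmc hunk above
 * replaces a NULL test with IS_ERR()/PTR_ERR(), because devm_pinctrl_get()
 * reports failure through an error-encoded pointer rather than NULL.  A
 * tiny userspace re-implementation of that kernel idiom, for illustration
 * only:
 */
#include <stdio.h>

#define MAX_ERRNO 4095UL

static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long  PTR_ERR(const void *ptr)  { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical getter that fails with -19 (-ENODEV) instead of NULL. */
static void *get_pinctrl(void) { return ERR_PTR(-19); }

int main(void)
{
	void *p = get_pinctrl();

	if (IS_ERR(p)) {                         /* '!p' would miss this failure */
		printf("error %ld\n", PTR_ERR(p));   /* prints "error -19" */
		return 1;
	}
	return 0;
}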
b/drivers/mtd/nand/fsl_ifc_nand.c @@ -726,6 +726,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) struct fsl_ifc_ctrl *ctrl = priv->ctrl; struct fsl_ifc_regs __iomem *ifc = ctrl->regs; u32 nand_fsr; + int status; /* Use READ_STATUS command, but wait for the device to be ready */ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | @@ -740,12 +741,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) fsl_ifc_run_command(mtd); nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - + status = nand_fsr >> 24; /* * The chip always seems to report that it is * write-protected, even when it is not. */ - return nand_fsr | NAND_STATUS_WP; + return status | NAND_STATUS_WP; } static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 27864c0863ef..8406f346b0be 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -626,7 +626,8 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, chip->cmd_ctrl(mtd, readcmd, ctrl); ctrl &= ~NAND_CTRL_CHANGE; } - chip->cmd_ctrl(mtd, command, ctrl); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, ctrl); /* Address cycle, when necessary */ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE; @@ -655,6 +656,7 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: case NAND_CMD_ERASE2: @@ -717,7 +719,9 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /* Command latch cycle */ - chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + if (command != NAND_CMD_NONE) + chip->cmd_ctrl(mtd, command, + NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); if (column != -1 || page_addr != -1) { int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; @@ -750,6 +754,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, */ switch (command) { + case NAND_CMD_NONE: case NAND_CMD_CACHEDPROG: case NAND_CMD_PAGEPROG: case NAND_CMD_ERASE1: diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index 3ea4c022cbb9..ccdb3dd74421 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c @@ -265,6 +265,12 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) vol->last_eb_bytes = vol->usable_leb_size; } + /* Make volume "available" before it becomes accessible via sysfs */ + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = vol; + ubi->vol_count += 1; + spin_unlock(&ubi->volumes_lock); + /* Register character device for the volume */ cdev_init(&vol->cdev, &ubi_vol_cdev_operations); vol->cdev.owner = THIS_MODULE; @@ -304,11 +310,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) if (err) goto out_sysfs; - spin_lock(&ubi->volumes_lock); - ubi->volumes[vol_id] = vol; - ubi->vol_count += 1; - spin_unlock(&ubi->volumes_lock); - ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); self_check_volumes(ubi); return err; @@ -328,6 +329,10 @@ out_sysfs: out_cdev: cdev_del(&vol->cdev); out_mapping: + spin_lock(&ubi->volumes_lock); + ubi->volumes[vol_id] = NULL; + ubi->vol_count -= 1; + spin_unlock(&ubi->volumes_lock); if (do_free) kfree(vol->eba_tbl); out_acc: diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2cb34b0f3856..278d12888cab 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1490,39 +1490,6 @@ int bond_enslave(struct net_device 
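/*
 * Editor's sketch (not part of the patch): the ubi_create_volume() hunks
 * above make the volume visible before the cdev/sysfs registration and add
 * a matching undo step to the error path.  The general shape -- set up in
 * order, unwind with gotos in exact reverse order -- in a self-contained
 * form with hypothetical step functions:
 */
#include <stdio.h>

static int step_publish(void)   { return 0; }   /* e.g. insert into a table   */
static int step_register(void)  { return -1; }  /* e.g. cdev/sysfs, fails here */
static void undo_publish(void)  { puts("unpublished"); }

static int create_object(void)
{
	int err;

	err = step_publish();
	if (err)
		return err;

	err = step_register();
	if (err)
		goto out_unpublish;     /* undo only what already succeeded */

	return 0;

out_unpublish:
	undo_publish();
	return err;
}

int main(void)
{
	printf("create_object() = %d\n", create_object());   /* -1, after "unpublished" */
	return 0;
}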
*bond_dev, struct net_device *slave_dev) goto err_close; } - /* If the mode uses primary, then the following is handled by - * bond_change_active_slave(). - */ - if (!bond_uses_primary(bond)) { - /* set promiscuity level to new slave */ - if (bond_dev->flags & IFF_PROMISC) { - res = dev_set_promiscuity(slave_dev, 1); - if (res) - goto err_close; - } - - /* set allmulti level to new slave */ - if (bond_dev->flags & IFF_ALLMULTI) { - res = dev_set_allmulti(slave_dev, 1); - if (res) - goto err_close; - } - - netif_addr_lock_bh(bond_dev); - - dev_mc_sync_multiple(slave_dev, bond_dev); - dev_uc_sync_multiple(slave_dev, bond_dev); - - netif_addr_unlock_bh(bond_dev); - } - - if (BOND_MODE(bond) == BOND_MODE_8023AD) { - /* add lacpdu mc addr to mc list */ - u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; - - dev_mc_add(slave_dev, lacpdu_multicast); - } - res = vlan_vids_add_by_dev(slave_dev, bond_dev); if (res) { netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n", @@ -1679,6 +1646,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) goto err_upper_unlink; } + /* If the mode uses primary, then the following is handled by + * bond_change_active_slave(). + */ + if (!bond_uses_primary(bond)) { + /* set promiscuity level to new slave */ + if (bond_dev->flags & IFF_PROMISC) { + res = dev_set_promiscuity(slave_dev, 1); + if (res) + goto err_sysfs_del; + } + + /* set allmulti level to new slave */ + if (bond_dev->flags & IFF_ALLMULTI) { + res = dev_set_allmulti(slave_dev, 1); + if (res) { + if (bond_dev->flags & IFF_PROMISC) + dev_set_promiscuity(slave_dev, -1); + goto err_sysfs_del; + } + } + + netif_addr_lock_bh(bond_dev); + dev_mc_sync_multiple(slave_dev, bond_dev); + dev_uc_sync_multiple(slave_dev, bond_dev); + netif_addr_unlock_bh(bond_dev); + + if (BOND_MODE(bond) == BOND_MODE_8023AD) { + /* add lacpdu mc addr to mc list */ + u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; + + dev_mc_add(slave_dev, lacpdu_multicast); + } + } + bond->slave_cnt++; bond_compute_features(bond); bond_set_carrier(bond); @@ -1702,6 +1703,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev) return 0; /* Undo stages on error */ +err_sysfs_del: + bond_sysfs_slave_del(new_slave); + err_upper_unlink: bond_upper_dev_unlink(bond_dev, slave_dev); @@ -1709,9 +1713,6 @@ err_unregister: netdev_rx_handler_unregister(slave_dev); err_detach: - if (!bond_uses_primary(bond)) - bond_hw_addr_flush(bond_dev, slave_dev); - vlan_vids_del_by_dev(slave_dev, bond_dev); if (rcu_access_pointer(bond->primary_slave) == new_slave) RCU_INIT_POINTER(bond->primary_slave, NULL); @@ -2555,11 +2556,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) bond_for_each_slave_rcu(bond, slave, iter) { unsigned long trans_start = dev_trans_start(slave->dev); + slave->new_link = BOND_LINK_NOCHANGE; + if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, trans_start, 1) && bond_time_in_interval(bond, slave->last_rx, 1)) { - slave->link = BOND_LINK_UP; + slave->new_link = BOND_LINK_UP; slave_state_changed = 1; /* primary_slave has no meaning in round-robin @@ -2586,7 +2589,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) if (!bond_time_in_interval(bond, trans_start, 2) || !bond_time_in_interval(bond, slave->last_rx, 2)) { - slave->link = BOND_LINK_DOWN; + slave->new_link = BOND_LINK_DOWN; slave_state_changed = 1; if (slave->link_failure_count < UINT_MAX) @@ -2617,6 +2620,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work) if 
(!rtnl_trylock()) goto re_arm; + bond_for_each_slave(bond, slave, iter) { + if (slave->new_link != BOND_LINK_NOCHANGE) + slave->link = slave->new_link; + } + if (slave_state_changed) { bond_slave_state_change(bond); if (BOND_MODE(bond) == BOND_MODE_XOR) @@ -3276,12 +3284,17 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res, for (i = 0; i < sizeof(*_res) / sizeof(u64); i++) { u64 nv = new[i]; u64 ov = old[i]; + s64 delta = nv - ov; /* detects if this particular field is 32bit only */ if (((nv | ov) >> 32) == 0) - res[i] += (u32)nv - (u32)ov; - else - res[i] += nv - ov; + delta = (s64)(s32)((u32)nv - (u32)ov); + + /* filter anomalies, some drivers reset their stats + * at down/up events. + */ + if (delta > 0) + res[i] += delta; } } diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 1e37313054f3..6da69af103e6 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev, return 0; } -static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +static void cc770_tx(struct net_device *dev, int mo) { struct cc770_priv *priv = netdev_priv(dev); - struct net_device_stats *stats = &dev->stats; - struct can_frame *cf = (struct can_frame *)skb->data; - unsigned int mo = obj2msgobj(CC770_OBJ_TX); + struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; u8 dlc, rtr; u32 id; int i; - if (can_dropped_invalid_skb(dev, skb)) - return NETDEV_TX_OK; - - if ((cc770_read_reg(priv, - msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { - netdev_err(dev, "TX register is still occupied!\n"); - return NETDEV_TX_BUSY; - } - - netif_stop_queue(dev); - dlc = cf->can_dlc; id = cf->can_id; - if (cf->can_id & CAN_RTR_FLAG) - rtr = 0; - else - rtr = MSGCFG_DIR; + rtr = cf->can_id & CAN_RTR_FLAG ? 
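/*
 * Editor's sketch (not part of the patch): the bond_fold_stats() hunk above
 * computes per-field deltas, treats fields whose old and new values both fit
 * in 32 bits as 32-bit counters (so a wrap is handled by the subtraction),
 * and drops negative deltas caused by drivers that reset their statistics.
 * The same arithmetic in isolation:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t fold_delta(uint64_t acc, uint64_t nv, uint64_t ov)
{
	int64_t delta = (int64_t)(nv - ov);

	if (((nv | ov) >> 32) == 0)                      /* looks like a 32-bit counter */
		delta = (int64_t)(int32_t)((uint32_t)nv - (uint32_t)ov);

	if (delta > 0)                                   /* filter resets / anomalies   */
		acc += (uint64_t)delta;

	return acc;
}

int main(void)
{
	/* 32-bit counter wrapped from 0xfffffff0 to 0x10: delta is 0x20, not negative */
	printf("%llu\n", (unsigned long long)fold_delta(0, 0x10, 0xfffffff0u));
	return 0;
}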
0 : MSGCFG_DIR; + + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES); + if (id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; cc770_write_reg(priv, msgobj[mo].config, @@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) for (i = 0; i < dlc; i++) cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); - /* Store echo skb before starting the transfer */ - can_put_echo_skb(skb, dev, 0); - cc770_write_reg(priv, msgobj[mo].ctrl1, - RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); + cc770_write_reg(priv, msgobj[mo].ctrl0, + MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); +} - stats->tx_bytes += dlc; +static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct cc770_priv *priv = netdev_priv(dev); + unsigned int mo = obj2msgobj(CC770_OBJ_TX); + if (can_dropped_invalid_skb(dev, skb)) + return NETDEV_TX_OK; - /* - * HM: We had some cases of repeated IRQs so make sure the - * INT is acknowledged I know it's already further up, but - * doing again fixed the issue - */ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + netif_stop_queue(dev); + + if ((cc770_read_reg(priv, + msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { + netdev_err(dev, "TX register is still occupied!\n"); + return NETDEV_TX_BUSY; + } + + priv->tx_skb = skb; + cc770_tx(dev, mo); return NETDEV_TX_OK; } @@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); + struct can_frame *cf; + u8 ctrl1; + + ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); - /* Nothing more to send, switch off interrupts */ cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); - /* - * We had some cases of repeated IRQ so make sure the - * INT is acknowledged + cc770_write_reg(priv, msgobj[mo].ctrl1, + RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); + + if (unlikely(!priv->tx_skb)) { + netdev_err(dev, "missing tx skb in tx interrupt\n"); + return; + } + + if (unlikely(ctrl1 & MSGLST_SET)) { + stats->rx_over_errors++; + stats->rx_errors++; + } + + /* When the CC770 is sending an RTR message and it receives a regular + * message that matches the id of the RTR message, it will overwrite the + * outgoing message in the TX register. When this happens we must + * process the received message and try to transmit the outgoing skb + * again. 
*/ - cc770_write_reg(priv, msgobj[mo].ctrl0, - MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); + if (unlikely(ctrl1 & NEWDAT_SET)) { + cc770_rx(dev, mo, ctrl1); + cc770_tx(dev, mo); + return; + } + cf = (struct can_frame *)priv->tx_skb->data; + stats->tx_bytes += cf->can_dlc; stats->tx_packets++; + + can_put_echo_skb(priv->tx_skb, dev, 0); can_get_echo_skb(dev, 0); + priv->tx_skb = NULL; + netif_wake_queue(dev); } @@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv) priv->can.do_set_bittiming = cc770_set_bittiming; priv->can.do_set_mode = cc770_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; + priv->tx_skb = NULL; memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h index a1739db98d91..95752e1d1283 100644 --- a/drivers/net/can/cc770/cc770.h +++ b/drivers/net/can/cc770/cc770.h @@ -193,6 +193,8 @@ struct cc770_priv { u8 cpu_interface; /* CPU interface register */ u8 clkout; /* Clock out register */ u8 bus_config; /* Bus conffiguration register */ + + struct sk_buff *tx_skb; }; struct net_device *alloc_cc770dev(int sizeof_priv); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c31e691d11fc..e8d31640058d 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -604,6 +604,7 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata, xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb); cb |= CFG_CLE_BYPASS_EN0; CFG_CLE_IP_PROTOCOL0_SET(&cb, 3); + CFG_CLE_IP_HDR_LEN_SET(&cb, 0); xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb); xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index c153a1dc5ff7..480312105964 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h @@ -147,6 +147,7 @@ enum xgene_enet_rm { #define CFG_RXCLK_MUXSEL0_SET(dst, val) xgene_set_bits(dst, val, 26, 3) #define CFG_CLE_IP_PROTOCOL0_SET(dst, val) xgene_set_bits(dst, val, 16, 2) +#define CFG_CLE_IP_HDR_LEN_SET(dst, val) xgene_set_bits(dst, val, 8, 5) #define CFG_CLE_DSTQID0_SET(dst, val) xgene_set_bits(dst, val, 0, 12) #define CFG_CLE_FPSEL0_SET(dst, val) xgene_set_bits(dst, val, 16, 4) #define CFG_MACMODE_SET(dst, val) xgene_set_bits(dst, val, 18, 2) diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c31c7407b753..425dae560322 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c @@ -150,8 +150,10 @@ static int emac_rockchip_probe(struct platform_device *pdev) /* Optional regulator for PHY */ priv->regulator = devm_regulator_get_optional(dev, "phy"); if (IS_ERR(priv->regulator)) { - if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto out_clk_disable; + } dev_err(dev, "no regulator found\n"); priv->regulator = NULL; } diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 027705117086..af9ec57bbebf 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -729,37 +729,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, struct bcm_sysport_tx_ring *ring) { struct net_device 
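/*
 * Editor's sketch (not part of the patch): the xgene hunks above add a
 * CFG_CLE_IP_HDR_LEN_SET() macro built on xgene_set_bits(dst, val, pos, len).
 * The exact helper lives elsewhere in that driver; a generic field-insert
 * routine of the same shape (write 'val' into 'len' bits starting at 'pos')
 * would look roughly like this:
 */
#include <stdint.h>
#include <stdio.h>

static void set_bits(uint32_t *dst, uint32_t val, unsigned int pos, unsigned int len)
{
	uint32_t mask = ((len >= 32) ? 0xffffffffu : ((1u << len) - 1u)) << pos;

	*dst = (*dst & ~mask) | ((val << pos) & mask);
}

int main(void)
{
	uint32_t reg = 0xffffffffu;

	set_bits(&reg, 0, 8, 5);        /* clear bits [12:8], like IP_HDR_LEN_SET(..., 0) */
	printf("0x%08x\n", reg);        /* prints 0xffffe0ff */
	return 0;
}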
*ndev = priv->netdev; - unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs; unsigned int pkts_compl = 0, bytes_compl = 0; + unsigned int txbds_processed = 0; struct bcm_sysport_cb *cb; + unsigned int txbds_ready; + unsigned int c_index; u32 hw_ind; /* Compute how many descriptors have been processed since last call */ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; - ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); - - last_c_index = ring->c_index; - num_tx_cbs = ring->size; - - c_index &= (num_tx_cbs - 1); - - if (c_index >= last_c_index) - last_tx_cn = c_index - last_c_index; - else - last_tx_cn = num_tx_cbs - last_c_index + c_index; + txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK; netif_dbg(priv, tx_done, ndev, - "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", - ring->index, c_index, last_tx_cn, last_c_index); + "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n", + ring->index, ring->c_index, c_index, txbds_ready); - while (last_tx_cn-- > 0) { - cb = ring->cbs + last_c_index; + while (txbds_processed < txbds_ready) { + cb = &ring->cbs[ring->clean_index]; bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl); ring->desc_count++; - last_c_index++; - last_c_index &= (num_tx_cbs - 1); + txbds_processed++; + + if (likely(ring->clean_index < ring->size - 1)) + ring->clean_index++; + else + ring->clean_index = 0; } ring->c_index = c_index; @@ -1229,6 +1225,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv, netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); ring->index = index; ring->size = size; + ring->clean_index = 0; ring->alloc_size = ring->size; ring->desc_cpu = p; ring->desc_count = ring->size; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index f28bf545d7f4..8ace6ecb5f79 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -638,7 +638,7 @@ struct bcm_sysport_tx_ring { unsigned int desc_count; /* Number of descriptors */ unsigned int curr_desc; /* Current descriptor */ unsigned int c_index; /* Last consumer index */ - unsigned int p_index; /* Current producer index */ + unsigned int clean_index; /* Current clean index */ struct bcm_sysport_cb *cbs; /* Transmit control blocks */ struct dma_desc *desc_cpu; /* CPU view of the descriptor */ struct bcm_sysport_priv *priv; /* private context backpointer */ diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 74bece5897c9..949a82458a29 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -2044,6 +2044,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) ETH_OVREHEAD + mtu + BNX2X_FW_RX_ALIGN_END; + fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size); /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */ if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE) fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD; @@ -3942,15 +3943,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) /* when transmitting in a vf, start bd must hold the ethertype * for fw to enforce it */ + u16 vlan_tci = 0; #ifndef BNX2X_STOP_ON_ERROR - if (IS_VF(bp)) + if (IS_VF(bp)) { #endif - tx_start_bd->vlan_or_ethertype = - cpu_to_le16(ntohs(eth->h_proto)); + /* Still need to consider inband vlan for enforced */ + if (__vlan_get_tag(skb, 
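/*
 * Editor's sketch (not part of the patch): the __bcm_sysport_tx_reclaim()
 * rewrite above derives the number of ready descriptors from the masked
 * difference between the hardware consumer index and the last seen index,
 * so the computation stays correct across index wrap-around.  In isolation:
 */
#include <stdio.h>

#define CONS_INDEX_MASK 0xffffu          /* stand-in for RING_CONS_INDEX_MASK */

static unsigned int txbds_ready(unsigned int hw_c_index, unsigned int last_c_index)
{
	return (hw_c_index - last_c_index) & CONS_INDEX_MASK;
}

int main(void)
{
	/* hardware index wrapped from 0xfffe past 0xffff to 0x0003: 5 descriptors ready */
	printf("%u\n", txbds_ready(0x0003, 0xfffe));
	return 0;
}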
&vlan_tci)) { + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(ntohs(eth->h_proto)); + } else { + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_INBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + tx_start_bd->vlan_or_ethertype = + cpu_to_le16(vlan_tci); + } #ifndef BNX2X_STOP_ON_ERROR - else + } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod); + } #endif } diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 0f6811860ad5..a36e38676640 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -2845,7 +2845,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver) static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer) { - memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); + strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN); } static void diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index cc1725616f9d..50747573f42e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -2823,7 +2823,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev) if (!g) { netif_info(lio, tx_err, lio->netdev, "Transmit scatter gather: glist null!\n"); - goto lio_xmit_dma_failed; + goto lio_xmit_failed; } cmdsetup.s.gather = 1; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cf61a5869c6e..de23f23b41de 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -6076,13 +6076,18 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, if (!t4_fw_matches_chip(adap, fw_hdr)) return -EINVAL; + /* Disable FW_OK flag so that mbox commands with FW_OK flag set + * wont be sent when we are flashing FW. + */ + adap->flags &= ~FW_OK; + ret = t4_fw_halt(adap, mbox, force); if (ret < 0 && !force) - return ret; + goto out; ret = t4_load_fw(adap, fw_data, size); if (ret < 0) - return ret; + goto out; /* * Older versions of the firmware don't understand the new @@ -6093,7 +6098,17 @@ int t4_fw_upgrade(struct adapter *adap, unsigned int mbox, * its header flags to see if it advertises the capability. */ reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0); - return t4_fw_restart(adap, mbox, reset); + ret = t4_fw_restart(adap, mbox, reset); + + /* Grab potentially new Firmware Device Log parameters so we can see + * how healthy the new Firmware is. It's okay to contact the new + * Firmware for these parameters even though, as far as it's + * concerned, we've never said "HELLO" to it ... 
+ */ + (void)t4_init_devlog_params(adap); +out: + adap->flags |= FW_OK; + return ret; } /** @@ -7696,7 +7711,16 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr) ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]); if (ret) break; - idx = (idx + 1) & UPDBGLARDPTR_M; + + /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to + * identify the 32-bit portion of the full 312-bit data + */ + if (is_t6(adap->params.chip) && (idx & 0xf) >= 9) + idx = (idx & 0xff0) + 0x10; + else + idx++; + /* address can't exceed 0xfff */ + idx &= UPDBGLARDPTR_M; } restart: if (cfg & UPDBGLAEN_F) { diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index fa3786a9d30e..ec8ffd7eae33 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -2604,8 +2604,8 @@ void t4vf_sge_stop(struct adapter *adapter) int t4vf_sge_init(struct adapter *adapter) { struct sge_params *sge_params = &adapter->params.sge; - u32 fl0 = sge_params->sge_fl_buffer_size[0]; - u32 fl1 = sge_params->sge_fl_buffer_size[1]; + u32 fl_small_pg = sge_params->sge_fl_buffer_size[0]; + u32 fl_large_pg = sge_params->sge_fl_buffer_size[1]; struct sge *s = &adapter->sge; unsigned int ingpadboundary, ingpackboundary; @@ -2614,9 +2614,20 @@ int t4vf_sge_init(struct adapter *adapter) * the Physical Function Driver. Ideally we should be able to deal * with _any_ configuration. Practice is different ... */ - if (fl0 != PAGE_SIZE || (fl1 != 0 && fl1 <= fl0)) { + + /* We only bother using the Large Page logic if the Large Page Buffer + * is larger than our Page Size Buffer. + */ + if (fl_large_pg <= fl_small_pg) + fl_large_pg = 0; + + /* The Page Size Buffer must be exactly equal to our Page Size and the + * Large Page Size Buffer should be 0 (per above) or a power of 2. + */ + if (fl_small_pg != PAGE_SIZE || + (fl_large_pg & (fl_large_pg - 1)) != 0) { dev_err(adapter->pdev_dev, "bad SGE FL buffer sizes [%d, %d]\n", - fl0, fl1); + fl_small_pg, fl_large_pg); return -EINVAL; } if ((sge_params->sge_control & RXPKTCPLMODE_F) == 0) { @@ -2627,8 +2638,8 @@ int t4vf_sge_init(struct adapter *adapter) /* * Now translate the adapter parameters into our internal forms. */ - if (fl1) - s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT; + if (fl_large_pg) + s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_F) ? 
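/*
 * Editor's sketch (not part of the patch): the t4vf_sge_init() hunk above
 * requires the large free-list buffer size to be zero or a power of two and
 * then derives a page order from it with ilog2().  Both pieces in a small
 * self-contained form (4096-byte pages assumed for the demo):
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12u

static int is_pow2_or_zero(unsigned long x)
{
	return (x & (x - 1)) == 0;           /* also true for x == 0 */
}

static unsigned int pg_order(unsigned long bytes)  /* bytes: a power of two, >= one page */
{
	unsigned int log = 0;

	while (bytes > 1) {
		bytes >>= 1;
		log++;                       /* poor man's ilog2() */
	}
	return log - DEMO_PAGE_SHIFT;
}

int main(void)
{
	printf("%d %u\n", is_pow2_or_zero(65536), pg_order(65536));   /* prints "1 4" */
	return 0;
}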
128 : 64); s->pktshift = PKTSHIFT_G(sge_params->sge_control); diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 6d0c5d5eea6d..58c0fccdd8cb 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -28,6 +28,7 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/netdevice.h> +#include <linux/of.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <net/ip.h> diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 458e2d97d096..ae8e4fc22e7b 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -3539,6 +3539,8 @@ fec_drv_remove(struct platform_device *pdev) fec_enet_mii_remove(fep); if (fep->reg_phy) regulator_disable(fep->reg_phy); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 40071dad1c57..9c76f1a2f57b 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -382,7 +382,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) { const struct of_device_id *id = of_match_device(fsl_pq_mdio_match, &pdev->dev); - const struct fsl_pq_mdio_data *data = id->data; + const struct fsl_pq_mdio_data *data; struct device_node *np = pdev->dev.of_node; struct resource res; struct device_node *tbi; @@ -390,6 +390,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) struct mii_bus *new_bus; int err; + if (!id) { + dev_err(&pdev->dev, "Failed to match device\n"); + return -ENODEV; + } + + data = id->data; + dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); new_bus = mdiobus_alloc_size(sizeof(*priv)); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c index 802d55457f19..b1a27aef4425 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c @@ -776,7 +776,7 @@ static void hns_xgmac_get_strings(u32 stringset, u8 *data) */ static int hns_xgmac_get_sset_count(int stringset) { - if (stringset == ETH_SS_STATS) + if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) return ARRAY_SIZE(g_xgmac_stats_string); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index a0332129970b..4b91eb70c683 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -1000,8 +1000,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset) cnt--; return cnt; - } else { + } else if (stringset == ETH_SS_STATS) { return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); + } else { + return -EOPNOTSUPP; } } diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 5d7db6c01c46..f301c03c527b 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -342,6 +342,7 @@ static int emac_reset(struct emac_instance *dev) { struct emac_regs __iomem *p = dev->emacp; int n = 20; + bool __maybe_unused try_internal_clock = false; DBG(dev, "reset" NL); @@ -354,6 +355,7 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE +do_retry: /* * PPC460EX/GT Embedded Processor Advanced 
User's Manual * section 28.10.1 Mode Register 0 (EMACx_MR0) states: @@ -361,10 +363,19 @@ static int emac_reset(struct emac_instance *dev) * of the EMAC. If none is present, select the internal clock * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). * After a soft reset, select the external clock. + * + * The AR8035-A PHY Meraki MR24 does not provide a TX Clk if the + * ethernet cable is not attached. This causes the reset to timeout + * and the PHY detection code in emac_init_phy() is unable to + * communicate and detect the AR8035-A PHY. As a result, the emac + * driver bails out early and the user has no ethernet. + * In order to stay compatible with existing configurations, the + * driver will temporarily switch to the internal clock, after + * the first reset fails. */ if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: select internal loop clock before reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, 0, SDR0_ETH_CFG_ECS << dev->cell_index); @@ -382,8 +393,15 @@ static int emac_reset(struct emac_instance *dev) #ifdef CONFIG_PPC_DCR_NATIVE if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { - if (dev->phy_address == 0xffffffff && - dev->phy_map == 0xffffffff) { + if (!n && !try_internal_clock) { + /* first attempt has timed out. */ + n = 20; + try_internal_clock = true; + goto do_retry; + } + + if (try_internal_clock || (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff)) { /* No PHY: restore external clock source after reset */ dcri_clrset(SDR0, SDR0_ETH_CFG, SDR0_ETH_CFG_ECS << dev->cell_index, 0); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 5205f1ebe381..20d8806d2bff 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -1182,6 +1182,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) struct e1000_hw *hw = &adapter->hw; if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct sk_buff *skb = adapter->tx_hwtstamp_skb; struct skb_shared_hwtstamps shhwtstamps; u64 txstmp; @@ -1190,9 +1191,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work) e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); - skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); - dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + /* Clear the global tx_hwtstamp_skb pointer and force writes + * prior to notifying the stack of a Tx timestamp. 
+ */ adapter->tx_hwtstamp_skb = NULL; + wmb(); /* force write prior to skb_tstamp_tx */ + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); } else if (time_after(jiffies, adapter->tx_hwtstamp_start + adapter->tx_timeout_factor * HZ)) { dev_kfree_skb_any(adapter->tx_hwtstamp_skb); @@ -3526,6 +3532,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) switch (hw->mac.type) { case e1000_pch2lan: + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; case e1000_pch_lpt: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 96MHz frequency */ @@ -6583,12 +6595,17 @@ static int e1000e_pm_thaw(struct device *dev) static int e1000e_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + int rc; e1000e_flush_lpic(pdev); e1000e_pm_freeze(dev); - return __e1000_shutdown(pdev, false); + rc = __e1000_shutdown(pdev, false); + if (rc) + e1000e_pm_thaw(dev); + + return rc; } static int e1000e_pm_resume(struct device *dev) diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 2ce0eba5e040..38431b49020f 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -983,7 +983,7 @@ static void fm10k_self_test(struct net_device *dev, memset(data, 0, sizeof(*data) * FM10K_TEST_LEN); - if (FM10K_REMOVED(hw)) { + if (FM10K_REMOVED(hw->hw_addr)) { netif_err(interface, drv, dev, "Interface removed - test blocked\n"); eth_test->flags |= ETH_TEST_FL_FAILED; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 488a50d59dca..3da1f206ff84 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1073,6 +1073,11 @@ static int i40e_get_eeprom_len(struct net_device *netdev) struct i40e_hw *hw = &np->vsi->back->hw; u32 val; +#define X722_EEPROM_SCOPE_LIMIT 0x5B9FFF + if (hw->mac.type == I40E_MAC_X722) { + val = X722_EEPROM_SCOPE_LIMIT + 1; + return val; + } val = (rd32(hw, I40E_GLPCI_LBARCTRL) & I40E_GLPCI_LBARCTRL_FL_SIZE_MASK) >> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT; diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 6100cdd9ad13..dd4e6ea9e0e1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -292,14 +292,14 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset, { enum i40e_status_code ret_code = 0; - if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { - ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); - if (!ret_code) { + ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); + if (!ret_code) { + if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) { ret_code = i40e_read_nvm_word_aq(hw, offset, data); - i40e_release_nvm(hw); + } else { + ret_code = i40e_read_nvm_word_srctl(hw, offset, data); } - } else { - ret_code = i40e_read_nvm_word_srctl(hw, offset, data); + i40e_release_nvm(hw); } return ret_code; } diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 4b62aa1f9ff8..6e5065f0907b 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -5079,7 +5079,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_WORK(&hw->restart_work, sky2_restart); 
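/*
 * Editor's sketch (not part of the patch): the i40e_read_nvm_word() hunk
 * above restructures the function so the NVM resource is acquired once,
 * the read method is chosen inside the critical section, and the resource
 * is released on every path.  The shape of that pattern, with hypothetical
 * helpers:
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nvm_lock = PTHREAD_MUTEX_INITIALIZER;

static int read_via_aq(unsigned int off, unsigned int *out)     { *out = off + 1; return 0; }
static int read_via_srctl(unsigned int off, unsigned int *out)  { *out = off + 2; return 0; }

static int read_word(int use_aq, unsigned int off, unsigned int *out)
{
	int ret;

	ret = pthread_mutex_lock(&nvm_lock);     /* acquire once, up front */
	if (ret)
		return ret;

	if (use_aq)                              /* choose the method inside */
		ret = read_via_aq(off, out);
	else
		ret = read_via_srctl(off, out);

	pthread_mutex_unlock(&nvm_lock);         /* release on every path */
	return ret;
}

int main(void)
{
	unsigned int w;

	read_word(1, 10, &w);
	printf("%u\n", w);                       /* prints 11 */
	return 0;
}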
pci_set_drvdata(pdev, hw); - pdev->d3_delay = 150; + pdev->d3_delay = 200; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1d4e2e054647..897d061e4f03 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -35,6 +35,7 @@ #include <linux/etherdevice.h> #include <linux/mlx4/cmd.h> +#include <linux/mlx4/qp.h> #include <linux/export.h> #include "mlx4.h" @@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, if (IS_ERR(mailbox)) return PTR_ERR(mailbox); + if (!mlx4_qp_lookup(dev, rule->qpn)) { + mlx4_err_rule(dev, "QP doesn't exist\n", rule); + ret = -EINVAL; + goto out; + } + trans_rule_ctrl_to_hw(rule, mailbox->buf); size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); list_for_each_entry(cur, &rule->list, list) { ret = parse_trans_rule(dev, cur, mailbox->buf + size); - if (ret < 0) { - mlx4_free_cmd_mailbox(dev, mailbox); - return ret; - } + if (ret < 0) + goto out; + size += ret; } @@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, } } +out: mlx4_free_cmd_mailbox(dev, mailbox); return ret; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index d8359ffba026..62f1a3433a62 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -381,6 +381,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) __mlx4_qp_free_icm(dev, qpn); } +struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) +{ + struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; + struct mlx4_qp *qp; + + spin_lock(&qp_table->lock); + + qp = __mlx4_qp_lookup(dev, qpn); + + spin_unlock(&qp_table->lock); + return qp; +} + int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -468,6 +481,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, } if (attr & MLX4_UPDATE_QP_QOS_VPORT) { + if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { + mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); + err = -EOPNOTSUPP; + goto out; + } + qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; cmd->qp_context.qos_vport = params->qos_vport; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index d1fc7fa87b05..e3080fbd9d00 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -5040,6 +5040,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } +static void update_qos_vpp(struct mlx4_update_qp_context *ctx, + struct mlx4_vf_immed_vlan_work *work) +{ + ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); + ctx->qp_context.qos_vport = work->qos_vport; +} + void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) { struct mlx4_vf_immed_vlan_work *work = @@ -5144,11 +5151,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) qp->sched_queue & 0xC7; upd_context->qp_context.pri_path.sched_queue |= ((work->qos & 0x7) << 3); - upd_context->qp_mask |= - cpu_to_be64(1ULL << - MLX4_UPD_QP_MASK_QOS_VPP); - upd_context->qp_context.qos_vport = - work->qos_vport; + + if (dev->caps.flags2 & + MLX4_DEV_CAP_FLAG2_QOS_VPP) + update_qos_vpp(upd_context, work); } err = mlx4_cmd(dev, mailbox->dma, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c 
b/drivers/net/ethernet/mellanox/mlx5/core/main.c index f5c1f4acc57b..7c42be586be8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -513,7 +513,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) struct mlx5_priv *priv = &mdev->priv; struct msix_entry *msix = priv->msix_arr; int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector; - int err; if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); @@ -523,18 +522,11 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), priv->irq_info[i].mask); - err = irq_set_affinity_hint(irq, priv->irq_info[i].mask); - if (err) { - mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x", - irq); - goto err_clear_mask; - } + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, priv->irq_info[i].mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); return 0; - -err_clear_mask: - free_cpumask_var(priv->irq_info[i].mask); - return err; } static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index b8d5270359cd..e30676515529 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu) cmd.req.arg3 = 0; if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE) - netxen_issue_cmd(adapter, &cmd); + rcode = netxen_issue_cmd(adapter, &cmd); if (rcode != NX_RCODE_SUCCESS) return -EIO; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 509b596cf1e8..bd1ec70fb736 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) } return -EIO; } - usleep_range(1000, 1500); + udelay(1200); } if (id_reg) diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 7327b729ba2e..ffa6885acfc8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -127,6 +127,8 @@ static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id) return 0; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset); pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride); diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c index be258d90de9e..e3223f2fe2ff 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c @@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump) sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_mpi_coredump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); /* Get generic NIC reg dump */ @@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev, 
sizeof(struct mpi_coredump_global_header); mpi_coredump->mpi_global_header.imageSize = sizeof(struct ql_reg_dump); - memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", + strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump", sizeof(mpi_coredump->mpi_global_header.idString)); diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index 1ef03939d25f..c90ae4d4be7d 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -296,8 +296,9 @@ qcaspi_receive(struct qcaspi *qca) /* Allocate rx SKB if we don't have one available. */ if (!qca->rx_skb) { - qca->rx_skb = netdev_alloc_skb(net_dev, - net_dev->mtu + VLAN_ETH_HLEN); + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, + net_dev->mtu + + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(net_dev, "out of RX resources\n"); qca->stats.out_of_mem++; @@ -377,7 +378,7 @@ qcaspi_receive(struct qcaspi *qca) qca->rx_skb, qca->rx_skb->dev); qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx_ni(qca->rx_skb); - qca->rx_skb = netdev_alloc_skb(net_dev, + qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); if (!qca->rx_skb) { netdev_dbg(net_dev, "out of RX resources\n"); @@ -759,7 +760,8 @@ qcaspi_netdev_init(struct net_device *dev) if (!qca->rx_buffer) return -ENOBUFS; - qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN); + qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu + + VLAN_ETH_HLEN); if (!qca->rx_skb) { kfree(qca->rx_buffer); netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n"); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 3783c40f568b..a82c89af7124 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -8411,12 +8411,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_msi_4; } + pci_set_drvdata(pdev, dev); + rc = register_netdev(dev); if (rc < 0) goto err_out_cnt_5; - pci_set_drvdata(pdev, dev); - netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 424d1dee55c9..afaf79b8761f 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3222,7 +3222,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev) /* MDIO bus init */ ret = sh_mdio_init(mdp, pd); if (ret) { - dev_err(&ndev->dev, "failed to initialise MDIO\n"); + dev_err(&pdev->dev, "failed to initialise MDIO\n"); goto out_release; } diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index fc958067d10a..c69b0bdd891d 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -280,6 +280,10 @@ struct cpsw_ss_regs { /* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */ #define CPSW_V1_SEQ_ID_OFS_SHIFT 16 +#define CPSW_MAX_BLKS_TX 15 +#define CPSW_MAX_BLKS_TX_SHIFT 4 +#define CPSW_MAX_BLKS_RX 5 + struct cpsw_host_regs { u32 max_blks; u32 blk_cnt; @@ -878,7 +882,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave, /* set speed_in input in case RMII mode is used in 100Mbps */ if (phy->speed == 100) mac_control |= BIT(15); - else if (phy->speed == 10) + /* in band mode only works in 10Mbps RGMII mode */ + else if ((phy->speed == 10) && phy_interface_is_rgmii(phy)) 
mac_control |= BIT(18); /* In Band mode */ if (priv->rx_pause) @@ -1126,11 +1131,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) switch (priv->version) { case CPSW_VERSION_1: slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS); break; case CPSW_VERSION_2: case CPSW_VERSION_3: case CPSW_VERSION_4: slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP); + /* Increase RX FIFO size to 5 for supporting fullduplex + * flow control mode + */ + slave_write(slave, + (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) | + CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS); break; } diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 49fe59b180a8..a75ce9051a7f 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c @@ -574,6 +574,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case HDLCDRVCTL_CALIBRATE: if(!capable(CAP_SYS_RAWIO)) return -EPERM; + if (s->par.bitrate <= 0) + return -EINVAL; if (bi.data.calibrate > INT_MAX / s->par.bitrate) return -EINVAL; s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8aecea0d5dbf..142015af43db 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c @@ -282,6 +282,10 @@ static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb, if (dev_forward_skb(ipvlan->dev, skb) == NET_RX_SUCCESS) success = true; } else { + if (!ether_addr_equal_64bits(eth_hdr(skb)->h_dest, + ipvlan->phy_dev->dev_addr)) + skb->pkt_type = PACKET_OTHERHOST; + ret = RX_HANDLER_ANOTHER; success = true; } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 7d0690433ee0..7d2cf015c5e7 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -148,6 +148,12 @@ static inline int phy_aneg_done(struct phy_device *phydev) if (phydev->drv->aneg_done) return phydev->drv->aneg_done(phydev); + /* Avoid genphy_aneg_done() if the Clause 45 PHY does not + * implement Clause 22 registers + */ + if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) + return -EINVAL; + return genphy_aneg_done(phydev); } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index e2decf71c6d1..46448d7e3290 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -2952,6 +2952,15 @@ ppp_connect_channel(struct channel *pch, int unit) goto outl; ppp_lock(ppp); + spin_lock_bh(&pch->downl); + if (!pch->chan) { + /* Don't connect unregistered channels */ + spin_unlock_bh(&pch->downl); + ppp_unlock(ppp); + ret = -ENOTCONN; + goto outl; + } + spin_unlock_bh(&pch->downl); if (pch->file.hdrlen > ppp->file.hdrlen) ppp->file.hdrlen = pch->file.hdrlen; hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */ diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index f7e8c79349ad..12a627fcc02c 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -501,7 +501,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.mtu = dst_mtu(&rt->dst); if (!po->chan.mtu) po->chan.mtu = PPP_MRU; - ip_rt_put(rt); po->chan.mtu -= PPTP_HEADER_OVERHEAD; po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 
61cd53838360..9bca36e1fefd 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2380,7 +2380,7 @@ send_done: if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } @@ -2660,7 +2660,7 @@ send_done: if (!nlh) { err = __send_and_alloc_skb(&skb, team, portid, send_func); if (err) - goto errout; + return err; goto send_done; } diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 72cb30828a12..c8e98c8e29fa 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1069,6 +1069,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) u16 n = 0, index, ndplen; u8 ready2send = 0; u32 delayed_ndp_size; + size_t padding_count; /* When our NDP gets written in cdc_ncm_ndp(), then skb_out->len gets updated * accordingly. Otherwise, we should check here. @@ -1225,11 +1226,13 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign) * a ZLP after full sized NTBs. */ if (!(dev->driver_info->flags & FLAG_SEND_ZLP) && - skb_out->len > ctx->min_tx_pkt) - memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, - ctx->tx_max - skb_out->len); - else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) + skb_out->len > ctx->min_tx_pkt) { + padding_count = ctx->tx_max - skb_out->len; + memset(skb_put(skb_out, padding_count), 0, padding_count); + } else if (skb_out->len < ctx->tx_max && + (skb_out->len % dev->maxpacket) == 0) { *skb_put(skb_out, 1) = 0; /* force short packet */ + } /* set final frame length */ nth16 = (struct usb_cdc_ncm_nth16 *)skb_out->data; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index ba21d072be31..6b4cc1c2e6b4 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -399,6 +399,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, if (ifmp && (dev->ifindex != 0)) peer->ifindex = ifmp->ifi_index; + peer->gso_max_size = dev->gso_max_size; + peer->gso_max_segs = dev->gso_max_segs; + err = register_netdevice(peer); put_net(net); net = NULL; diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8dfc75250583..d01285250204 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -556,7 +556,12 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, hdr = skb_vnet_hdr(skb); sg_init_table(rq->sg, 2); sg_set_buf(rq->sg, hdr, vi->hdr_len); - skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + + err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + if (unlikely(err < 0)) { + dev_kfree_skb(skb); + return err; + } err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); if (err < 0) @@ -858,7 +863,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) struct virtio_net_hdr_mrg_rxbuf *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; struct virtnet_info *vi = sq->vq->vdev->priv; - unsigned num_sg; + int num_sg; unsigned hdr_len = vi->hdr_len; bool can_push; @@ -911,11 +916,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) if (can_push) { __skb_push(skb, hdr_len); num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; /* Pull header back to avoid skew in tx bytes calculations. 
*/ __skb_pull(skb, hdr_len); } else { sg_set_buf(sq->sg, hdr, hdr_len); - num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; + num_sg++; } return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 82bf85ae5d08..419c045d0752 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -2789,6 +2789,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter) /* we need to enable NAPI, otherwise dev_close will deadlock */ for (i = 0; i < adapter->num_rx_queues; i++) napi_enable(&adapter->rx_queue[i].napi); + /* + * Need to clear the quiesce bit to ensure that vmxnet3_close + * can quiesce the device properly + */ + clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); dev_close(adapter->netdev); } diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index ac945f8781ac..d3d59122a357 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -550,13 +550,15 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s neigh = __ipv4_neigh_lookup_noref(dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); - if (!IS_ERR(neigh)) + if (!IS_ERR(neigh)) { ret = dst_neigh_output(dst, neigh, skb); + rcu_read_unlock_bh(); + return ret; + } rcu_read_unlock_bh(); err: - if (unlikely(ret < 0)) - vrf_tx_error(skb->dev, skb); + vrf_tx_error(skb->dev, skb); return ret; } diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index dab3bf6649e6..c41378214ede 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -962,7 +962,7 @@ static bool vxlan_snoop(struct net_device *dev, return false; /* Don't migrate static entries, drop packets */ - if (f->state & NUD_NOARP) + if (f->state & (NUD_PERMANENT | NUD_NOARP)) return true; if (net_ratelimit()) @@ -2834,6 +2834,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev, needed_headroom = lowerdev->hard_header_len; } + if (lowerdev) { + dev->gso_max_size = lowerdev->gso_max_size; + dev->gso_max_segs = lowerdev->gso_max_segs; + } + if (conf->mtu) { err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false); if (err) diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 0d7645581f91..4842344a96f1 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -574,7 +574,10 @@ static void ppp_timer(unsigned long arg) ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, 0, NULL); proto->restart_counter--; - } else + } else if (netif_carrier_ok(proto->dev)) + ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0, + 0, NULL); + else ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0, 0, NULL); break; diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c index db363856e0b5..2b064998915f 100644 --- a/drivers/net/wan/pc300too.c +++ b/drivers/net/wan/pc300too.c @@ -347,6 +347,7 @@ static int pc300_pci_init_one(struct pci_dev *pdev, card->rambase == NULL) { pr_err("ioremap() failed\n"); pc300_pci_remove_one(pdev); + return -ENOMEM; } /* PLX PCI 9050 workaround for local configuration register read bug */ diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c index 42aab9b86af3..0836a81b93e0 100644 --- a/drivers/net/wireless/ath/ath10k/debug.c +++ b/drivers/net/wireless/ath/ath10k/debug.c @@ -2226,6 +2226,15 @@ static ssize_t ath10k_write_simulate_radar(struct file 
*file, size_t count, loff_t *ppos) { struct ath10k *ar = file->private_data; + struct ath10k_vif *arvif; + + /* Just check for for the first vif alone, as all the vifs will be + * sharing the same channel and if the channel is disabled, all the + * vifs will share the same 'is_started' state. + */ + arvif = list_first_entry(&ar->arvifs, typeof(*arvif), list); + if (!arvif->is_started) + return -EINVAL; ieee80211_radar_detected(ar->hw); diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 8d382f12b5fd..008fd633cb5d 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -5665,6 +5665,22 @@ static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, arvif->vdev_id, ret); } +static void ath10k_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath10k *ar = hw->priv; + struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); + + mutex_lock(&ar->conf_mutex); + memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); + memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN); + arvif->gtk_rekey_data.replay_ctr = + be64_to_cpup((__be64 *)data->replay_ctr); + arvif->gtk_rekey_data.valid = true; + mutex_unlock(&ar->conf_mutex); +} + static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) @@ -6151,6 +6167,16 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); + if (sta->tdls) { + ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, + sta, + WMI_TDLS_PEER_STATE_TEARDOWN); + if (ret) + ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", + sta->addr, + WMI_TDLS_PEER_STATE_TEARDOWN, ret); + } + ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); if (ret) ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", @@ -7174,7 +7200,7 @@ ath10k_mac_update_rx_channel(struct ath10k *ar, lockdep_assert_held(&ar->data_lock); WARN_ON(ctx && vifs); - WARN_ON(vifs && n_vifs != 1); + WARN_ON(vifs && !n_vifs); /* FIXME: Sort of an optimization and a workaround. Peers and vifs are * on a linked list now. 
Doing a lookup peer -> vif -> chanctx for each @@ -7599,6 +7625,7 @@ static const struct ieee80211_ops ath10k_ops = { .bss_info_changed = ath10k_bss_info_changed, .hw_scan = ath10k_hw_scan, .cancel_hw_scan = ath10k_cancel_hw_scan, + .set_rekey_data = ath10k_set_rekey_data, .set_key = ath10k_set_key, .set_default_unicast_key = ath10k_set_default_unicast_key, .sta_state = ath10k_sta_state, @@ -7634,7 +7661,6 @@ static const struct ieee80211_ops ath10k_ops = { .suspend = ath10k_wow_op_suspend, .resume = ath10k_wow_op_resume, .set_wakeup = ath10k_wow_op_set_wakeup, - .set_rekey_data = ath10k_wow_op_set_rekey_data, #endif #ifdef CONFIG_MAC80211_DEBUGFS .sta_add_debugfs = ath10k_sta_add_debugfs, @@ -8308,6 +8334,7 @@ err_free: void ath10k_mac_unregister(struct ath10k *ar) { + ath10k_wow_deinit(ar); ieee80211_unregister_hw(ar->hw); if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index 2dc2b5360ee8..d938ca951aee 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -3123,13 +3123,14 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, void *ptr; int i; struct wmi_ns_arp_offload_req *arp = &arvif->arp_offload; + struct wmi_ns_arp_offload_req *ns = &arvif->ns_offload; struct wmi_ns_offload *ns_tuple; struct wmi_arp_offload *arp_tuple; len = sizeof(*cmd) + sizeof(*tlv) + - sizeof(*tlv) + WMI_MAX_NS_OFFLOADS * + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_ns_offload) + sizeof(*tlv)) + - sizeof(*tlv) + WMI_MAX_ARP_OFFLOADS * + sizeof(*tlv) + WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_arp_offload) + sizeof(*tlv)); skb = ath10k_wmi_alloc_skb(ar, len); @@ -3147,33 +3148,49 @@ ath10k_wmi_tlv_op_gen_set_arp_ns_offload(struct ath10k *ar, ptr += (sizeof(*tlv) + sizeof(*cmd)); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(WMI_MAX_NS_OFFLOADS * + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_ns_offload) + sizeof(*tlv))); ptr += sizeof(*tlv); tlv = ptr; - for (i = 0; i < WMI_MAX_NS_OFFLOADS; i++) { + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_NS_OFFLOAD_TUPLE); tlv->len = __cpu_to_le16(sizeof(struct wmi_ns_offload)); ns_tuple = (struct wmi_ns_offload *)tlv->value; - ns_tuple->flags |= __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + if (ns->enable_offload) { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); + if (ns->info.target_addr_valid.s6_addr[i]) { + memcpy(&ns_tuple->target_ipaddr[0], + &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + memcpy(&ns_tuple->solicitation_ipaddr, + &ns->info.self_addr[i], sizeof(struct in6_addr)); + if (ns->info.target_ipv6_ac.s6_addr[i] == IPV6_ADDR_ANY) + ns_tuple->flags |= + __cpu_to_le32(WMI_NSOFF_IPV6_ANYCAST); + } else { + ns_tuple->flags |= + __cpu_to_le32(WMI_ARP_NS_OFFLOAD_DISABLE); + } ptr += (sizeof(*tlv) + sizeof(struct wmi_ns_offload)); tlv = ptr; } tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); - tlv->len = __cpu_to_le16(WMI_MAX_ARP_OFFLOADS * + tlv->len = __cpu_to_le16(WMI_NS_ARP_OFFLOAD * (sizeof(struct wmi_arp_offload) + sizeof(*tlv))); ptr += sizeof(*tlv); tlv = ptr; - for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + for (i = 0; i < WMI_NS_ARP_OFFLOAD; i++) { tlv->tag = __cpu_to_le16(WMI_TLV_TAG_STRUCT_ARP_OFFLOAD_TUPLE); tlv->len = __cpu_to_le16(sizeof(struct wmi_arp_offload)); arp_tuple = (struct wmi_arp_offload *)tlv->value; if 
(arp->enable_offload && (i == 0)) { arp_tuple->flags |= - __cpu_to_le32(WMI_ARPOFF_FLAGS_VALID); + __cpu_to_le32(WMI_ARP_NS_OFF_FLAGS_VALID); memcpy(&arp_tuple->target_ipaddr, &arp->params.ipv4_addr, sizeof(arp_tuple->target_ipaddr)); diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 57b81b8bae82..4892c7d3cce3 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -21,6 +21,7 @@ #include <linux/types.h> #include <net/mac80211.h> #include <linux/ipv6.h> +#include <net/ipv6.h> #include <linux/in.h> /* @@ -2887,13 +2888,12 @@ struct wmi_start_scan_common { } __packed; /* ARP-NS offload data structure */ -#define WMI_NSOFF_MAX_TARGET_IPS 2 -#define WMI_MAX_NS_OFFLOADS 2 -#define WMI_MAX_ARP_OFFLOADS 2 -#define WMI_ARPOFF_FLAGS_VALID BIT(0) +#define WMI_NS_ARP_OFFLOAD 2 +#define WMI_ARP_NS_OFF_FLAGS_VALID BIT(0) #define WMI_IPV4_ARP_REPLY_OFFLOAD 0 #define WMI_ARP_NS_OFFLOAD_DISABLE 0 #define WMI_ARP_NS_OFFLOAD_ENABLE 1 +#define WMI_NSOFF_IPV6_ANYCAST BIT(3) struct wmi_ns_offload_info { struct in6_addr src_addr; @@ -2902,7 +2902,7 @@ struct wmi_ns_offload_info { struct wmi_mac_addr self_macaddr; u8 src_ipv6_addr_valid; struct in6_addr target_addr_valid; - struct in6_addr target_addr_ac_type; + struct in6_addr target_ipv6_ac; u8 slot_idx; } __packed; @@ -2914,13 +2914,13 @@ struct wmi_ns_arp_offload_req { struct in_addr ipv4_addr; struct in6_addr ipv6_addr; } params; - struct wmi_ns_offload_info offload_info; + struct wmi_ns_offload_info info; struct wmi_mac_addr bssid; } __packed; struct wmi_ns_offload { __le32 flags; - struct in6_addr target_ipaddr[WMI_NSOFF_MAX_TARGET_IPS]; + struct in6_addr target_ipaddr[WMI_NS_ARP_OFFLOAD]; struct in6_addr solicitation_ipaddr; struct in6_addr remote_ipaddr; struct wmi_mac_addr target_mac; @@ -5088,7 +5088,8 @@ enum wmi_10_4_vdev_param { #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3) #define WMI_TXBF_STS_CAP_OFFSET_LSB 4 -#define WMI_TXBF_STS_CAP_OFFSET_MASK 0xf0 +#define WMI_TXBF_STS_CAP_OFFSET_MASK 0x70 +#define WMI_TXBF_CONF_IMPLICIT_BF BIT(7) #define WMI_BF_SOUND_DIM_OFFSET_LSB 8 #define WMI_BF_SOUND_DIM_OFFSET_MASK 0xf00 diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 262a1a19196e..2280f47dc227 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -17,6 +17,7 @@ #include "mac.h" #include <net/mac80211.h> +#include <net/addrconf.h> #include "hif.h" #include "core.h" #include "debug.h" @@ -232,6 +233,116 @@ static int ath10k_wow_wakeup(struct ath10k *ar) } static int +ath10k_wow_fill_vdev_ns_offload_struct(struct ath10k_vif *arvif, + bool enable_offload) +{ + struct in6_addr addr[TARGET_NUM_STATIONS]; + struct wmi_ns_arp_offload_req *ns; + struct wireless_dev *wdev; + struct inet6_dev *in6_dev; + struct in6_addr addr_type; + struct inet6_ifaddr *ifa; + struct ifacaddr6 *ifaca; + struct list_head *addr_list; + u32 scope, count = 0; + int i; + + ns = &arvif->ns_offload; + if (!enable_offload) { + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_DISABLE); + return 0; + } + + wdev = ieee80211_vif_to_wdev(arvif->vif); + if (!wdev) + return -ENODEV; + + in6_dev = __in6_dev_get(wdev->netdev); + if (!in6_dev) + return -ENODEV; + + memset(&addr, 0, TARGET_NUM_STATIONS * sizeof(struct in6_addr)); + memset(&addr_type, 0, sizeof(struct in6_addr)); + + /* Unicast Addresses */ + read_lock_bh(&in6_dev->lock); + 
list_for_each(addr_list, &in6_dev->addr_list) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + ifa = list_entry(addr_list, struct inet6_ifaddr, if_list); + if (ifa->flags & IFA_F_DADFAILED) + continue; + scope = ipv6_addr_src_scope(&ifa->addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifa->addr.s6_addr, + sizeof(ifa->addr.s6_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_UNICAST; + count += 1; + break; + } + } + + /* Anycast Addresses */ + for (ifaca = in6_dev->ac_list; ifaca; ifaca = ifaca->aca_next) { + if (count >= TARGET_NUM_STATIONS) { + read_unlock_bh(&in6_dev->lock); + return -EINVAL; + } + + scope = ipv6_addr_src_scope(&ifaca->aca_addr); + switch (scope) { + case IPV6_ADDR_SCOPE_GLOBAL: + case IPV6_ADDR_SCOPE_LINKLOCAL: + memcpy(&addr[count], &ifaca->aca_addr, + sizeof(ifaca->aca_addr)); + addr_type.s6_addr[count] = IPV6_ADDR_ANY; + count += 1; + break; + } + } + read_unlock_bh(&in6_dev->lock); + + /* Filling up the request structure + * Filling the self_addr with solicited address + * A Solicited-Node multicast address is created by + * taking the last 24 bits of a unicast or anycast + * address and appending them to the prefix + * + * FF02:0000:0000:0000:0000:0001:FFXX:XXXX + * + * here XX is the unicast/anycast bits + */ + for (i = 0; i < count; i++) { + ns->info.self_addr[i].s6_addr[0] = 0xFF; + ns->info.self_addr[i].s6_addr[1] = 0x02; + ns->info.self_addr[i].s6_addr[11] = 0x01; + ns->info.self_addr[i].s6_addr[12] = 0xFF; + ns->info.self_addr[i].s6_addr[13] = addr[i].s6_addr[13]; + ns->info.self_addr[i].s6_addr[14] = addr[i].s6_addr[14]; + ns->info.self_addr[i].s6_addr[15] = addr[i].s6_addr[15]; + ns->info.slot_idx = i; + memcpy(&ns->info.target_addr[i], &addr[i], + sizeof(struct in6_addr)); + ns->info.target_addr_valid.s6_addr[i] = 1; + ns->info.target_ipv6_ac.s6_addr[i] = addr_type.s6_addr[i]; + memcpy(&ns->params.ipv6_addr, &ns->info.target_addr[i], + sizeof(struct in6_addr)); + } + + ns->offload_type = __cpu_to_le16(WMI_NS_ARP_OFFLOAD); + ns->enable_offload = __cpu_to_le16(WMI_ARP_NS_OFFLOAD_ENABLE); + ns->num_ns_offload_count = __cpu_to_le16(count); + + return 0; +} + +static int ath10k_wow_fill_vdev_arp_offload_struct(struct ath10k_vif *arvif, bool enable_offload) { @@ -291,6 +402,13 @@ static int ath10k_wow_enable_ns_arp_offload(struct ath10k *ar, bool offload) return ret; } + ret = ath10k_wow_fill_vdev_ns_offload_struct(arvif, offload); + if (ret) { + ath10k_err(ar, "NS-offload config failed, vdev: %d\n", + arvif->vdev_id); + return ret; + } + ret = ath10k_wmi_set_arp_ns_offload(ar, arvif); if (ret) { ath10k_err(ar, "failed to send offload cmd, vdev: %d\n", @@ -327,22 +445,6 @@ static int ath10k_config_wow_listen_interval(struct ath10k *ar) return 0; } -void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data) -{ - struct ath10k *ar = hw->priv; - struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); - - mutex_lock(&ar->conf_mutex); - memcpy(&arvif->gtk_rekey_data.kek, data->kek, NL80211_KEK_LEN); - memcpy(&arvif->gtk_rekey_data.kck, data->kck, NL80211_KCK_LEN); - arvif->gtk_rekey_data.replay_ctr = - cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr)); - arvif->gtk_rekey_data.valid = true; - mutex_unlock(&ar->conf_mutex); -} - static int ath10k_wow_config_gtk_offload(struct ath10k *ar, bool gtk_offload) { struct ath10k_vif *arvif; @@ -391,6 +493,13 @@ int ath10k_wow_op_suspend(struct 
ieee80211_hw *hw, goto exit; } + ret = ath10k_wow_cleanup(ar); + if (ret) { + ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", + ret); + goto exit; + } + ret = ath10k_wow_config_gtk_offload(ar, true); if (ret) { ath10k_warn(ar, "failed to enable GTK offload: %d\n", ret); @@ -403,18 +512,11 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw, goto disable_gtk_offload; } - ret = ath10k_wow_cleanup(ar); - if (ret) { - ath10k_warn(ar, "failed to clear wow wakeup events: %d\n", - ret); - goto disable_ns_arp_offload; - } - ret = ath10k_wow_set_wakeups(ar, wowlan); if (ret) { ath10k_warn(ar, "failed to set wow wakeup events: %d\n", ret); - goto cleanup; + goto disable_ns_arp_offload; } ret = ath10k_config_wow_listen_interval(ar); @@ -577,8 +679,15 @@ int ath10k_wow_init(struct ath10k *ar) ar->wow.wowlan_support = ath10k_wowlan_support; ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; ar->hw->wiphy->wowlan = &ar->wow.wowlan_support; - - device_set_wakeup_capable(ar->dev, true); + device_init_wakeup(ar->dev, true); return 0; } + +void ath10k_wow_deinit(struct ath10k *ar) +{ + if (test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, + ar->running_fw->fw_file.fw_features) && + test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)) + device_init_wakeup(ar->dev, false); +} diff --git a/drivers/net/wireless/ath/ath10k/wow.h b/drivers/net/wireless/ath/ath10k/wow.h index b53211584052..2ca4ba4848c9 100644 --- a/drivers/net/wireless/ath/ath10k/wow.h +++ b/drivers/net/wireless/ath/ath10k/wow.h @@ -27,13 +27,11 @@ struct ath10k_wow { #ifdef CONFIG_PM int ath10k_wow_init(struct ath10k *ar); +void ath10k_wow_deinit(struct ath10k *ar); int ath10k_wow_op_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int ath10k_wow_op_resume(struct ieee80211_hw *hw); void ath10k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); -void ath10k_wow_op_set_rekey_data(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct cfg80211_gtk_rekey_data *data); #else static inline int ath10k_wow_init(struct ath10k *ar) @@ -41,5 +39,8 @@ static inline int ath10k_wow_init(struct ath10k *ar) return 0; } +void ath10k_wow_deinit(struct ath10k *ar) +{ +} #endif /* CONFIG_PM */ #endif /* _WOW_H_ */ diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c index 654a1e33f827..7c5f189cace7 100644 --- a/drivers/net/wireless/ath/ath5k/debug.c +++ b/drivers/net/wireless/ath/ath5k/debug.c @@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file) } for (i = 0; i < eesize; ++i) { - AR5K_EEPROM_READ(i, val); + if (!ath5k_hw_nvram_read(ah, i, &val)) { + ret = -EIO; + goto freebuf; + } buf[i] = val; } diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c index 213569d384e7..5cde46c82a03 100644 --- a/drivers/net/wireless/ath/regd.c +++ b/drivers/net/wireless/ath/regd.c @@ -254,8 +254,12 @@ bool ath_is_49ghz_allowed(u16 regdomain) EXPORT_SYMBOL(ath_is_49ghz_allowed); /* Frequency is one where radar detection is required */ -static bool ath_is_radar_freq(u16 center_freq) +static bool ath_is_radar_freq(u16 center_freq, + struct ath_regulatory *reg) + { + if (reg->country_code == CTRY_INDIA) + return (center_freq >= 5500 && center_freq <= 5700); return (center_freq >= 5260 && center_freq <= 5720); } @@ -306,7 +310,7 @@ __ath_reg_apply_beaconing_flags(struct wiphy *wiphy, enum nl80211_reg_initiator initiator, struct ieee80211_channel *ch) { - if (ath_is_radar_freq(ch->center_freq) || + if (ath_is_radar_freq(ch->center_freq, reg) || 
(ch->flags & IEEE80211_CHAN_RADAR)) return; @@ -395,8 +399,9 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy, } } -/* Always apply Radar/DFS rules on freq range 5260 MHz - 5700 MHz */ -static void ath_reg_apply_radar_flags(struct wiphy *wiphy) +/* Always apply Radar/DFS rules on freq range 5500 MHz - 5700 MHz */ +static void ath_reg_apply_radar_flags(struct wiphy *wiphy, + struct ath_regulatory *reg) { struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; @@ -409,7 +414,7 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy) for (i = 0; i < sband->n_channels; i++) { ch = &sband->channels[i]; - if (!ath_is_radar_freq(ch->center_freq)) + if (!ath_is_radar_freq(ch->center_freq, reg)) continue; /* We always enable radar detection/DFS on this * frequency range. Additionally we also apply on @@ -505,7 +510,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy, struct ath_common *common = container_of(reg, struct ath_common, regulatory); /* We always apply this */ - ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_radar_flags(wiphy, reg); /* * This would happen when we have sent a custom regulatory request @@ -669,7 +674,7 @@ ath_regd_init_wiphy(struct ath_regulatory *reg, chan->flags |= IEEE80211_CHAN_DISABLED; } - ath_reg_apply_radar_flags(wiphy); + ath_reg_apply_radar_flags(wiphy, reg); ath_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg); return 0; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c index d224b3dd72ed..3196245ab820 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c @@ -461,25 +461,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac) * @dev_addr: optional device address. * * P2P needs mac addresses for P2P device and interface. If no device - * address it specified, these are derived from the primary net device, ie. - * the permanent ethernet address of the device. + * address it specified, these are derived from a random ethernet + * address. */ static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) { - struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; - bool local_admin = false; + bool random_addr = false; - if (!dev_addr || is_zero_ether_addr(dev_addr)) { - dev_addr = pri_ifp->mac_addr; - local_admin = true; - } + if (!dev_addr || is_zero_ether_addr(dev_addr)) + random_addr = true; - /* Generate the P2P Device Address. This consists of the device's - * primary MAC address with the locally administered bit set. + /* Generate the P2P Device Address obtaining a random ethernet + * address with the locally administered bit set. */ - memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); - if (local_admin) - p2p->dev_addr[0] |= 0x02; + if (random_addr) + eth_random_addr(p2p->dev_addr); + else + memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); /* Generate the P2P Interface Address. If the discovery and connection * BSSCFGs need to simultaneously co-exist, then this address must be diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c index af92f00ca56e..03219cf1693a 100644 --- a/drivers/net/wireless/cnss/cnss_pci.c +++ b/drivers/net/wireless/cnss/cnss_pci.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -569,6 +569,7 @@ out: static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state) { +#ifndef CONFIG_MSM_GVM_QUIN if (!info->prop) return; @@ -588,6 +589,9 @@ static void cnss_wlan_gpio_set(struct cnss_wlan_gpio_info *info, bool state) pr_debug("%s: %s gpio is now %s\n", __func__, info->name, info->state ? "enabled" : "disabled"); +#else + return; +#endif } static int cnss_configure_wlan_en_gpio(bool state) @@ -1560,7 +1564,6 @@ int cnss_msm_pcie_enumerate(u32 rc_idx) return msm_pcie_enumerate(rc_idx); } #else /* !defined CONFIG_PCI_MSM */ - struct pci_saved_state *cnss_pci_store_saved_state(struct pci_dev *dev) { return NULL; @@ -1570,7 +1573,7 @@ int cnss_msm_pcie_pm_control( enum msm_pcie_pm_opt pm_opt, u32 bus_num, struct pci_dev *pdev, u32 options) { - return -ENODEV; + return 0; } int cnss_pci_load_and_free_saved_state( @@ -1581,27 +1584,27 @@ int cnss_pci_load_and_free_saved_state( int cnss_msm_pcie_shadow_control(struct pci_dev *dev, bool enable) { - return -ENODEV; + return 0; } int cnss_msm_pcie_deregister_event(struct msm_pcie_register_event *reg) { - return -ENODEV; + return 0; } int cnss_msm_pcie_recover_config(struct pci_dev *dev) { - return -ENODEV; + return 0; } int cnss_msm_pcie_register_event(struct msm_pcie_register_event *reg) { - return -ENODEV; + return 0; } int cnss_msm_pcie_enumerate(u32 rc_idx) { - return -EPROBE_DEFER; + return 0; } #endif @@ -2870,7 +2873,9 @@ static int cnss_probe(struct platform_device *pdev) struct esoc_desc *desc; const char *client_desc; struct device *dev = &pdev->dev; +#ifndef CONFIG_MSM_GVM_QUIN u32 rc_num; +#endif struct resource *res; u32 ramdump_size = 0; u32 smmu_iova_address[2]; @@ -2905,6 +2910,7 @@ static int cnss_probe(struct platform_device *pdev) goto err_get_rc; } +#ifndef CONFIG_MSM_GVM_QUIN ret = of_property_read_u32(dev->of_node, "qcom,wlan-rc-num", &rc_num); if (ret) { pr_err("%s: Failed to find PCIe RC number!\n", __func__); @@ -2916,6 +2922,7 @@ static int cnss_probe(struct platform_device *pdev) pr_err("%s: Failed to enable PCIe RC%x!\n", __func__, rc_num); goto err_pcie_enumerate; } +#endif penv->pcie_link_state = PCIE_LINK_UP; @@ -3096,7 +3103,9 @@ err_subsys_reg: devm_unregister_esoc_client(&pdev->dev, penv->esoc_desc); err_esoc_reg: +#ifndef CONFIG_MSM_GVM_QUIN err_pcie_enumerate: +#endif err_get_rc: cnss_configure_wlan_en_gpio(WLAN_EN_LOW); cnss_wlan_release_resources(); diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c index dcaf3203f197..fc35b0892768 100644 --- a/drivers/net/wireless/cnss2/main.c +++ b/drivers/net/wireless/cnss2/main.c @@ -1690,6 +1690,7 @@ static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv) static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv) { + plat_priv->cal_done = true; cnss_wlfw_wlan_mode_send_sync(plat_priv, QMI_WLFW_OFF_V01); cnss_shutdown(plat_priv); clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state); diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h index f658b6d391b8..a36281cb560f 100644 --- a/drivers/net/wireless/cnss2/main.h +++ b/drivers/net/wireless/cnss2/main.h @@ -107,6 +107,7 @@ struct cnss_fw_mem { void *va; phys_addr_t pa; bool valid; + u32 type; }; enum cnss_driver_event_type { @@ -192,7 +193,8 @@ struct cnss_plat_data { struct wlfw_rf_board_info_s_v01 board_info; struct wlfw_soc_info_s_v01 soc_info; 
struct wlfw_fw_version_info_s_v01 fw_version_info; - struct cnss_fw_mem fw_mem; + u32 fw_mem_seg_len; + struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; struct cnss_fw_mem m3_mem; struct cnss_pin_connect_result pin_result; struct dentry *root_dentry; @@ -204,6 +206,7 @@ struct cnss_plat_data { u32 diag_reg_read_mem_type; u32 diag_reg_read_len; u8 *diag_reg_read_buf; + bool cal_done; }; void *cnss_bus_dev_to_bus_priv(struct device *dev); diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c index 2ee0edda1b5b..5746366ff852 100644 --- a/drivers/net/wireless/cnss2/pci.c +++ b/drivers/net/wireless/cnss2/pci.c @@ -737,18 +737,21 @@ int cnss_pm_request_resume(struct cnss_pci_data *pci_priv) int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - - if (!fw_mem->va && fw_mem->size) { - fw_mem->va = dma_alloc_coherent(&pci_priv->pci_dev->dev, - fw_mem->size, &fw_mem->pa, - GFP_KERNEL); - if (!fw_mem->va) { - cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx\n", - fw_mem->size); - fw_mem->size = 0; - - return -ENOMEM; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + int i; + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (!fw_mem[i].va && fw_mem[i].size) { + fw_mem[i].va = + dma_alloc_coherent(&pci_priv->pci_dev->dev, + fw_mem[i].size, + &fw_mem[i].pa, GFP_KERNEL); + if (!fw_mem[i].va) { + cnss_pr_err("Failed to allocate memory for FW, size: 0x%zx, type: %u\n", + fw_mem[i].size, fw_mem[i].type); + + return -ENOMEM; + } } } @@ -758,17 +761,25 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv) static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv) { struct cnss_plat_data *plat_priv = pci_priv->plat_priv; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - - if (fw_mem->va && fw_mem->size) { - cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n", - fw_mem->va, &fw_mem->pa, fw_mem->size); - dma_free_coherent(&pci_priv->pci_dev->dev, fw_mem->size, - fw_mem->va, fw_mem->pa); - fw_mem->va = NULL; - fw_mem->pa = 0; - fw_mem->size = 0; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + int i; + + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + if (fw_mem[i].va && fw_mem[i].size) { + cnss_pr_dbg("Freeing memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", + fw_mem[i].va, &fw_mem[i].pa, + fw_mem[i].size, fw_mem[i].type); + dma_free_coherent(&pci_priv->pci_dev->dev, + fw_mem[i].size, fw_mem[i].va, + fw_mem[i].pa); + fw_mem[i].va = NULL; + fw_mem[i].pa = 0; + fw_mem[i].size = 0; + fw_mem[i].type = 0; + } } + + plat_priv->fw_mem_seg_len = 0; } int cnss_pci_load_m3(struct cnss_pci_data *pci_priv) diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c index f4344aee54ee..b8777c18d252 100644 --- a/drivers/net/wireless/cnss2/qmi.c +++ b/drivers/net/wireless/cnss2/qmi.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -159,10 +159,9 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv) memset(&req, 0, sizeof(req)); memset(&resp, 0, sizeof(resp)); - req.daemon_support_valid = 1; - req.daemon_support = daemon_support; - - cnss_pr_dbg("daemon_support is %d\n", req.daemon_support); + req.num_clients_valid = 1; + req.num_clients = daemon_support ? 2 : 1; + cnss_pr_dbg("Number of clients is %d\n", req.num_clients); req.wake_msi = cnss_get_wake_msi(plat_priv); if (req.wake_msi) { @@ -170,6 +169,19 @@ static int cnss_wlfw_host_cap_send_sync(struct cnss_plat_data *plat_priv) req.wake_msi_valid = 1; } + req.bdf_support_valid = 1; + req.bdf_support = 1; + + req.m3_support_valid = 1; + req.m3_support = 1; + + req.m3_cache_support_valid = 1; + req.m3_cache_support = 1; + + req.cal_done_valid = 1; + req.cal_done = plat_priv->cal_done; + cnss_pr_dbg("Calibration done is %d\n", plat_priv->cal_done); + req_desc.max_msg_len = WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN; req_desc.msg_id = QMI_WLFW_HOST_CAP_REQ_V01; req_desc.ei_array = wlfw_host_cap_req_msg_v01_ei; @@ -221,8 +233,8 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv) req.request_mem_enable = 1; req.fw_mem_ready_enable_valid = 1; req.fw_mem_ready_enable = 1; - req.cold_boot_cal_done_enable_valid = 1; - req.cold_boot_cal_done_enable = 1; + req.fw_init_done_enable_valid = 1; + req.fw_init_done_enable = 1; req.pin_connect_result_enable_valid = 1; req.pin_connect_result_enable = 1; @@ -260,27 +272,48 @@ static int cnss_wlfw_request_mem_ind_hdlr(struct cnss_plat_data *plat_priv, void *msg, unsigned int msg_len) { struct msg_desc ind_desc; - struct wlfw_request_mem_ind_msg_v01 ind_msg; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - int ret = 0; + struct wlfw_request_mem_ind_msg_v01 *ind_msg; + int ret = 0, i; + + ind_msg = kzalloc(sizeof(*ind_msg), GFP_KERNEL); + if (!ind_msg) + return -ENOMEM; ind_desc.msg_id = QMI_WLFW_REQUEST_MEM_IND_V01; ind_desc.max_msg_len = WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN; ind_desc.ei_array = wlfw_request_mem_ind_msg_v01_ei; - ret = qmi_kernel_decode(&ind_desc, &ind_msg, msg, msg_len); + ret = qmi_kernel_decode(&ind_desc, ind_msg, msg, msg_len); if (ret < 0) { cnss_pr_err("Failed to decode request memory indication, msg_len: %u, err = %d\n", ret, msg_len); - return ret; + goto out; } - fw_mem->size = ind_msg.size; + if (ind_msg->mem_seg_len == 0 || + ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) { + cnss_pr_err("Invalid memory segment length: %u\n", + ind_msg->mem_seg_len); + ret = -EINVAL; + goto out; + } + + cnss_pr_dbg("FW memory segment count is %u\n", ind_msg->mem_seg_len); + plat_priv->fw_mem_seg_len = ind_msg->mem_seg_len; + for (i = 0; i < plat_priv->fw_mem_seg_len; i++) { + plat_priv->fw_mem[i].type = ind_msg->mem_seg[i].type; + plat_priv->fw_mem[i].size = ind_msg->mem_seg[i].size; + } cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_REQUEST_MEM, 0, NULL); + kfree(ind_msg); return 0; + +out: + kfree(ind_msg); + return ret; } static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv, @@ -317,29 +350,46 @@ static int cnss_qmi_pin_result_ind_hdlr(struct cnss_plat_data *plat_priv, int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) { - struct wlfw_respond_mem_req_msg_v01 req; - struct wlfw_respond_mem_resp_msg_v01 resp; + struct wlfw_respond_mem_req_msg_v01 *req; + struct 
wlfw_respond_mem_resp_msg_v01 *resp; struct msg_desc req_desc, resp_desc; - struct cnss_fw_mem *fw_mem = &plat_priv->fw_mem; - int ret = 0; + struct cnss_fw_mem *fw_mem = plat_priv->fw_mem; + int ret = 0, i; cnss_pr_dbg("Sending respond memory message, state: 0x%lx\n", plat_priv->driver_state); - if (!fw_mem->pa || !fw_mem->size) { - cnss_pr_err("Memory for FW is not available!\n"); - ret = -ENOMEM; - goto out; - } + req = kzalloc(sizeof(*req), GFP_KERNEL); + if (!req) + return -ENOMEM; + + resp = kzalloc(sizeof(*resp), GFP_KERNEL); + if (!resp) + return -ENOMEM; - cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx\n", - fw_mem->va, &fw_mem->pa, fw_mem->size); + req->mem_seg_len = plat_priv->fw_mem_seg_len; + for (i = 0; i < req->mem_seg_len; i++) { + if (!fw_mem[i].pa || !fw_mem[i].size) { + if (fw_mem[i].type == 0) { + cnss_pr_err("Invalid memory for FW type, segment = %d\n", + i); + ret = -EINVAL; + goto out; + } + cnss_pr_err("Memory for FW is not available for type: %u\n", + fw_mem[i].type); + ret = -ENOMEM; + goto out; + } - memset(&req, 0, sizeof(req)); - memset(&resp, 0, sizeof(resp)); + cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n", + fw_mem[i].va, &fw_mem[i].pa, + fw_mem[i].size, fw_mem[i].type); - req.addr = fw_mem->pa; - req.size = fw_mem->size; + req->mem_seg[i].addr = fw_mem[i].pa; + req->mem_seg[i].size = fw_mem[i].size; + req->mem_seg[i].type = fw_mem[i].type; + } req_desc.max_msg_len = WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN; req_desc.msg_id = QMI_WLFW_RESPOND_MEM_REQ_V01; @@ -349,8 +399,8 @@ int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) resp_desc.msg_id = QMI_WLFW_RESPOND_MEM_RESP_V01; resp_desc.ei_array = wlfw_respond_mem_resp_msg_v01_ei; - ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, &req, - sizeof(req), &resp_desc, &resp, sizeof(resp), + ret = qmi_send_req_wait(plat_priv->qmi_wlfw_clnt, &req_desc, req, + sizeof(*req), &resp_desc, resp, sizeof(*resp), QMI_WLFW_TIMEOUT_MS); if (ret < 0) { cnss_pr_err("Failed to send respond memory request, err = %d\n", @@ -358,16 +408,21 @@ int cnss_wlfw_respond_mem_send_sync(struct cnss_plat_data *plat_priv) goto out; } - if (resp.resp.result != QMI_RESULT_SUCCESS_V01) { + if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { cnss_pr_err("Respond memory request failed, result: %d, err: %d\n", - resp.resp.result, resp.resp.error); - ret = resp.resp.result; + resp->resp.result, resp->resp.error); + ret = resp->resp.result; goto out; } + kfree(req); + kfree(resp); return 0; + out: CNSS_ASSERT(0); + kfree(req); + kfree(resp); return ret; } @@ -908,12 +963,12 @@ static void cnss_wlfw_clnt_ind(struct qmi_handle *handle, CNSS_DRIVER_EVENT_FW_MEM_READY, 0, NULL); break; - case QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01: + case QMI_WLFW_FW_READY_IND_V01: cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_COLD_BOOT_CAL_DONE, 0, NULL); break; - case QMI_WLFW_FW_READY_IND_V01: + case QMI_WLFW_FW_INIT_DONE_IND_V01: cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_FW_READY, 0, NULL); @@ -974,11 +1029,11 @@ int cnss_wlfw_server_arrive(struct cnss_plat_data *plat_priv) cnss_pr_info("QMI WLFW service connected, state: 0x%lx\n", plat_priv->driver_state); - ret = cnss_wlfw_host_cap_send_sync(plat_priv); + ret = cnss_wlfw_ind_register_send_sync(plat_priv); if (ret < 0) goto out; - ret = cnss_wlfw_ind_register_send_sync(plat_priv); + ret = cnss_wlfw_host_cap_send_sync(plat_priv); if (ret < 0) goto out; diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c 
b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c index 7d6a771bc0d5..bbf707b869bd 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -62,7 +62,7 @@ static struct elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -97,7 +97,7 @@ static struct elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -123,7 +123,7 @@ static struct elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -140,7 +140,7 @@ static struct elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -175,7 +175,131 @@ static struct elem_info wlfw_memory_region_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_mem_cfg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + offset), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + size), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_cfg_s_v01, + secure_flag), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_mem_seg_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_mem_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + type), + }, + { + .data_type = QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + mem_cfg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01, + .elem_size = sizeof(struct wlfw_mem_cfg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_s_v01, + mem_cfg), + .ei_array = wlfw_mem_cfg_s_v01_ei, + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +static struct elem_info wlfw_mem_seg_resp_s_v01_ei[] = { + { + .data_type = QMI_UNSIGNED_8_BYTE, + .elem_len = 1, + .elem_size = sizeof(u64), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + addr), + }, + { + .data_type = 
QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + size), + }, + { + .data_type = QMI_SIGNED_4_BYTE_ENUM, + .elem_len = 1, + .elem_size = sizeof(enum wlfw_mem_type_enum_v01), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + type), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0, + .offset = offsetof(struct wlfw_mem_seg_resp_s_v01, + restore), + }, + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -201,7 +325,7 @@ static struct elem_info wlfw_rf_chip_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -218,7 +342,7 @@ static struct elem_info wlfw_rf_board_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -235,7 +359,7 @@ static struct elem_info wlfw_soc_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -261,7 +385,7 @@ static struct elem_info wlfw_fw_version_info_s_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -418,7 +542,7 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, - cold_boot_cal_done_enable_valid), + fw_init_done_enable_valid), }, { .data_type = QMI_UNSIGNED_1_BYTE, @@ -427,7 +551,7 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { .is_array = NO_ARRAY, .tlv_type = 0x18, .offset = offsetof(struct wlfw_ind_register_req_msg_v01, - cold_boot_cal_done_enable), + fw_init_done_enable), }, { .data_type = QMI_OPT_FLAG, @@ -448,9 +572,45 @@ struct elem_info wlfw_ind_register_req_msg_v01_ei[] = { rejuvenate_enable), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + xo_cal_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + xo_cal_enable), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cal_done_enable_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_ind_register_req_msg_v01, + cal_done_enable), + }, + { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -489,7 +649,7 @@ struct elem_info wlfw_ind_register_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -497,7 +657,7 @@ struct elem_info wlfw_fw_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -505,7 +665,7 @@ struct elem_info wlfw_msa_ready_ind_msg_v01_ei[] = { { 
.data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -573,7 +733,7 @@ struct elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -608,7 +768,7 @@ struct elem_info wlfw_wlan_mode_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -626,7 +786,7 @@ struct elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -764,7 +924,7 @@ struct elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -782,7 +942,7 @@ struct elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -790,7 +950,7 @@ struct elem_info wlfw_cap_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -920,7 +1080,7 @@ struct elem_info wlfw_cap_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1054,7 +1214,7 @@ struct elem_info wlfw_bdf_download_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1073,7 +1233,7 @@ struct elem_info wlfw_bdf_download_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1117,7 +1277,7 @@ struct elem_info wlfw_cal_report_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1135,7 +1295,7 @@ struct elem_info wlfw_cal_report_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1153,7 +1313,7 @@ struct elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1269,7 +1429,7 @@ struct elem_info wlfw_cal_download_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1288,7 +1448,7 @@ struct elem_info wlfw_cal_download_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1316,7 +1476,7 @@ struct elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1342,7 +1502,7 @@ struct elem_info wlfw_cal_update_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1459,7 +1619,7 @@ struct elem_info wlfw_cal_update_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1485,7 +1645,7 @@ struct elem_info 
wlfw_msa_info_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1522,7 +1682,7 @@ struct elem_info wlfw_msa_info_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1530,7 +1690,7 @@ struct elem_info wlfw_msa_ready_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1548,7 +1708,7 @@ struct elem_info wlfw_msa_ready_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1574,7 +1734,7 @@ struct elem_info wlfw_ini_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1592,7 +1752,7 @@ struct elem_info wlfw_ini_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1627,7 +1787,7 @@ struct elem_info wlfw_athdiag_read_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1676,7 +1836,7 @@ struct elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1724,7 +1884,7 @@ struct elem_info wlfw_athdiag_write_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1743,7 +1903,7 @@ struct elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1760,7 +1920,7 @@ struct elem_info wlfw_vbatt_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1778,7 +1938,7 @@ struct elem_info wlfw_vbatt_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1804,7 +1964,7 @@ struct elem_info wlfw_mac_addr_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1822,7 +1982,7 @@ struct elem_info wlfw_mac_addr_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1834,16 +1994,16 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = { .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, - daemon_support_valid), + num_clients_valid), }, { - .data_type = QMI_UNSIGNED_1_BYTE, + .data_type = QMI_UNSIGNED_4_BYTE, .elem_len = 1, - .elem_size = sizeof(u8), + .elem_size = sizeof(u32), .is_array = NO_ARRAY, .tlv_type = 0x10, .offset = offsetof(struct wlfw_host_cap_req_msg_v01, - daemon_support), + num_clients), }, { .data_type = QMI_OPT_FLAG, @@ -1864,9 +2024,216 @@ struct elem_info wlfw_host_cap_req_msg_v01_ei[] = { wake_msi), }, { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_valid), + }, + { + .data_type 
= QMI_DATA_LEN, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios_len), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = QMI_WLFW_MAX_NUM_GPIO_V01, + .elem_size = sizeof(u32), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x12, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + gpios), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x13, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + nm_modem), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x14, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x15, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + bdf_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x16, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x17, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + m3_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x18, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_filesys_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x19, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_cache_support), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, 
+ .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1A, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + cal_done), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket_valid), + }, + { + .data_type = QMI_UNSIGNED_4_BYTE, + .elem_len = 1, + .elem_size = sizeof(u32), + .is_array = NO_ARRAY, + .tlv_type = 0x1B, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_bucket), + }, + { + .data_type = QMI_OPT_FLAG, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode_valid), + }, + { + .data_type = QMI_UNSIGNED_1_BYTE, + .elem_len = 1, + .elem_size = sizeof(u8), + .is_array = NO_ARRAY, + .tlv_type = 0x1C, + .offset = offsetof(struct wlfw_host_cap_req_msg_v01, + mem_cfg_mode), + }, + { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1884,50 +2251,61 @@ struct elem_info wlfw_host_cap_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info wlfw_request_mem_ind_msg_v01_ei[] = { { - .data_type = QMI_UNSIGNED_4_BYTE, + .data_type = QMI_DATA_LEN, .elem_len = 1, - .elem_size = sizeof(u32), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, - size), + mem_seg_len), + }, + { + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct wlfw_mem_seg_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, + .offset = offsetof(struct wlfw_request_mem_ind_msg_v01, + mem_seg), + .ei_array = wlfw_mem_seg_s_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; struct elem_info wlfw_respond_mem_req_msg_v01_ei[] = { { - .data_type = QMI_UNSIGNED_8_BYTE, + .data_type = QMI_DATA_LEN, .elem_len = 1, - .elem_size = sizeof(u64), + .elem_size = sizeof(u8), .is_array = NO_ARRAY, .tlv_type = 0x01, .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, - addr), + mem_seg_len), }, { - .data_type = QMI_UNSIGNED_4_BYTE, - .elem_len = 1, - .elem_size = sizeof(u32), - .is_array = NO_ARRAY, - .tlv_type = 0x02, + .data_type = QMI_STRUCT, + .elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01, + .elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01), + .is_array = VAR_LEN_ARRAY, + .tlv_type = 0x01, .offset = offsetof(struct wlfw_respond_mem_req_msg_v01, - size), + mem_seg), + .ei_array = wlfw_mem_seg_resp_s_v01_ei, }, { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1945,7 +2323,7 @@ struct elem_info wlfw_respond_mem_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -1953,15 +2331,15 @@ struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; -struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[] = { +struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2041,7 +2419,7 @@ struct 
elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2049,7 +2427,7 @@ struct elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2068,7 +2446,7 @@ struct elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2096,7 +2474,7 @@ struct elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2155,7 +2533,7 @@ struct elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2181,7 +2559,7 @@ struct elem_info wlfw_m3_info_req_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2199,7 +2577,7 @@ struct elem_info wlfw_m3_info_resp_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; @@ -2216,6 +2594,14 @@ struct elem_info wlfw_xo_cal_ind_msg_v01_ei[] = { { .data_type = QMI_EOTI, .is_array = NO_ARRAY, - .is_array = QMI_COMMON_TLV_TYPE, + .tlv_type = QMI_COMMON_TLV_TYPE, + }, +}; + +struct elem_info wlfw_cal_done_ind_msg_v01_ei[] = { + { + .data_type = QMI_EOTI, + .is_array = NO_ARRAY, + .tlv_type = QMI_COMMON_TLV_TYPE, }, }; diff --git a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h index 9b56eb0c02fb..00a873d11d14 100644 --- a/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h +++ b/drivers/net/wireless/cnss2/wlan_firmware_service_v01.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -23,10 +23,12 @@ #define QMI_WLFW_BDF_DOWNLOAD_REQ_V01 0x0025 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 #define QMI_WLFW_INITIATE_CAL_UPDATE_IND_V01 0x002A +#define QMI_WLFW_CAL_DONE_IND_V01 0x003E #define QMI_WLFW_HOST_CAP_REQ_V01 0x0034 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_RESP_V01 0x003B #define QMI_WLFW_M3_INFO_REQ_V01 0x003C #define QMI_WLFW_CAP_REQ_V01 0x0024 +#define QMI_WLFW_FW_INIT_DONE_IND_V01 0x0038 #define QMI_WLFW_CAL_REPORT_REQ_V01 0x0026 #define QMI_WLFW_M3_INFO_RESP_V01 0x003C #define QMI_WLFW_CAL_UPDATE_RESP_V01 0x0029 @@ -42,7 +44,6 @@ #define QMI_WLFW_WLAN_MODE_REQ_V01 0x0022 #define QMI_WLFW_IND_REGISTER_REQ_V01 0x0020 #define QMI_WLFW_WLAN_CFG_RESP_V01 0x0023 -#define QMI_WLFW_COLD_BOOT_CAL_DONE_IND_V01 0x0038 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_REJUVENATE_IND_V01 0x0039 #define QMI_WLFW_DYNAMIC_FEATURE_MASK_REQ_V01 0x003B @@ -72,13 +73,16 @@ #define QMI_WLFW_IND_REGISTER_RESP_V01 0x0020 #define QMI_WLFW_MAX_NUM_MEMORY_REGIONS_V01 2 +#define QMI_WLFW_MAX_NUM_MEM_SEG_V01 32 #define QMI_WLFW_MAX_NUM_CAL_V01 5 #define QMI_WLFW_MAX_DATA_SIZE_V01 6144 #define QMI_WLFW_FUNCTION_NAME_LEN_V01 128 #define QMI_WLFW_MAX_NUM_CE_V01 12 #define QMI_WLFW_MAX_TIMESTAMP_LEN_V01 32 #define QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01 6144 +#define QMI_WLFW_MAX_NUM_GPIO_V01 32 #define QMI_WLFW_MAX_BUILD_ID_LEN_V01 128 +#define QMI_WLFW_MAX_NUM_MEM_CFG_V01 2 #define QMI_WLFW_MAX_STR_LEN_V01 16 #define QMI_WLFW_MAX_NUM_SHADOW_REG_V01 24 #define QMI_WLFW_MAC_ADDR_SIZE_V01 6 @@ -117,6 +121,17 @@ enum wlfw_pipedir_enum_v01 { WLFW_PIPEDIR_ENUM_MAX_VAL_V01 = INT_MAX, }; +enum wlfw_mem_type_enum_v01 { + WLFW_MEM_TYPE_ENUM_MIN_VAL_V01 = INT_MIN, + QMI_WLFW_MEM_TYPE_MSA_V01 = 0, + QMI_WLFW_MEM_TYPE_DDR_V01 = 1, + QMI_WLFW_MEM_BDF_V01 = 2, + QMI_WLFW_MEM_M3_V01 = 3, + QMI_WLFW_MEM_CAL_V01 = 4, + QMI_WLFW_MEM_DPD_V01 = 5, + WLFW_MEM_TYPE_ENUM_MAX_VAL_V01 = INT_MAX, +}; + #define QMI_WLFW_CE_ATTR_FLAGS_V01 ((u32)0x00) #define QMI_WLFW_CE_ATTR_NO_SNOOP_V01 ((u32)0x01) #define QMI_WLFW_CE_ATTR_BYTE_SWAP_DATA_V01 ((u32)0x02) @@ -128,6 +143,7 @@ enum wlfw_pipedir_enum_v01 { #define QMI_WLFW_FW_READY_V01 ((u64)0x02ULL) #define QMI_WLFW_MSA_READY_V01 ((u64)0x04ULL) #define QMI_WLFW_FW_MEM_READY_V01 ((u64)0x08ULL) +#define QMI_WLFW_FW_INIT_DONE_V01 ((u64)0x10ULL) #define QMI_WLFW_FW_REJUVENATE_V01 ((u64)0x01ULL) @@ -160,6 +176,26 @@ struct wlfw_memory_region_info_s_v01 { u8 secure_flag; }; +struct wlfw_mem_cfg_s_v01 { + u64 offset; + u32 size; + u8 secure_flag; +}; + +struct wlfw_mem_seg_s_v01 { + u32 size; + enum wlfw_mem_type_enum_v01 type; + u32 mem_cfg_len; + struct wlfw_mem_cfg_s_v01 mem_cfg[QMI_WLFW_MAX_NUM_MEM_CFG_V01]; +}; + +struct wlfw_mem_seg_resp_s_v01 { + u64 addr; + u32 size; + enum wlfw_mem_type_enum_v01 type; + u8 restore; +}; + struct wlfw_rf_chip_info_s_v01 { u32 chip_id; u32 chip_family; @@ -195,13 +231,17 @@ struct wlfw_ind_register_req_msg_v01 { u8 request_mem_enable; u8 fw_mem_ready_enable_valid; u8 fw_mem_ready_enable; - u8 cold_boot_cal_done_enable_valid; - u8 cold_boot_cal_done_enable; + u8 fw_init_done_enable_valid; + u8 fw_init_done_enable; u8 rejuvenate_enable_valid; u32 rejuvenate_enable; + u8 xo_cal_enable_valid; + u8 xo_cal_enable; + u8 cal_done_enable_valid; + u8 cal_done_enable; }; -#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 46 +#define WLFW_IND_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 54 extern struct elem_info 
wlfw_ind_register_req_msg_v01_ei[]; struct wlfw_ind_register_resp_msg_v01 { @@ -533,13 +573,36 @@ struct wlfw_mac_addr_resp_msg_v01 { extern struct elem_info wlfw_mac_addr_resp_msg_v01_ei[]; struct wlfw_host_cap_req_msg_v01 { - u8 daemon_support_valid; - u8 daemon_support; + u8 num_clients_valid; + u32 num_clients; u8 wake_msi_valid; u32 wake_msi; -}; - -#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 11 + u8 gpios_valid; + u32 gpios_len; + u32 gpios[QMI_WLFW_MAX_NUM_GPIO_V01]; + u8 nm_modem_valid; + u8 nm_modem; + u8 bdf_support_valid; + u8 bdf_support; + u8 bdf_cache_support_valid; + u8 bdf_cache_support; + u8 m3_support_valid; + u8 m3_support; + u8 m3_cache_support_valid; + u8 m3_cache_support; + u8 cal_filesys_support_valid; + u8 cal_filesys_support; + u8 cal_cache_support_valid; + u8 cal_cache_support; + u8 cal_done_valid; + u8 cal_done; + u8 mem_bucket_valid; + u32 mem_bucket; + u8 mem_cfg_mode_valid; + u8 mem_cfg_mode; +}; + +#define WLFW_HOST_CAP_REQ_MSG_V01_MAX_MSG_LEN 189 extern struct elem_info wlfw_host_cap_req_msg_v01_ei[]; struct wlfw_host_cap_resp_msg_v01 { @@ -550,18 +613,19 @@ struct wlfw_host_cap_resp_msg_v01 { extern struct elem_info wlfw_host_cap_resp_msg_v01_ei[]; struct wlfw_request_mem_ind_msg_v01 { - u32 size; + u32 mem_seg_len; + struct wlfw_mem_seg_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 7 +#define WLFW_REQUEST_MEM_IND_MSG_V01_MAX_MSG_LEN 1124 extern struct elem_info wlfw_request_mem_ind_msg_v01_ei[]; struct wlfw_respond_mem_req_msg_v01 { - u64 addr; - u32 size; + u32 mem_seg_len; + struct wlfw_mem_seg_resp_s_v01 mem_seg[QMI_WLFW_MAX_NUM_MEM_SEG_V01]; }; -#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 18 +#define WLFW_RESPOND_MEM_REQ_MSG_V01_MAX_MSG_LEN 548 extern struct elem_info wlfw_respond_mem_req_msg_v01_ei[]; struct wlfw_respond_mem_resp_msg_v01 { @@ -578,12 +642,12 @@ struct wlfw_fw_mem_ready_ind_msg_v01 { #define WLFW_FW_MEM_READY_IND_MSG_V01_MAX_MSG_LEN 0 extern struct elem_info wlfw_fw_mem_ready_ind_msg_v01_ei[]; -struct wlfw_cold_boot_cal_done_ind_msg_v01 { +struct wlfw_fw_init_done_ind_msg_v01 { char placeholder; }; -#define WLFW_COLD_BOOT_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 -extern struct elem_info wlfw_cold_boot_cal_done_ind_msg_v01_ei[]; +#define WLFW_FW_INIT_DONE_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_fw_init_done_ind_msg_v01_ei[]; struct wlfw_rejuvenate_ind_msg_v01 { u8 cause_for_rejuvenation_valid; @@ -654,4 +718,11 @@ struct wlfw_xo_cal_ind_msg_v01 { #define WLFW_XO_CAL_IND_MSG_V01_MAX_MSG_LEN 4 extern struct elem_info wlfw_xo_cal_ind_msg_v01_ei[]; +struct wlfw_cal_done_ind_msg_v01 { + char placeholder; +}; + +#define WLFW_CAL_DONE_IND_MSG_V01_MAX_MSG_LEN 0 +extern struct elem_info wlfw_cal_done_ind_msg_v01_ei[]; + #endif diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index f877fbc7d7af..8a9164da6c50 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -699,16 +699,21 @@ static int hwsim_fops_ps_write(void *dat, u64 val) val != PS_MANUAL_POLL) return -EINVAL; - old_ps = data->ps; - data->ps = val; - - local_bh_disable(); if (val == PS_MANUAL_POLL) { + if (data->ps != PS_ENABLED) + return -EINVAL; + local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); - data->ps_poll_pending = true; - } else if (old_ps == PS_DISABLED && val != PS_DISABLED) { + local_bh_enable(); + return 0; + } + old_ps = data->ps; + data->ps = 
val; + + local_bh_disable(); + if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); diff --git a/drivers/net/wireless/mediatek/mt7601u/mcu.c b/drivers/net/wireless/mediatek/mt7601u/mcu.c index fbb1986eda3c..686b1b5dd394 100644 --- a/drivers/net/wireless/mediatek/mt7601u/mcu.c +++ b/drivers/net/wireless/mediatek/mt7601u/mcu.c @@ -66,8 +66,10 @@ mt7601u_mcu_msg_alloc(struct mt7601u_dev *dev, const void *data, int len) WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); - skb_reserve(skb, MT_DMA_HDR_LEN); - memcpy(skb_put(skb, len), data, len); + if (skb) { + skb_reserve(skb, MT_DMA_HDR_LEN); + memcpy(skb_put(skb, len), data, len); + } return skb; } @@ -170,6 +172,8 @@ static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); } @@ -205,6 +209,8 @@ mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) }; skb = mt7601u_mcu_msg_alloc(dev, &msg, sizeof(msg)); + if (!skb) + return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 0881ba8535f4..c78abfc7bd96 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = { 0x04, 0x08, /* Noise gain, limit offset */ 0x28, 0x28, /* det rssi, med busy offsets */ 7, /* det sync thresh */ - 0, 2, 2 /* test mode, min, max */ + 0, 2, 2, /* test mode, min, max */ + 0, /* rx/tx delay */ + 0, 0, 0, 0, 0, 0, /* current BSS id */ + 0 /* hop set */ }; /*===========================================================================*/ @@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local) * a_beacon_period = hops a_beacon_period = KuS *//* 64ms = 010000 */ if (local->fw_ver == 0x55) { - memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms, + memcpy(&local->sparm.b4, b4_default_startup_parms, sizeof(struct b4_startup_params)); /* Translate sane kus input values to old build 4/5 format */ /* i = hop time in uS truncated to 3 bytes */ diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c index c48b7e8ee0d6..b51815eccdb3 100644 --- a/drivers/net/wireless/realtek/rtlwifi/pci.c +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c @@ -1572,7 +1572,14 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw) dev_kfree_skb_irq(skb); ring->idx = (ring->idx + 1) % ring->entries; } + + if (rtlpriv->use_new_trx_flow) { + rtlpci->tx_ring[i].cur_tx_rp = 0; + rtlpci->tx_ring[i].cur_tx_wp = 0; + } + ring->idx = 0; + ring->entries = rtlpci->txringcount[i]; } } spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c index 5a3df9198ddf..89515f02c353 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c @@ -1123,7 +1123,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw) /* Configuration Space offset 0x70f BIT7 is used to control L0S */ tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); - _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); + _rtl8723be_dbi_write(rtlpriv, 
0x70f, tmp8 | BIT(7) | + ASPM_L1_LATENCY << 3); /* Configuration Space offset 0x719 Bit3 is for L1 * BIT4 is for clock request diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index a13d1f2b5912..259590013382 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c @@ -3425,6 +3425,10 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf) /* because rndis_command() sleeps we need to use workqueue */ priv->workqueue = create_singlethread_workqueue("rndis_wlan"); + if (!priv->workqueue) { + wiphy_free(wiphy); + return -ENOMEM; + } INIT_WORK(&priv->work, rndis_wlan_worker); INIT_DELAYED_WORK(&priv->dev_poller_work, rndis_device_poller); INIT_DELAYED_WORK(&priv->scan_work, rndis_get_scan_results); diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 9bee3f11898a..869411f55d88 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -1196,8 +1196,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw, WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc; - wl1251_acx_arp_ip_filter(wl, enable, addr); - + ret = wl1251_acx_arp_ip_filter(wl, enable, addr); if (ret < 0) goto out_sleep; } diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c index af62c4c854f3..b4f31dad40d6 100644 --- a/drivers/nfc/nfcmrvl/fw_dnld.c +++ b/drivers/nfc/nfcmrvl/fw_dnld.c @@ -17,7 +17,7 @@ */ #include <linux/module.h> -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include <linux/firmware.h> #include <linux/nfc.h> #include <net/nfc/nci.h> diff --git a/drivers/nfc/nfcmrvl/spi.c b/drivers/nfc/nfcmrvl/spi.c index a7faa0bcc01e..fc8e78a29d77 100644 --- a/drivers/nfc/nfcmrvl/spi.c +++ b/drivers/nfc/nfcmrvl/spi.c @@ -96,10 +96,9 @@ static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv, /* Send the SPI packet */ err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion, skb); - if (err != 0) { + if (err) nfc_err(priv->dev, "spi_send failed %d", err); - kfree_skb(skb); - } + return err; } diff --git a/drivers/of/device.c b/drivers/of/device.c index 97a280d50d6d..7c509bff9295 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -223,7 +223,7 @@ ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) str[i] = '_'; } - return tsize; + return repend; } EXPORT_SYMBOL_GPL(of_device_get_modalias); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 78530d1714dc..bdce0679674c 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -2646,6 +2646,7 @@ enum parport_pc_pci_cards { netmos_9901, netmos_9865, quatech_sppxp100, + wch_ch382l, }; @@ -2708,6 +2709,7 @@ static struct parport_pc_pci { /* netmos_9901 */ { 1, { { 0, -1 }, } }, /* netmos_9865 */ { 1, { { 0, -1 }, } }, /* quatech_sppxp100 */ { 1, { { 0, 1 }, } }, + /* wch_ch382l */ { 1, { { 2, -1 }, } }, }; static const struct pci_device_id parport_pc_pci_tbl[] = { @@ -2797,6 +2799,8 @@ static const struct pci_device_id parport_pc_pci_tbl[] = { /* Quatech SPPXP-100 Parallel port PCI ExpressCard */ { PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 }, + /* WCH CH382L PCI-E single parallel port card */ + { 0x1c00, 0x3050, 0x1c00, 0x3050, 0, 0, wch_ch382l }, { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci, parport_pc_pci_tbl); diff --git a/drivers/pci/probe.c 
b/drivers/pci/probe.c index 193ac13de49b..566897f24dee 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -230,7 +230,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, res->flags |= IORESOURCE_ROM_ENABLE; l64 = l & PCI_ROM_ADDRESS_MASK; sz64 = sz & PCI_ROM_ADDRESS_MASK; - mask64 = (u32)PCI_ROM_ADDRESS_MASK; + mask64 = PCI_ROM_ADDRESS_MASK; } if (res->flags & IORESOURCE_MEM_64) { diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 254192b5dad1..4eb1cf0ed00c 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3631,6 +3631,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, + quirk_dma_func1_alias); /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB388_ESD, diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 25062966cbfa..8b2f8b2a574e 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -63,7 +63,7 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) mask = (u32)PCI_BASE_ADDRESS_IO_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; } else if (resno == PCI_ROM_RESOURCE) { - mask = (u32)PCI_ROM_ADDRESS_MASK; + mask = PCI_ROM_ADDRESS_MASK; } else { mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 39400dda27c2..d6d671a925e1 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -323,10 +323,16 @@ validate_group(struct perf_event *event) return 0; } +static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu) +{ + struct platform_device *pdev = armpmu->plat_device; + + return pdev ? dev_get_platdata(&pdev->dev) : NULL; +} + static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) { struct arm_pmu *armpmu; - struct platform_device *plat_device; struct arm_pmu_platdata *plat; int ret; u64 start_clock, finish_clock; @@ -338,8 +344,8 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) * dereference. 
*/ armpmu = *(void **)dev; - plat_device = armpmu->plat_device; - plat = dev_get_platdata(&plat_device->dev); + + plat = armpmu_get_platdata(armpmu); start_clock = sched_clock(); if (plat && plat->handle_irq) diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2686a4450dfc..f4639a9f1e48 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -979,19 +979,16 @@ struct pinctrl_state *pinctrl_lookup_state(struct pinctrl *p, EXPORT_SYMBOL_GPL(pinctrl_lookup_state); /** - * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * pinctrl_commit_state() - select/activate/program a pinctrl state to HW * @p: the pinctrl handle for the device that requests configuration * @state: the state handle to select/activate/program */ -int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +static int pinctrl_commit_state(struct pinctrl *p, struct pinctrl_state *state) { struct pinctrl_setting *setting, *setting2; struct pinctrl_state *old_state = p->state; int ret; - if (p->state == state) - return 0; - if (p->state) { /* * For each pinmux setting in the old state, forget SW's record @@ -1055,6 +1052,19 @@ unapply_new_state: return ret; } + +/** + * pinctrl_select_state() - select/activate/program a pinctrl state to HW + * @p: the pinctrl handle for the device that requests configuration + * @state: the state handle to select/activate/program + */ +int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *state) +{ + if (p->state == state) + return 0; + + return pinctrl_commit_state(p, state); +} EXPORT_SYMBOL_GPL(pinctrl_select_state); static void devm_pinctrl_release(struct device *dev, void *res) @@ -1223,7 +1233,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map) int pinctrl_force_sleep(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_sleep)) - return pinctrl_select_state(pctldev->p, pctldev->hog_sleep); + return pinctrl_commit_state(pctldev->p, pctldev->hog_sleep); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_sleep); @@ -1235,7 +1245,7 @@ EXPORT_SYMBOL_GPL(pinctrl_force_sleep); int pinctrl_force_default(struct pinctrl_dev *pctldev) { if (!IS_ERR(pctldev->p) && !IS_ERR(pctldev->hog_default)) - return pinctrl_select_state(pctldev->p, pctldev->hog_default); + return pinctrl_commit_state(pctldev->p, pctldev->hog_default); return 0; } EXPORT_SYMBOL_GPL(pinctrl_force_default); diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 92430f781eb7..a0b8c8a8c323 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -59,12 +59,14 @@ static int send_command(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) { int ret; + int (*xfer_fxn)(struct cros_ec_device *ec, struct cros_ec_command *msg); if (ec_dev->proto_version > 2) - ret = ec_dev->pkt_xfer(ec_dev, msg); + xfer_fxn = ec_dev->pkt_xfer; else - ret = ec_dev->cmd_xfer(ec_dev, msg); + xfer_fxn = ec_dev->cmd_xfer; + ret = (*xfer_fxn)(ec_dev, msg); if (msg->result == EC_RES_IN_PROGRESS) { int i; struct cros_ec_command *status_msg; @@ -87,7 +89,7 @@ static int send_command(struct cros_ec_device *ec_dev, for (i = 0; i < EC_COMMAND_RETRIES; i++) { usleep_range(10000, 11000); - ret = ec_dev->cmd_xfer(ec_dev, status_msg); + ret = (*xfer_fxn)(ec_dev, status_msg); if (ret < 0) break; diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index f3baf9973989..24f1630a8b3f 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c 
+++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -187,7 +187,7 @@ static ssize_t show_ec_version(struct device *dev, count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: EC error %d\n", msg->result); else { - msg->data[sizeof(msg->data) - 1] = '\0'; + msg->data[EC_HOST_PARAM_SIZE - 1] = '\0'; count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: %s\n", msg->data); } diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index fd1452e28352..dc376b0fd276 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -35,8 +35,10 @@ #define PIPE_REG_ADDRESS 0x10 /* write: physical address */ #define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */ #define PIPE_REG_WAKES 0x14 /* read: wake flags */ -#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */ -#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */ +#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address + */ +#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address + */ #define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */ #define PIPE_REG_VERSION 0x24 /* read: device version */ @@ -53,12 +55,16 @@ /* The following commands are related to write operations */ #define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */ #define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing - is possible */ + * is possible + */ #define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */ #define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading - * is possible */ + * is possible + */ -/* Possible status values used to signal errors - see goldfish_pipe_error_convert */ +/* Possible status values used to signal errors - + * see goldfish_pipe_error_convert + */ #define PIPE_ERROR_INVAL -1 #define PIPE_ERROR_AGAIN -2 #define PIPE_ERROR_NOMEM -3 @@ -71,14 +77,6 @@ #define MAX_PAGES_TO_GRAB 32 -#define DEBUG 0 - -#if DEBUG -#define DPRINT(...) { printk(KERN_ERR __VA_ARGS__); } -#else -#define DPRINT(...) 
-#endif - /* This data type models a given pipe instance */ struct goldfish_pipe { struct goldfish_pipe_dev *dev; @@ -158,6 +156,7 @@ static int valid_batchbuffer_addr(struct goldfish_pipe_dev *dev, { u32 aph, apl; u64 paddr; + aph = readl(dev->base + PIPE_REG_PARAMS_ADDR_HIGH); apl = readl(dev->base + PIPE_REG_PARAMS_ADDR_LOW); @@ -174,7 +173,8 @@ static int setup_access_params_addr(struct platform_device *pdev, u64 paddr; struct access_params *aps; - aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), GFP_KERNEL); + aps = devm_kzalloc(&pdev->dev, sizeof(struct access_params), + GFP_KERNEL); if (!aps) return -1; @@ -226,7 +226,7 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, struct goldfish_pipe *pipe = filp->private_data; struct goldfish_pipe_dev *dev = pipe->dev; unsigned long address, address_end; - struct page* pages[MAX_PAGES_TO_GRAB] = {}; + struct page *pages[MAX_PAGES_TO_GRAB] = {}; int count = 0, ret = -EINVAL; /* If the emulator already closed the pipe, no need to go further */ @@ -268,17 +268,17 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, ret = get_user_pages_fast(first_page, requested_pages, !is_write, pages); - DPRINT("%s: requested pages: %d %d %p\n", __FUNCTION__, - ret, requested_pages, first_page); + pr_debug("%s: requested pages: %d %ld %p\n", __func__, ret, + requested_pages, (void*)first_page); if (ret == 0) { - DPRINT("%s: error: (requested pages == 0) (wanted %d)\n", - __FUNCTION__, requested_pages); + pr_err("%s: error: (requested pages == 0) (wanted %ld)\n", + __func__, requested_pages); mutex_unlock(&pipe->lock); return ret; } if (ret < 0) { - DPRINT("%s: (requested pages < 0) %d \n", - __FUNCTION__, requested_pages); + pr_err("%s: (requested pages < 0) %ld \n", + __func__, requested_pages); mutex_unlock(&pipe->lock); return ret; } @@ -293,8 +293,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, xaddr_prev = xaddr_i; num_contiguous_pages++; } else { - DPRINT("%s: discontinuous page boundary: %d pages instead\n", - __FUNCTION__, page_i); + pr_err("%s: discontinuous page boundary: %d pages instead\n", + __func__, page_i); break; } } @@ -345,8 +345,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer, * ABI relies on this behavior. */ if (status != PIPE_ERROR_AGAIN) - pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n", - status, is_write ? "write" : "read"); + pr_err_ratelimited("goldfish_pipe: backend returned error %d on %s\n", + status, is_write ? "write" : "read"); ret = 0; break; } @@ -516,8 +516,9 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) pipe->dev = dev; mutex_init(&pipe->lock); - DPRINT("%s: call. pipe_dev pipe_dev=0x%lx new_pipe_addr=0x%lx file=0x%lx\n", __FUNCTION__, pipe_dev, pipe, file); - // spin lock init, write head of list, i guess + pr_debug("%s: call. pipe_dev dev=%p new_pipe_addr=%p file=%p\n", + __func__, dev, pipe, file); + /* spin lock init, write head of list, i guess */ init_waitqueue_head(&pipe->wake_queue); /* @@ -540,7 +541,7 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp) { struct goldfish_pipe *pipe = filp->private_data; - DPRINT("%s: call. pipe=0x%lx file=0x%lx\n", __FUNCTION__, pipe, filp); + pr_debug("%s: call. 
pipe=%p file=%p\n", __func__, pipe, filp); /* The guest is closing the channel, so tell the emulator right now */ goldfish_cmd(pipe, CMD_CLOSE); kfree(pipe); @@ -566,8 +567,8 @@ static struct miscdevice goldfish_pipe_dev = { int goldfish_pipe_device_init_v1(struct platform_device *pdev) { struct goldfish_pipe_dev *dev = pipe_dev; - int err = devm_request_irq(&pdev->dev, dev->irq, goldfish_pipe_interrupt, - IRQF_SHARED, "goldfish_pipe", dev); + int err = devm_request_irq(&pdev->dev, dev->irq, + goldfish_pipe_interrupt, IRQF_SHARED, "goldfish_pipe", dev); if (err) { dev_err(&pdev->dev, "unable to allocate IRQ for v1\n"); return err; @@ -585,5 +586,5 @@ int goldfish_pipe_device_init_v1(struct platform_device *pdev) void goldfish_pipe_device_deinit_v1(struct platform_device *pdev) { - misc_deregister(&goldfish_pipe_dev); + misc_deregister(&goldfish_pipe_dev); } diff --git a/drivers/platform/goldfish/goldfish_pipe_v2.c b/drivers/platform/goldfish/goldfish_pipe_v2.c index ad373ed36555..3119b3341a7b 100644 --- a/drivers/platform/goldfish/goldfish_pipe_v2.c +++ b/drivers/platform/goldfish/goldfish_pipe_v2.c @@ -46,6 +46,7 @@ * exchange is properly mapped during a transfer. */ +#include <linux/printk.h> #include "goldfish_pipe.h" @@ -58,8 +59,7 @@ enum { PIPE_CURRENT_DEVICE_VERSION = 2 }; -/* - * IMPORTANT: The following constants must match the ones used and defined +/* IMPORTANT: The following constants must match the ones used and defined * in external/qemu/hw/goldfish_pipe.c in the Android source tree. */ @@ -70,7 +70,10 @@ enum PipePollFlags { PIPE_POLL_HUP = 1 << 2 }; -/* Possible status values used to signal errors - see goldfish_pipe_error_convert */ +/* + * Possible status values used to signal errors - see + * goldfish_pipe_error_convert + */ enum PipeErrors { PIPE_ERROR_INVAL = -1, PIPE_ERROR_AGAIN = -2, @@ -117,8 +120,8 @@ enum PipeCmdCode { PIPE_CMD_WAKE_ON_READ, /* - * TODO(zyy): implement a deferred read/write execution to allow parallel - * processing of pipe operations on the host. + * TODO(zyy): implement a deferred read/write execution to allow + * parallel processing of pipe operations on the host. 
*/ PIPE_CMD_WAKE_ON_DONE_IO, }; @@ -135,17 +138,21 @@ struct goldfish_pipe_command; /* A per-pipe command structure, shared with the host */ struct goldfish_pipe_command { - s32 cmd; /* PipeCmdCode, guest -> host */ - s32 id; /* pipe id, guest -> host */ - s32 status; /* command execution status, host -> guest */ + s32 cmd; /* PipeCmdCode, guest -> host */ + s32 id; /* pipe id, guest -> host */ + s32 status; /* command execution status, host -> guest */ s32 reserved; /* to pad to 64-bit boundary */ union { /* Parameters for PIPE_CMD_{READ,WRITE} */ struct { - u32 buffers_count; /* number of buffers, guest -> host */ - s32 consumed_size; /* number of consumed bytes, host -> guest */ - u64 ptrs[MAX_BUFFERS_PER_COMMAND]; /* buffer pointers, guest -> host */ - u32 sizes[MAX_BUFFERS_PER_COMMAND]; /* buffer sizes, guest -> host */ + /* number of buffers, guest -> host */ + u32 buffers_count; + /* number of consumed bytes, host -> guest */ + s32 consumed_size; + /* buffer pointers, guest -> host */ + u64 ptrs[MAX_BUFFERS_PER_COMMAND]; + /* buffer sizes, guest -> host */ + u32 sizes[MAX_BUFFERS_PER_COMMAND]; } rw_params; }; }; @@ -165,34 +172,46 @@ struct open_command_param { /* Device-level set of buffers shared with the host */ struct goldfish_pipe_dev_buffers { struct open_command_param open_command_params; - struct signalled_pipe_buffer signalled_pipe_buffers[MAX_SIGNALLED_PIPES]; + struct signalled_pipe_buffer + signalled_pipe_buffers[MAX_SIGNALLED_PIPES]; }; /* This data type models a given pipe instance */ struct goldfish_pipe { - u32 id; /* pipe ID - index into goldfish_pipe_dev::pipes array */ - unsigned long flags; /* The wake flags pipe is waiting for - * Note: not protected with any lock, uses atomic operations - * and barriers to make it thread-safe. - */ - unsigned long signalled_flags; /* wake flags host have signalled, - * - protected by goldfish_pipe_dev::lock */ + /* pipe ID - index into goldfish_pipe_dev::pipes array */ + u32 id; - struct goldfish_pipe_command *command_buffer; /* A pointer to command buffer */ + /* The wake flags pipe is waiting for. + * Note: not protected with any lock, uses atomic operations and + * barriers to make it thread-safe. + */ + unsigned long flags; + + /* wake flags host have signalled, + * protected by goldfish_pipe_dev::lock + */ + unsigned long signalled_flags; - /* doubly linked list of signalled pipes, protected by goldfish_pipe_dev::lock */ + /* A pointer to command buffer */ + struct goldfish_pipe_command *command_buffer; + + /* doubly linked list of signalled pipes, + * protected by goldfish_pipe_dev::lock + */ struct goldfish_pipe *prev_signalled; struct goldfish_pipe *next_signalled; /* * A pipe's own lock. Protects the following: - * - *command_buffer - makes sure a command can safely write its parameters - * to the host and read the results back. + * - *command_buffer - makes sure a command can safely write its + * parameters to the host and read the results back. 
*/ struct mutex lock; - wait_queue_head_t wake_queue; /* A wake queue for sleeping until host signals an event */ - struct goldfish_pipe_dev *dev; /* Pointer to the parent goldfish_pipe_dev instance */ + /* A wake queue for sleeping until host signals an event */ + wait_queue_head_t wake_queue; + /* Pointer to the parent goldfish_pipe_dev instance */ + struct goldfish_pipe_dev *dev; }; struct goldfish_pipe_dev pipe_dev[1] = {}; @@ -200,7 +219,8 @@ struct goldfish_pipe_dev pipe_dev[1] = {}; static int goldfish_cmd_locked(struct goldfish_pipe *pipe, enum PipeCmdCode cmd) { pipe->command_buffer->cmd = cmd; - pipe->command_buffer->status = PIPE_ERROR_INVAL; /* failure by default */ + /* failure by default */ + pipe->command_buffer->status = PIPE_ERROR_INVAL; writel(pipe->id, pipe->dev->base + PIPE_REG_CMD); return pipe->command_buffer->status; } @@ -235,10 +255,12 @@ static int goldfish_pipe_error_convert(int status) static int pin_user_pages(unsigned long first_page, unsigned long last_page, unsigned last_page_size, int is_write, - struct page *pages[MAX_BUFFERS_PER_COMMAND], unsigned *iter_last_page_size) + struct page *pages[MAX_BUFFERS_PER_COMMAND], + unsigned *iter_last_page_size) { int ret; int requested_pages = ((last_page - first_page) >> PAGE_SHIFT) + 1; + if (requested_pages > MAX_BUFFERS_PER_COMMAND) { requested_pages = MAX_BUFFERS_PER_COMMAND; *iter_last_page_size = PAGE_SIZE; @@ -260,10 +282,11 @@ static void release_user_pages(struct page **pages, int pages_count, int is_write, s32 consumed_size) { int i; + for (i = 0; i < pages_count; i++) { - if (!is_write && consumed_size > 0) { + if (!is_write && consumed_size > 0) set_page_dirty(pages[i]); - } + put_page(pages[i]); } } @@ -291,7 +314,9 @@ static void populate_rw_params( command->rw_params.sizes[0] = size_on_page; for (; i < pages_count; ++i) { xaddr = page_to_phys(pages[i]); - size_on_page = (i == pages_count - 1) ? iter_last_page_size : PAGE_SIZE; + size_on_page = (i == pages_count - 1) ? + iter_last_page_size : PAGE_SIZE; + if (xaddr == xaddr_prev + PAGE_SIZE) { command->rw_params.sizes[buffer_idx] += size_on_page; } else { @@ -307,7 +332,7 @@ static void populate_rw_params( static int transfer_max_buffers(struct goldfish_pipe* pipe, unsigned long address, unsigned long address_end, int is_write, unsigned long last_page, unsigned int last_page_size, - s32* consumed_size, int* status) + s32 *consumed_size, int *status) { struct page *pages[MAX_BUFFERS_PER_COMMAND]; unsigned long first_page = address & PAGE_MASK; @@ -327,8 +352,9 @@ static int transfer_max_buffers(struct goldfish_pipe* pipe, pipe->command_buffer); /* Transfer the data */ - *status = goldfish_cmd_locked(pipe, - is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); + *status = goldfish_cmd_locked( + pipe, + is_write ? PIPE_CMD_WRITE : PIPE_CMD_READ); *consumed_size = pipe->command_buffer->rw_params.consumed_size; @@ -389,12 +415,14 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, s32 consumed_size; int status; ret = transfer_max_buffers(pipe, address, address_end, is_write, - last_page, last_page_size, &consumed_size, &status); + last_page, last_page_size, &consumed_size, &status); if (ret < 0) break; if (consumed_size > 0) { - /* No matter what's the status, we've transfered something */ + /* No matter what's the status, we've transfered + * something + */ count += consumed_size; address += consumed_size; } @@ -413,8 +441,9 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, * err. 
*/ if (status != PIPE_ERROR_AGAIN) - pr_info_ratelimited("goldfish_pipe: backend error %d on %s\n", - status, is_write ? "write" : "read"); + pr_err_ratelimited( + "goldfish_pipe: backend error %d on %s\n", + status, is_write ? "write" : "read"); break; } @@ -422,7 +451,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, * If the error is not PIPE_ERROR_AGAIN, or if we are in * non-blocking mode, just return the error code. */ - if (status != PIPE_ERROR_AGAIN || (filp->f_flags & O_NONBLOCK) != 0) { + if (status != PIPE_ERROR_AGAIN + || (filp->f_flags & O_NONBLOCK) != 0) { ret = goldfish_pipe_error_convert(status); break; } @@ -440,7 +470,8 @@ static ssize_t goldfish_pipe_read_write(struct file *filp, static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer, size_t bufflen, loff_t *ppos) { - return goldfish_pipe_read_write(filp, buffer, bufflen, /* is_write */ 0); + return goldfish_pipe_read_write(filp, buffer, bufflen, + /* is_write */ 0); } static ssize_t goldfish_pipe_write(struct file *filp, @@ -461,9 +492,8 @@ static unsigned int goldfish_pipe_poll(struct file *filp, poll_table *wait) poll_wait(filp, &pipe->wake_queue, wait); status = goldfish_cmd(pipe, PIPE_CMD_POLL); - if (status < 0) { + if (status < 0) return -ERESTARTSYS; - } if (status & PIPE_POLL_IN) mask |= POLLIN | POLLRDNORM; @@ -493,9 +523,9 @@ static void signalled_pipes_add_locked(struct goldfish_pipe_dev *dev, || dev->first_signalled_pipe == pipe) return; /* already in the list */ pipe->next_signalled = dev->first_signalled_pipe; - if (dev->first_signalled_pipe) { + if (dev->first_signalled_pipe) dev->first_signalled_pipe->prev_signalled = pipe; - } + dev->first_signalled_pipe = pipe; } @@ -511,21 +541,22 @@ static void signalled_pipes_remove_locked(struct goldfish_pipe_dev *dev, pipe->next_signalled = NULL; } -static struct goldfish_pipe *signalled_pipes_pop_front(struct goldfish_pipe_dev *dev, +static struct goldfish_pipe *signalled_pipes_pop_front( + struct goldfish_pipe_dev *dev, int *wakes) { struct goldfish_pipe *pipe; unsigned long flags; + spin_lock_irqsave(&dev->lock, flags); pipe = dev->first_signalled_pipe; if (pipe) { *wakes = pipe->signalled_flags; pipe->signalled_flags = 0; - /* - * This is an optimized version of signalled_pipes_remove_locked() - - * we want to make it as fast as possible to wake the sleeping pipe - * operations faster + /* This is an optimized version of + * signalled_pipes_remove_locked() - we want to make it as fast + * as possible to wake the sleeping pipe operations faster. */ dev->first_signalled_pipe = pipe->next_signalled; if (dev->first_signalled_pipe) @@ -553,8 +584,8 @@ static void goldfish_interrupt_task(unsigned long unused) clear_bit(BIT_WAKE_ON_WRITE, &pipe->flags); } /* - * wake_up_interruptible() implies a write barrier, so don't explicitly - * add another one here. + * wake_up_interruptible() implies a write barrier, so don't + * explicitly add another one here. */ wake_up_interruptible(&pipe->wake_queue); } @@ -608,6 +639,7 @@ static irqreturn_t goldfish_pipe_interrupt(int irq, void *dev_id) static int get_free_pipe_id_locked(struct goldfish_pipe_dev *dev) { int id; + for (id = 0; id < dev->pipes_capacity; ++id) if (!dev->pipes[id]) return id; @@ -657,12 +689,13 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) init_waitqueue_head(&pipe->wake_queue); /* - * Command buffer needs to be allocated on its own page to make sure it is - * physically contiguous in host's address space. 
+ * Command buffer needs to be allocated on its own page to make sure it + * is physically contiguous in host's address space. */ pipe->command_buffer = - (struct goldfish_pipe_command*)__get_free_page(GFP_KERNEL); + (struct goldfish_pipe_command *)__get_free_page(GFP_KERNEL); if (!pipe->command_buffer) { + pr_err("Could not alloc pipe command buffer!\n"); status = -ENOMEM; goto err_pipe; } @@ -671,6 +704,7 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) id = get_free_pipe_id_locked(dev); if (id < 0) { + pr_err("Could not get free pipe id!\n"); status = id; goto err_id_locked; } @@ -686,10 +720,13 @@ static int goldfish_pipe_open(struct inode *inode, struct file *file) (u64)(unsigned long)__pa(pipe->command_buffer); status = goldfish_cmd_locked(pipe, PIPE_CMD_OPEN); spin_unlock_irqrestore(&dev->lock, flags); - if (status < 0) + if (status < 0) { + pr_err("Could not tell host of new pipe! status=%d", status); goto err_cmd; + } /* All is done, save the pipe into the file's private data field */ file->private_data = pipe; + pr_debug("%s on 0x%p\n", __func__, pipe); return 0; err_cmd: @@ -709,6 +746,8 @@ static int goldfish_pipe_release(struct inode *inode, struct file *filp) struct goldfish_pipe *pipe = filp->private_data; struct goldfish_pipe_dev *dev = pipe->dev; + pr_debug("%s on 0x%p\n", __func__, pipe); + /* The guest is closing the channel, so tell the emulator right now */ (void)goldfish_cmd(pipe, PIPE_CMD_CLOSE); @@ -757,15 +796,16 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev) dev->first_signalled_pipe = NULL; dev->pipes_capacity = INITIAL_PIPES_CAPACITY; - dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), GFP_KERNEL); + dev->pipes = kcalloc(dev->pipes_capacity, sizeof(*dev->pipes), + GFP_KERNEL); if (!dev->pipes) return -ENOMEM; /* * We're going to pass two buffers, open_command_params and * signalled_pipe_buffers, to the host. This means each of those buffers - * needs to be contained in a single physical page. The easiest choice is - * to just allocate a page and place the buffers in it. + * needs to be contained in a single physical page. The easiest choice + * is to just allocate a page and place the buffers in it. 
*/ BUG_ON(sizeof(*dev->buffers) > PAGE_SIZE); page = (char*)__get_free_page(GFP_KERNEL); @@ -778,13 +818,19 @@ static int goldfish_pipe_device_init_v2(struct platform_device *pdev) /* Send the buffer addresses to the host */ { u64 paddr = __pa(&dev->buffers->signalled_pipe_buffers); - writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); - writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_SIGNAL_BUFFER); - writel((u32)MAX_SIGNALLED_PIPES, dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); + + writel((u32)(unsigned long)(paddr >> 32), + dev->base + PIPE_REG_SIGNAL_BUFFER_HIGH); + writel((u32)(unsigned long)paddr, + dev->base + PIPE_REG_SIGNAL_BUFFER); + writel((u32)MAX_SIGNALLED_PIPES, + dev->base + PIPE_REG_SIGNAL_BUFFER_COUNT); paddr = __pa(&dev->buffers->open_command_params); - writel((u32)(unsigned long)(paddr >> 32), dev->base + PIPE_REG_OPEN_BUFFER_HIGH); - writel((u32)(unsigned long)paddr, dev->base + PIPE_REG_OPEN_BUFFER); + writel((u32)(unsigned long)(paddr >> 32), + dev->base + PIPE_REG_OPEN_BUFFER_HIGH); + writel((u32)(unsigned long)paddr, + dev->base + PIPE_REG_OPEN_BUFFER); } return 0; } diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c index dd9ea463c2a4..d97340477cf3 100644 --- a/drivers/platform/goldfish/pdev_bus.c +++ b/drivers/platform/goldfish/pdev_bus.c @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> +#include <linux/goldfish.h> #define PDEV_BUS_OP_DONE (0x00) #define PDEV_BUS_OP_REMOVE_DEV (0x04) @@ -130,10 +131,9 @@ static int goldfish_new_pdev(void) dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1); *dev->pdev.dev.dma_mask = ~0; -#ifdef CONFIG_64BIT - writel((u32)((u64)name>>32), pdev_bus_base + PDEV_BUS_GET_NAME_HIGH); -#endif - writel((u32)(unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME); + gf_write_ptr(name, pdev_bus_base + PDEV_BUS_GET_NAME, + pdev_bus_base + PDEV_BUS_GET_NAME_HIGH); + name[name_len] = '\0'; dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID); dev->pdev.resource[0].start = base; diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c index 039a8b6a50b5..937f10e3c9ad 100644 --- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c @@ -1432,6 +1432,8 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Extended IOCTLs */ case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); if (copy_from_user(&extend_ioctl_data, (u8 *)ifr->ifr_ifru.ifru_data, diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c index c9e5a46c08f0..4dd7e4f3728e 100644 --- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c +++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c @@ -1566,6 +1566,8 @@ static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /* Extended IOCTLs */ case RMNET_IOCTL_EXTENDED: + if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN)) + return -EPERM; IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n"); if (copy_from_user(&extend_ioctl_data, (u8 *)ifr->ifr_ifru.ifru_data, diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index a3661cc44f86..0e0403e024c5 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -101,6 +101,15 @@ static const struct dmi_system_id asus_quirks[] = { }, { 
.callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X302UA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X302UA"), + }, + .driver_data = &quirk_asus_wapf4, + }, + { + .callback = dmi_matched, .ident = "ASUSTeK COMPUTER INC. X401U", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c index dfe1ee89f7c7..922a86787c5c 100644 --- a/drivers/power/pda_power.c +++ b/drivers/power/pda_power.c @@ -30,9 +30,9 @@ static inline unsigned int get_irq_flags(struct resource *res) static struct device *dev; static struct pda_power_pdata *pdata; static struct resource *ac_irq, *usb_irq; -static struct timer_list charger_timer; -static struct timer_list supply_timer; -static struct timer_list polling_timer; +static struct delayed_work charger_work; +static struct delayed_work polling_work; +static struct delayed_work supply_work; static int polling; static struct power_supply *pda_psy_ac, *pda_psy_usb; @@ -140,7 +140,7 @@ static void update_charger(void) } } -static void supply_timer_func(unsigned long unused) +static void supply_work_func(struct work_struct *work) { if (ac_status == PDA_PSY_TO_CHANGE) { ac_status = new_ac_status; @@ -161,11 +161,12 @@ static void psy_changed(void) * Okay, charger set. Now wait a bit before notifying supplicants, * charge power should stabilize. */ - mod_timer(&supply_timer, - jiffies + msecs_to_jiffies(pdata->wait_for_charger)); + cancel_delayed_work(&supply_work); + schedule_delayed_work(&supply_work, + msecs_to_jiffies(pdata->wait_for_charger)); } -static void charger_timer_func(unsigned long unused) +static void charger_work_func(struct work_struct *work) { update_status(); psy_changed(); @@ -184,13 +185,14 @@ static irqreturn_t power_changed_isr(int irq, void *power_supply) * Wait a bit before reading ac/usb line status and setting charger, * because ac/usb status readings may lag from irq. */ - mod_timer(&charger_timer, - jiffies + msecs_to_jiffies(pdata->wait_for_status)); + cancel_delayed_work(&charger_work); + schedule_delayed_work(&charger_work, + msecs_to_jiffies(pdata->wait_for_status)); return IRQ_HANDLED; } -static void polling_timer_func(unsigned long unused) +static void polling_work_func(struct work_struct *work) { int changed = 0; @@ -211,8 +213,9 @@ static void polling_timer_func(unsigned long unused) if (changed) psy_changed(); - mod_timer(&polling_timer, - jiffies + msecs_to_jiffies(pdata->polling_interval)); + cancel_delayed_work(&polling_work); + schedule_delayed_work(&polling_work, + msecs_to_jiffies(pdata->polling_interval)); } #if IS_ENABLED(CONFIG_USB_PHY) @@ -250,8 +253,9 @@ static int otg_handle_notification(struct notifier_block *nb, * Wait a bit before reading ac/usb line status and setting charger, * because ac/usb status readings may lag from irq. 
*/ - mod_timer(&charger_timer, - jiffies + msecs_to_jiffies(pdata->wait_for_status)); + cancel_delayed_work(&charger_work); + schedule_delayed_work(&charger_work, + msecs_to_jiffies(pdata->wait_for_status)); return NOTIFY_OK; } @@ -300,8 +304,8 @@ static int pda_power_probe(struct platform_device *pdev) if (!pdata->ac_max_uA) pdata->ac_max_uA = 500000; - setup_timer(&charger_timer, charger_timer_func, 0); - setup_timer(&supply_timer, supply_timer_func, 0); + INIT_DELAYED_WORK(&charger_work, charger_work_func); + INIT_DELAYED_WORK(&supply_work, supply_work_func); ac_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "ac"); usb_irq = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "usb"); @@ -385,9 +389,10 @@ static int pda_power_probe(struct platform_device *pdev) if (polling) { dev_dbg(dev, "will poll for status\n"); - setup_timer(&polling_timer, polling_timer_func, 0); - mod_timer(&polling_timer, - jiffies + msecs_to_jiffies(pdata->polling_interval)); + INIT_DELAYED_WORK(&polling_work, polling_work_func); + cancel_delayed_work(&polling_work); + schedule_delayed_work(&polling_work, + msecs_to_jiffies(pdata->polling_interval)); } if (ac_irq || usb_irq) @@ -433,9 +438,9 @@ static int pda_power_remove(struct platform_device *pdev) free_irq(ac_irq->start, pda_psy_ac); if (polling) - del_timer_sync(&polling_timer); - del_timer_sync(&charger_timer); - del_timer_sync(&supply_timer); + cancel_delayed_work_sync(&polling_work); + cancel_delayed_work_sync(&charger_work); + cancel_delayed_work_sync(&supply_work); if (pdata->is_usb_online) power_supply_unregister(pda_psy_usb); diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 84419af16f77..fd12ccc11e26 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone( power_zone->id = result; idr_init(&power_zone->idr); + result = -ENOMEM; power_zone->name = kstrdup(name, GFP_KERNEL); if (!power_zone->name) goto err_name_alloc; diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 2e481b9e8ea5..60a5e0c63a13 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -97,30 +97,26 @@ static s32 scaled_ppm_to_ppb(long ppm) /* posix clock implementation */ -static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp) +static int ptp_clock_getres(struct posix_clock *pc, struct timespec64 *tp) { tp->tv_sec = 0; tp->tv_nsec = 1; return 0; } -static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp) +static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); - struct timespec64 ts = timespec_to_timespec64(*tp); - return ptp->info->settime64(ptp->info, &ts); + return ptp->info->settime64(ptp->info, tp); } -static int ptp_clock_gettime(struct posix_clock *pc, struct timespec *tp) +static int ptp_clock_gettime(struct posix_clock *pc, struct timespec64 *tp) { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); - struct timespec64 ts; int err; - err = ptp->info->gettime64(ptp->info, &ts); - if (!err) - *tp = timespec64_to_timespec(ts); + err = ptp->info->gettime64(ptp->info, tp); return err; } @@ -133,7 +129,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx) ops = ptp->info; if (tx->modes & ADJ_SETOFFSET) { - struct timespec ts; + struct timespec64 ts; ktime_t kt; s64 delta; @@ -146,7 +142,7 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct 
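The pda_power conversion above swaps timer_list callbacks for delayed_work so the status updates run in process context instead of atomic timer context. Reduced to made-up names, the pattern is this sketch:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct delayed_work status_work;

static void status_work_func(struct work_struct *work)
{
	/* formerly the timer callback body */
}

static void example_init(void)
{
	INIT_DELAYED_WORK(&status_work, status_work_func);
}

static void example_kick(unsigned int delay_ms)
{
	/* replaces mod_timer(): drop any pending instance, then re-arm */
	cancel_delayed_work(&status_work);
	schedule_delayed_work(&status_work, msecs_to_jiffies(delay_ms));
}

static void example_exit(void)
{
	/* replaces del_timer_sync() */
	cancel_delayed_work_sync(&status_work);
}

mod_delayed_work() would be the closer one-call analogue of mod_timer(); the cancel-then-schedule pair above mirrors what the patch itself does.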
timex *tx) if ((unsigned long) ts.tv_nsec >= NSEC_PER_SEC) return -EINVAL; - kt = timespec_to_ktime(ts); + kt = timespec64_to_ktime(ts); delta = ktime_to_ns(kt); err = ops->adjtime(ops, delta); } else if (tx->modes & ADJ_FREQUENCY) { diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index d4de0607b502..3039fb762893 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -69,6 +69,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, struct tegra_pwm_chip *pc = to_tegra_pwm_chip(chip); unsigned long long c; unsigned long rate, hz; + unsigned long long ns100 = NSEC_PER_SEC; u32 val = 0; int err; @@ -87,9 +88,11 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * cycles at the PWM clock rate will take period_ns nanoseconds. */ rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; - hz = NSEC_PER_SEC / period_ns; - rate = (rate + (hz / 2)) / hz; + /* Consider precision in PWM_SCALE_WIDTH rate calculation */ + ns100 *= 100; + hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns); + rate = DIV_ROUND_CLOSEST(rate * 100, hz); /* * Since the actual PWM divider is the register's frequency divider diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c index 3a6d0290c54c..c5e272ea4372 100644 --- a/drivers/regulator/anatop-regulator.c +++ b/drivers/regulator/anatop-regulator.c @@ -296,6 +296,11 @@ static int anatop_regulator_probe(struct platform_device *pdev) if (!sreg->sel && !strcmp(sreg->name, "vddpu")) sreg->sel = 22; + /* set the default voltage of the pcie phy to be 1.100v */ + if (!sreg->sel && rdesc->name && + !strcmp(rdesc->name, "vddpcie")) + sreg->sel = 0x10; + if (!sreg->bypass && !sreg->sel) { dev_err(&pdev->dev, "Failed to read a valid default voltage selector.\n"); return -EINVAL; diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 853976bd3d36..9473715725df 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -217,6 +217,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) missing = year; } + /* Can't proceed if alarm is still invalid after replacing + * missing fields. + */ + err = rtc_valid_tm(&alarm->time); + if (err) + goto done; + /* with luck, no rollover is needed */ t_now = rtc_tm_to_time64(&now); t_alm = rtc_tm_to_time64(&alarm->time); @@ -268,9 +275,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) dev_warn(&rtc->dev, "alarm rollover not handled\n"); } -done: err = rtc_valid_tm(&alarm->time); +done: if (err) { dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n", alarm->time.tm_year + 1900, alarm->time.tm_mon + 1, diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 8f7034ba7d9e..86015b393dd5 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -41,6 +41,9 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_platform.h> +#ifdef CONFIG_X86 +#include <asm/i8259.h> +#endif /* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */ #include <asm-generic/rtc.h> @@ -1058,17 +1061,23 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { cmos_wake_setup(&pnp->dev); - if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) + if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { + unsigned int irq = 0; +#ifdef CONFIG_X86 /* Some machines contain a PNP entry for the RTC, but * don't define the IRQ. 
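The pwm-tegra hunk above replaces hz = NSEC_PER_SEC / period_ns, which truncates badly for long periods, with a x100 fixed-point version using rounded division. A hedged sketch of just that arithmetic:

#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/time.h>

/*
 * rate: PWM source clock rate, already shifted down by the duty width.
 * period_ns: requested period. Returns the frequency-divider value.
 */
static unsigned long example_pwm_div(unsigned long rate, int period_ns)
{
	unsigned long long ns100 = NSEC_PER_SEC;
	unsigned long hz;

	ns100 *= 100;					/* work in 0.01 Hz units */
	hz = DIV_ROUND_CLOSEST_ULL(ns100, period_ns);
	return DIV_ROUND_CLOSEST(rate * 100, hz);	/* the x100 cancels out */
}

For example, a 22 ms period gives hz = 4545 (45.45 Hz in 0.01 Hz units) rather than a truncated 45 Hz, and the scale factor cancels in the final rounded division.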
It should always be safe to - * hardcode it in these cases + * hardcode it on systems with a legacy PIC. */ + if (nr_legacy_irqs()) + irq = 8; +#endif return cmos_do_probe(&pnp->dev, - pnp_get_resource(pnp, IORESOURCE_IO, 0), 8); - else + pnp_get_resource(pnp, IORESOURCE_IO, 0), irq); + } else { return cmos_do_probe(&pnp->dev, pnp_get_resource(pnp, IORESOURCE_IO, 0), pnp_irq(pnp, 0)); + } } static void __exit cmos_pnp_remove(struct pnp_dev *pnp) diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 3b3049c8c9e0..c0eb113588ff 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -527,6 +527,10 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, if (get_user(new_margin, (int __user *)arg)) return -EFAULT; + /* the hardware's tick rate is 4096 Hz, so + * the counter value needs to be scaled accordingly + */ + new_margin <<= 12; if (new_margin < 1 || new_margin > 16777216) return -EINVAL; @@ -535,7 +539,8 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, ds1374_wdt_ping(); /* fallthrough */ case WDIOC_GETTIMEOUT: - return put_user(wdt_margin, (int __user *)arg); + /* when returning ... inverse is true */ + return put_user((wdt_margin >> 12), (int __user *)arg); case WDIOC_SETOPTIONS: if (copy_from_user(&options, (int __user *)arg, sizeof(int))) return -EFAULT; @@ -543,14 +548,15 @@ static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, if (options & WDIOS_DISABLECARD) { pr_info("disable watchdog\n"); ds1374_wdt_disable(); + return 0; } if (options & WDIOS_ENABLECARD) { pr_info("enable watchdog\n"); ds1374_wdt_settimeout(wdt_margin); ds1374_wdt_ping(); + return 0; } - return -EINVAL; } return -ENOTTY; diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index 229dd2fe8f45..c6b0c7ed7a30 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -150,6 +150,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) y_m_d = be32_to_cpu(__y_m_d); h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32); + + /* check if no alarm is set */ + if (y_m_d == 0 && h_m_s_ms == 0) { + pr_debug("No alarm is set\n"); + rc = -ENOENT; + goto exit; + } else { + pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms); + } + opal_to_tm(y_m_d, h_m_s_ms, &alarm->time); exit: diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index 950c5d0b6dca..afab89f5be48 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -257,7 +257,7 @@ static int snvs_rtc_probe(struct platform_device *pdev) of_property_read_u32(pdev->dev.of_node, "offset", &data->offset); } - if (!data->regmap) { + if (IS_ERR(data->regmap)) { dev_err(&pdev->dev, "Can't find snvs syscon\n"); return -ENODEV; } diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index e7a6f1222642..b76a85d14ef0 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -1881,8 +1881,12 @@ static int __dasd_device_is_unusable(struct dasd_device *device, { int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM); - if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { - /* dasd is being set offline. 
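The ds1374 watchdog hunk above notes that the counter runs at 4096 Hz, so the ioctl now converts the user's seconds to ticks (<< 12) before the range check and back (>> 12) when reporting. The conversion itself is trivial, sketched here with illustrative helper names:

#define DS1374_WDT_TICK_HZ	4096	/* 2^12 counter ticks per second */

static inline int example_secs_to_ticks(int secs)
{
	return secs << 12;
}

static inline int example_ticks_to_secs(int ticks)
{
	return ticks >> 12;
}

With the 16777216-tick ceiling kept from the original check, the maximum timeout works out to 4096 seconds.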
*/ + if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && + !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { + /* + * dasd is being set offline + * but it is no safe offline where we have to allow I/O + */ return 1; } if (device->stopped) { diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 5006cb6ce62d..50030cdf91fb 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -591,6 +591,11 @@ struct qeth_cmd_buffer { void (*callback) (struct qeth_channel *, struct qeth_cmd_buffer *); }; +static inline struct qeth_ipa_cmd *__ipa_cmd(struct qeth_cmd_buffer *iob) +{ + return (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE); +} + /** * definition of a qeth channel, used for read and write */ diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e5b9506698b1..95c631125a20 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -517,8 +517,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) queue == card->qdio.no_in_queues - 1; } - -static int qeth_issue_next_read(struct qeth_card *card) +static int __qeth_issue_next_read(struct qeth_card *card) { int rc; struct qeth_cmd_buffer *iob; @@ -549,6 +548,17 @@ static int qeth_issue_next_read(struct qeth_card *card) return rc; } +static int qeth_issue_next_read(struct qeth_card *card) +{ + int ret; + + spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card))); + ret = __qeth_issue_next_read(card); + spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card))); + + return ret; +} + static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) { struct qeth_reply *reply; @@ -952,7 +962,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread) spin_lock_irqsave(&card->thread_mask_lock, flags); card->thread_running_mask &= ~thread; spin_unlock_irqrestore(&card->thread_mask_lock, flags); - wake_up(&card->wait_q); + wake_up_all(&card->wait_q); } EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); @@ -1156,6 +1166,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, } rc = qeth_get_problem(cdev, irb); if (rc) { + card->read_or_write_problem = 1; qeth_clear_ipacmd_list(card); qeth_schedule_recovery(card); goto out; @@ -1174,7 +1185,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, return; if (channel == &card->read && channel->state == CH_STATE_UP) - qeth_issue_next_read(card); + __qeth_issue_next_read(card); iob = channel->iob; index = channel->buf_no; @@ -2054,7 +2065,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, unsigned long flags; struct qeth_reply *reply = NULL; unsigned long timeout, event_timeout; - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = NULL; QETH_CARD_TEXT(card, 2, "sendctl"); @@ -2068,23 +2079,27 @@ int qeth_send_control_data(struct qeth_card *card, int len, } reply->callback = reply_cb; reply->param = reply_param; - if (card->state == CARD_STATE_DOWN) - reply->seqno = QETH_IDX_COMMAND_SEQNO; - else - reply->seqno = card->seqno.ipa++; + init_waitqueue_head(&reply->wait_q); - spin_lock_irqsave(&card->lock, flags); - list_add_tail(&reply->list, &card->cmd_waiter_list); - spin_unlock_irqrestore(&card->lock, flags); QETH_DBF_HEX(CTRL, 2, iob->data, QETH_DBF_CTRL_LEN); while (atomic_cmpxchg(&card->write.irq_pending, 0, 1)) ; - qeth_prepare_control_data(card, len, iob); - if (IS_IPA(iob->data)) + if (IS_IPA(iob->data)) { + cmd = __ipa_cmd(iob); + cmd->hdr.seqno = card->seqno.ipa++; + reply->seqno = 
cmd->hdr.seqno; event_timeout = QETH_IPA_TIMEOUT; - else + } else { + reply->seqno = QETH_IDX_COMMAND_SEQNO; event_timeout = QETH_TIMEOUT; + } + qeth_prepare_control_data(card, len, iob); + + spin_lock_irqsave(&card->lock, flags); + list_add_tail(&reply->list, &card->cmd_waiter_list); + spin_unlock_irqrestore(&card->lock, flags); + timeout = jiffies + event_timeout; QETH_CARD_TEXT(card, 6, "noirqpnd"); @@ -2109,9 +2124,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, /* we have only one long running ipassist, since we can ensure process context of this command we can sleep */ - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - if ((cmd->hdr.command == IPA_CMD_SETIP) && - (cmd->hdr.prot_version == QETH_PROT_IPV4)) { + if (cmd && cmd->hdr.command == IPA_CMD_SETIP && + cmd->hdr.prot_version == QETH_PROT_IPV4) { if (!wait_event_timeout(reply->wait_q, atomic_read(&reply->received), event_timeout)) goto time_err; @@ -2877,7 +2891,7 @@ static void qeth_fill_ipacmd_header(struct qeth_card *card, memset(cmd, 0, sizeof(struct qeth_ipa_cmd)); cmd->hdr.command = command; cmd->hdr.initiator = IPA_CMD_INITIATOR_HOST; - cmd->hdr.seqno = card->seqno.ipa; + /* cmd->hdr.seqno is set by qeth_send_control_data() */ cmd->hdr.adapter_type = qeth_get_ipa_adp_type(card->info.link_type); cmd->hdr.rel_adapter_no = (__u8) card->info.portno; if (card->options.layer2) @@ -4966,8 +4980,6 @@ static void qeth_core_free_card(struct qeth_card *card) QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); qeth_clean_channel(&card->read); qeth_clean_channel(&card->write); - if (card->dev) - free_netdev(card->dev); kfree(card->ip_tbd_list); qeth_free_qdio_buffers(card); unregister_service_level(&card->qeth_service_level); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 58bcb3c9a86a..acdb5ccb0ab9 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1062,8 +1062,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) qeth_l2_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } return; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 0d6888cbd96e..bbdb3b6c54bb 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -3243,8 +3243,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) qeth_l3_set_offline(cgdev); if (card->dev) { - netif_napi_del(&card->napi); unregister_netdev(card->dev); + free_netdev(card->dev); card->dev = NULL; } diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index 499e369eabf0..8bc1625337f6 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -191,6 +191,7 @@ struct bnx2fc_hba { struct bnx2fc_cmd_mgr *cmd_mgr; spinlock_t hba_lock; struct mutex hba_mutex; + struct mutex hba_stats_mutex; unsigned long adapter_state; #define ADAPTER_STATE_UP 0 #define ADAPTER_STATE_GOING_DOWN 1 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 67405c628864..d0b227ffbd5f 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) if (!fw_stats) return NULL; + mutex_lock(&hba->hba_stats_mutex); + bnx2fc_stats = fc_get_host_stats(shost); init_completion(&hba->stat_req_done); if (bnx2fc_send_stat_req(hba)) - return bnx2fc_stats; + goto 
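The qeth_core_main changes above split qeth_issue_next_read() into a double-underscore variant, apparently meant to run with the ccw device lock already held (as in the IRQ handler), and a wrapper that takes the lock itself. A generic sketch of that locked/unlocked convention, with hypothetical names:

#include <linux/spinlock.h>

struct my_card;				/* hypothetical device structure */

static DEFINE_SPINLOCK(dev_lock);

/* Caller must hold dev_lock. */
static int __example_issue_next_read(struct my_card *card)
{
	/* ... start the next read request on the channel ... */
	return 0;
}

static int example_issue_next_read(struct my_card *card)
{
	int ret;

	spin_lock_irq(&dev_lock);
	ret = __example_issue_next_read(card);
	spin_unlock_irq(&dev_lock);
	return ret;
}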
unlock_stats_mutex; rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ)); if (!rc) { BNX2FC_HBA_DBG(lport, "FW stat req timed out\n"); - return bnx2fc_stats; + goto unlock_stats_mutex; } BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt); bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt; @@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost) memcpy(&hba->prev_stats, hba->stats_buffer, sizeof(struct fcoe_statistics_params)); + +unlock_stats_mutex: + mutex_unlock(&hba->hba_stats_mutex); return bnx2fc_stats; } @@ -1302,6 +1307,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic) } spin_lock_init(&hba->hba_lock); mutex_init(&hba->hba_mutex); + mutex_init(&hba->hba_stats_mutex); hba->cnic = cnic; diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 622bdabc8894..dab195f04da7 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -1769,7 +1769,6 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) goto bye; } - mempool_free(mbp, hw->mb_mempool); if (finicsum != cfcsum) { csio_warn(hw, "Config File checksum mismatch: csum=%#x, computed=%#x\n", @@ -1780,6 +1779,10 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param) rv = csio_hw_validate_caps(hw, mbp); if (rv != 0) goto bye; + + mempool_free(mbp, hw->mb_mempool); + mbp = NULL; + /* * Note that we're operating with parameters * not supplied by the driver, rather than from hard-wired diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 7a58128a0000..2f61d8cd5882 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -835,8 +835,10 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd) qc->err_mask |= AC_ERR_OTHER; sata_port->ioasa.status |= ATA_BUSY; - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); ata_qc_complete(qc); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -5864,8 +5866,10 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd) res->in_erp = 0; } scsi_dma_unmap(ipr_cmd->scsi_cmd); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -6255,8 +6259,10 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg, } scsi_dma_unmap(ipr_cmd->scsi_cmd); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); } /** @@ -6282,8 +6288,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd) scsi_dma_unmap(scsi_cmd); spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags); - list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); scsi_cmd->scsi_done(scsi_cmd); + if (ipr_cmd->eh_comp) + complete(ipr_cmd->eh_comp); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags); } else { spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index efce04df2109..9f0b00c38658 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) */ switch (session->state) { case ISCSI_STATE_FAILED: + /* + * cmds should fail during shutdown, if the session + 
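The bnx2fc hunks above wrap the whole firmware-statistics exchange in the new hba_stats_mutex so concurrent readers cannot interleave the request, the completion wait and the shared prev_stats copy. A sketch of that lock-around-request shape, with hypothetical helpers:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(stats_mutex);
static DECLARE_COMPLETION(stats_done);

static int example_send_stat_request(void)
{
	return 0;	/* hypothetical: kick the firmware request */
}

static int example_get_stats(void)
{
	int ret = 0;

	mutex_lock(&stats_mutex);
	reinit_completion(&stats_done);

	if (example_send_stat_request())
		goto out_unlock;	/* fall back to cached values */

	if (!wait_for_completion_timeout(&stats_done, 2 * HZ))
		ret = -ETIMEDOUT;
	/* else: copy the fresh counters while still holding the lock */

out_unlock:
	mutex_unlock(&stats_mutex);
	return ret;
}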
* state is bad, allowing completion to happen + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + reason = FAILURE_SESSION_FAILED; + sc->result = DID_NO_CONNECT << 16; + break; + } case ISCSI_STATE_IN_RECOVERY: reason = FAILURE_SESSION_IN_RECOVERY; sc->result = DID_IMM_RETRY << 16; @@ -1980,6 +1989,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) if (session->state != ISCSI_STATE_LOGGED_IN) { /* + * During shutdown, if session is prematurely disconnected, + * recovery won't happen and there will be hung cmds. Not + * handling cmds would trigger EH, also bad in this case. + * Instead, handle cmd, allow completion to happen and let + * upper layer to deal with the result. + */ + if (unlikely(system_state != SYSTEM_RUNNING)) { + sc->result = DID_NO_CONNECT << 16; + ISCSI_DBG_EH(session, "sc on shutdown, handled\n"); + rc = BLK_EH_HANDLED; + goto done; + } + /* * We are probably in the middle of iscsi recovery so let * that complete and handle the error. */ @@ -2083,7 +2105,7 @@ done: task->last_timeout = jiffies; spin_unlock(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? - "timer reset" : "nh"); + "timer reset" : "shutdown or nh"); return rc; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 022bb6e10d98..12886f96b286 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->phy->minimum_linkrate = dr->pmin_linkrate; phy->phy->maximum_linkrate = dr->pmax_linkrate; phy->phy->negotiated_linkrate = phy->linkrate; + phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED); skip: if (new_phy) @@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) res = smp_execute_task(dev, req, RPEL_REQ_SIZE, resp, RPEL_RESP_SIZE); - if (!res) + if (res) goto out; phy->invalid_dword_count = scsi_to_u32(&resp[12]); @@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy) phy->phy_reset_problem_count = scsi_to_u32(&resp[24]); out: + kfree(req); kfree(resp); return res; diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8379fbbc60db..ef43847153ea 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13493,6 +13493,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, case LPFC_Q_CREATE_VERSION_1: bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1, wq->entry_count); + bf_set(lpfc_mbox_hdr_version, &shdr->request, + LPFC_Q_CREATE_VERSION_1); + switch (wq->entry_size) { default: case 64: diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 14c0334f41e4..26c67c42985c 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -55,6 +55,7 @@ struct mac_esp_priv { int error; }; static struct esp *esp_chips[2]; +static DEFINE_SPINLOCK(esp_chips_lock); #define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \ platform_get_drvdata((struct platform_device *) \ @@ -562,15 +563,18 @@ static int esp_mac_probe(struct platform_device *dev) } host->irq = IRQ_MAC_SCSI; - esp_chips[dev->id] = esp; - mb(); - if (esp_chips[!dev->id] == NULL) { - err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); - if (err < 0) { - esp_chips[dev->id] = NULL; - goto fail_free_priv; - } + + /* The request_irq() call is intended to succeed for the first device + * and fail for the second device. 
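Both libiscsi hunks above key off the global system_state: once the system is no longer SYSTEM_RUNNING, commands are completed with DID_NO_CONNECT instead of being held for a recovery that will never run. The check itself is only a comparison:

#include <linux/kernel.h>	/* system_state, SYSTEM_RUNNING */
#include <linux/types.h>

static bool example_shutting_down(void)
{
	/* SYSTEM_RUNNING covers normal operation; halt/reboot moves past it */
	return system_state != SYSTEM_RUNNING;
}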
+ */ + err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL); + spin_lock(&esp_chips_lock); + if (err < 0 && esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); + goto fail_free_priv; } + esp_chips[dev->id] = esp; + spin_unlock(&esp_chips_lock); err = scsi_esp_register(esp, &dev->dev); if (err) @@ -579,8 +583,13 @@ static int esp_mac_probe(struct platform_device *dev) return 0; fail_free_irq: - if (esp_chips[!dev->id] == NULL) + spin_lock(&esp_chips_lock); + esp_chips[dev->id] = NULL; + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); free_irq(host->irq, esp); + } else + spin_unlock(&esp_chips_lock); fail_free_priv: kfree(mep); fail_free_command_block: @@ -599,9 +608,13 @@ static int esp_mac_remove(struct platform_device *dev) scsi_esp_unregister(esp); + spin_lock(&esp_chips_lock); esp_chips[dev->id] = NULL; - if (!(esp_chips[0] || esp_chips[1])) + if (esp_chips[!dev->id] == NULL) { + spin_unlock(&esp_chips_lock); free_irq(irq, NULL); + } else + spin_unlock(&esp_chips_lock); kfree(mep); diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index e111c3d8c5d6..b868ef3b2ca3 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -3886,19 +3886,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) return 0; } - /* - * Bug work around for firmware SATL handling. The loop - * is based on atomic operations and ensures consistency - * since we're lockless at this point - */ - do { - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { - scmd->result = SAM_STAT_BUSY; - scmd->scsi_done(scmd); - return 0; - } - } while (_scsih_set_satl_pending(scmd, true)); - sas_target_priv_data = sas_device_priv_data->sas_target; /* invalid device handle */ @@ -3924,6 +3911,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) sas_device_priv_data->block) return SCSI_MLQUEUE_DEVICE_BUSY; + /* + * Bug work around for firmware SATL handling. 
The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + if (scmd->sc_data_direction == DMA_FROM_DEVICE) mpi_control = MPI2_SCSIIO_CONTROL_READ; else if (scmd->sc_data_direction == DMA_TO_DEVICE) @@ -3945,6 +3945,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (!smid) { pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", ioc->name, __func__); + _scsih_set_satl_pending(scmd, false); goto out; } mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); @@ -3975,6 +3976,7 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) if (mpi_request->DataLength) { if (ioc->build_sg_scmd(ioc, scmd, smid)) { mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); goto out; } } else diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index e197c6f39de2..aa18c729d23a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -365,6 +365,7 @@ qla24xx_abort_sp_done(void *data, void *ptr, int res) srb_t *sp = (srb_t *)ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; + del_timer(&sp->u.iocb_cmd.timer); complete(&abt->u.abt.comp); } diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index e6faa0b050d1..824e27eec7a1 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -5502,7 +5502,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, fc_port_t *fcport; int rc; - fcport = kzalloc(sizeof(*fcport), GFP_KERNEL); + fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL); if (!fcport) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f, "qla_target(%d): Allocation of tmp FC port failed", diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 60720e5b1ebc..6b61b09b3226 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -180,7 +180,7 @@ static struct { {"HITACHI", "6586-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HITACHI", "6588-", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, {"HP", "A6189A", NULL, BLIST_SPARSELUN | BLIST_LARGELUN}, /* HP VA7400 */ - {"HP", "OPEN-", "*", BLIST_REPORTLUN2}, /* HP XP Arrays */ + {"HP", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, /* HP XP Arrays */ {"HP", "NetRAID-4M", NULL, BLIST_FORCELUN}, {"HP", "HSV100", NULL, BLIST_REPORTLUN2 | BLIST_NOSTARTONADD}, {"HP", "C1557A", NULL, BLIST_FORCELUN}, @@ -589,17 +589,12 @@ int scsi_get_device_flags_keyed(struct scsi_device *sdev, int key) { struct scsi_dev_info_list *devinfo; - int err; devinfo = scsi_dev_info_list_find(vendor, model, key); if (!IS_ERR(devinfo)) return devinfo->flags; - err = PTR_ERR(devinfo); - if (err != -ENOENT) - return err; - - /* nothing found, return nothing */ + /* key or device not found: return nothing */ if (key != SCSI_DEVINFO_GLOBAL) return 0; diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 4d655b568269..5711d58f9e81 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -56,10 +56,13 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { {"IBM", "1815", "rdac", }, {"IBM", "1818", "rdac", }, {"IBM", "3526", "rdac", }, + {"IBM", "3542", "rdac", }, + {"IBM", "3552", "rdac", }, {"SGI", "TP9", "rdac", }, {"SGI", "IS", "rdac", }, - {"STK", "OPENstorage D280", "rdac", }, + {"STK", "OPENstorage", "rdac", }, {"STK", "FLEXLINE 380", 
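The mpt3sas change above moves the SATL "command pending" guard after the cheap validity checks and clears it on every early-exit path so a failed submission cannot leave the bit stuck. The driver's _scsih_set_satl_pending() helper is not shown in this diff, so the sketch below only assumes the usual bitops pattern behind such a guard:

#include <linux/bitops.h>
#include <linux/errno.h>

static unsigned long example_flags;
#define EXAMPLE_PENDING_BIT	0

static int example_hw_queue(void)
{
	return 0;	/* hypothetical: hand the command to the HBA */
}

static int example_submit(void)
{
	int ret;

	/* Only one such command may be outstanding at a time. */
	if (test_and_set_bit(EXAMPLE_PENDING_BIT, &example_flags))
		return -EBUSY;

	ret = example_hw_queue();
	if (ret)
		clear_bit(EXAMPLE_PENDING_BIT, &example_flags);	/* undo on failure */

	return ret;
}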
"rdac", }, + {"STK", "BladeCtlr", "rdac", }, {"SUN", "CSM", "rdac", }, {"SUN", "LCSM100", "rdac", }, {"SUN", "STK6580_6780", "rdac", }, diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c index 044d06410d4c..01168acc864d 100644 --- a/drivers/scsi/ses.c +++ b/drivers/scsi/ses.c @@ -546,7 +546,6 @@ static void ses_enclosure_data_process(struct enclosure_device *edev, ecomp = &edev->component[components++]; if (!IS_ERR(ecomp)) { - ses_get_power_status(edev, ecomp); if (addl_desc_ptr) ses_process_descriptor( ecomp, diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 3bc15d2664a1..453171425ba9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -535,6 +535,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) } else count = (old_hdr->result == 0) ? 0 : -EIO; sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); retval = count; free_old_hdr: kfree(old_hdr); @@ -575,6 +576,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) } err_out: err2 = sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return err ? : err2 ? : count; } @@ -674,18 +676,14 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) * is a non-zero input_size, so emit a warning. */ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { - static char cmd[TASK_COMM_LEN]; - if (strcmp(current->comm, cmd)) { - printk_ratelimited(KERN_WARNING - "sg_write: data in/out %d/%d bytes " - "for SCSI command 0x%x-- guessing " - "data in;\n program %s not setting " - "count and/or reply_len properly\n", - old_hdr.reply_len - (int)SZ_SG_HEADER, - input_size, (unsigned int) cmnd[0], - current->comm); - strcpy(cmd, current->comm); - } + printk_ratelimited(KERN_WARNING + "sg_write: data in/out %d/%d bytes " + "for SCSI command 0x%x-- guessing " + "data in;\n program %s not setting " + "count and/or reply_len properly\n", + old_hdr.reply_len - (int)SZ_SG_HEADER, + input_size, (unsigned int) cmnd[0], + current->comm); } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? 
k : count; @@ -784,11 +782,15 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); + if (hp->dxfer_len >= SZ_256M) + return -EINVAL; + k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, "sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return k; /* probably out of space --> ENOMEM */ } if (atomic_read(&sdp->detaching)) { @@ -801,6 +803,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp, } sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); return -ENODEV; } @@ -1293,6 +1296,7 @@ sg_rq_end_io_usercontext(struct work_struct *work) struct sg_fd *sfp = srp->parentfp; sg_finish_rem_req(srp); + sg_remove_request(sfp, srp); kref_put(&sfp->f_ref, sg_remove_sfp); } @@ -1834,8 +1838,6 @@ sg_finish_rem_req(Sg_request *srp) else sg_remove_scat(sfp, req_schp); - sg_remove_request(sfp, srp); - return ret; } @@ -2072,11 +2074,12 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id) if ((1 == resp->done) && (!resp->sg_io_owned) && ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { resp->done = 2; /* guard against other readers */ - break; + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); + return resp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); - return resp; + return NULL; } /* always adds to end of list */ @@ -2182,12 +2185,17 @@ sg_remove_sfp_usercontext(struct work_struct *work) struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; Sg_request *srp; + unsigned long iflags; /* Cleanup any responses which were never read(). */ + write_lock_irqsave(&sfp->rq_list_lock, iflags); while (!list_empty(&sfp->rq_list)) { srp = list_first_entry(&sfp->rq_list, Sg_request, entry); sg_finish_rem_req(srp); + list_del(&srp->entry); + srp->parentfp = NULL; } + write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 03a2aadf0d3c..8ef905cbfc9c 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -28,6 +28,7 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> +#include <scsi/scsi_devinfo.h> #include <linux/seqlock.h> #define VIRTIO_SCSI_MEMPOOL_SZ 64 @@ -704,6 +705,28 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc) return virtscsi_tmf(vscsi, cmd); } +static int virtscsi_device_alloc(struct scsi_device *sdevice) +{ + /* + * Passed through SCSI targets (e.g. with qemu's 'scsi-block') + * may have transfer limits which come from the host SCSI + * controller or something on the host side other than the + * target itself. + * + * To make this work properly, the hypervisor can adjust the + * target's VPD information to advertise these limits. But + * for that to work, the guest has to look at the VPD pages, + * which we won't do by default if it is an SPC-2 device, even + * if it does actually support it. + * + * So, set the blist to always try to read the VPD pages. 
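The sg hunks above split "finish the request" from "unlink it from the per-fd list", and the teardown path now walks rq_list and does the list_del() itself under write_lock_irqsave(). Draining a list that way looks roughly like this sketch:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_req {
	struct list_head entry;
	/* ... per-request state ... */
};

static LIST_HEAD(req_list);
static DEFINE_RWLOCK(req_lock);

static void example_drain_requests(void)
{
	struct example_req *req;
	unsigned long iflags;

	write_lock_irqsave(&req_lock, iflags);
	while (!list_empty(&req_list)) {
		req = list_first_entry(&req_list, struct example_req, entry);
		list_del(&req->entry);
		/* release whatever the request still owns here */
	}
	write_unlock_irqrestore(&req_lock, iflags);
}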
+ */ + sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES; + + return 0; +} + + /** * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth * @sdev: Virtscsi target whose queue depth to change @@ -775,6 +798,7 @@ static struct scsi_host_template virtscsi_host_template_single = { .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, .dma_boundary = UINT_MAX, @@ -795,6 +819,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, + .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, .dma_boundary = UINT_MAX, .use_clustering = ENABLE_CLUSTERING, diff --git a/drivers/soc/qcom/hab/hab.c b/drivers/soc/qcom/hab/hab.c index 1e568c79fcae..3294fc34bdf8 100644 --- a/drivers/soc/qcom/hab/hab.c +++ b/drivers/soc/qcom/hab/hab.c @@ -378,7 +378,7 @@ struct hab_message *hab_vchan_recv(struct uhab_context *ctx, physical_channel_rx_dispatch((unsigned long) vchan->pchan); } - message = hab_msg_dequeue(vchan, !nonblocking_flag); + message = hab_msg_dequeue(vchan, flags); if (!message) { if (nonblocking_flag) ret = -EAGAIN; diff --git a/drivers/soc/qcom/hab/hab.h b/drivers/soc/qcom/hab/hab.h index 72635e70c94c..ffb0637055d4 100644 --- a/drivers/soc/qcom/hab/hab.h +++ b/drivers/soc/qcom/hab/hab.h @@ -147,7 +147,8 @@ struct hab_header { (((vcid) & HAB_VCID_ID_MASK) >> HAB_VCID_ID_SHIFT) -#define HAB_HEADER_SET_SESSION_ID(header, sid) ((header).session_id = (sid)) +#define HAB_HEADER_SET_SESSION_ID(header, sid) \ + ((header).session_id = (sid)) #define HAB_HEADER_SET_SIZE(header, size) \ ((header).id_type_size = ((header).id_type_size & \ @@ -281,8 +282,8 @@ struct uhab_context { }; /* - * array to describe the VM and its MMID configuration as what is connected to - * so this is describing a pchan's remote side + * array to describe the VM and its MMID configuration as + * what is connected to so this is describing a pchan's remote side */ struct vmid_mmid_desc { int vmid; /* remote vmid */ @@ -341,8 +342,9 @@ struct virtual_channel { }; /* - * Struct shared between local and remote, contents are composed by exporter, - * the importer only writes to pdata and local (exporter) domID + * Struct shared between local and remote, contents + * are composed by exporter, the importer only writes + * to pdata and local (exporter) domID */ struct export_desc { uint32_t export_id; @@ -410,16 +412,10 @@ int habmem_hyp_revoke(void *expdata, uint32_t count); void *habmem_imp_hyp_open(void); void habmem_imp_hyp_close(void *priv, int kernel); -long habmem_imp_hyp_map(void *priv, void *impdata, uint32_t count, - uint32_t remotedom, - uint64_t *index, - void **pkva, - int kernel, - uint32_t userflags); +int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param, + struct export_desc *exp, int kernel); -long habmm_imp_hyp_unmap(void *priv, uint64_t index, - uint32_t count, - int kernel); +int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp); int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma); @@ -427,7 +423,7 @@ int habmem_imp_hyp_mmap(struct file *flip, struct vm_area_struct *vma); void hab_msg_free(struct hab_message *message); struct hab_message *hab_msg_dequeue(struct virtual_channel *vchan, - int wait_flag); + unsigned int flags); void hab_msg_recv(struct physical_channel *pchan, struct hab_header *header); diff --git 
a/drivers/soc/qcom/hab/hab_mem_linux.c b/drivers/soc/qcom/hab/hab_mem_linux.c index ecc3f52a6662..a779067ee4c4 100644 --- a/drivers/soc/qcom/hab/hab_mem_linux.c +++ b/drivers/soc/qcom/hab/hab_mem_linux.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -29,6 +29,9 @@ struct pages_list { uint32_t userflags; struct file *filp_owner; struct file *filp_mapper; + struct dma_buf *dmabuf; + int32_t export_id; + int32_t vcid; }; struct importer_context { @@ -58,7 +61,7 @@ static int match_file(const void *p, struct file *file, unsigned int fd) } -static int habmem_get_dma_pages(unsigned long address, +static int habmem_get_dma_pages_from_va(unsigned long address, int page_count, struct page **pages) { @@ -142,6 +145,56 @@ err: return rc; } +static int habmem_get_dma_pages_from_fd(int32_t fd, + int page_count, + struct page **pages) +{ + struct dma_buf *dmabuf = NULL; + struct scatterlist *s; + struct sg_table *sg_table = NULL; + struct dma_buf_attachment *attach = NULL; + struct page *page; + int i, j, rc = 0; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + attach = dma_buf_attach(dmabuf, hab_driver.dev); + if (IS_ERR_OR_NULL(attach)) { + pr_err("dma_buf_attach failed\n"); + goto err; + } + + sg_table = dma_buf_map_attachment(attach, DMA_TO_DEVICE); + + if (IS_ERR_OR_NULL(sg_table)) { + pr_err("dma_buf_map_attachment failed\n"); + goto err; + } + + for_each_sg(sg_table->sgl, s, sg_table->nents, i) { + page = sg_page(s); + pr_debug("sgl length %d\n", s->length); + + for (j = 0; j < (s->length >> PAGE_SHIFT); j++) { + pages[rc] = nth_page(page, j); + rc++; + if (WARN_ON(rc >= page_count)) + break; + } + } + +err: + if (!IS_ERR_OR_NULL(sg_table)) + dma_buf_unmap_attachment(attach, sg_table, DMA_TO_DEVICE); + if (!IS_ERR_OR_NULL(attach)) + dma_buf_detach(dmabuf, attach); + if (!IS_ERR_OR_NULL(dmabuf)) + dma_buf_put(dmabuf); + return rc; +} + /* * exporter - grant & revoke * degenerate sharabled page list based on CPU friendly virtual "address". 
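habmem_get_dma_pages_from_fd() above turns a dma-buf file descriptor into a page array by attaching to the buffer and walking its scatter-gather table. A condensed sketch of that sequence (the device argument stands in for whatever struct device the importer attaches with, hab_driver.dev in the patch):

#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int example_pages_from_fd(struct device *dev, int fd,
				 struct page **pages, int page_count)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *s;
	int i, j, n = 0;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return PTR_ERR(sgt);
	}

	/* Each sg entry may cover several pages. */
	for_each_sg(sgt->sgl, s, sgt->nents, i)
		for (j = 0; j < s->length >> PAGE_SHIFT && n < page_count; j++)
			pages[n++] = nth_page(sg_page(s), j);

	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
	return n;
}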
@@ -165,7 +218,11 @@ int habmem_hyp_grant_user(unsigned long address, down_read(&current->mm->mmap_sem); if (HABMM_EXP_MEM_TYPE_DMA & flags) { - ret = habmem_get_dma_pages(address, + ret = habmem_get_dma_pages_from_va(address, + page_count, + pages); + } else if (HABMM_EXPIMP_FLAGS_FD & flags) { + ret = habmem_get_dma_pages_from_fd(address, page_count, pages); } else { @@ -260,30 +317,156 @@ void habmem_imp_hyp_close(void *imp_ctx, int kernel) kfree(priv); } -/* - * setup pages, be ready for the following mmap call - * index is output to refer to this imported buffer described by the import data - */ -long habmem_imp_hyp_map(void *imp_ctx, - void *impdata, - uint32_t count, - uint32_t remotedom, - uint64_t *index, - void **pkva, - int kernel, - uint32_t userflags) +static struct sg_table *hab_mem_map_dma_buf( + struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct dma_buf *dmabuf = attachment->dmabuf; + struct pages_list *pglist = dmabuf->priv; + struct sg_table *sgt; + struct scatterlist *sg; + int i; + int ret = 0; + struct page **pages = pglist->pages; + + sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + ret = sg_alloc_table(sgt, pglist->npages, GFP_KERNEL); + if (ret) { + kfree(sgt); + return ERR_PTR(-ENOMEM); + } + + for_each_sg(sgt->sgl, sg, pglist->npages, i) { + sg_set_page(sg, pages[i], PAGE_SIZE, 0); + } + + return sgt; +} + + +static void hab_mem_unmap_dma_buf(struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction) +{ + sg_free_table(sgt); + kfree(sgt); +} + +static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct page *page; + struct pages_list *pglist; + + unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; + + /* PHY address */ + unsigned long fault_offset = + (unsigned long)vmf->virtual_address - vma->vm_start + offset; + unsigned long fault_index = fault_offset>>PAGE_SHIFT; + int page_idx; + + if (vma == NULL) + return VM_FAULT_SIGBUS; + + pglist = vma->vm_private_data; + + page_idx = fault_index - pglist->index; + if (page_idx < 0 || page_idx >= pglist->npages) { + pr_err("Out of page array! page_idx %d, pg cnt %ld", + page_idx, pglist->npages); + return VM_FAULT_SIGBUS; + } + + page = pglist->pages[page_idx]; + get_page(page); + vmf->page = page; + return 0; +} + +static void hab_map_open(struct vm_area_struct *vma) +{ +} + +static void hab_map_close(struct vm_area_struct *vma) +{ +} + +static const struct vm_operations_struct habmem_vm_ops = { + .fault = hab_map_fault, + .open = hab_map_open, + .close = hab_map_close, +}; + +static int hab_mem_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) +{ + struct pages_list *pglist = dmabuf->priv; + uint32_t obj_size = pglist->npages << PAGE_SHIFT; + + if (vma == NULL) + return VM_FAULT_SIGBUS; + + /* Check for valid size. 
*/ + if (obj_size < vma->vm_end - vma->vm_start) + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_ops = &habmem_vm_ops; + vma->vm_private_data = pglist; + vma->vm_flags |= VM_MIXEDMAP; + + return 0; +} + +static void hab_mem_dma_buf_release(struct dma_buf *dmabuf) +{ +} + +static void *hab_mem_dma_buf_kmap(struct dma_buf *dmabuf, + unsigned long offset) +{ + return NULL; +} + +static void hab_mem_dma_buf_kunmap(struct dma_buf *dmabuf, + unsigned long offset, + void *ptr) +{ +} + +static struct dma_buf_ops dma_buf_ops = { + .map_dma_buf = hab_mem_map_dma_buf, + .unmap_dma_buf = hab_mem_unmap_dma_buf, + .mmap = hab_mem_mmap, + .release = hab_mem_dma_buf_release, + .kmap_atomic = hab_mem_dma_buf_kmap, + .kunmap_atomic = hab_mem_dma_buf_kunmap, + .kmap = hab_mem_dma_buf_kmap, + .kunmap = hab_mem_dma_buf_kunmap, +}; + +static int habmem_imp_hyp_map_fd(void *imp_ctx, + struct export_desc *exp, + uint32_t userflags, + int32_t *pfd) { struct page **pages; - struct compressed_pfns *pfn_table = (struct compressed_pfns *)impdata; + struct compressed_pfns *pfn_table = + (struct compressed_pfns *)exp->payload; struct pages_list *pglist; struct importer_context *priv = imp_ctx; unsigned long pfn; int i, j, k = 0; + pgprot_t prot = PAGE_KERNEL; + int32_t fd; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); if (!pfn_table || !priv) return -EINVAL; - pages = vmalloc(count * sizeof(struct page *)); + pages = vmalloc(exp->payload_count * sizeof(struct page *)); if (!pages) return -ENOMEM; @@ -303,145 +486,230 @@ long habmem_imp_hyp_map(void *imp_ctx, } pglist->pages = pages; - pglist->npages = count; - pglist->kernel = kernel; - pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT; + pglist->npages = exp->payload_count; + pglist->kernel = 0; + pglist->index = 0; pglist->refcntk = pglist->refcntu = 0; pglist->userflags = userflags; + pglist->export_id = exp->export_id; + pglist->vcid = exp->vcid_remote; + + if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) + prot = pgprot_writecombine(prot); + + exp_info.ops = &dma_buf_ops; + exp_info.size = exp->payload_count << PAGE_SHIFT; + exp_info.flags = O_RDWR; + exp_info.priv = pglist; + pglist->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(pglist->dmabuf)) { + vfree(pages); + kfree(pglist); + return PTR_ERR(pglist->dmabuf); + } - *index = pglist->index << PAGE_SHIFT; - - if (kernel) { - pgprot_t prot = PAGE_KERNEL; - - if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) - prot = pgprot_writecombine(prot); - - pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot); - if (pglist->kva == NULL) { - vfree(pages); - kfree(pglist); - pr_err("%ld pages vmap failed\n", pglist->npages); - return -ENOMEM; - } else { - pr_debug("%ld pages vmap pass, return %pK\n", - pglist->npages, pglist->kva); - } - - pglist->uva = NULL; - pglist->refcntk++; - *pkva = pglist->kva; - *index = (uint64_t)((uintptr_t)pglist->kva); - } else { - pglist->kva = NULL; + fd = dma_buf_fd(pglist->dmabuf, O_CLOEXEC); + if (fd < 0) { + dma_buf_put(pglist->dmabuf); + vfree(pages); + kfree(pglist); + return -EINVAL; } + pglist->refcntk++; + write_lock(&priv->implist_lock); list_add_tail(&pglist->list, &priv->imp_list); priv->cnt++; write_unlock(&priv->implist_lock); - pr_debug("index returned %llx\n", *index); + + *pfd = fd; return 0; } -/* the input index is PHY address shifted for uhab, and kva for khab */ -long habmm_imp_hyp_unmap(void *imp_ctx, - uint64_t index, - uint32_t count, - int kernel) +static int habmem_imp_hyp_map_kva(void *imp_ctx, + struct export_desc 
*exp, + uint32_t userflags, + void **pkva) { + struct page **pages; + struct compressed_pfns *pfn_table = + (struct compressed_pfns *)exp->payload; + struct pages_list *pglist; struct importer_context *priv = imp_ctx; - struct pages_list *pglist, *tmp; - int found = 0; - uint64_t pg_index = index >> PAGE_SHIFT; - - write_lock(&priv->implist_lock); - list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) { - pr_debug("node pglist %pK, kernel %d, pg_index %llx\n", - pglist, pglist->kernel, pg_index); + unsigned long pfn; + int i, j, k = 0; + pgprot_t prot = PAGE_KERNEL; - if (kernel) { - if (pglist->kva == (void *)((uintptr_t)index)) - found = 1; - } else { - if (pglist->index == pg_index) - found = 1; - } + if (!pfn_table || !priv) + return -EINVAL; + pages = vmalloc(exp->payload_count * sizeof(struct page *)); + if (!pages) + return -ENOMEM; + pglist = kzalloc(sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + vfree(pages); + return -ENOMEM; + } - if (found) { - list_del(&pglist->list); - priv->cnt--; - break; + pfn = pfn_table->first_pfn; + for (i = 0; i < pfn_table->nregions; i++) { + for (j = 0; j < pfn_table->region[i].size; j++) { + pages[k] = pfn_to_page(pfn+j); + k++; } + pfn += pfn_table->region[i].size + pfn_table->region[i].space; } - write_unlock(&priv->implist_lock); - if (!found) { - pr_err("failed to find export id on index %llx\n", index); - return -EINVAL; + pglist->pages = pages; + pglist->npages = exp->payload_count; + pglist->kernel = 1; + pglist->refcntk = pglist->refcntu = 0; + pglist->userflags = userflags; + pglist->export_id = exp->export_id; + pglist->vcid = exp->vcid_remote; + + if (!(userflags & HABMM_IMPORT_FLAGS_CACHED)) + prot = pgprot_writecombine(prot); + + pglist->kva = vmap(pglist->pages, pglist->npages, VM_MAP, prot); + if (pglist->kva == NULL) { + vfree(pages); + kfree(pglist); + pr_err("%ld pages vmap failed\n", pglist->npages); + return -ENOMEM; } - pr_debug("detach pglist %pK, index %llx, kernel %d, list cnt %d\n", - pglist, pglist->index, pglist->kernel, priv->cnt); + pr_debug("%ld pages vmap pass, return %p\n", + pglist->npages, pglist->kva); - if (kernel) - if (pglist->kva) - vunmap(pglist->kva); + pglist->refcntk++; - vfree(pglist->pages); - kfree(pglist); + write_lock(&priv->implist_lock); + list_add_tail(&pglist->list, &priv->imp_list); + priv->cnt++; + write_unlock(&priv->implist_lock); + + *pkva = pglist->kva; return 0; } -static int hab_map_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +static int habmem_imp_hyp_map_uva(void *imp_ctx, + struct export_desc *exp, + uint32_t userflags, + uint64_t *index) { - struct page *page; + struct page **pages; + struct compressed_pfns *pfn_table = + (struct compressed_pfns *)exp->payload; struct pages_list *pglist; + struct importer_context *priv = imp_ctx; + unsigned long pfn; + int i, j, k = 0; - unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; - - /* PHY address */ - unsigned long fault_offset = - (unsigned long)vmf->virtual_address - vma->vm_start + offset; - unsigned long fault_index = fault_offset>>PAGE_SHIFT; - int page_idx; + if (!pfn_table || !priv) + return -EINVAL; - if (vma == NULL) - return VM_FAULT_SIGBUS; + pages = vmalloc(exp->payload_count * sizeof(struct page *)); + if (!pages) + return -ENOMEM; - pglist = vma->vm_private_data; + pglist = kzalloc(sizeof(*pglist), GFP_KERNEL); + if (!pglist) { + vfree(pages); + return -ENOMEM; + } - page_idx = fault_index - pglist->index; - if (page_idx < 0 || page_idx >= pglist->npages) { - pr_err("Out of page array. 
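habmem_imp_hyp_map_fd() above goes the other direction: it wraps the imported page list in a freshly exported dma-buf with the driver's own dma_buf_ops and returns a file descriptor to userspace instead of the old PFN-derived index. The core export sequence, reduced to a sketch with placeholder ops and private data:

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/mm.h>

static int example_export_fd(const struct dma_buf_ops *ops, void *priv,
			     size_t nr_pages)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops = ops;		/* map_dma_buf, mmap, release, ... */
	exp_info.size = nr_pages << PAGE_SHIFT;
	exp_info.flags = O_RDWR;
	exp_info.priv = priv;		/* e.g. the importer's page list */

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);	/* drop the export's reference */

	return fd;
}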
page_idx %d, pg cnt %ld", - page_idx, pglist->npages); - return VM_FAULT_SIGBUS; + pfn = pfn_table->first_pfn; + for (i = 0; i < pfn_table->nregions; i++) { + for (j = 0; j < pfn_table->region[i].size; j++) { + pages[k] = pfn_to_page(pfn+j); + k++; + } + pfn += pfn_table->region[i].size + pfn_table->region[i].space; } - pr_debug("Fault page index %d\n", page_idx); + pglist->pages = pages; + pglist->npages = exp->payload_count; + pglist->index = page_to_phys(pages[0]) >> PAGE_SHIFT; + pglist->refcntk = pglist->refcntu = 0; + pglist->userflags = userflags; + pglist->export_id = exp->export_id; + pglist->vcid = exp->vcid_remote; + + write_lock(&priv->implist_lock); + list_add_tail(&pglist->list, &priv->imp_list); + priv->cnt++; + write_unlock(&priv->implist_lock); + + *index = pglist->index << PAGE_SHIFT; - page = pglist->pages[page_idx]; - get_page(page); - vmf->page = page; return 0; } -static void hab_map_open(struct vm_area_struct *vma) +int habmem_imp_hyp_map(void *imp_ctx, struct hab_import *param, + struct export_desc *exp, int kernel) { + int ret = 0; + + if (kernel) + ret = habmem_imp_hyp_map_kva(imp_ctx, exp, + param->flags, + (void **)&param->kva); + else if (param->flags & HABMM_EXPIMP_FLAGS_FD) + ret = habmem_imp_hyp_map_fd(imp_ctx, exp, + param->flags, + (int32_t *)&param->kva); + else + ret = habmem_imp_hyp_map_uva(imp_ctx, exp, + param->flags, + &param->index); + + return ret; } -static void hab_map_close(struct vm_area_struct *vma) +int habmm_imp_hyp_unmap(void *imp_ctx, struct export_desc *exp) { -} + struct importer_context *priv = imp_ctx; + struct pages_list *pglist, *tmp; + int found = 0; -static const struct vm_operations_struct habmem_vm_ops = { + write_lock(&priv->implist_lock); + list_for_each_entry_safe(pglist, tmp, &priv->imp_list, list) { + if (pglist->export_id == exp->export_id && + pglist->vcid == exp->vcid_remote) { + found = 1; + } - .fault = hab_map_fault, - .open = hab_map_open, - .close = hab_map_close, -}; + if (found) { + list_del(&pglist->list); + priv->cnt--; + break; + } + } + write_unlock(&priv->implist_lock); + + if (!found) { + pr_err("failed to find export id %u\n", exp->export_id); + return -EINVAL; + } + + pr_debug("detach pglist %p, kernel %d, list cnt %d\n", + pglist, pglist->kernel, priv->cnt); + + if (pglist->kva) + vunmap(pglist->kva); + + if (pglist->dmabuf) + dma_buf_put(pglist->dmabuf); + + vfree(pglist->pages); + kfree(pglist); + + return 0; +} int habmem_imp_hyp_mmap(struct file *filp, struct vm_area_struct *vma) { diff --git a/drivers/soc/qcom/hab/hab_mimex.c b/drivers/soc/qcom/hab/hab_mimex.c index 67601590908e..00fbeabed4bb 100644 --- a/drivers/soc/qcom/hab/hab_mimex.c +++ b/drivers/soc/qcom/hab/hab_mimex.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
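For kernel-side importers the same pages are instead mapped into vmalloc space, and HABMM_IMPORT_FLAGS_CACHED decides whether the mapping stays cacheable or is downgraded to write-combine. A sketch of that step:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_map_pages(struct page **pages, unsigned int npages,
			       bool cached)
{
	pgprot_t prot = PAGE_KERNEL;

	if (!cached)
		prot = pgprot_writecombine(prot);

	/* Contiguous kernel VA over a possibly discontiguous page set. */
	return vmap(pages, npages, VM_MAP, prot);
}

static void example_unmap_pages(void *kva)
{
	vunmap(kva);
}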
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -345,25 +345,20 @@ int hab_mem_import(struct uhab_context *ctx, exp->export_id, exp->payload_count, exp->domid_local, *((uint32_t *)exp->payload)); - ret = habmem_imp_hyp_map(ctx->import_ctx, - exp->payload, - exp->payload_count, - exp->domid_local, - &exp->import_index, - &exp->kva, - kernel, - param->flags); + ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel); + if (ret) { pr_err("Import fail ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n", ret, exp->payload_count, exp->domid_local, *((uint32_t *)exp->payload)); return ret; } - pr_debug("import index %llx, kva %llx, kernel %d\n", - exp->import_index, param->kva, kernel); - param->index = exp->import_index; - param->kva = (uint64_t)exp->kva; + exp->import_index = param->index; + exp->kva = kernel ? (void *)param->kva : NULL; + + pr_debug("import index %llx, kva or fd %llx, kernel %d\n", + exp->import_index, param->kva, kernel); return ret; } @@ -396,13 +391,10 @@ int hab_mem_unimport(struct uhab_context *ctx, if (!found) ret = -EINVAL; else { - ret = habmm_imp_hyp_unmap(ctx->import_ctx, - exp->import_index, - exp->payload_count, - kernel); + ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp); if (ret) { - pr_err("unmap fail id:%d pcnt:%d kernel:%d\n", - exp->export_id, exp->payload_count, kernel); + pr_err("unmap fail id:%d pcnt:%d vcid:%d\n", + exp->export_id, exp->payload_count, exp->vcid_remote); } param->kva = (uint64_t)exp->kva; kfree(exp); diff --git a/drivers/soc/qcom/hab/hab_msg.c b/drivers/soc/qcom/hab/hab_msg.c index 700239a25652..d5c625e8c1c9 100644 --- a/drivers/soc/qcom/hab/hab_msg.c +++ b/drivers/soc/qcom/hab/hab_msg.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -43,16 +43,24 @@ void hab_msg_free(struct hab_message *message) } struct hab_message * -hab_msg_dequeue(struct virtual_channel *vchan, int wait_flag) +hab_msg_dequeue(struct virtual_channel *vchan, unsigned int flags) { struct hab_message *message = NULL; int ret = 0; - - if (wait_flag) { - if (hab_rx_queue_empty(vchan)) - ret = wait_event_interruptible(vchan->rx_queue, - !hab_rx_queue_empty(vchan) || - vchan->otherend_closed); + int wait = !(flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING); + int interruptible = !(flags & HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE); + + if (wait) { + if (hab_rx_queue_empty(vchan)) { + if (interruptible) + ret = wait_event_interruptible(vchan->rx_queue, + !hab_rx_queue_empty(vchan) || + vchan->otherend_closed); + else + wait_event(vchan->rx_queue, + !hab_rx_queue_empty(vchan) || + vchan->otherend_closed); + } } /* return all the received messages before the remote close */ @@ -74,7 +82,7 @@ static void hab_msg_queue(struct virtual_channel *vchan, list_add_tail(&message->node, &vchan->rx_list); spin_unlock_bh(&vchan->rx_lock); - wake_up_interruptible(&vchan->rx_queue); + wake_up(&vchan->rx_queue); } static int hab_export_enqueue(struct virtual_channel *vchan, diff --git a/drivers/soc/qcom/hab/hab_vchan.c b/drivers/soc/qcom/hab/hab_vchan.c index e8b8866d570d..2db4db8f321b 100644 --- a/drivers/soc/qcom/hab/hab_vchan.c +++ b/drivers/soc/qcom/hab/hab_vchan.c @@ -110,10 +110,7 @@ hab_vchan_free(struct kref *ref) } spin_unlock_bh(&ctx->imp_lock); if (found) { - habmm_imp_hyp_unmap(ctx->import_ctx, - exp->import_index, - exp->payload_count, - ctx->kernel); + habmm_imp_hyp_unmap(ctx->import_ctx, exp); ctx->import_total--; kfree(exp); } @@ -160,7 +157,7 @@ void hab_vchan_stop(struct virtual_channel *vchan) { if (vchan) { vchan->otherend_closed = 1; - wake_up_interruptible(&vchan->rx_queue); + wake_up(&vchan->rx_queue); } } diff --git a/drivers/soc/qcom/msm_performance.c b/drivers/soc/qcom/msm_performance.c index 1046af031838..1857d369bc94 100644 --- a/drivers/soc/qcom/msm_performance.c +++ b/drivers/soc/qcom/msm_performance.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -2379,6 +2379,7 @@ end: static void __ref try_hotplug(struct cluster *data) { unsigned int i; + struct device *dev; if (!clusters_inited) return; @@ -2405,7 +2406,8 @@ static void __ref try_hotplug(struct cluster *data) pr_debug("msm_perf: Offlining CPU%d\n", i); cpumask_set_cpu(i, data->offlined_cpus); lock_device_hotplug(); - if (device_offline(get_cpu_device(i))) { + dev = get_cpu_device(i); + if (!dev || device_offline(dev)) { cpumask_clear_cpu(i, data->offlined_cpus); pr_debug("msm_perf: Offlining CPU%d failed\n", i); @@ -2423,7 +2425,8 @@ static void __ref try_hotplug(struct cluster *data) continue; pr_debug("msm_perf: Onlining CPU%d\n", i); lock_device_hotplug(); - if (device_online(get_cpu_device(i))) { + dev = get_cpu_device(i); + if (!dev || device_online(dev)) { pr_debug("msm_perf: Onlining CPU%d failed\n", i); unlock_device_hotplug(); @@ -2442,11 +2445,19 @@ static void __ref try_hotplug(struct cluster *data) static void __ref release_cluster_control(struct cpumask *off_cpus) { int cpu; + struct device *dev; for_each_cpu(cpu, off_cpus) { pr_debug("msm_perf: Release CPU %d\n", cpu); lock_device_hotplug(); - if (!device_online(get_cpu_device(cpu))) + dev = get_cpu_device(cpu); + if (!dev) { + pr_debug("msm_perf: Failed to get CPU%d\n", + cpu); + unlock_device_hotplug(); + continue; + } + if (!device_online(dev)) cpumask_clear_cpu(cpu, off_cpus); unlock_device_hotplug(); } diff --git a/drivers/soc/qcom/qdsp6v2/apr.c b/drivers/soc/qcom/qdsp6v2/apr.c index b1afd02b49bf..fefc348c0027 100644 --- a/drivers/soc/qcom/qdsp6v2/apr.c +++ b/drivers/soc/qcom/qdsp6v2/apr.c @@ -1,4 +1,5 @@ -/* Copyright (c) 2010-2014, 2016 The Linux Foundation. All rights reserved. +/* Copyright (c) 2010-2014, 2016, 2018 The Linux Foundation. + * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -209,6 +210,16 @@ static struct apr_svc_table svc_tbl_voice[] = { }, }; +static const struct apr_svc_table svc_tbl_sdsp[] = { + { + /* Micro Audio Service */ + .name = "MAS", + .idx = 0, + .id = APR_SVC_MAS, + .client_id = APR_CLIENT_AUDIO, + }, +}; + enum apr_subsys_state apr_get_modem_state(void) { return atomic_read(&q6.modem_state); @@ -444,6 +455,9 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, */ can_open_channel = false; domain_id = APR_DOMAIN_MODEM; + } else if (!strcmp(dest, "SDSP")) { + domain_id = APR_DOMAIN_SDSP; + pr_debug("APR: SDSP DOMAIN_ID %d\n", domain_id); } else { pr_err("APR: wrong destination\n"); goto done; @@ -472,6 +486,8 @@ struct apr_svc *apr_register(char *dest, char *svc_name, apr_fn svc_fn, } } pr_debug("%s: modem Up\n", __func__); + } else if (dest_id == APR_DEST_DSPS) { + pr_debug("%s: Sensor DSP Up\n", __func__); } if (apr_get_svc(svc_name, domain_id, &client_id, &svc_idx, &svc_id)) { @@ -624,6 +640,8 @@ void apr_cb_func(void *buf, int len, void *priv) pr_err("APR: Wrong svc :%d\n", svc); return; } + } else if (hdr->src_domain == APR_DOMAIN_SDSP) { + clnt = APR_CLIENT_AUDIO; } else { pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); return; @@ -700,6 +718,9 @@ int apr_get_svc(const char *svc_name, int domain_id, int *client_id, if ((domain_id == APR_DOMAIN_ADSP)) { tbl = (struct apr_svc_table *)&svc_tbl_qdsp6; size = ARRAY_SIZE(svc_tbl_qdsp6); + } else if (domain_id == APR_DOMAIN_SDSP) { + tbl = (struct apr_svc_table *)&svc_tbl_sdsp; + size = ARRAY_SIZE(svc_tbl_sdsp); } else { tbl = (struct apr_svc_table *)&svc_tbl_voice; size = ARRAY_SIZE(svc_tbl_voice); diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal.c b/drivers/soc/qcom/qdsp6v2/apr_tal.c index 6cffe7be655a..3884667cc12c 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_tal.c +++ b/drivers/soc/qcom/qdsp6v2/apr_tal.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2010-2011, 2013-2014, 2016 The Linux Foundation. +/* Copyright (c) 2010-2011, 2013-2014, 2016, 2018 The Linux Foundation. * All rights reserved. 
* * This program is free software; you can redistribute it and/or modify @@ -38,6 +38,14 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = { "apr_audio_svc", "apr_voice_svc", }, + { + "", + "", + }, + { + "apr_apps_sdsp", + "apr_apps_sdsp", + }, }; struct apr_svc_ch_dev apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX]; @@ -162,7 +170,8 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, if ((clnt >= APR_CLIENT_MAX) || (dest >= APR_DEST_MAX) || (dl >= APR_DL_MAX)) { - pr_err("apr_tal: Invalid params\n"); + pr_err("apr_tal: Invalid params clnt %d dest %d dl %d\n", + clnt, dest, dl); return NULL; } @@ -184,10 +193,12 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, pr_debug("apr_tal:Wakeup done\n"); apr_svc_ch[dl][dest][clnt].dest_state = 0; } + rc = smd_named_open_on_edge(svc_names[dest][clnt], dest, - &apr_svc_ch[dl][dest][clnt].ch, - &apr_svc_ch[dl][dest][clnt], - apr_tal_notify); + &apr_svc_ch[dl][dest][clnt].ch, + &apr_svc_ch[dl][dest][clnt], + apr_tal_notify); + if (rc < 0) { pr_err("apr_tal: smd_open failed %s\n", svc_names[dest][clnt]); @@ -256,6 +267,12 @@ static int apr_smd_probe(struct platform_device *pdev) clnt = APR_CLIENT_AUDIO; apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); + } else if (pdev->id == APR_DEST_DSPS) { + pr_info("apr_tal:Sensor DSP Is Up\n"); + dest = APR_DEST_DSPS; + clnt = APR_CLIENT_AUDIO; + apr_svc_ch[APR_DL_SMD][dest][clnt].dest_state = 1; + wake_up(&apr_svc_ch[APR_DL_SMD][dest][clnt].dest); } else pr_err("apr_tal:Invalid Dest Id: %d\n", pdev->id); @@ -278,6 +295,14 @@ static struct platform_driver apr_modem_driver = { }, }; +static struct platform_driver apr_sdsp_driver = { + .probe = apr_smd_probe, + .driver = { + .name = "apr_apps_sdsp", + .owner = THIS_MODULE, + }, +}; + static int __init apr_tal_init(void) { int i, j, k; @@ -293,6 +318,7 @@ static int __init apr_tal_init(void) } platform_driver_register(&apr_q6_driver); platform_driver_register(&apr_modem_driver); + platform_driver_register(&apr_sdsp_driver); return 0; } device_initcall(apr_tal_init); diff --git a/drivers/soc/qcom/qdsp6v2/apr_v2.c b/drivers/soc/qcom/qdsp6v2/apr_v2.c index 037fb3327ef0..d42f2ff5912e 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_v2.c +++ b/drivers/soc/qcom/qdsp6v2/apr_v2.c @@ -37,6 +37,8 @@ uint16_t apr_get_data_src(struct apr_hdr *hdr) return APR_DEST_MODEM; else if (hdr->src_domain == APR_DOMAIN_ADSP) return APR_DEST_QDSP6; + else if (hdr->src_domain == APR_DOMAIN_SDSP) + return APR_DEST_DSPS; else { pr_err("APR: Pkt from wrong source: %d\n", hdr->src_domain); return APR_DEST_MAX; /*RETURN INVALID VALUE*/ @@ -47,6 +49,8 @@ int apr_get_dest_id(char *dest) { if (!strcmp(dest, "ADSP")) return APR_DEST_QDSP6; + else if (!strcmp(dest, "SDSP")) + return APR_DEST_DSPS; else return APR_DEST_MODEM; } diff --git a/drivers/soc/qcom/qdsp6v2/apr_vm.c b/drivers/soc/qcom/qdsp6v2/apr_vm.c index 56592ac91e1b..bd555b6e6f3b 100644 --- a/drivers/soc/qcom/qdsp6v2/apr_vm.c +++ b/drivers/soc/qcom/qdsp6v2/apr_vm.c @@ -529,25 +529,23 @@ static int apr_vm_cb_thread(void *data) { uint32_t apr_rx_buf_len; struct aprv2_vm_ack_rx_pkt_available_t apr_ack; + unsigned long delay = jiffies + (HZ / 2); int status = 0; int ret = 0; while (1) { - apr_rx_buf_len = sizeof(apr_rx_buf); - ret = habmm_socket_recv(hab_handle_rx, - (void *)&apr_rx_buf, - &apr_rx_buf_len, - 0xFFFFFFFF, - 0); + do { + apr_rx_buf_len = sizeof(apr_rx_buf); + ret = habmm_socket_recv(hab_handle_rx, + (void *)&apr_rx_buf, + 
&apr_rx_buf_len, + 0xFFFFFFFF, + 0); + } while (time_before(jiffies, delay) && (ret == -EAGAIN) && + (apr_rx_buf_len == 0)); if (ret) { pr_err("%s: habmm_socket_recv failed %d\n", __func__, ret); - /* - * TODO: depends on the HAB error code, - * may need to implement - * a retry mechanism. - * break if recv failed ? - */ break; } diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c index 7ef16ad5575b..15c3e7e42c6d 100644 --- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c +++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion_vm.c @@ -83,6 +83,7 @@ static int msm_audio_ion_smmu_map(struct ion_client *client, struct msm_audio_smmu_vm_map_cmd_rsp cmd_rsp; struct msm_audio_smmu_map_data *map_data = NULL; struct msm_audio_smmu_vm_map_cmd smmu_map_cmd; + unsigned long delay = jiffies + (HZ / 2); rc = ion_handle_get_size(client, handle, len); if (rc) { @@ -122,12 +123,15 @@ static int msm_audio_ion_smmu_map(struct ion_client *client, goto err; } - cmd_rsp_size = sizeof(cmd_rsp); - rc = habmm_socket_recv(msm_audio_ion_hab_handle, - (void *)&cmd_rsp, - &cmd_rsp_size, - 0xFFFFFFFF, - 0); + do { + cmd_rsp_size = sizeof(cmd_rsp); + rc = habmm_socket_recv(msm_audio_ion_hab_handle, + (void *)&cmd_rsp, + &cmd_rsp_size, + 0xFFFFFFFF, + 0); + } while (time_before(jiffies, delay) && (rc == -EAGAIN) && + (cmd_rsp_size == 0)); if (rc) { pr_err("%s: habmm_socket_recv failed %d\n", __func__, rc); @@ -181,6 +185,7 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client, struct msm_audio_smmu_vm_unmap_cmd_rsp cmd_rsp; struct msm_audio_smmu_map_data *map_data, *next; struct msm_audio_smmu_vm_unmap_cmd smmu_unmap_cmd; + unsigned long delay = jiffies + (HZ / 2); /* * Though list_for_each_entry_safe is delete safe, lock @@ -205,12 +210,15 @@ static int msm_audio_ion_smmu_unmap(struct ion_client *client, goto err; } - cmd_rsp_size = sizeof(cmd_rsp); - rc = habmm_socket_recv(msm_audio_ion_hab_handle, - (void *)&cmd_rsp, - &cmd_rsp_size, - 0xFFFFFFFF, - 0); + do { + cmd_rsp_size = sizeof(cmd_rsp); + rc = habmm_socket_recv(msm_audio_ion_hab_handle, + (void *)&cmd_rsp, + &cmd_rsp_size, + 0xFFFFFFFF, + 0); + } while (time_before(jiffies, delay) && + (rc == -EAGAIN) && (cmd_rsp_size == 0)); if (rc) { pr_err("%s: habmm_socket_recv failed %d\n", __func__, rc); diff --git a/drivers/soc/qcom/qdsp6v2/voice_svc.c b/drivers/soc/qcom/qdsp6v2/voice_svc.c index c560ec7d7401..f01ab2499a75 100644 --- a/drivers/soc/qcom/qdsp6v2/voice_svc.c +++ b/drivers/soc/qcom/qdsp6v2/voice_svc.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -773,7 +773,7 @@ static int voice_svc_probe(struct platform_device *pdev) if (ret) { pr_err("%s: Failed to alloc chrdev\n", __func__); ret = -ENODEV; - goto chrdev_err; + goto done; } voice_svc_dev->major = MAJOR(device_num); @@ -820,8 +820,6 @@ dev_err: class_destroy(voice_svc_class); class_err: unregister_chrdev_region(0, MINOR_NUMBER); -chrdev_err: - kfree(voice_svc_dev); done: return ret; } @@ -835,7 +833,6 @@ static int voice_svc_remove(struct platform_device *pdev) device_destroy(voice_svc_class, device_num); class_destroy(voice_svc_class); unregister_chrdev_region(0, MINOR_NUMBER); - kfree(voice_svc_dev); return 0; } diff --git a/drivers/soc/qcom/rpm_stats.c b/drivers/soc/qcom/rpm_stats.c index b54af9eae8ec..ed7493d063ae 100644 --- a/drivers/soc/qcom/rpm_stats.c +++ b/drivers/soc/qcom/rpm_stats.c @@ -430,7 +430,7 @@ static ssize_t rpmstats_show(struct kobject *kobj, prvdata); } - ret = snprintf(buf, prvdata->len, prvdata->buf); + ret = snprintf(buf, prvdata->len, "%s", prvdata->buf); iounmap(prvdata->reg_base); ioremap_fail: kfree(prvdata); diff --git a/drivers/soc/qcom/scm_qcpe.c b/drivers/soc/qcom/scm_qcpe.c index dfa00f56476a..a788c8c3673e 100644 --- a/drivers/soc/qcom/scm_qcpe.c +++ b/drivers/soc/qcom/scm_qcpe.c @@ -195,32 +195,13 @@ static int scm_remap_error(int err) return -EINVAL; } -static int get_hab_vmid(u32 *mm_ip_id) -{ - int result, i; - struct device_node *hab_node = NULL; - int tmp = -1; - - /* parse device tree*/ - pr_info("parsing hab node in device tree...\n"); - hab_node = of_find_compatible_node(NULL, NULL, "qcom,hab"); - if (hab_node) { - /* read local vmid of this VM, like 0 for host, 1 for AGL GVM */ - result = of_property_read_u32(hab_node, "vmid", &tmp); - if (!result) { - pr_info("local vmid = %d\n", tmp); - *mm_ip_id = MM_QCPE_START + tmp; - return 0; - } - pr_err("failed to read local vmid, result = %d\n", result); - } else { - pr_err("no hab device tree node\n"); - } +#define MAX_SCM_ARGS 10 - pr_info("assuming default vmid = 2\n"); - *mm_ip_id = MM_QCPE_VM2; - return 0; -} +struct qcpe_msg_s { + uint64_t fn_id; + uint64_t arginfo; + uint64_t args[MAX_SCM_ARGS]; +}; static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) { @@ -228,16 +209,7 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) static u32 handle; u32 ret; u32 size_bytes; - - struct smc_params_s { - uint64_t x0; - uint64_t x1; - uint64_t x2; - uint64_t x3; - uint64_t x4; - uint64_t x5; - uint64_t sid; - } smc_params; + struct qcpe_msg_s msg; pr_info("scm_call_qcpe: IN: 0x%x, 0x%x, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx", fn_id, desc->arginfo, desc->args[0], desc->args[1], @@ -245,15 +217,7 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) desc->args[5], desc->args[6]); if (!opened) { - u32 mm_ip_id; - - ret = get_hab_vmid(&mm_ip_id); - if (ret) { - pr_err("scm_call_qcpe: get_hab_vmid failed with ret = %d", - ret); - return ret; - } - ret = habmm_socket_open(&handle, mm_ip_id, 0, 0); + ret = habmm_socket_open(&handle, MM_QCPE_VM1, 0, 0); if (ret) { pr_err("scm_call_qcpe: habmm_socket_open failed with ret = %d", ret); @@ -262,39 +226,45 @@ static int scm_call_qcpe(u32 fn_id, struct scm_desc *desc) opened = true; } - smc_params.x0 = fn_id | 0x40000000; /* SMC64_MASK */ - smc_params.x1 = desc->arginfo; - smc_params.x2 = desc->args[0]; - smc_params.x3 = desc->args[1]; - smc_params.x4 = desc->args[2]; - 
smc_params.x5 = desc->x5; - smc_params.sid = 0; + msg.fn_id = fn_id | 0x40000000; /* SMC64_MASK */ + msg.arginfo = desc->arginfo; + msg.args[0] = desc->args[0]; + msg.args[1] = desc->args[1]; + msg.args[2] = desc->args[2]; + msg.args[3] = desc->x5; + msg.args[4] = 0; - ret = habmm_socket_send(handle, &smc_params, sizeof(smc_params), 0); - if (ret) + ret = habmm_socket_send(handle, &msg, sizeof(msg), 0); + if (ret) { + pr_err("scm_call_qcpe: habmm_socket_send failed with ret = %d", + ret); return ret; + } - size_bytes = sizeof(smc_params); - memset(&smc_params, 0x0, sizeof(smc_params)); + size_bytes = sizeof(msg); + memset(&msg, 0x0, sizeof(msg)); - ret = habmm_socket_recv(handle, &smc_params, &size_bytes, 0, 0); - if (ret) + ret = habmm_socket_recv(handle, &msg, &size_bytes, 0, 0); + if (ret) { + pr_err("scm_call_qcpe: habmm_socket_recv failed with ret = %d", + ret); return ret; + } - if (size_bytes != sizeof(smc_params)) { + if (size_bytes != sizeof(msg)) { pr_err("scm_call_qcpe: expected size: %lu, actual=%u\n", - sizeof(smc_params), size_bytes); + sizeof(msg), size_bytes); return SCM_ERROR; } - desc->ret[0] = smc_params.x1; - desc->ret[1] = smc_params.x2; - desc->ret[2] = smc_params.x3; + desc->ret[0] = msg.args[1]; + desc->ret[1] = msg.args[2]; + desc->ret[2] = msg.args[3]; pr_info("scm_call_qcpe: OUT: 0x%llx, 0x%llx, 0x%llx, 0x%llx", - smc_params.x0, desc->ret[0], desc->ret[1], desc->ret[2]); + msg.args[0], msg.args[1], msg.args[2], msg.args[3]); - return smc_params.x0; + return msg.args[0]; } static u32 smc(u32 cmd_addr) diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 1ddba9ae8c0f..c872a2e54c4b 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c @@ -651,7 +651,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) buf = t->rx_buf; t->rx_dma = dma_map_single(&spi->dev, buf, t->len, DMA_FROM_DEVICE); - if (dma_mapping_error(&spi->dev, !t->rx_dma)) { + if (dma_mapping_error(&spi->dev, t->rx_dma)) { ret = -EFAULT; goto err_rx_map; } diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c index a6d7029a85ac..581df3ebfc88 100644 --- a/drivers/spi/spi-dw-mmio.c +++ b/drivers/spi/spi-dw-mmio.c @@ -120,8 +120,8 @@ static int dw_spi_mmio_remove(struct platform_device *pdev) { struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev); - clk_disable_unprepare(dwsmmio->clk); dw_spi_remove_host(&dwsmmio->dws); + clk_disable_unprepare(dwsmmio->clk); return 0; } diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index ed8283e7397a..83b53cd956aa 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -457,6 +457,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, int elements = 0; int word_len, element_count; struct omap2_mcspi_cs *cs = spi->controller_state; + void __iomem *chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; count = xfer->len; @@ -517,8 +519,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, if (l & OMAP2_MCSPI_CHCONF_TURBO) { elements--; - if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) - & OMAP2_MCSPI_CHSTAT_RXS)) { + if (!mcspi_wait_for_reg_bit(chstat_reg, + OMAP2_MCSPI_CHSTAT_RXS)) { u32 w; w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); @@ -536,8 +538,7 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, return count; } } - if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) - & 
OMAP2_MCSPI_CHSTAT_RXS)) { + if (!mcspi_wait_for_reg_bit(chstat_reg, OMAP2_MCSPI_CHSTAT_RXS)) { u32 w; w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0); diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c index e77add01b0e9..48888ab630c2 100644 --- a/drivers/spi/spi-sun6i.c +++ b/drivers/spi/spi-sun6i.c @@ -457,7 +457,7 @@ err_free_master: static int sun6i_spi_remove(struct platform_device *pdev) { - pm_runtime_disable(&pdev->dev); + pm_runtime_force_suspend(&pdev->dev); return 0; } diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig index 26629b856f91..6c4445863705 100644 --- a/drivers/staging/android/Kconfig +++ b/drivers/staging/android/Kconfig @@ -90,6 +90,15 @@ config ONESHOT_SYNC_USER help Provide a userspace API for creating oneshot sync objects. +config ANDROID_VSOC + tristate "Android Virtual SoC support" + default n + depends on PCI_MSI + ---help--- + This option adds support for the Virtual SoC driver needed to boot + a 'cuttlefish' Android image inside QEmu. The driver interacts with + a QEmu ivshmem device. If built as a module, it will be called vsoc. + source "drivers/staging/android/ion/Kconfig" source "drivers/staging/android/fiq_debugger/Kconfig" diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile index b0b47ae4c0ea..8ef816152020 100644 --- a/drivers/staging/android/Makefile +++ b/drivers/staging/android/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o obj-$(CONFIG_SYNC) += sync.o sync_debug.o obj-$(CONFIG_SW_SYNC) += sw_sync.o obj-$(CONFIG_ONESHOT_SYNC) += oneshot_sync.o +obj-$(CONFIG_ANDROID_VSOC) += vsoc.o diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO index 8f3ac37bfe12..0c32c00fa700 100644 --- a/drivers/staging/android/TODO +++ b/drivers/staging/android/TODO @@ -25,5 +25,15 @@ ion/ exposes existing cma regions and doesn't reserve unecessarily memory when booting a system which doesn't use ion. +vsoc.c, uapi/vsoc_shm.h + - The current driver uses the same wait queue for all of the futexes in a + region. This will cause false wakeups in regions with a large number of + waiting threads. We should eventually use multiple queues and select the + queue based on the region. + - Add debugfs support for examining the permissions of regions. + - Use ioremap_wc instead of ioremap_nocache. + - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been + superseded by the futex and is there for legacy reasons. 
+ Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com> diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c index a9c1d93c557e..cba6b4e17fee 100644 --- a/drivers/staging/android/ashmem.c +++ b/drivers/staging/android/ashmem.c @@ -330,24 +330,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) mutex_lock(&ashmem_mutex); if (asma->size == 0) { - ret = -EINVAL; - goto out; + mutex_unlock(&ashmem_mutex); + return -EINVAL; } if (!asma->file) { - ret = -EBADF; - goto out; + mutex_unlock(&ashmem_mutex); + return -EBADF; } + mutex_unlock(&ashmem_mutex); + ret = vfs_llseek(asma->file, offset, origin); if (ret < 0) - goto out; + return ret; /** Copy f_pos from backing file, since f_ops->llseek() sets it */ file->f_pos = asma->file->f_pos; - -out: - mutex_unlock(&ashmem_mutex); return ret; } @@ -698,16 +697,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, size_t pgstart, pgend; int ret = -EINVAL; + if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) + return -EFAULT; + mutex_lock(&ashmem_mutex); if (unlikely(!asma->file)) goto out_unlock; - if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) { - ret = -EFAULT; - goto out_unlock; - } - /* per custom, you can pass zero for len to mean "everything onward" */ if (!pin.len) pin.len = PAGE_ALIGN(asma->size) - pin.offset; diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h new file mode 100644 index 000000000000..741b1387c25b --- /dev/null +++ b/drivers/staging/android/uapi/vsoc_shm.h @@ -0,0 +1,303 @@ +/* + * Copyright (C) 2017 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_VSOC_SHM_H +#define _UAPI_LINUX_VSOC_SHM_H + +#include <linux/types.h> + +/** + * A permission is a token that permits a receiver to read and/or write an area + * of memory within a Vsoc region. + * + * An fd_scoped permission grants both read and write access, and can be + * attached to a file description (see open(2)). + * Ownership of the area can then be shared by passing a file descriptor + * among processes. + * + * begin_offset and end_offset define the area of memory that is controlled by + * the permission. owner_offset points to a word, also in shared memory, that + * controls ownership of the area. + * + * Ownership of the region expires when the associated file description is + * released. + * + * At most one permission can be attached to each file description. + * + * This is useful when implementing HALs like gralloc that scope and pass + * ownership of shared resources via file descriptors. + * + * The caller is responsible for doing any fencing. + * + * The calling process will normally identify a currently free area of + * memory. It will construct a proposed fd_scoped_permission_arg structure: + * + * begin_offset and end_offset describe the area being claimed + * + * owner_offset points to the location in shared memory that indicates the + * owner of the area.
+ * + * owned_value is the value that will be stored in owner_offset iff the + * permission can be granted. It must be different from VSOC_REGION_FREE. + * + * Two fd_scoped_permission structures are compatible if they vary only by + * their owned_value fields. + * + * The driver ensures that, for any group of simultaneous callers proposing + * compatible fd_scoped_permissions, it will accept exactly one of the + * proposals. The other callers will get a failure with errno of EAGAIN. + * + * A process receiving a file descriptor can identify the region being + * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl. + */ +struct fd_scoped_permission { + __u32 begin_offset; + __u32 end_offset; + __u32 owner_offset; + __u32 owned_value; +}; + +/* + * This value represents a free area of memory. The driver expects to see this + * value at owner_offset when creating a permission, otherwise it will refuse to create it, + * and will write this value back once the permission is no longer needed. + */ +#define VSOC_REGION_FREE ((__u32)0) + +/** + * ioctl argument for VSOC_CREATE_FD_SCOPED_PERMISSION + */ +struct fd_scoped_permission_arg { + struct fd_scoped_permission perm; + __s32 managed_region_fd; +}; + +#define VSOC_NODE_FREE ((__u32)0) + +/* + * Describes a signal table in shared memory. Each non-zero entry in the + * table indicates that the receiver should signal the futex at the given + * offset. Offsets are relative to the region, not the shared memory window. + * + * interrupt_signalled_offset is used to reliably signal interrupts across the + * vmm boundary. There are two roles: transmitter and receiver. For example, + * in the host_to_guest_signal_table the host is the transmitter and the + * guest is the receiver. The protocol is as follows: + * + * 1. The transmitter should convert the offset of the futex to an offset + * in the signal table [0, (1 << num_nodes_lg2)) + * The transmitter can choose any appropriate hashing algorithm, including + * hash = futex_offset & ((1 << num_nodes_lg2) - 1) + * + * 2. The transmitter should atomically compare and swap futex_offset with 0 + * at hash. There are 3 possible outcomes: + * a. The swap fails because the futex_offset is already in the table. + * The transmitter should stop. + * b. Some other offset is in the table. This is a hash collision. The + * transmitter should move to another table slot and try again. One + * possible algorithm: + * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1) + * c. The swap worked. Continue below. + * + * 3. The transmitter atomically swaps 1 with the value at the + * interrupt_signalled_offset. There are two outcomes: + * a. The prior value was 1. In this case an interrupt has already been + * posted. The transmitter is done. + * b. The prior value was 0, indicating that the receiver may be sleeping. + * The transmitter will issue an interrupt. + * + * 4. On waking the receiver immediately exchanges a 0 with the + * interrupt_signalled_offset. If it receives a 0 then this is a spurious + * interrupt. That may occasionally happen in the current protocol, but + * should be rare. + * + * 5. The receiver scans the signal table by atomically exchanging 0 at each + * location. If a non-zero offset is returned from the exchange the + * receiver wakes all sleepers at the given offset: + * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT); + * + * 6. The receiver thread then does a conditional wait, waking immediately + * if the value at interrupt_signalled_offset is non-zero.
This catches cases + * where additional signals were posted while the table was being scanned. + * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT + * ioctl. + */ +struct vsoc_signal_table_layout { + /* log_2(Number of signal table entries) */ + __u32 num_nodes_lg2; + /* + * Offset to the first signal table entry relative to the start of the + * region + */ + __u32 futex_uaddr_table_offset; + /* + * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates + * that one or more offsets are currently posted in the table. + * semi-unique access to an entry in the table + */ + __u32 interrupt_signalled_offset; +}; + +#define VSOC_REGION_WHOLE ((__s32)0) +#define VSOC_DEVICE_NAME_SZ 16 + +/** + * Each HAL would (usually) talk to a single device region. + * Multiple entities care about these regions: + * - The ivshmem_server will populate the regions in shared memory + * - The guest kernel will read the region, create minor device nodes, and + * allow interested parties to register for FUTEX_WAKE events in the region + * - HALs will access via the minor device nodes published by the guest kernel + * - Host side processes will access the region via the ivshmem_server: + * 1. Pass name to ivshmem_server at a UNIX socket + * 2. ivshmem_server will reply with 2 fds: + * - host->guest doorbell fd + * - guest->host doorbell fd + * - fd for the shared memory region + * - region offset + * 3. Start a futex receiver thread on the doorbell fd pointed at the + * signal_nodes + */ +struct vsoc_device_region { + __u16 current_version; + __u16 min_compatible_version; + __u32 region_begin_offset; + __u32 region_end_offset; + __u32 offset_of_region_data; + struct vsoc_signal_table_layout guest_to_host_signal_table; + struct vsoc_signal_table_layout host_to_guest_signal_table; + /* Name of the device. Must always be terminated with a '\0', so + * the longest supported device name is 15 characters. + */ + char device_name[VSOC_DEVICE_NAME_SZ]; + /* There are two ways that permissions to access regions are handled: + * - When subdivided_by is VSOC_REGION_WHOLE, any process that can + * open the device node for the region gains complete access to it. + * - When subdivided_by is set, processes that open the region cannot + * access it. Access to a sub-region must be established by invoking + * the VSOC_CREATE_FD_SCOPED_PERMISSION ioctl on the region + * referenced in subdivided_by, providing a file instance + * (represented by a fd) opened on this region. + */ + __u32 managed_by; +}; + +/* + * The vsoc layout descriptor. + * The first 4K should be reserved for the shm header and region descriptors. + * The regions should be page aligned. + */ + +struct vsoc_shm_layout_descriptor { + __u16 major_version; + __u16 minor_version; + + /* size of the shm. This may be redundant but nice to have */ + __u32 size; + + /* number of shared memory regions */ + __u32 region_count; + + /* The offset to the start of region descriptors */ + __u32 vsoc_region_desc_offset; +}; + +/* + * This specifies the current version that should be stored in + * vsoc_shm_layout_descriptor.major_version and + * vsoc_shm_layout_descriptor.minor_version. + * It should be updated only if the vsoc_device_region and + * vsoc_shm_layout_descriptor structures have changed. + * Versioning within each region is transferred + * via the min_compatible_version and current_version fields in + * vsoc_device_region.
The driver does not consult these fields: they are left + * for the HALs and host processes and will change independently of the layout + * version. + */ +#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2 +#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0 + +#define VSOC_CREATE_FD_SCOPED_PERMISSION \ + _IOW(0xF5, 0, struct fd_scoped_permission) +#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission) + +/* + * This is used to signal the host to scan the guest_to_host_signal_table + * for new futexes to wake. This sends an interrupt if one is not already + * in flight. + */ +#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2) + +/* + * When this returns the guest will scan host_to_guest_signal_table to + * check for new futexes to wake. + */ +/* TODO(ghartman): Consider moving this to the bottom half */ +#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3) + +/* + * Guest HALs will use this to retrieve the region description after + * opening their device node. + */ +#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region) + +/* + * Wake any threads that may be waiting for a host interrupt on this region. + * This is mostly used during shutdown. + */ +#define VSOC_SELF_INTERRUPT _IO(0xF5, 5) + +/* + * This is used to signal the host to scan the guest_to_host_signal_table + * for new futexes to wake. This sends an interrupt unconditionally. + */ +#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6) + +enum wait_types { + VSOC_WAIT_UNDEFINED = 0, + VSOC_WAIT_IF_EQUAL = 1, + VSOC_WAIT_IF_EQUAL_TIMEOUT = 2 +}; + +/* + * Wait for a condition to be true + * + * Note, this is sized and aligned so the 32 bit and 64 bit layouts are + * identical. + */ +struct vsoc_cond_wait { + /* Input: Offset of the 32 bit word to check */ + __u32 offset; + /* Input: Value that will be compared with the offset */ + __u32 value; + /* Monotonic time to wake at in seconds */ + __u64 wake_time_sec; + /* Input: Monotonic time to wait in nanoseconds */ + __u32 wake_time_nsec; + /* Input: Type of wait */ + __u32 wait_type; + /* Output: Number of times the thread woke before returning. */ + __u32 wakes; + /* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit + * compatibility. + */ + __u32 reserved_1; +}; + +#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait) + +/* Wake any local threads waiting at the offset given in arg */ +#define VSOC_COND_WAKE _IO(0xF5, 8) + +#endif /* _UAPI_LINUX_VSOC_SHM_H */ diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c new file mode 100644 index 000000000000..587c66d709b9 --- /dev/null +++ b/drivers/staging/android/vsoc.c @@ -0,0 +1,1169 @@ +/* + * drivers/android/staging/vsoc.c + * + * Android Virtual System on a Chip (VSoC) driver + * + * Copyright (C) 2017 Google, Inc. + * + * Author: ghartman@google.com + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * + * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory + * Copyright 2009 Cam Macdonell <cam@cs.ualberta.ca> + * + * Based on cirrusfb.c and 8139cp.c: + * Copyright 1999-2001 Jeff Garzik + * Copyright 2001-2004 Jeff Garzik + */ + +#include <linux/dma-mapping.h> +#include <linux/freezer.h> +#include <linux/futex.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/proc_fs.h> +#include <linux/sched.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> +#include <linux/cdev.h> +#include <linux/file.h> +#include "uapi/vsoc_shm.h" + +#define VSOC_DEV_NAME "vsoc" + +/* + * Description of the ivshmem-doorbell PCI device used by QEmu. These + * constants follow docs/specs/ivshmem-spec.txt, which can be found in + * the QEmu repository. This was last reconciled with the version that + * came out with 2.8 + */ + +/* + * These constants are the KVM Inter-VM shared memory device + * register offsets + */ +enum { + INTR_MASK = 0x00, /* Interrupt Mask */ + INTR_STATUS = 0x04, /* Interrupt Status */ + IV_POSITION = 0x08, /* VM ID */ + DOORBELL = 0x0c, /* Doorbell */ +}; + +static const int REGISTER_BAR; /* Equal to 0 */ +static const int MAX_REGISTER_BAR_LEN = 0x100; +/* + * The MSI-x BAR is not used directly. + * + * static const int MSI_X_BAR = 1; + */ +static const int SHARED_MEMORY_BAR = 2; + +struct vsoc_region_data { + char name[VSOC_DEVICE_NAME_SZ + 1]; + wait_queue_head_t interrupt_wait_queue; + /* TODO(b/73664181): Use multiple futex wait queues */ + wait_queue_head_t futex_wait_queue; + /* Flag indicating that an interrupt has been signalled by the host. */ + atomic_t *incoming_signalled; + /* Flag indicating the guest has signalled the host. */ + atomic_t *outgoing_signalled; + int irq_requested; + int device_created; +}; + +struct vsoc_device { + /* Kernel virtual address of REGISTER_BAR. */ + void __iomem *regs; + /* Physical address of SHARED_MEMORY_BAR. */ + phys_addr_t shm_phys_start; + /* Kernel virtual address of SHARED_MEMORY_BAR. */ + void *kernel_mapped_shm; + /* Size of the entire shared memory window in bytes. */ + size_t shm_size; + /* + * Pointer to the virtual address of the shared memory layout structure. + * This is probably identical to kernel_mapped_shm, but saving this + * here saves a lot of annoying casts. + */ + struct vsoc_shm_layout_descriptor *layout; + /* + * Points to a table of region descriptors in the kernel's virtual + * address space. Calculated from + * vsoc_shm_layout_descriptor.vsoc_region_desc_offset + */ + struct vsoc_device_region *regions; + /* Head of a list of permissions that have been granted. */ + struct list_head permissions; + struct pci_dev *dev; + /* Per-region (and therefore per-interrupt) information. */ + struct vsoc_region_data *regions_data; + /* + * Table of msi-x entries. This has to be separated from struct + * vsoc_region_data because the kernel deals with them as an array. + */ + struct msix_entry *msix_entries; + /* + * Flags that indicate what we've initialized. These are used to do an + * orderly cleanup of the device.
+ */ + char enabled_device; + char requested_regions; + char cdev_added; + char class_added; + char msix_enabled; + /* Mutex that protects the permission list */ + struct mutex mtx; + /* Major number assigned by the kernel */ + int major; + + struct cdev cdev; + struct class *class; +}; + +static struct vsoc_device vsoc_dev; + +/* + * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions. + */ + +struct fd_scoped_permission_node { + struct fd_scoped_permission permission; + struct list_head list; +}; + +struct vsoc_private_data { + struct fd_scoped_permission_node *fd_scoped_permission_node; +}; + +static long vsoc_ioctl(struct file *, unsigned int, unsigned long); +static int vsoc_mmap(struct file *, struct vm_area_struct *); +static int vsoc_open(struct inode *, struct file *); +static int vsoc_release(struct inode *, struct file *); +static ssize_t vsoc_read(struct file *, char *, size_t, loff_t *); +static ssize_t vsoc_write(struct file *, const char *, size_t, loff_t *); +static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin); +static int do_create_fd_scoped_permission( + struct vsoc_device_region *region_p, + struct fd_scoped_permission_node *np, + struct fd_scoped_permission_arg *__user arg); +static void do_destroy_fd_scoped_permission( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission *perm); +static long do_vsoc_describe_region(struct file *, + struct vsoc_device_region __user *); +static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off); + +/** + * Validate arguments on entry points to the driver. + */ +inline int vsoc_validate_inode(struct inode *inode) +{ + if (iminor(inode) >= vsoc_dev.layout->region_count) { + dev_err(&vsoc_dev.dev->dev, + "describe_region: invalid region %d\n", iminor(inode)); + return -ENODEV; + } + return 0; +} + +inline int vsoc_validate_filep(struct file *filp) +{ + int ret = vsoc_validate_inode(file_inode(filp)); + + if (ret) + return ret; + if (!filp->private_data) { + dev_err(&vsoc_dev.dev->dev, + "No private data on fd, region %d\n", + iminor(file_inode(filp))); + return -EBADFD; + } + return 0; +} + +/* Converts from shared memory offset to virtual address */ +static inline void *shm_off_to_virtual_addr(__u32 offset) +{ + return vsoc_dev.kernel_mapped_shm + offset; +} + +/* Converts from shared memory offset to physical address */ +static inline phys_addr_t shm_off_to_phys_addr(__u32 offset) +{ + return vsoc_dev.shm_phys_start + offset; +} + +/** + * Convenience functions to obtain the region from the inode or file. + * Dangerous to call before validating the inode/file.
+ */ +static inline struct vsoc_device_region *vsoc_region_from_inode( + struct inode *inode) +{ + return &vsoc_dev.regions[iminor(inode)]; +} + +static inline struct vsoc_device_region *vsoc_region_from_filep( + struct file *inode) +{ + return vsoc_region_from_inode(file_inode(inode)); +} + +static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r) +{ + return r->region_end_offset - r->region_begin_offset; +} + +static const struct file_operations vsoc_ops = { + .owner = THIS_MODULE, + .open = vsoc_open, + .mmap = vsoc_mmap, + .read = vsoc_read, + .unlocked_ioctl = vsoc_ioctl, + .compat_ioctl = vsoc_ioctl, + .write = vsoc_write, + .llseek = vsoc_lseek, + .release = vsoc_release, +}; + +static struct pci_device_id vsoc_id_table[] = { + {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, + {0}, +}; + +MODULE_DEVICE_TABLE(pci, vsoc_id_table); + +static void vsoc_remove_device(struct pci_dev *pdev); +static int vsoc_probe_device(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static struct pci_driver vsoc_pci_driver = { + .name = "vsoc", + .id_table = vsoc_id_table, + .probe = vsoc_probe_device, + .remove = vsoc_remove_device, +}; + +static int do_create_fd_scoped_permission( + struct vsoc_device_region *region_p, + struct fd_scoped_permission_node *np, + struct fd_scoped_permission_arg *__user arg) +{ + struct file *managed_filp; + s32 managed_fd; + atomic_t *owner_ptr = NULL; + struct vsoc_device_region *managed_region_p; + + if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) || + copy_from_user(&managed_fd, + &arg->managed_region_fd, sizeof(managed_fd))) { + return -EFAULT; + } + managed_filp = fdget(managed_fd).file; + /* Check that it's a valid fd, */ + if (!managed_filp || vsoc_validate_filep(managed_filp)) + return -EPERM; + /* EEXIST if the given fd already has a permission. 
*/ + if (((struct vsoc_private_data *)managed_filp->private_data)-> + fd_scoped_permission_node) + return -EEXIST; + managed_region_p = vsoc_region_from_filep(managed_filp); + /* Check that the provided region is managed by this one */ + if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p) + return -EPERM; + /* The area must be well formed and have non-zero size */ + if (np->permission.begin_offset >= np->permission.end_offset) + return -EINVAL; + /* The area must fit in the memory window */ + if (np->permission.end_offset > + vsoc_device_region_size(managed_region_p)) + return -ERANGE; + /* The area must be in the region data section */ + if (np->permission.begin_offset < + managed_region_p->offset_of_region_data) + return -ERANGE; + /* The area must be page aligned */ + if (!PAGE_ALIGNED(np->permission.begin_offset) || + !PAGE_ALIGNED(np->permission.end_offset)) + return -EINVAL; + /* Owner offset must be naturally aligned in the window */ + if (np->permission.owner_offset & + (sizeof(np->permission.owner_offset) - 1)) + return -EINVAL; + /* The owner flag must reside in the owner memory */ + if (np->permission.owner_offset + sizeof(np->permission.owner_offset) > + vsoc_device_region_size(region_p)) + return -ERANGE; + /* The owner flag must reside in the data section */ + if (np->permission.owner_offset < region_p->offset_of_region_data) + return -EINVAL; + /* The owner value must change to claim the memory */ + if (np->permission.owned_value == VSOC_REGION_FREE) + return -EINVAL; + owner_ptr = + (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset + + np->permission.owner_offset); + /* We've already verified that this is in the shared memory window, so + * it should be safe to write to this address. + */ + if (atomic_cmpxchg(owner_ptr, + VSOC_REGION_FREE, + np->permission.owned_value) != VSOC_REGION_FREE) { + return -EBUSY; + } + ((struct vsoc_private_data *)managed_filp->private_data)-> + fd_scoped_permission_node = np; + /* The file offset needs to be adjusted if the calling + * process did any read/write operations on the fd + * before creating the permission. + */ + if (managed_filp->f_pos) { + if (managed_filp->f_pos > np->permission.end_offset) { + /* If the offset is beyond the permission end, set it + * to the end. + */ + managed_filp->f_pos = np->permission.end_offset; + } else { + /* If the offset is within the permission interval + * keep it there otherwise reset it to zero. 
+ */ + if (managed_filp->f_pos < np->permission.begin_offset) { + managed_filp->f_pos = 0; + } else { + managed_filp->f_pos -= + np->permission.begin_offset; + } + } + } + return 0; +} + +static void do_destroy_fd_scoped_permission_node( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission_node *node) +{ + if (node) { + do_destroy_fd_scoped_permission(owner_region_p, + &node->permission); + mutex_lock(&vsoc_dev.mtx); + list_del(&node->list); + mutex_unlock(&vsoc_dev.mtx); + kfree(node); + } +} + +static void do_destroy_fd_scoped_permission( + struct vsoc_device_region *owner_region_p, + struct fd_scoped_permission *perm) +{ + atomic_t *owner_ptr = NULL; + int prev = 0; + + if (!perm) + return; + owner_ptr = (atomic_t *)shm_off_to_virtual_addr( + owner_region_p->region_begin_offset + perm->owner_offset); + prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE); + if (prev != perm->owned_value) + dev_err(&vsoc_dev.dev->dev, + "%x-%x: owner (%s) %x: expected to be %x was %x", + perm->begin_offset, perm->end_offset, + owner_region_p->device_name, perm->owner_offset, + perm->owned_value, prev); +} + +static long do_vsoc_describe_region(struct file *filp, + struct vsoc_device_region __user *dest) +{ + struct vsoc_device_region *region_p; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + region_p = vsoc_region_from_filep(filp); + if (copy_to_user(dest, region_p, sizeof(*region_p))) + return -EFAULT; + return 0; +} + +/** + * Implements the inner logic of cond_wait. Copies to and from userspace are + * done in the helper function below. + */ +static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg) +{ + DEFINE_WAIT(wait); + u32 region_number = iminor(file_inode(filp)); + struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; + struct hrtimer_sleeper timeout, *to = NULL; + int ret = 0; + struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); + atomic_t *address = NULL; + struct timespec ts; + + /* Ensure that the offset is aligned */ + if (arg->offset & (sizeof(uint32_t) - 1)) + return -EADDRNOTAVAIL; + /* Ensure that the offset is within shared memory */ + if (((uint64_t)arg->offset) + region_p->region_begin_offset + + sizeof(uint32_t) > region_p->region_end_offset) + return -E2BIG; + address = shm_off_to_virtual_addr(region_p->region_begin_offset + + arg->offset); + + /* Ensure that the type of wait is valid */ + switch (arg->wait_type) { + case VSOC_WAIT_IF_EQUAL: + break; + case VSOC_WAIT_IF_EQUAL_TIMEOUT: + to = &timeout; + break; + default: + return -EINVAL; + } + + if (to) { + /* Copy the user-supplied timesec into the kernel structure. + * We do things this way to flatten differences between 32 bit + * and 64 bit timespecs. + */ + ts.tv_sec = arg->wake_time_sec; + ts.tv_nsec = arg->wake_time_nsec; + + if (!timespec_valid(&ts)) + return -EINVAL; + hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts), + current->timer_slack_ns); + + hrtimer_init_sleeper(to, current); + } + + while (1) { + prepare_to_wait(&data->futex_wait_queue, &wait, + TASK_INTERRUPTIBLE); + /* + * Check the sentinel value after prepare_to_wait. If the value + * changes after this check the writer will call signal, + * changing the task state from INTERRUPTIBLE to RUNNING. That + * will ensure that schedule() will eventually schedule this + * task. 
+ */ + if (atomic_read(address) != arg->value) { + ret = 0; + break; + } + if (to) { + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); + if (likely(to->task)) + freezable_schedule(); + hrtimer_cancel(&to->timer); + if (!to->task) { + ret = -ETIMEDOUT; + break; + } + } else { + freezable_schedule(); + } + /* Count the number of times that we woke up. This is useful + * for unit testing. + */ + ++arg->wakes; + if (signal_pending(current)) { + ret = -EINTR; + break; + } + } + finish_wait(&data->futex_wait_queue, &wait); + if (to) + destroy_hrtimer_on_stack(&to->timer); + return ret; +} + +/** + * Handles the details of copying from/to userspace to ensure that the copies + * happen on all of the return paths of cond_wait. + */ +static int do_vsoc_cond_wait(struct file *filp, + struct vsoc_cond_wait __user *untrusted_in) +{ + struct vsoc_cond_wait arg; + int rval = 0; + + if (copy_from_user(&arg, untrusted_in, sizeof(arg))) + return -EFAULT; + /* wakes is an out parameter. Initialize it to something sensible. */ + arg.wakes = 0; + rval = handle_vsoc_cond_wait(filp, &arg); + if (copy_to_user(untrusted_in, &arg, sizeof(arg))) + return -EFAULT; + return rval; +} + +static int do_vsoc_cond_wake(struct file *filp, uint32_t offset) +{ + struct vsoc_device_region *region_p = vsoc_region_from_filep(filp); + u32 region_number = iminor(file_inode(filp)); + struct vsoc_region_data *data = vsoc_dev.regions_data + region_number; + /* Ensure that the offset is aligned */ + if (offset & (sizeof(uint32_t) - 1)) + return -EADDRNOTAVAIL; + /* Ensure that the offset is within shared memory */ + if (((uint64_t)offset) + region_p->region_begin_offset + + sizeof(uint32_t) > region_p->region_end_offset) + return -E2BIG; + /* + * TODO(b/73664181): Use multiple futex wait queues. + * We need to wake every sleeper when the condition changes. Typically + * only a single thread will be waiting on the condition, but there + * are exceptions. The worst case is about 10 threads. 
+ */ + wake_up_interruptible_all(&data->futex_wait_queue); + return 0; +} + +static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + int rv = 0; + struct vsoc_device_region *region_p; + u32 reg_num; + struct vsoc_region_data *reg_data; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + region_p = vsoc_region_from_filep(filp); + reg_num = iminor(file_inode(filp)); + reg_data = vsoc_dev.regions_data + reg_num; + switch (cmd) { + case VSOC_CREATE_FD_SCOPED_PERMISSION: + { + struct fd_scoped_permission_node *node = NULL; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + /* We can't allocate memory for the permission */ + if (!node) + return -ENOMEM; + INIT_LIST_HEAD(&node->list); + rv = do_create_fd_scoped_permission( + region_p, + node, + (struct fd_scoped_permission_arg __user *)arg); + if (!rv) { + mutex_lock(&vsoc_dev.mtx); + list_add(&node->list, &vsoc_dev.permissions); + mutex_unlock(&vsoc_dev.mtx); + } else { + kfree(node); + return rv; + } + } + break; + + case VSOC_GET_FD_SCOPED_PERMISSION: + { + struct fd_scoped_permission_node *node = + ((struct vsoc_private_data *)filp->private_data)-> + fd_scoped_permission_node; + if (!node) + return -ENOENT; + if (copy_to_user + ((struct fd_scoped_permission __user *)arg, + &node->permission, sizeof(node->permission))) + return -EFAULT; + } + break; + + case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST: + if (!atomic_xchg( + reg_data->outgoing_signalled, + 1)) { + writel(reg_num, vsoc_dev.regs + DOORBELL); + return 0; + } else { + return -EBUSY; + } + break; + + case VSOC_SEND_INTERRUPT_TO_HOST: + writel(reg_num, vsoc_dev.regs + DOORBELL); + return 0; + + case VSOC_WAIT_FOR_INCOMING_INTERRUPT: + wait_event_interruptible( + reg_data->interrupt_wait_queue, + (atomic_read(reg_data->incoming_signalled) != 0)); + break; + + case VSOC_DESCRIBE_REGION: + return do_vsoc_describe_region( + filp, + (struct vsoc_device_region __user *)arg); + + case VSOC_SELF_INTERRUPT: + atomic_set(reg_data->incoming_signalled, 1); + wake_up_interruptible(®_data->interrupt_wait_queue); + break; + + case VSOC_COND_WAIT: + return do_vsoc_cond_wait(filp, + (struct vsoc_cond_wait __user *)arg); + case VSOC_COND_WAKE: + return do_vsoc_cond_wake(filp, arg); + + default: + return -EINVAL; + } + return 0; +} + +static ssize_t vsoc_read(struct file *filp, char *buffer, size_t len, + loff_t *poffset) +{ + __u32 area_off; + void *area_p; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + area_p = shm_off_to_virtual_addr(area_off); + area_p += *poffset; + area_len -= *poffset; + if (area_len <= 0) + return 0; + if (area_len < len) + len = area_len; + if (copy_to_user(buffer, area_p, len)) + return -EFAULT; + *poffset += len; + return len; +} + +static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin) +{ + ssize_t area_len = 0; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, NULL); + switch (origin) { + case SEEK_SET: + break; + + case SEEK_CUR: + if (offset > 0 && offset + filp->f_pos < 0) + return -EOVERFLOW; + offset += filp->f_pos; + break; + + case SEEK_END: + if (offset > 0 && offset + area_len < 0) + return -EOVERFLOW; + offset += area_len; + break; + + case SEEK_DATA: + if (offset >= area_len) + return -EINVAL; + if (offset < 0) + offset = 0; + break; + + case SEEK_HOLE: + /* Next hole is always the end of the region, unless offset is + * beyond that + */ + if (offset < 
area_len) + offset = area_len; + break; + + default: + return -EINVAL; + } + + if (offset < 0 || offset > area_len) + return -EINVAL; + filp->f_pos = offset; + + return offset; +} + +static ssize_t vsoc_write(struct file *filp, const char *buffer, + size_t len, loff_t *poffset) +{ + __u32 area_off; + void *area_p; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + area_p = shm_off_to_virtual_addr(area_off); + area_p += *poffset; + area_len -= *poffset; + if (area_len <= 0) + return 0; + if (area_len < len) + len = area_len; + if (copy_from_user(area_p, buffer, len)) + return -EFAULT; + *poffset += len; + return len; +} + +static irqreturn_t vsoc_interrupt(int irq, void *region_data_v) +{ + struct vsoc_region_data *region_data = + (struct vsoc_region_data *)region_data_v; + int reg_num = region_data - vsoc_dev.regions_data; + + if (unlikely(!region_data)) + return IRQ_NONE; + + if (unlikely(reg_num < 0 || + reg_num >= vsoc_dev.layout->region_count)) { + dev_err(&vsoc_dev.dev->dev, + "invalid irq @%p reg_num=0x%04x\n", + region_data, reg_num); + return IRQ_NONE; + } + if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) { + dev_err(&vsoc_dev.dev->dev, + "irq not aligned @%p reg_num=0x%04x\n", + region_data, reg_num); + return IRQ_NONE; + } + wake_up_interruptible(®ion_data->interrupt_wait_queue); + return IRQ_HANDLED; +} + +static int vsoc_probe_device(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + int result; + int i; + resource_size_t reg_size; + dev_t devt; + + vsoc_dev.dev = pdev; + result = pci_enable_device(pdev); + if (result) { + dev_err(&pdev->dev, + "pci_enable_device failed %s: error %d\n", + pci_name(pdev), result); + return result; + } + vsoc_dev.enabled_device = 1; + result = pci_request_regions(pdev, "vsoc"); + if (result < 0) { + dev_err(&pdev->dev, "pci_request_regions failed\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.requested_regions = 1; + /* Set up the control registers in BAR 0 */ + reg_size = pci_resource_len(pdev, REGISTER_BAR); + if (reg_size > MAX_REGISTER_BAR_LEN) + vsoc_dev.regs = + pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN); + else + vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size); + + if (!vsoc_dev.regs) { + dev_err(&pdev->dev, + "cannot ioremap registers of size %zu\n", + (size_t)reg_size); + vsoc_remove_device(pdev); + return -EBUSY; + } + + /* Map the shared memory in BAR 2 */ + vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR); + vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR); + + dev_info(&pdev->dev, "shared memory @ DMA %p size=0x%zx\n", + (void *)vsoc_dev.shm_phys_start, vsoc_dev.shm_size); + /* TODO(ghartman): ioremap_wc should work here */ + vsoc_dev.kernel_mapped_shm = ioremap_nocache( + vsoc_dev.shm_phys_start, vsoc_dev.shm_size); + if (!vsoc_dev.kernel_mapped_shm) { + dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + + vsoc_dev.layout = + (struct vsoc_shm_layout_descriptor *)vsoc_dev.kernel_mapped_shm; + dev_info(&pdev->dev, "major_version: %d\n", + vsoc_dev.layout->major_version); + dev_info(&pdev->dev, "minor_version: %d\n", + vsoc_dev.layout->minor_version); + dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size); + dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count); + if (vsoc_dev.layout->major_version != + CURRENT_VSOC_LAYOUT_MAJOR_VERSION) { + dev_err(&vsoc_dev.dev->dev, + "driver 
supports only major_version %d\n", + CURRENT_VSOC_LAYOUT_MAJOR_VERSION); + vsoc_remove_device(pdev); + return -EBUSY; + } + result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count, + VSOC_DEV_NAME); + if (result) { + dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.major = MAJOR(devt); + cdev_init(&vsoc_dev.cdev, &vsoc_ops); + vsoc_dev.cdev.owner = THIS_MODULE; + result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count); + if (result) { + dev_err(&vsoc_dev.dev->dev, "cdev_add error\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.cdev_added = 1; + vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME); + if (IS_ERR(vsoc_dev.class)) { + dev_err(&vsoc_dev.dev->dev, "class_create failed\n"); + vsoc_remove_device(pdev); + return PTR_ERR(vsoc_dev.class); + } + vsoc_dev.class_added = 1; + vsoc_dev.regions = (struct vsoc_device_region *) + (vsoc_dev.kernel_mapped_shm + + vsoc_dev.layout->vsoc_region_desc_offset); + vsoc_dev.msix_entries = kcalloc( + vsoc_dev.layout->region_count, + sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL); + if (!vsoc_dev.msix_entries) { + dev_err(&vsoc_dev.dev->dev, + "unable to allocate msix_entries\n"); + vsoc_remove_device(pdev); + return -ENOSPC; + } + vsoc_dev.regions_data = kcalloc( + vsoc_dev.layout->region_count, + sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL); + if (!vsoc_dev.regions_data) { + dev_err(&vsoc_dev.dev->dev, + "unable to allocate regions' data\n"); + vsoc_remove_device(pdev); + return -ENOSPC; + } + for (i = 0; i < vsoc_dev.layout->region_count; ++i) + vsoc_dev.msix_entries[i].entry = i; + + result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries, + vsoc_dev.layout->region_count); + if (result) { + dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result); + vsoc_remove_device(pdev); + return -ENOSPC; + } + /* Check that all regions are well formed */ + for (i = 0; i < vsoc_dev.layout->region_count; ++i) { + const struct vsoc_device_region *region = vsoc_dev.regions + i; + + if (!PAGE_ALIGNED(region->region_begin_offset) || + !PAGE_ALIGNED(region->region_end_offset)) { + dev_err(&vsoc_dev.dev->dev, + "region %d not aligned (%x:%x)", i, + region->region_begin_offset, + region->region_end_offset); + vsoc_remove_device(pdev); + return -EFAULT; + } + if (region->region_begin_offset >= region->region_end_offset || + region->region_end_offset > vsoc_dev.shm_size) { + dev_err(&vsoc_dev.dev->dev, + "region %d offsets are wrong: %x %x %zx", + i, region->region_begin_offset, + region->region_end_offset, vsoc_dev.shm_size); + vsoc_remove_device(pdev); + return -EFAULT; + } + if (region->managed_by >= vsoc_dev.layout->region_count) { + dev_err(&vsoc_dev.dev->dev, + "region %d has invalid owner: %u", + i, region->managed_by); + vsoc_remove_device(pdev); + return -EFAULT; + } + } + vsoc_dev.msix_enabled = 1; + for (i = 0; i < vsoc_dev.layout->region_count; ++i) { + const struct vsoc_device_region *region = vsoc_dev.regions + i; + size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1; + const struct vsoc_signal_table_layout *h_to_g_signal_table = + ®ion->host_to_guest_signal_table; + const struct vsoc_signal_table_layout *g_to_h_signal_table = + ®ion->guest_to_host_signal_table; + + vsoc_dev.regions_data[i].name[name_sz] = '\0'; + memcpy(vsoc_dev.regions_data[i].name, region->device_name, + name_sz); + dev_info(&pdev->dev, "region %d name=%s\n", + i, vsoc_dev.regions_data[i].name); + init_waitqueue_head( + 
&vsoc_dev.regions_data[i].interrupt_wait_queue); + init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue); + vsoc_dev.regions_data[i].incoming_signalled = + vsoc_dev.kernel_mapped_shm + + region->region_begin_offset + + h_to_g_signal_table->interrupt_signalled_offset; + vsoc_dev.regions_data[i].outgoing_signalled = + vsoc_dev.kernel_mapped_shm + + region->region_begin_offset + + g_to_h_signal_table->interrupt_signalled_offset; + + result = request_irq( + vsoc_dev.msix_entries[i].vector, + vsoc_interrupt, 0, + vsoc_dev.regions_data[i].name, + vsoc_dev.regions_data + i); + if (result) { + dev_info(&pdev->dev, + "request_irq failed irq=%d vector=%d\n", + i, vsoc_dev.msix_entries[i].vector); + vsoc_remove_device(pdev); + return -ENOSPC; + } + vsoc_dev.regions_data[i].irq_requested = 1; + if (!device_create(vsoc_dev.class, NULL, + MKDEV(vsoc_dev.major, i), + NULL, vsoc_dev.regions_data[i].name)) { + dev_err(&vsoc_dev.dev->dev, "device_create failed\n"); + vsoc_remove_device(pdev); + return -EBUSY; + } + vsoc_dev.regions_data[i].device_created = 1; + } + return 0; +} + +/* + * This should undo all of the allocations in the probe function in reverse + * order. + * + * Notes: + * + * The device may have been partially initialized, so double check + * that the allocations happened. + * + * This function may be called multiple times, so mark resources as freed + * as they are deallocated. + */ +static void vsoc_remove_device(struct pci_dev *pdev) +{ + int i; + /* + * pdev is the first thing to be set on probe and the last thing + * to be cleared here. If it's NULL then there is no cleanup. + */ + if (!pdev || !vsoc_dev.dev) + return; + dev_info(&pdev->dev, "remove_device\n"); + if (vsoc_dev.regions_data) { + for (i = 0; i < vsoc_dev.layout->region_count; ++i) { + if (vsoc_dev.regions_data[i].device_created) { + device_destroy(vsoc_dev.class, + MKDEV(vsoc_dev.major, i)); + vsoc_dev.regions_data[i].device_created = 0; + } + if (vsoc_dev.regions_data[i].irq_requested) + free_irq(vsoc_dev.msix_entries[i].vector, NULL); + vsoc_dev.regions_data[i].irq_requested = 0; + } + kfree(vsoc_dev.regions_data); + vsoc_dev.regions_data = 0; + } + if (vsoc_dev.msix_enabled) { + pci_disable_msix(pdev); + vsoc_dev.msix_enabled = 0; + } + kfree(vsoc_dev.msix_entries); + vsoc_dev.msix_entries = 0; + vsoc_dev.regions = 0; + if (vsoc_dev.class_added) { + class_destroy(vsoc_dev.class); + vsoc_dev.class_added = 0; + } + if (vsoc_dev.cdev_added) { + cdev_del(&vsoc_dev.cdev); + vsoc_dev.cdev_added = 0; + } + if (vsoc_dev.major && vsoc_dev.layout) { + unregister_chrdev_region(MKDEV(vsoc_dev.major, 0), + vsoc_dev.layout->region_count); + vsoc_dev.major = 0; + } + vsoc_dev.layout = 0; + if (vsoc_dev.kernel_mapped_shm) { + pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm); + vsoc_dev.kernel_mapped_shm = 0; + } + if (vsoc_dev.regs) { + pci_iounmap(pdev, vsoc_dev.regs); + vsoc_dev.regs = 0; + } + if (vsoc_dev.requested_regions) { + pci_release_regions(pdev); + vsoc_dev.requested_regions = 0; + } + if (vsoc_dev.enabled_device) { + pci_disable_device(pdev); + vsoc_dev.enabled_device = 0; + } + /* Do this last: it indicates that the device is not initialized. 
*/ + vsoc_dev.dev = NULL; +} + +static void __exit vsoc_cleanup_module(void) +{ + vsoc_remove_device(vsoc_dev.dev); + pci_unregister_driver(&vsoc_pci_driver); +} + +static int __init vsoc_init_module(void) +{ + int err = -ENOMEM; + + INIT_LIST_HEAD(&vsoc_dev.permissions); + mutex_init(&vsoc_dev.mtx); + + err = pci_register_driver(&vsoc_pci_driver); + if (err < 0) + return err; + return 0; +} + +static int vsoc_open(struct inode *inode, struct file *filp) +{ + /* Can't use vsoc_validate_filep because filp is still incomplete */ + int ret = vsoc_validate_inode(inode); + + if (ret) + return ret; + filp->private_data = + kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL); + if (!filp->private_data) + return -ENOMEM; + return 0; +} + +static int vsoc_release(struct inode *inode, struct file *filp) +{ + struct vsoc_private_data *private_data = NULL; + struct fd_scoped_permission_node *node = NULL; + struct vsoc_device_region *owner_region_p = NULL; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + private_data = (struct vsoc_private_data *)filp->private_data; + if (!private_data) + return 0; + + node = private_data->fd_scoped_permission_node; + if (node) { + owner_region_p = vsoc_region_from_inode(inode); + if (owner_region_p->managed_by != VSOC_REGION_WHOLE) { + owner_region_p = + &vsoc_dev.regions[owner_region_p->managed_by]; + } + do_destroy_fd_scoped_permission_node(owner_region_p, node); + private_data->fd_scoped_permission_node = NULL; + } + kfree(private_data); + filp->private_data = NULL; + + return 0; +} + +/* + * Returns the device relative offset and length of the area specified by the + * fd scoped permission. If there is no fd scoped permission set, a default + * permission covering the entire region is assumed, unless the region is owned + * by another one, in which case the default is a permission with zero size. + */ +static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset) +{ + __u32 off = 0; + ssize_t length = 0; + struct vsoc_device_region *region_p; + struct fd_scoped_permission *perm; + + region_p = vsoc_region_from_filep(filp); + off = region_p->region_begin_offset; + perm = &((struct vsoc_private_data *)filp->private_data)-> + fd_scoped_permission_node->permission; + if (perm) { + off += perm->begin_offset; + length = perm->end_offset - perm->begin_offset; + } else if (region_p->managed_by == VSOC_REGION_WHOLE) { + /* No permission set and the regions is not owned by another, + * default to full region access. + */ + length = vsoc_device_region_size(region_p); + } else { + /* return zero length, access is denied. 
*/ + length = 0; + } + if (area_offset) + *area_offset = off; + return length; +} + +static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long len = vma->vm_end - vma->vm_start; + __u32 area_off; + phys_addr_t mem_off; + ssize_t area_len; + int retval = vsoc_validate_filep(filp); + + if (retval) + return retval; + area_len = vsoc_get_area(filp, &area_off); + /* Add the requested offset */ + area_off += (vma->vm_pgoff << PAGE_SHIFT); + area_len -= (vma->vm_pgoff << PAGE_SHIFT); + if (area_len < len) + return -EINVAL; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + mem_off = shm_off_to_phys_addr(area_off); + if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT, + len, vma->vm_page_prot)) + return -EAGAIN; + return 0; +} + +module_init(vsoc_init_module); +module_exit(vsoc_cleanup_module); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Greg Hartman <ghartman@google.com>"); +MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device"); +MODULE_VERSION("1.0"); diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c index b63dd2ef78b5..1f398d06f4ee 100644 --- a/drivers/staging/comedi/drivers.c +++ b/drivers/staging/comedi/drivers.c @@ -484,8 +484,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s, struct comedi_cmd *cmd = &async->cmd; if (cmd->stop_src == TRIG_COUNT) { - unsigned int nscans = nsamples / cmd->scan_end_arg; - unsigned int scans_left = __comedi_nscans_left(s, nscans); + unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg); unsigned int scan_pos = comedi_bytes_to_samples(s, async->scan_progress); unsigned long long samples_left = 0; diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index c975f6e8be49..8f181caffca3 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -1348,6 +1348,8 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status) ack |= NISTC_INTA_ACK_AI_START; if (a_status & NISTC_AI_STATUS1_STOP) ack |= NISTC_INTA_ACK_AI_STOP; + if (a_status & NISTC_AI_STATUS1_OVER) + ack |= NISTC_INTA_ACK_AI_ERR; if (ack) ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG); } diff --git a/drivers/staging/goldfish/Kconfig b/drivers/staging/goldfish/Kconfig index c579141a7bed..c8871d0c0776 100644 --- a/drivers/staging/goldfish/Kconfig +++ b/drivers/staging/goldfish/Kconfig @@ -10,10 +10,3 @@ config GOLDFISH_SYNC ---help--- Emulated sync fences for the Goldfish Android Virtual Device -config MTD_GOLDFISH_NAND - tristate "Goldfish NAND device" - depends on GOLDFISH - depends on MTD - help - Drives the emulated NAND flash device on the Google Goldfish - Android virtual device. diff --git a/drivers/staging/goldfish/Makefile b/drivers/staging/goldfish/Makefile index 0cf525588210..30db49141814 100644 --- a/drivers/staging/goldfish/Makefile +++ b/drivers/staging/goldfish/Makefile @@ -3,7 +3,6 @@ # obj-$(CONFIG_GOLDFISH_AUDIO) += goldfish_audio.o -obj-$(CONFIG_MTD_GOLDFISH_NAND) += goldfish_nand.o # and sync diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c deleted file mode 100644 index 623353db5a08..000000000000 --- a/drivers/staging/goldfish/goldfish_nand.c +++ /dev/null @@ -1,442 +0,0 @@ -/* - * drivers/mtd/devices/goldfish_nand.c - * - * Copyright (C) 2007 Google, Inc. - * Copyright (C) 2012 Intel, Inc. - * Copyright (C) 2013 Intel, Inc. 
- * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include <linux/io.h> -#include <linux/device.h> -#include <linux/module.h> -#include <linux/slab.h> -#include <linux/ioport.h> -#include <linux/vmalloc.h> -#include <linux/mtd/mtd.h> -#include <linux/platform_device.h> -#include <linux/mutex.h> -#include <linux/goldfish.h> -#include <asm/div64.h> - -#include "goldfish_nand_reg.h" - -struct goldfish_nand { - /* lock protects access to the device registers */ - struct mutex lock; - unsigned char __iomem *base; - struct cmd_params *cmd_params; - size_t mtd_count; - struct mtd_info mtd[0]; -}; - -static u32 goldfish_nand_cmd_with_params(struct mtd_info *mtd, - enum nand_cmd cmd, u64 addr, u32 len, - void *ptr, u32 *rv) -{ - u32 cmdp; - struct goldfish_nand *nand = mtd->priv; - struct cmd_params *cps = nand->cmd_params; - unsigned char __iomem *base = nand->base; - - if (!cps) - return -1; - - switch (cmd) { - case NAND_CMD_ERASE: - cmdp = NAND_CMD_ERASE_WITH_PARAMS; - break; - case NAND_CMD_READ: - cmdp = NAND_CMD_READ_WITH_PARAMS; - break; - case NAND_CMD_WRITE: - cmdp = NAND_CMD_WRITE_WITH_PARAMS; - break; - default: - return -1; - } - cps->dev = mtd - nand->mtd; - cps->addr_high = (u32)(addr >> 32); - cps->addr_low = (u32)addr; - cps->transfer_size = len; - cps->data = (unsigned long)ptr; - writel(cmdp, base + NAND_COMMAND); - *rv = cps->result; - return 0; -} - -static u32 goldfish_nand_cmd(struct mtd_info *mtd, enum nand_cmd cmd, - u64 addr, u32 len, void *ptr) -{ - struct goldfish_nand *nand = mtd->priv; - u32 rv; - unsigned char __iomem *base = nand->base; - - mutex_lock(&nand->lock); - if (goldfish_nand_cmd_with_params(mtd, cmd, addr, len, ptr, &rv)) { - writel(mtd - nand->mtd, base + NAND_DEV); - writel((u32)(addr >> 32), base + NAND_ADDR_HIGH); - writel((u32)addr, base + NAND_ADDR_LOW); - writel(len, base + NAND_TRANSFER_SIZE); - gf_write_ptr(ptr, base + NAND_DATA, base + NAND_DATA_HIGH); - writel(cmd, base + NAND_COMMAND); - rv = readl(base + NAND_RESULT); - } - mutex_unlock(&nand->lock); - return rv; -} - -static int goldfish_nand_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - loff_t ofs = instr->addr; - u32 len = instr->len; - u32 rem; - - if (ofs + len > mtd->size) - goto invalid_arg; - rem = do_div(ofs, mtd->writesize); - if (rem) - goto invalid_arg; - ofs *= (mtd->writesize + mtd->oobsize); - - if (len % mtd->writesize) - goto invalid_arg; - len = len / mtd->writesize * (mtd->writesize + mtd->oobsize); - - if (goldfish_nand_cmd(mtd, NAND_CMD_ERASE, ofs, len, NULL) != len) { - pr_err("goldfish_nand_erase: erase failed, start %llx, len %x, dev_size %llx, erase_size %x\n", - ofs, len, mtd->size, mtd->erasesize); - return -EIO; - } - - instr->state = MTD_ERASE_DONE; - mtd_erase_callback(instr); - - return 0; - -invalid_arg: - pr_err("goldfish_nand_erase: invalid erase, start %llx, len %x, dev_size %llx, erase_size %x\n", - ofs, len, mtd->size, mtd->erasesize); - return -EINVAL; -} - -static int goldfish_nand_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - u32 rem; - - if (ofs + ops->len > mtd->size) - goto invalid_arg; - if 
(ops->datbuf && ops->len && ops->len != mtd->writesize) - goto invalid_arg; - if (ops->ooblen + ops->ooboffs > mtd->oobsize) - goto invalid_arg; - - rem = do_div(ofs, mtd->writesize); - if (rem) - goto invalid_arg; - ofs *= (mtd->writesize + mtd->oobsize); - - if (ops->datbuf) - ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs, - ops->len, ops->datbuf); - ofs += mtd->writesize + ops->ooboffs; - if (ops->oobbuf) - ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, ofs, - ops->ooblen, ops->oobbuf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_read_oob: invalid read, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", - ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - u32 rem; - - if (ofs + ops->len > mtd->size) - goto invalid_arg; - if (ops->len && ops->len != mtd->writesize) - goto invalid_arg; - if (ops->ooblen + ops->ooboffs > mtd->oobsize) - goto invalid_arg; - - rem = do_div(ofs, mtd->writesize); - if (rem) - goto invalid_arg; - ofs *= (mtd->writesize + mtd->oobsize); - - if (ops->datbuf) - ops->retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs, - ops->len, ops->datbuf); - ofs += mtd->writesize + ops->ooboffs; - if (ops->oobbuf) - ops->oobretlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, ofs, - ops->ooblen, ops->oobbuf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_write_oob: invalid write, start %llx, len %zx, ooblen %zx, dev_size %llx, write_size %x\n", - ofs, ops->len, ops->ooblen, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - u32 rem; - - if (from + len > mtd->size) - goto invalid_arg; - - rem = do_div(from, mtd->writesize); - if (rem) - goto invalid_arg; - from *= (mtd->writesize + mtd->oobsize); - - *retlen = goldfish_nand_cmd(mtd, NAND_CMD_READ, from, len, buf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_read: invalid read, start %llx, len %zx, dev_size %llx, write_size %x\n", - from, len, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - u32 rem; - - if (to + len > mtd->size) - goto invalid_arg; - - rem = do_div(to, mtd->writesize); - if (rem) - goto invalid_arg; - to *= (mtd->writesize + mtd->oobsize); - - *retlen = goldfish_nand_cmd(mtd, NAND_CMD_WRITE, to, len, (void *)buf); - return 0; - -invalid_arg: - pr_err("goldfish_nand_write: invalid write, start %llx, len %zx, dev_size %llx, write_size %x\n", - to, len, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_block_isbad(struct mtd_info *mtd, loff_t ofs) -{ - u32 rem; - - if (ofs >= mtd->size) - goto invalid_arg; - - rem = do_div(ofs, mtd->erasesize); - if (rem) - goto invalid_arg; - ofs *= mtd->erasesize / mtd->writesize; - ofs *= (mtd->writesize + mtd->oobsize); - - return goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_GET, ofs, 0, NULL); - -invalid_arg: - pr_err("goldfish_nand_block_isbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n", - ofs, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int goldfish_nand_block_markbad(struct mtd_info *mtd, loff_t ofs) -{ - u32 rem; - - if (ofs >= mtd->size) - goto invalid_arg; - - rem = do_div(ofs, mtd->erasesize); - if (rem) - goto invalid_arg; - ofs *= mtd->erasesize / mtd->writesize; - ofs *= (mtd->writesize + mtd->oobsize); - - 
if (goldfish_nand_cmd(mtd, NAND_CMD_BLOCK_BAD_SET, ofs, 0, NULL) != 1) - return -EIO; - return 0; - -invalid_arg: - pr_err("goldfish_nand_block_markbad: invalid arg, ofs %llx, dev_size %llx, write_size %x\n", - ofs, mtd->size, mtd->writesize); - return -EINVAL; -} - -static int nand_setup_cmd_params(struct platform_device *pdev, - struct goldfish_nand *nand) -{ - u64 paddr; - unsigned char __iomem *base = nand->base; - - nand->cmd_params = devm_kzalloc(&pdev->dev, - sizeof(struct cmd_params), GFP_KERNEL); - if (!nand->cmd_params) - return -1; - - paddr = __pa(nand->cmd_params); - writel((u32)(paddr >> 32), base + NAND_CMD_PARAMS_ADDR_HIGH); - writel((u32)paddr, base + NAND_CMD_PARAMS_ADDR_LOW); - return 0; -} - -static int goldfish_nand_init_device(struct platform_device *pdev, - struct goldfish_nand *nand, int id) -{ - u32 name_len; - u32 result; - u32 flags; - unsigned char __iomem *base = nand->base; - struct mtd_info *mtd = &nand->mtd[id]; - char *name; - - mutex_lock(&nand->lock); - writel(id, base + NAND_DEV); - flags = readl(base + NAND_DEV_FLAGS); - name_len = readl(base + NAND_DEV_NAME_LEN); - mtd->writesize = readl(base + NAND_DEV_PAGE_SIZE); - mtd->size = readl(base + NAND_DEV_SIZE_LOW); - mtd->size |= (u64)readl(base + NAND_DEV_SIZE_HIGH) << 32; - mtd->oobsize = readl(base + NAND_DEV_EXTRA_SIZE); - mtd->oobavail = mtd->oobsize; - mtd->erasesize = readl(base + NAND_DEV_ERASE_SIZE) / - (mtd->writesize + mtd->oobsize) * mtd->writesize; - do_div(mtd->size, mtd->writesize + mtd->oobsize); - mtd->size *= mtd->writesize; - dev_dbg(&pdev->dev, - "goldfish nand dev%d: size %llx, page %d, extra %d, erase %d\n", - id, mtd->size, mtd->writesize, - mtd->oobsize, mtd->erasesize); - mutex_unlock(&nand->lock); - - mtd->priv = nand; - - name = devm_kzalloc(&pdev->dev, name_len + 1, GFP_KERNEL); - if (!name) - return -ENOMEM; - mtd->name = name; - - result = goldfish_nand_cmd(mtd, NAND_CMD_GET_DEV_NAME, 0, name_len, - name); - if (result != name_len) { - dev_err(&pdev->dev, - "goldfish_nand_init_device failed to get dev name %d != %d\n", - result, name_len); - return -ENODEV; - } - ((char *)mtd->name)[name_len] = '\0'; - - /* Setup the MTD structure */ - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - if (flags & NAND_DEV_FLAG_READ_ONLY) - mtd->flags &= ~MTD_WRITEABLE; - if (flags & NAND_DEV_FLAG_CMD_PARAMS_CAP) - nand_setup_cmd_params(pdev, nand); - - mtd->owner = THIS_MODULE; - mtd->_erase = goldfish_nand_erase; - mtd->_read = goldfish_nand_read; - mtd->_write = goldfish_nand_write; - mtd->_read_oob = goldfish_nand_read_oob; - mtd->_write_oob = goldfish_nand_write_oob; - mtd->_block_isbad = goldfish_nand_block_isbad; - mtd->_block_markbad = goldfish_nand_block_markbad; - - if (mtd_device_register(mtd, NULL, 0)) - return -EIO; - - return 0; -} - -static int goldfish_nand_probe(struct platform_device *pdev) -{ - u32 num_dev; - int i; - int err; - u32 num_dev_working; - u32 version; - struct resource *r; - struct goldfish_nand *nand; - unsigned char __iomem *base; - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) - return -ENODEV; - - base = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE); - if (!base) - return -ENOMEM; - - version = readl(base + NAND_VERSION); - if (version != NAND_VERSION_CURRENT) { - dev_err(&pdev->dev, - "goldfish_nand_init: version mismatch, got %d, expected %d\n", - version, NAND_VERSION_CURRENT); - return -ENODEV; - } - num_dev = readl(base + NAND_NUM_DEV); - if (num_dev == 0) - return -ENODEV; - - nand = devm_kzalloc(&pdev->dev, sizeof(*nand) 
+ - sizeof(struct mtd_info) * num_dev, GFP_KERNEL); - if (!nand) - return -ENOMEM; - - mutex_init(&nand->lock); - nand->base = base; - nand->mtd_count = num_dev; - platform_set_drvdata(pdev, nand); - - num_dev_working = 0; - for (i = 0; i < num_dev; i++) { - err = goldfish_nand_init_device(pdev, nand, i); - if (err == 0) - num_dev_working++; - } - if (num_dev_working == 0) - return -ENODEV; - return 0; -} - -static int goldfish_nand_remove(struct platform_device *pdev) -{ - struct goldfish_nand *nand = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < nand->mtd_count; i++) { - if (nand->mtd[i].name) - mtd_device_unregister(&nand->mtd[i]); - } - return 0; -} - -static struct platform_driver goldfish_nand_driver = { - .probe = goldfish_nand_probe, - .remove = goldfish_nand_remove, - .driver = { - .name = "goldfish_nand" - } -}; - -module_platform_driver(goldfish_nand_driver); -MODULE_LICENSE("GPL"); diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h deleted file mode 100644 index 43aeba3a4c8f..000000000000 --- a/drivers/staging/goldfish/goldfish_nand_reg.h +++ /dev/null @@ -1,76 +0,0 @@ -/* - * drivers/mtd/devices/goldfish_nand_reg.h - * - * Copyright (C) 2007 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef GOLDFISH_NAND_REG_H -#define GOLDFISH_NAND_REG_H - -enum nand_cmd { - /* Write device name for NAND_DEV to NAND_DATA (vaddr) */ - NAND_CMD_GET_DEV_NAME, - NAND_CMD_READ, - NAND_CMD_WRITE, - NAND_CMD_ERASE, - /* NAND_RESULT is 1 if block is bad, 0 if it is not */ - NAND_CMD_BLOCK_BAD_GET, - NAND_CMD_BLOCK_BAD_SET, - NAND_CMD_READ_WITH_PARAMS, - NAND_CMD_WRITE_WITH_PARAMS, - NAND_CMD_ERASE_WITH_PARAMS -}; - -enum nand_dev_flags { - NAND_DEV_FLAG_READ_ONLY = 0x00000001, - NAND_DEV_FLAG_CMD_PARAMS_CAP = 0x00000002, -}; - -#define NAND_VERSION_CURRENT (1) - -enum nand_reg { - /* Global */ - NAND_VERSION = 0x000, - NAND_NUM_DEV = 0x004, - NAND_DEV = 0x008, - - /* Dev info */ - NAND_DEV_FLAGS = 0x010, - NAND_DEV_NAME_LEN = 0x014, - NAND_DEV_PAGE_SIZE = 0x018, - NAND_DEV_EXTRA_SIZE = 0x01c, - NAND_DEV_ERASE_SIZE = 0x020, - NAND_DEV_SIZE_LOW = 0x028, - NAND_DEV_SIZE_HIGH = 0x02c, - - /* Command */ - NAND_RESULT = 0x040, - NAND_COMMAND = 0x044, - NAND_DATA = 0x048, - NAND_DATA_HIGH = 0x100, - NAND_TRANSFER_SIZE = 0x04c, - NAND_ADDR_LOW = 0x050, - NAND_ADDR_HIGH = 0x054, - NAND_CMD_PARAMS_ADDR_LOW = 0x058, - NAND_CMD_PARAMS_ADDR_HIGH = 0x05c, -}; - -struct cmd_params { - u32 dev; - u32 addr_low; - u32 addr_high; - u32 transfer_size; - unsigned long data; - u32 result; -}; -#endif diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c index 39f5261c9854..5cf5b7334089 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c @@ -824,7 +824,7 @@ void sptlrpc_request_out_callback(struct ptlrpc_request *req) if (req->rq_pool || !req->rq_reqbuf) return; - kfree(req->rq_reqbuf); + kvfree(req->rq_reqbuf); req->rq_reqbuf = NULL; req->rq_reqbuf_len = 0; } diff --git a/drivers/staging/speakup/kobjects.c 
b/drivers/staging/speakup/kobjects.c index fdfeb42b2b8f..06ef26872462 100644 --- a/drivers/staging/speakup/kobjects.c +++ b/drivers/staging/speakup/kobjects.c @@ -831,7 +831,9 @@ static ssize_t message_show(struct kobject *kobj, struct msg_group_t *group = spk_find_msg_group(attr->attr.name); unsigned long flags; - BUG_ON(!group); + if (WARN_ON(!group)) + return -EINVAL; + spin_lock_irqsave(&speakup_info.spinlock, flags); retval = message_show_helper(buf, group->start, group->end); spin_unlock_irqrestore(&speakup_info.spinlock, flags); @@ -843,7 +845,9 @@ static ssize_t message_store(struct kobject *kobj, struct kobj_attribute *attr, { struct msg_group_t *group = spk_find_msg_group(attr->attr.name); - BUG_ON(!group); + if (WARN_ON(!group)) + return -EINVAL; + return message_store_helper(buf, count, group); } diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c index c119f20dfd44..3f2ccf9d7358 100644 --- a/drivers/staging/unisys/visorhba/visorhba_main.c +++ b/drivers/staging/unisys/visorhba/visorhba_main.c @@ -792,7 +792,7 @@ static void do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) { struct scsi_device *scsidev; - unsigned char buf[36]; + unsigned char *buf; struct scatterlist *sg; unsigned int i; char *this_page; @@ -807,6 +807,10 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) if (cmdrsp->scsi.no_disk_result == 0) return; + buf = kzalloc(sizeof(char) * 36, GFP_KERNEL); + if (!buf) + return; + /* Linux scsi code wants a device at Lun 0 * to issue report luns, but we don't want * a disk there so we'll present a processor @@ -820,6 +824,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) if (scsi_sg_count(scsicmd) == 0) { memcpy(scsi_sglist(scsicmd), buf, cmdrsp->scsi.bufflen); + kfree(buf); return; } @@ -831,6 +836,7 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd) memcpy(this_page, buf + bufind, sg[i].length); kunmap_atomic(this_page_orig); } + kfree(buf); } else { devdata = (struct visorhba_devdata *)scsidev->host->hostdata; for_each_vdisk_match(vdisk, devdata, scsidev) { diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c index dbbe72c7e255..f78353ddeea5 100644 --- a/drivers/staging/wilc1000/host_interface.c +++ b/drivers/staging/wilc1000/host_interface.c @@ -2179,6 +2179,8 @@ static s32 Handle_Get_InActiveTime(struct host_if_drv *hif_drv, wid.type = WID_STR; wid.size = ETH_ALEN; wid.val = kmalloc(wid.size, GFP_KERNEL); + if (!wid.val) + return -ENOMEM; stamac = wid.val; memcpy(stamac, strHostIfStaInactiveT->mac, ETH_ALEN); diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c index 450af1b77f99..b2092c5ec7f3 100644 --- a/drivers/staging/wilc1000/linux_mon.c +++ b/drivers/staging/wilc1000/linux_mon.c @@ -251,6 +251,8 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb, if (skb->data[0] == 0xc0 && (!(memcmp(broadcast, &skb->data[4], 6)))) { skb2 = dev_alloc_skb(skb->len + sizeof(struct wilc_wfi_radiotap_cb_hdr)); + if (!skb2) + return -ENOMEM; memcpy(skb_put(skb2, skb->len), skb->data, skb->len); diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c index 013a6240f193..c1ad0aea23b9 100644 --- a/drivers/staging/wlan-ng/prism2mgmt.c +++ b/drivers/staging/wlan-ng/prism2mgmt.c @@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp) hw->ident_sta_fw.variant) > HFA384x_FIRMWARE_VERSION(1, 
5, 0)) { if (msg->scantype.data != P80211ENUM_scantype_active) - word = cpu_to_le16(msg->maxchanneltime.data); + word = msg->maxchanneltime.data; else word = 0; diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 2e35db7f4aac..c15af2fcf2ba 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -276,12 +276,11 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, else ret = vfs_iter_read(fd, &iter, &pos); - kfree(bvec); - if (is_write) { if (ret < 0 || ret != data_length) { pr_err("%s() write returned %d\n", __func__, ret); - return (ret < 0 ? ret : -EINVAL); + if (ret >= 0) + ret = -EINVAL; } } else { /* @@ -294,17 +293,29 @@ static int fd_do_rw(struct se_cmd *cmd, struct file *fd, pr_err("%s() returned %d, expecting %u for " "S_ISBLK\n", __func__, ret, data_length); - return (ret < 0 ? ret : -EINVAL); + if (ret >= 0) + ret = -EINVAL; } } else { if (ret < 0) { pr_err("%s() returned %d for non S_ISBLK\n", __func__, ret); - return ret; + } else if (ret != data_length) { + /* + * Short read case: + * Probably some one truncate file under us. + * We must explicitly zero sg-pages to prevent + * expose uninizialized pages to userspace. + */ + if (ret < data_length) + ret += iov_iter_zero(data_length - ret, &iter); + else + ret = -EINVAL; } } } - return 1; + kfree(bvec); + return ret; } static sense_reason_t diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c index 5f955af4671a..db49c33264d5 100644 --- a/drivers/thermal/msm-tsens.c +++ b/drivers/thermal/msm-tsens.c @@ -89,6 +89,7 @@ #define TSENS_TM_CRITICAL_INT_EN BIT(2) #define TSENS_TM_UPPER_INT_EN BIT(1) #define TSENS_TM_LOWER_INT_EN BIT(0) +#define TSENS_TM_UPPER_LOWER_INT_DISABLE 0xffffffff #define TSENS_TM_UPPER_INT_MASK(n) (((n) & 0xffff0000) >> 16) #define TSENS_TM_LOWER_INT_MASK(n) ((n) & 0xffff) @@ -269,8 +270,8 @@ struct tsens_tm_device { uint32_t wd_bark_val; int tsens_irq; int tsens_critical_irq; - void *tsens_addr; - void *tsens_calib_addr; + void __iomem *tsens_addr; + void __iomem *tsens_calib_addr; int tsens_len; int calib_len; struct resource *res_tsens_mem; @@ -2079,6 +2080,7 @@ static int tsens_hw_init(struct tsens_tm_device *tmdev) void __iomem *sensor_int_mask_addr; unsigned int srot_val; int crit_mask; + void __iomem *int_mask_addr; if (!tmdev) { pr_err("Invalid tsens device\n"); @@ -2104,6 +2106,10 @@ static int tsens_hw_init(struct tsens_tm_device *tmdev) /*Update critical cycle monitoring*/ mb(); } + int_mask_addr = TSENS_TM_UPPER_LOWER_INT_MASK + (tmdev->tsens_addr); + writel_relaxed(TSENS_TM_UPPER_LOWER_INT_DISABLE, + int_mask_addr); writel_relaxed(TSENS_TM_CRITICAL_INT_EN | TSENS_TM_UPPER_INT_EN | TSENS_TM_LOWER_INT_EN, TSENS_TM_INT_EN(tmdev->tsens_addr)); diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c index 1246aa6fcab0..737635f0bec0 100644 --- a/drivers/thermal/power_allocator.c +++ b/drivers/thermal/power_allocator.c @@ -523,6 +523,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) struct thermal_instance *instance; struct power_allocator_params *params = tz->governor_data; + mutex_lock(&tz->lock); list_for_each_entry(instance, &tz->thermal_instances, tz_node) { if ((instance->trip != params->trip_max_desired_temperature) || (!cdev_is_power_actor(instance->cdev))) @@ -532,6 +533,7 @@ static void allow_maximum_power(struct thermal_zone_device *tz) instance->cdev->updated = false; thermal_cdev_update(instance->cdev); } + mutex_unlock(&tz->lock); } /** diff --git 
a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c6ec9e64451b..4686e93aaf94 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -26,20 +26,25 @@ #include <linux/mm.h> #include <linux/dma-mapping.h> #include <linux/serial_core.h> +#include <linux/of.h> /* Goldfish tty register's offsets */ -#define GOLDFISH_TTY_REG_BYTES_READY 0x04 -#define GOLDFISH_TTY_REG_CMD 0x08 -#define GOLDFISH_TTY_REG_DATA_PTR 0x10 -#define GOLDFISH_TTY_REG_DATA_LEN 0x14 -#define GOLDFISH_TTY_REG_DATA_PTR_HIGH 0x18 -#define GOLDFISH_TTY_REG_VERSION 0x20 +enum { + GOLDFISH_TTY_REG_BYTES_READY = 0x04, + GOLDFISH_TTY_REG_CMD = 0x08, + GOLDFISH_TTY_REG_DATA_PTR = 0x10, + GOLDFISH_TTY_REG_DATA_LEN = 0x14, + GOLDFISH_TTY_REG_DATA_PTR_HIGH = 0x18, + GOLDFISH_TTY_REG_VERSION = 0x20, +}; /* Goldfish tty commands */ -#define GOLDFISH_TTY_CMD_INT_DISABLE 0 -#define GOLDFISH_TTY_CMD_INT_ENABLE 1 -#define GOLDFISH_TTY_CMD_WRITE_BUFFER 2 -#define GOLDFISH_TTY_CMD_READ_BUFFER 3 +enum { + GOLDFISH_TTY_CMD_INT_DISABLE = 0, + GOLDFISH_TTY_CMD_INT_ENABLE = 1, + GOLDFISH_TTY_CMD_WRITE_BUFFER = 2, + GOLDFISH_TTY_CMD_READ_BUFFER = 3, +}; struct goldfish_tty { struct tty_port port; @@ -82,32 +87,35 @@ static void do_rw_io(struct goldfish_tty *qtty, } static void goldfish_tty_rw(struct goldfish_tty *qtty, - unsigned long addr, + const void *address_ptr, unsigned int count, int is_write) { dma_addr_t dma_handle; enum dma_data_direction dma_dir; + uintptr_t address; + address = (uintptr_t)address_ptr; dma_dir = (is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (qtty->version > 0) { /* * Goldfish TTY for Ranchu platform uses * physical addresses and DMA for read/write operations */ - unsigned long addr_end = addr + count; + uintptr_t address_end = address + count; - while (addr < addr_end) { - unsigned long pg_end = (addr & PAGE_MASK) + PAGE_SIZE; - unsigned long next = - pg_end < addr_end ? pg_end : addr_end; - unsigned long avail = next - addr; + while (address < address_end) { + uintptr_t page_end = (address & PAGE_MASK) + PAGE_SIZE; + uintptr_t next = page_end < address_end ? + page_end : address_end; + uintptr_t avail = next - address; /* * Map the buffer's virtual address to the DMA address * so the buffer can be accessed by the device. */ - dma_handle = dma_map_single(qtty->dev, (void *)addr, + dma_handle = dma_map_single(qtty->dev, (void *)address, avail, dma_dir); if (dma_mapping_error(qtty->dev, dma_handle)) { @@ -122,31 +130,30 @@ static void goldfish_tty_rw(struct goldfish_tty *qtty, */ dma_unmap_single(qtty->dev, dma_handle, avail, dma_dir); - addr += avail; + address += avail; } } else { /* * Old style Goldfish TTY used on the Goldfish platform * uses virtual addresses. 
*/ - do_rw_io(qtty, addr, count, is_write); + do_rw_io(qtty, address, count, is_write); } + } static void goldfish_tty_do_write(int line, const char *buf, unsigned int count) { struct goldfish_tty *qtty = &goldfish_ttys[line]; - unsigned long address = (unsigned long)(void *)buf; - goldfish_tty_rw(qtty, address, count, 1); + goldfish_tty_rw(qtty, buf, count, 1); } static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) { struct goldfish_tty *qtty = dev_id; void __iomem *base = qtty->base; - unsigned long address; unsigned char *buf; u32 count; @@ -155,9 +162,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id) return IRQ_NONE; count = tty_prepare_flip_string(&qtty->port, &buf, count); - - address = (unsigned long)(void *)buf; - goldfish_tty_rw(qtty, address, count, 0); + goldfish_tty_rw(qtty, buf, count, 0); tty_schedule_flip(&qtty->port); return IRQ_HANDLED; @@ -181,6 +186,7 @@ static void goldfish_tty_shutdown(struct tty_port *port) static int goldfish_tty_open(struct tty_struct *tty, struct file *filp) { struct goldfish_tty *qtty = &goldfish_ttys[tty->index]; + return tty_port_open(&qtty->port, tty, filp); } @@ -210,6 +216,7 @@ static int goldfish_tty_chars_in_buffer(struct tty_struct *tty) { struct goldfish_tty *qtty = &goldfish_ttys[tty->index]; void __iomem *base = qtty->base; + return readl(base + GOLDFISH_TTY_REG_BYTES_READY); } diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 9aff37186246..78bd121ecede 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -1467,6 +1467,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci) * in which case an opening port goes back to closed and a closing port * is simply put into closed state (any further frames from the other * end will get a DM response) + * + * Some control dlci can stay in ADM mode with other dlci working just + * fine. In that case we can just keep the control dlci open after the + * DLCI_OPENING retries time out. */ static void gsm_dlci_t1(unsigned long data) @@ -1480,8 +1484,15 @@ static void gsm_dlci_t1(unsigned long data) if (dlci->retries) { gsm_command(dlci->gsm, dlci->addr, SABM|PF); mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100); - } else + } else if (!dlci->addr && gsm->control == (DM | PF)) { + if (debug & 8) + pr_info("DLCI %d opening in ADM mode.\n", + dlci->addr); + gsm_dlci_open(dlci); + } else { gsm_dlci_close(dlci); + } + break; case DLCI_CLOSING: dlci->retries--; @@ -1499,8 +1510,8 @@ static void gsm_dlci_t1(unsigned long data) * @dlci: DLCI to open * * Commence opening a DLCI from the Linux side. We issue SABM messages - * to the modem which should then reply with a UA, at which point we - * will move into open state. Opening is done asynchronously with retry + * to the modem which should then reply with a UA or ADM, at which point + * we will move into open state. Opening is done asynchronously with retry * running off timers and the responses. 
*/ diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index e8dd296fb25b..c4383573cf66 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -608,6 +608,10 @@ static int omap_8250_startup(struct uart_port *port) up->lsr_saved_flags = 0; up->msr_saved_flags = 0; + /* Disable DMA for console UART */ + if (uart_console(port)) + up->dma = NULL; + if (up->dma) { ret = serial8250_request_dma(up); if (ret) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 7025f47fa284..746c76b358a0 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -5300,6 +5300,17 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ pbn_b2_4_115200 }, /* + * BrainBoxes UC-260 + */ + { PCI_VENDOR_ID_INTASHIELD, 0x0D21, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0E34, + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00, + pbn_b2_4_115200 }, + /* * Perle PCI-RAS cards */ { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 53e4d5056db7..e0277cf0bf58 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c @@ -1783,6 +1783,7 @@ static void atmel_get_ip_name(struct uart_port *port) switch (version) { case 0x302: case 0x10213: + case 0x10302: dev_dbg(port->dev, "This version is usart\n"); atmel_port->is_usart = true; break; diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c index fcf803ffad19..cdd2f942317c 100644 --- a/drivers/tty/serial/sccnxp.c +++ b/drivers/tty/serial/sccnxp.c @@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev) clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { - if (PTR_ERR(clk) == -EPROBE_DEFER) { - ret = -EPROBE_DEFER; + ret = PTR_ERR(clk); + if (ret == -EPROBE_DEFER) goto err_out; - } + uartclk = 0; + } else { + clk_prepare_enable(clk); + uartclk = clk_get_rate(clk); + } + + if (!uartclk) { dev_notice(&pdev->dev, "Using default clock frequency\n"); uartclk = s->chip->freq_std; - } else - uartclk = clk_get_rate(clk); + } /* Check input frequency */ if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) { diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 80d0ffe7abc1..8dd822feb972 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -847,6 +847,8 @@ static void sci_receive_chars(struct uart_port *port) /* Tell the rest of the system the news. New characters! */ tty_flip_buffer_push(tport); } else { + /* TTY buffers full; read from RX reg to prevent lockup */ + serial_port_in(port, SCxRDR); serial_port_in(port, SCxSR); /* dummy read */ sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); } @@ -1455,7 +1457,16 @@ static void sci_free_dma(struct uart_port *port) if (s->chan_rx) sci_rx_dma_release(s, false); } -#else + +static void sci_flush_buffer(struct uart_port *port) +{ + /* + * In uart_flush_buffer(), the xmit circular buffer has just been + * cleared, so we have to reset tx_dma_len accordingly. 
+ */ + to_sci_port(port)->tx_dma_len = 0; +} +#else /* !CONFIG_SERIAL_SH_SCI_DMA */ static inline void sci_request_dma(struct uart_port *port) { } @@ -1463,7 +1474,9 @@ static inline void sci_request_dma(struct uart_port *port) static inline void sci_free_dma(struct uart_port *port) { } -#endif + +#define sci_flush_buffer NULL +#endif /* !CONFIG_SERIAL_SH_SCI_DMA */ static irqreturn_t sci_rx_interrupt(int irq, void *ptr) { @@ -2203,6 +2216,7 @@ static struct uart_ops sci_uart_ops = { .break_ctl = sci_break_ctl, .startup = sci_startup, .shutdown = sci_shutdown, + .flush_buffer = sci_flush_buffer, .set_termios = sci_set_termios, .pm = sci_pm, .type = sci_type, diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 1bb629ab8ecc..a638c1738547 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1694,6 +1694,8 @@ static void release_tty(struct tty_struct *tty, int idx) if (tty->link) tty->link->port->itty = NULL; tty_buffer_cancel_work(tty->port); + if (tty->link) + tty_buffer_cancel_work(tty->link->port); tty_kref_put(tty->link); tty_kref_put(tty); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index e4f69bddcfb1..ff3286fc22d8 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -1312,6 +1312,11 @@ static void csi_m(struct vc_data *vc) case 3: vc->vc_italic = 1; break; + case 21: + /* + * No console drivers support double underline, so + * convert it to a single underline. + */ case 4: vc->vc_underline = 1; break; @@ -1348,7 +1353,6 @@ static void csi_m(struct vc_data *vc) vc->vc_disp_ctrl = 1; vc->vc_toggle_meta = 1; break; - case 21: case 22: vc->vc_intensity = 1; break; @@ -1725,7 +1729,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear) default_attr(vc); update_attr(vc); - vc->vc_tab_stop[0] = 0x01010100; + vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = vc->vc_tab_stop[2] = vc->vc_tab_stop[3] = @@ -1769,7 +1773,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) vc->vc_pos -= (vc->vc_x << 1); while (vc->vc_x < vc->vc_cols - 1) { vc->vc_x++; - if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) + if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31))) break; } vc->vc_pos += (vc->vc_x << 1); @@ -1829,7 +1833,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) lf(vc); return; case 'H': - vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31)); return; case 'Z': respond_ID(tty); @@ -2022,7 +2026,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c) return; case 'g': if (!vc->vc_par[0]) - vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); + vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31)); else if (vc->vc_par[0] == 3) { vc->vc_tab_stop[0] = vc->vc_tab_stop[1] = diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 939c6ad71068..57ee43512992 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -851,7 +851,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci) { ci_hdrc_gadget_destroy(ci); ci_hdrc_host_destroy(ci); - if (ci->is_otg) + if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) ci_hdrc_otg_destroy(ci); } @@ -951,27 +951,35 @@ static int ci_hdrc_probe(struct platform_device *pdev) /* initialize role(s) before the interrupt is requested */ if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) { ret = ci_hdrc_host_init(ci); - if (ret) - dev_info(dev, "doesn't support host\n"); + if (ret) { + if (ret == 
-ENXIO) + dev_info(dev, "doesn't support host\n"); + else + goto deinit_phy; + } } if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) { ret = ci_hdrc_gadget_init(ci); - if (ret) - dev_info(dev, "doesn't support gadget\n"); + if (ret) { + if (ret == -ENXIO) + dev_info(dev, "doesn't support gadget\n"); + else + goto deinit_host; + } } if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) { dev_err(dev, "no supported roles\n"); ret = -ENODEV; - goto deinit_phy; + goto deinit_gadget; } if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) { ret = ci_hdrc_otg_init(ci); if (ret) { dev_err(dev, "init otg fails, ret = %d\n", ret); - goto stop; + goto deinit_gadget; } } @@ -1036,7 +1044,12 @@ static int ci_hdrc_probe(struct platform_device *pdev) ci_extcon_unregister(ci); stop: - ci_role_destroy(ci); + if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) + ci_hdrc_otg_destroy(ci); +deinit_gadget: + ci_hdrc_gadget_destroy(ci); +deinit_host: + ci_hdrc_host_destroy(ci); deinit_phy: ci_usb_phy_exit(ci); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index 8e641b5893ed..29adabdb305f 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -147,6 +147,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); + /* Linger a bit, prior to the next control message. */ + if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG) + msleep(200); + kfree(dr); return ret; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 774c97bb1c08..4f1c6f8d4352 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -229,7 +229,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, /* Corsair Strafe RGB */ - { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | + USB_QUIRK_DELAY_CTRL_MSG }, /* Corsair K70 LUX */ { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index 571c21727ff9..85fb6226770c 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -1402,8 +1402,12 @@ static void dwc2_conn_id_status_change(struct work_struct *work) if (count > 250) dev_err(hsotg->dev, "Connection id status change timed out\n"); - hsotg->op_state = OTG_STATE_A_HOST; + spin_lock_irqsave(&hsotg->lock, flags); + dwc2_hsotg_disconnect(hsotg); + spin_unlock_irqrestore(&hsotg->lock, flags); + + hsotg->op_state = OTG_STATE_A_HOST; /* Initialize the Core for Host mode */ dwc2_core_init(hsotg, false, -1); dwc2_enable_global_interrupts(hsotg); diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index 2be268d2423d..03a926ebf34b 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c @@ -112,6 +112,10 @@ static int kdwc3_probe(struct platform_device *pdev) dev->dma_mask = &kdwc3_dma_mask; kdwc->clk = devm_clk_get(kdwc->dev, "usb"); + if (IS_ERR(kdwc->clk)) { + dev_err(kdwc->dev, "unable to get usb clock\n"); + return PTR_ERR(kdwc->clk); + } error = clk_prepare_enable(kdwc->clk); if (error < 0) { diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c index 2cd600a58fd7..3f59a2f8b84f 100644 --- a/drivers/usb/dwc3/dwc3-msm.c +++ b/drivers/usb/dwc3/dwc3-msm.c @@ -3487,6 +3487,7 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) if (on) { dev_dbg(mdwc->dev, "%s: 
turn on host\n", __func__); + pm_runtime_get_sync(mdwc->dev); mdwc->hs_phy->flags |= PHY_HOST_MODE; if (dwc->maximum_speed == USB_SPEED_SUPER) { mdwc->ss_phy->flags |= PHY_HOST_MODE; @@ -3495,7 +3496,6 @@ static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on) } usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH); - pm_runtime_get_sync(mdwc->dev); dbg_event(0xFF, "StrtHost gync", atomic_read(&mdwc->dev->power.usage_count)); if (!IS_ERR(mdwc->vbus_reg)) diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c index a412f024d834..61dfceb336d6 100644 --- a/drivers/usb/gadget/function/f_accessory.c +++ b/drivers/usb/gadget/function/f_accessory.c @@ -887,6 +887,12 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev, u16 w_length = le16_to_cpu(ctrl->wLength); unsigned long flags; + /* + * If instance is not created which is the case in power off charging + * mode, dev will be NULL. Hence return error if it is the case. + */ + if (!dev) + return -ENODEV; /* * printk(KERN_INFO "acc_ctrlrequest " * "%02x.%02x v%04x i%04x l%u\n", diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 1ffde9c5408c..9edc01692142 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -68,18 +68,27 @@ __ffs_data_got_descs(struct ffs_data *ffs, char *data, size_t len); static int __must_check __ffs_data_got_strings(struct ffs_data *ffs, char *data, size_t len); +static LIST_HEAD(inst_list); + /* ffs instance status */ -static DEFINE_MUTEX(ffs_ep_lock); -static bool ffs_inst_exist; -static struct f_fs_opts *g_opts; +#define INST_NAME_SIZE 16 -/* Free instance structures */ -static void ffs_inst_clean(struct f_fs_opts *opts); -static void ffs_inst_clean_delay(void); -static int ffs_inst_exist_check(void); +struct ffs_inst_status { + char inst_name[INST_NAME_SIZE]; + struct list_head list; + struct mutex ffs_lock; + bool inst_exist; + struct f_fs_opts *opts; + struct ffs_data *ffs_data; +}; -/* Global ffs_data pointer */ -static struct ffs_data *g_ffs_data; +/* Free instance structures */ +static void ffs_inst_clean(struct f_fs_opts *opts, + const char *inst_name); +static void ffs_inst_clean_delay(const char *inst_name); +static int ffs_inst_exist_check(const char *inst_name); +static struct ffs_inst_status *name_to_inst_status( + const char *inst_name, bool create_inst); /* The function structure ***************************************************/ @@ -300,7 +309,7 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf, ffs_log("enter:len %zu state %d setup_state %d flags %lu", len, ffs->state, ffs->setup_state, ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -490,7 +499,7 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf, ffs_log("enter:len %zu state %d setup_state %d flags %lu", len, ffs->state, ffs->setup_state, ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -601,7 +610,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file) ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -643,7 +652,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) ffs_log("state %d setup_state %d flags %lu opened %d", 
ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -668,7 +677,7 @@ static unsigned int ffs_ep0_poll(struct file *file, poll_table *wait) ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state, ffs->setup_state, ffs->flags, atomic_read(&ffs->opened)); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(ffs->dev_name); if (ret < 0) return ret; @@ -799,6 +808,10 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) ffs_log("enter: epfile name %s epfile err %d (%s)", epfile->name, atomic_read(&epfile->error), io_data->read ? "READ" : "WRITE"); + ret = ffs_inst_exist_check(epfile->ffs->dev_name); + if (ret < 0) + return ret; + smp_mb__before_atomic(); retry: if (atomic_read(&epfile->error)) @@ -1085,7 +1098,7 @@ ffs_epfile_open(struct inode *inode, struct file *file) ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(epfile->ffs->dev_name); if (ret < 0) return ret; @@ -1143,16 +1156,11 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) { struct ffs_io_data io_data, *p = &io_data; ssize_t res; - int ret; ENTER(); ffs_log("enter"); - ret = ffs_inst_exist_check(); - if (ret < 0) - return ret; - if (!is_sync_kiocb(kiocb)) { p = kmalloc(sizeof(io_data), GFP_KERNEL); if (unlikely(!p)) @@ -1189,16 +1197,11 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) { struct ffs_io_data io_data, *p = &io_data; ssize_t res; - int ret; ENTER(); ffs_log("enter"); - ret = ffs_inst_exist_check(); - if (ret < 0) - return ret; - if (!is_sync_kiocb(kiocb)) { p = kmalloc(sizeof(io_data), GFP_KERNEL); if (unlikely(!p)) @@ -1275,7 +1278,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code, ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state, epfile->ffs->setup_state, epfile->ffs->flags); - ret = ffs_inst_exist_check(); + ret = ffs_inst_exist_check(epfile->ffs->dev_name); if (ret < 0) return ret; @@ -1583,6 +1586,7 @@ ffs_fs_mount(struct file_system_type *t, int flags, int ret; void *ffs_dev; struct ffs_data *ffs; + struct ffs_inst_status *inst_status; ENTER(); @@ -1612,6 +1616,18 @@ ffs_fs_mount(struct file_system_type *t, int flags, ffs->private_data = ffs_dev; data.ffs_data = ffs; + inst_status = name_to_inst_status(ffs->dev_name, false); + if (IS_ERR(inst_status)) { + ffs_log("failed to find instance (%s)\n", + ffs->dev_name); + return ERR_PTR(-EINVAL); + } + + /* Store ffs to global status structure */ + ffs_dev_lock(); + inst_status->ffs_data = ffs; + ffs_dev_unlock(); + rv = mount_nodev(t, flags, &data, ffs_sb_fill); if (IS_ERR(rv) && data.ffs_data) { ffs_release_dev(data.ffs_data); @@ -1711,6 +1727,9 @@ static void ffs_data_opened(struct ffs_data *ffs) static void ffs_data_put(struct ffs_data *ffs) { + struct ffs_inst_status *inst_status; + const char *dev_name; + ENTER(); ffs_log("enter"); @@ -1718,16 +1737,20 @@ static void ffs_data_put(struct ffs_data *ffs) smp_mb__before_atomic(); if (unlikely(atomic_dec_and_test(&ffs->ref))) { pr_info("%s(): freeing\n", __func__); - /* Clear g_ffs_data */ - ffs_dev_lock(); - g_ffs_data = NULL; - ffs_dev_unlock(); + /* Clear ffs from global structure */ + inst_status = name_to_inst_status(ffs->dev_name, false); + if (!IS_ERR(inst_status)) { + ffs_dev_lock(); + inst_status->ffs_data = 
NULL; + ffs_dev_unlock(); + } ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || waitqueue_active(&ffs->ep0req_completion.wait)); - kfree(ffs->dev_name); + dev_name = ffs->dev_name; kfree(ffs); - ffs_inst_clean_delay(); + ffs_inst_clean_delay(dev_name); + kfree(dev_name); } ffs_log("exit"); @@ -1792,11 +1815,6 @@ static struct ffs_data *ffs_data_new(void) /* XXX REVISIT need to update it in some places, or do we? */ ffs->ev.can_stall = 1; - /* Store ffs to g_ffs_data */ - ffs_dev_lock(); - g_ffs_data = ffs; - ffs_dev_unlock(); - ffs_log("exit"); return ffs; @@ -3684,79 +3702,146 @@ static struct config_item_type ffs_func_type = { /* Function registration interface ******************************************/ -static int ffs_inst_exist_check(void) +static struct ffs_inst_status *name_to_inst_status( + const char *inst_name, bool create_inst) +{ + struct ffs_inst_status *inst_status; + + list_for_each_entry(inst_status, &inst_list, list) { + if (!strncasecmp(inst_status->inst_name, + inst_name, strlen(inst_name))) + return inst_status; + } + + if (!create_inst) + return ERR_PTR(-ENODEV); + + inst_status = kzalloc(sizeof(struct ffs_inst_status), + GFP_KERNEL); + if (!inst_status) + return ERR_PTR(-ENOMEM); + + mutex_init(&inst_status->ffs_lock); + snprintf(inst_status->inst_name, INST_NAME_SIZE, inst_name); + list_add_tail(&inst_status->list, &inst_list); + + return inst_status; +} + +static int ffs_inst_exist_check(const char *inst_name) { - mutex_lock(&ffs_ep_lock); + struct ffs_inst_status *inst_status; - if (unlikely(ffs_inst_exist == false)) { - mutex_unlock(&ffs_ep_lock); + inst_status = name_to_inst_status(inst_name, false); + if (IS_ERR(inst_status)) { pr_err_ratelimited( - "%s: f_fs instance freed already.\n", - __func__); + "%s: failed to find instance (%s)\n", + __func__, inst_name); + return -ENODEV; + } + + mutex_lock(&inst_status->ffs_lock); + + if (unlikely(inst_status->inst_exist == false)) { + mutex_unlock(&inst_status->ffs_lock); + pr_err_ratelimited( + "%s: f_fs instance (%s) has been freed already.\n", + __func__, inst_name); return -ENODEV; } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); return 0; } -static void ffs_inst_clean(struct f_fs_opts *opts) +static void ffs_inst_clean(struct f_fs_opts *opts, + const char *inst_name) { - g_opts = NULL; + struct ffs_inst_status *inst_status; + + inst_status = name_to_inst_status(inst_name, false); + if (IS_ERR(inst_status)) { + pr_err_ratelimited( + "%s: failed to find instance (%s)\n", + __func__, inst_name); + return; + } + + inst_status->opts = NULL; + ffs_dev_lock(); _ffs_free_dev(opts->dev); ffs_dev_unlock(); kfree(opts); } -static void ffs_inst_clean_delay(void) +static void ffs_inst_clean_delay(const char *inst_name) { - mutex_lock(&ffs_ep_lock); + struct ffs_inst_status *inst_status; - if (unlikely(ffs_inst_exist == false)) { - if (g_opts) { - ffs_inst_clean(g_opts); + inst_status = name_to_inst_status(inst_name, false); + if (IS_ERR(inst_status)) { + pr_err_ratelimited( + "%s: failed to find (%s) instance\n", + __func__, inst_name); + return; + } + + mutex_lock(&inst_status->ffs_lock); + + if (unlikely(inst_status->inst_exist == false)) { + if (inst_status->opts) { + ffs_inst_clean(inst_status->opts, inst_name); pr_err_ratelimited("%s: Delayed free memory\n", __func__); } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); return; } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); } static void ffs_free_inst(struct usb_function_instance *f) { 
struct f_fs_opts *opts; + struct ffs_inst_status *inst_status; opts = to_f_fs_opts(f); - mutex_lock(&ffs_ep_lock); + inst_status = name_to_inst_status(opts->dev->name, false); + if (IS_ERR(inst_status)) { + ffs_log("failed to find (%s) instance\n", + opts->dev->name); + return; + } + + mutex_lock(&inst_status->ffs_lock); if (opts->dev->ffs_data && atomic_read(&opts->dev->ffs_data->opened)) { - ffs_inst_exist = false; - mutex_unlock(&ffs_ep_lock); - ffs_log("%s: Dev is open, free mem when dev close\n", - __func__); + inst_status->inst_exist = false; + mutex_unlock(&inst_status->ffs_lock); + ffs_log("Dev is open, free mem when dev (%s) close\n", + opts->dev->name); return; } - ffs_inst_clean(opts); - ffs_inst_exist = false; - g_opts = NULL; - mutex_unlock(&ffs_ep_lock); + ffs_inst_clean(opts, opts->dev->name); + inst_status->inst_exist = false; + mutex_unlock(&inst_status->ffs_lock); } #define MAX_INST_NAME_LEN 40 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) { - struct f_fs_opts *opts; + struct f_fs_opts *opts, *opts_prev; + struct ffs_data *ffs_data_tmp; char *ptr; const char *tmp; int name_len, ret; + struct ffs_inst_status *inst_status; name_len = strlen(name) + 1; if (name_len > MAX_INST_NAME_LEN) @@ -3766,13 +3851,22 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) if (!ptr) return -ENOMEM; - mutex_lock(&ffs_ep_lock); - if (g_opts) { - mutex_unlock(&ffs_ep_lock); - ffs_log("%s: prev inst do not freed yet\n", __func__); + inst_status = name_to_inst_status(ptr, true); + if (IS_ERR(inst_status)) { + ffs_log("failed to create status struct for (%s) instance\n", + ptr); + return -EINVAL; + } + + mutex_lock(&inst_status->ffs_lock); + opts_prev = inst_status->opts; + if (opts_prev) { + mutex_unlock(&inst_status->ffs_lock); + ffs_log("instance (%s): prev inst do not freed yet\n", + inst_status->inst_name); return -EBUSY; } - mutex_unlock(&ffs_ep_lock); + mutex_unlock(&inst_status->ffs_lock); opts = to_f_fs_opts(fi); tmp = NULL; @@ -3794,8 +3888,9 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) * ffs_private_data also need to update new allocated opts->dev * address. 
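The f_fs.c rework above replaces the driver-wide ffs_ep_lock, ffs_inst_exist, g_opts and g_ffs_data globals with a list of per-instance status records looked up by instance name (name_to_inst_status() in the patch). A minimal sketch of that lookup-or-create pattern, with hypothetical names (inst_rec, inst_lookup) rather than the driver's own structures:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>

struct inst_rec {
	char name[16];
	struct list_head node;
	struct mutex lock;	/* serializes exist/opts/ffs_data updates */
	bool exists;
};

static LIST_HEAD(inst_list);

/* Return the record for @name; allocate one on a miss only if @create is set. */
static struct inst_rec *inst_lookup(const char *name, bool create)
{
	struct inst_rec *rec;

	list_for_each_entry(rec, &inst_list, node)
		if (!strcmp(rec->name, name))
			return rec;

	if (!create)
		return ERR_PTR(-ENODEV);

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec)
		return ERR_PTR(-ENOMEM);

	mutex_init(&rec->lock);
	snprintf(rec->name, sizeof(rec->name), "%s", name);
	list_add_tail(&rec->node, &inst_list);
	return rec;
}

Each entry point then resolves its own record first and takes that record's mutex, so tearing down one function instance no longer blocks or corrupts another.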
*/ - if (g_ffs_data) - opts->dev->ffs_data = g_ffs_data; + ffs_data_tmp = inst_status->ffs_data; + if (ffs_data_tmp) + opts->dev->ffs_data = ffs_data_tmp; if (opts->dev->ffs_data) opts->dev->ffs_data->private_data = opts->dev; @@ -3804,10 +3899,10 @@ static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) kfree(tmp); - mutex_lock(&ffs_ep_lock); - ffs_inst_exist = true; - g_opts = opts; - mutex_unlock(&ffs_ep_lock); + mutex_lock(&inst_status->ffs_lock); + inst_status->inst_exist = true; + inst_status->opts = opts; + mutex_unlock(&inst_status->ffs_lock); return 0; } @@ -4212,6 +4307,20 @@ module_init(ffs_init); static void __exit ffs_exit(void) { + struct ffs_inst_status *inst_status, *inst_status_tmp = NULL; + + list_for_each_entry(inst_status, &inst_list, list) { + if (inst_status_tmp) { + list_del(&inst_status_tmp->list); + kfree(inst_status_tmp); + } + inst_status_tmp = inst_status; + } + if (inst_status_tmp) { + list_del(&inst_status_tmp->list); + kfree(inst_status_tmp); + } + if (ffs_ipc_log) { ipc_log_context_destroy(ffs_ipc_log); ffs_ipc_log = NULL; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index ee579ba2b59e..a5dae5bb62ab 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -223,6 +223,13 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, /* pick the first one */ list = list_first_entry(&hidg->completed_out_req, struct f_hidg_req_list, list); + + /* + * Remove this from list to protect it from beign free() + * while host disables our function + */ + list_del(&list->list); + req = list->req; count = min_t(unsigned int, count, req->actual - list->pos); spin_unlock_irqrestore(&hidg->spinlock, flags); @@ -238,15 +245,20 @@ static ssize_t f_hidg_read(struct file *file, char __user *buffer, * call, taking into account its current read position. 
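The f_hid.c hunks above close a race between f_hidg_read() and hidg_disable(): the completed request is unlinked from the shared list while the spinlock is held, copied to user space without the lock, then either recycled (fully consumed) or put back on the list for the next reader, and hidg_disable() now frees the leftovers under the same lock. A hedged sketch of that detach/copy/re-add shape, with made-up types (sample_dev, sample_req) standing in for the HID gadget's own:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

struct sample_req {
	struct list_head list;
	char data[64];
	size_t len, pos;
};

struct sample_dev {
	spinlock_t lock;
	struct list_head completed;
};

static ssize_t sample_read(struct sample_dev *dev, char __user *buf, size_t count)
{
	struct sample_req *req;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (list_empty(&dev->completed)) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EAGAIN;
	}
	req = list_first_entry(&dev->completed, struct sample_req, list);
	list_del(&req->list);	/* a concurrent disable can no longer free it */
	spin_unlock_irqrestore(&dev->lock, flags);

	count = min(count, req->len - req->pos);
	if (copy_to_user(buf, req->data + req->pos, count)) {
		spin_lock_irqsave(&dev->lock, flags);
		list_add(&req->list, &dev->completed);
		spin_unlock_irqrestore(&dev->lock, flags);
		return -EFAULT;
	}
	req->pos += count;

	if (req->pos == req->len) {
		kfree(req);	/* the real driver re-queues the USB request here */
	} else {
		spin_lock_irqsave(&dev->lock, flags);
		list_add(&req->list, &dev->completed);
		spin_unlock_irqrestore(&dev->lock, flags);
	}
	return count;
}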
*/ if (list->pos == req->actual) { - spin_lock_irqsave(&hidg->spinlock, flags); - list_del(&list->list); kfree(list); - spin_unlock_irqrestore(&hidg->spinlock, flags); req->length = hidg->report_length; ret = usb_ep_queue(hidg->out_ep, req, GFP_KERNEL); - if (ret < 0) + if (ret < 0) { + free_ep_req(hidg->out_ep, req); return ret; + } + } else { + spin_lock_irqsave(&hidg->spinlock, flags); + list_add(&list->list, &hidg->completed_out_req); + spin_unlock_irqrestore(&hidg->spinlock, flags); + + wake_up(&hidg->read_queue); } return count; @@ -490,14 +502,18 @@ static void hidg_disable(struct usb_function *f) { struct f_hidg *hidg = func_to_hidg(f); struct f_hidg_req_list *list, *next; + unsigned long flags; usb_ep_disable(hidg->in_ep); usb_ep_disable(hidg->out_ep); + spin_lock_irqsave(&hidg->spinlock, flags); list_for_each_entry_safe(list, next, &hidg->completed_out_req, list) { + free_ep_req(hidg->out_ep, list->req); list_del(&list->list); kfree(list); } + spin_unlock_irqrestore(&hidg->spinlock, flags); } static int hidg_set_alt(struct usb_function *f, unsigned intf, unsigned alt) diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 79f554f1fb23..1fcdbdc35cd1 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -210,12 +210,6 @@ static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep, return alloc_ep_req(ep, length, length); } -static void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static const uint8_t f_midi_cin_length[] = { 0, 0, 2, 3, 3, 1, 2, 3, 3, 3, 3, 3, 2, 2, 3, 1 }; diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c index 9f3ced62d916..67b243989938 100644 --- a/drivers/usb/gadget/function/f_sourcesink.c +++ b/drivers/usb/gadget/function/f_sourcesink.c @@ -303,12 +303,6 @@ static inline struct usb_request *ss_alloc_ep_req(struct usb_ep *ep, int len) return alloc_ep_req(ep, len, ss->buflen); } -void free_ep_req(struct usb_ep *ep, struct usb_request *req) -{ - kfree(req->buf); - usb_ep_free_request(ep, req); -} - static void disable_ep(struct usb_composite_dev *cdev, struct usb_ep *ep) { int value; diff --git a/drivers/usb/gadget/function/g_zero.h b/drivers/usb/gadget/function/g_zero.h index 15f180904f8a..5ed90b437f18 100644 --- a/drivers/usb/gadget/function/g_zero.h +++ b/drivers/usb/gadget/function/g_zero.h @@ -59,7 +59,6 @@ void lb_modexit(void); int lb_modinit(void); /* common utilities */ -void free_ep_req(struct usb_ep *ep, struct usb_request *req); void disable_endpoints(struct usb_composite_dev *cdev, struct usb_ep *in, struct usb_ep *out, struct usb_ep *iso_in, struct usb_ep *iso_out); diff --git a/drivers/usb/gadget/u_f.c b/drivers/usb/gadget/u_f.c index c6276f0268ae..907f8144813c 100644 --- a/drivers/usb/gadget/u_f.c +++ b/drivers/usb/gadget/u_f.c @@ -11,16 +11,18 @@ * published by the Free Software Foundation. 
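The f_midi.c and f_sourcesink.c hunks above drop their private free_ep_req() copies and g_zero.h loses the extern; the u_f.c/u_f.h changes that follow make alloc_ep_req()/free_ep_req() the one shared pair and round OUT request lengths up to the endpoint's wMaxPacketSize. A small usage sketch under those rules, assuming it is built next to u_f.h in drivers/usb/gadget; the 512-byte size and the sample_* names are illustrative only:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb/gadget.h>

#include "u_f.h"	/* alloc_ep_req()/free_ep_req() as reworked below */

static void sample_complete(struct usb_ep *ep, struct usb_request *req)
{
	free_ep_req(ep, req);		/* always pairs with alloc_ep_req() */
}

static int sample_queue_one(struct usb_ep *ep)
{
	struct usb_request *req;
	int ret;

	req = alloc_ep_req(ep, 512, 0);
	if (!req)
		return -ENOMEM;

	/* Use req->length, not 512: OUT requests may have been rounded up. */
	memset(req->buf, 0, req->length);
	req->complete = sample_complete;

	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
	if (ret)
		free_ep_req(ep, req);
	return ret;
}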
*/ -#include <linux/usb/gadget.h> #include "u_f.h" +#include <linux/usb/ch9.h> -struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len) +struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len) { struct usb_request *req; req = usb_ep_alloc_request(ep, GFP_ATOMIC); if (req) { req->length = len ?: default_len; + if (usb_endpoint_dir_out(ep->desc)) + req->length = usb_ep_align(ep, req->length); req->buf = kmalloc(req->length, GFP_ATOMIC); if (!req->buf) { usb_ep_free_request(ep, req); diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h index 1d5f0eb68552..69a1d10df04f 100644 --- a/drivers/usb/gadget/u_f.h +++ b/drivers/usb/gadget/u_f.h @@ -16,6 +16,8 @@ #ifndef __U_F_H__ #define __U_F_H__ +#include <linux/usb/gadget.h> + /* Variable Length Array Macros **********************************************/ #define vla_group(groupname) size_t groupname##__next = 0 #define vla_group_size(groupname) groupname##__next @@ -45,8 +47,26 @@ struct usb_ep; struct usb_request; -struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len); - -#endif /* __U_F_H__ */ +/** + * alloc_ep_req - returns a usb_request allocated by the gadget driver and + * allocates the request's buffer. + * + * @ep: the endpoint to allocate a usb_request + * @len: usb_requests's buffer suggested size + * @default_len: used if @len is not provided, ie, is 0 + * + * In case @ep direction is OUT, the @len will be aligned to ep's + * wMaxPacketSize. In order to avoid memory leaks or drops, *always* use + * usb_requests's length (req->length) to refer to the allocated buffer size. + * Requests allocated via alloc_ep_req() *must* be freed by free_ep_req(). + */ +struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len); +/* Frees a usb_request previously allocated by alloc_ep_req() */ +static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req) +{ + kfree(req->buf); + usb_ep_free_request(ep, req); +} +#endif /* __U_F_H__ */ diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index ccb9c213cc9f..e9bd8d4abca0 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -475,7 +475,7 @@ static int bdc_probe(struct platform_device *pdev) bdc->dev = dev; dev_dbg(bdc->dev, "bdc->regs: %p irq=%d\n", bdc->regs, bdc->irq); - temp = bdc_readl(bdc->regs, BDC_BDCSC); + temp = bdc_readl(bdc->regs, BDC_BDCCAP1); if ((temp & BDC_P64) && !dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) { dev_dbg(bdc->dev, "Using 64-bit address\n"); diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c index 02968842b359..708e36f530d8 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_pci.c +++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c @@ -82,6 +82,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) if (ret) { dev_err(&pci->dev, "couldn't add resources to bdc device\n"); + platform_device_put(bdc); return ret; } diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 8080a11947b7..eb876ed96861 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -2105,16 +2105,13 @@ static int dummy_hub_control( } break; case USB_PORT_FEAT_POWER: - if (hcd->speed == HCD_USB3) { - if (dum_hcd->port_status & USB_PORT_STAT_POWER) - dev_dbg(dummy_dev(dum_hcd), - "power-off\n"); - } else - if (dum_hcd->port_status & - USB_SS_PORT_STAT_POWER) - dev_dbg(dummy_dev(dum_hcd), - 
"power-off\n"); - /* FALLS THROUGH */ + dev_dbg(dummy_dev(dum_hcd), "power-off\n"); + if (hcd->speed == HCD_USB3) + dum_hcd->port_status &= ~USB_SS_PORT_STAT_POWER; + else + dum_hcd->port_status &= ~USB_PORT_STAT_POWER; + set_link_state(dum_hcd); + break; default: dum_hcd->port_status &= ~(1 << wValue); set_link_state(dum_hcd); @@ -2285,14 +2282,13 @@ static int dummy_hub_control( if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) != 0) { dum_hcd->port_status |= (1 << wValue); - set_link_state(dum_hcd); } } else if ((dum_hcd->port_status & USB_PORT_STAT_POWER) != 0) { dum_hcd->port_status |= (1 << wValue); - set_link_state(dum_hcd); } + set_link_state(dum_hcd); } break; case GetPortErrorCount: diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 408c8eca2bbe..61fb325e4267 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -437,7 +437,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match); static struct platform_driver usb_xhci_driver = { .probe = xhci_plat_probe, .remove = xhci_plat_remove, - .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "xhci-hcd", .pm = DEV_PM_OPS, diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c index 060d78d53118..d0c7f4949f6f 100644 --- a/drivers/usb/mon/mon_text.c +++ b/drivers/usb/mon/mon_text.c @@ -82,6 +82,8 @@ struct mon_reader_text { wait_queue_head_t wait; int printf_size; + size_t printf_offset; + size_t printf_togo; char *printf_buf; struct mutex printf_lock; @@ -373,73 +375,103 @@ err_alloc: return rc; } -/* - * For simplicity, we read one record in one system call and throw out - * what does not fit. This means that the following does not work: - * dd if=/dbg/usbmon/0t bs=10 - * Also, we do not allow seeks and do not bother advancing the offset. - */ +static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp, + char __user * const buf, const size_t nbytes) +{ + const size_t togo = min(nbytes, rp->printf_togo); + + if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo)) + return -EFAULT; + rp->printf_togo -= togo; + rp->printf_offset += togo; + return togo; +} + +/* ppos is not advanced since the llseek operation is not permitted. 
*/ static ssize_t mon_text_read_t(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - if (IS_ERR(ep = mon_text_read_wait(rp, file))) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - - mon_text_read_head_t(rp, &ptr, ep); - mon_text_read_statset(rp, &ptr, ep); - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_t(rp, &ptr, ep); + mon_text_read_statset(rp, &ptr, ep); + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); + } + + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } +/* ppos is not advanced since the llseek operation is not permitted. */ static ssize_t mon_text_read_u(struct file *file, char __user *buf, - size_t nbytes, loff_t *ppos) + size_t nbytes, loff_t *ppos) { struct mon_reader_text *rp = file->private_data; struct mon_event_text *ep; struct mon_text_ptr ptr; + ssize_t ret; - if (IS_ERR(ep = mon_text_read_wait(rp, file))) - return PTR_ERR(ep); mutex_lock(&rp->printf_lock); - ptr.cnt = 0; - ptr.pbuf = rp->printf_buf; - ptr.limit = rp->printf_size; - mon_text_read_head_u(rp, &ptr, ep); - if (ep->type == 'E') { - mon_text_read_statset(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { - mon_text_read_isostat(rp, &ptr, ep); - mon_text_read_isodesc(rp, &ptr, ep); - } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { - mon_text_read_intstat(rp, &ptr, ep); - } else { - mon_text_read_statset(rp, &ptr, ep); + if (rp->printf_togo == 0) { + + ep = mon_text_read_wait(rp, file); + if (IS_ERR(ep)) { + mutex_unlock(&rp->printf_lock); + return PTR_ERR(ep); + } + ptr.cnt = 0; + ptr.pbuf = rp->printf_buf; + ptr.limit = rp->printf_size; + + mon_text_read_head_u(rp, &ptr, ep); + if (ep->type == 'E') { + mon_text_read_statset(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { + mon_text_read_isostat(rp, &ptr, ep); + mon_text_read_isodesc(rp, &ptr, ep); + } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { + mon_text_read_intstat(rp, &ptr, ep); + } else { + mon_text_read_statset(rp, &ptr, ep); + } + ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, + " %d", ep->length); + mon_text_read_data(rp, &ptr, ep); + + rp->printf_togo = ptr.cnt; + rp->printf_offset = 0; + + kmem_cache_free(rp->e_slab, ep); } - ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, - " %d", ep->length); - mon_text_read_data(rp, &ptr, ep); - if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) - ptr.cnt = -EFAULT; + ret = mon_text_copy_to_user(rp, buf, nbytes); mutex_unlock(&rp->printf_lock); - kmem_cache_free(rp->e_slab, ep); - return ptr.cnt; + return ret; } static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c 
b/drivers/usb/phy/phy-msm-ssusb-qmp.c index 2bc3c6fa417a..a76a6577ee98 100644 --- a/drivers/usb/phy/phy-msm-ssusb-qmp.c +++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c @@ -473,10 +473,7 @@ static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend) } if (suspend) { - if (!phy->cable_connected) - writel_relaxed(0x00, - phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); - else + if (phy->cable_connected) msm_ssusb_qmp_enable_autonomous(phy, 1); /* Make sure above write completed with PHY */ @@ -540,6 +537,10 @@ static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy, struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp, phy); + writel_relaxed(0x00, + phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + readl_relaxed(phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]); + dev_dbg(uphy->dev, "QMP phy disconnect notification\n"); dev_dbg(uphy->dev, " cable_connected=%d\n", phy->cable_connected); phy->cable_connected = false; diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index a4ab4fdf5ba3..64a4427678b0 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -151,6 +151,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x155A, 0x1006) }, /* ELDAT Easywave RX09 */ { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ { USB_DEVICE(0x166A, 0x0301) }, /* Clipsal 5800PC C-Bus Wireless PC Interface */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 64fe9dc25ed4..a224c7a3ce09 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -773,6 +773,7 @@ static const struct usb_device_id id_table_combined[] = { .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, + { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, @@ -935,6 +936,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) }, { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_FHE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 543d2801632b..76a10b222ff9 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -922,6 +922,9 @@ /* * RT Systems programming cables for various ham radios */ +/* This device uses the VID of FTDI */ +#define RTSYSTEMS_USB_VX8_PID 0x9e50 /* USB-VX8 USB to 7 pin modular plug for Yaesu VX-8 radio */ + #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ #define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ #define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ @@ -1441,6 +1444,12 @@ #define FTDI_CINTERION_MC55I_PID 0xA951 /* + * Product: FirmwareHubEmulator + * Manufacturer: Harman Becker Automotive Systems + */ 
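The cp210x.c and ftdi_sio.c hunks above only teach existing drivers about more hardware: a VID/PID pair added to the driver's usb_device_id table (plus, for ftdi_sio, a matching #define such as the RTSYSTEMS_USB_VX8_PID and FTDI_FHE_PID entries in ftdi_sio_ids.h) is all a new dongle needs. A minimal sketch of such a match table, with made-up IDs:

#include <linux/module.h>
#include <linux/usb.h>

#define SAMPLE_VID	0x1234		/* illustrative vendor ID */
#define SAMPLE_PID	0x5678		/* illustrative product ID */

static const struct usb_device_id sample_id_table[] = {
	{ USB_DEVICE(SAMPLE_VID, SAMPLE_PID) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, sample_id_table);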
+#define FTDI_FHE_PID 0xA9A0 + +/* * Product: Comet Caller ID decoder * Manufacturer: Crucible Technologies */ diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c index 091e8ec7a6c0..962bb6376b0c 100644 --- a/drivers/usb/storage/ene_ub6250.c +++ b/drivers/usb/storage/ene_ub6250.c @@ -1953,6 +1953,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag) bcb->CDB[0] = 0xEF; result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0); + if (us->srb != NULL) + scsi_set_resid(us->srb, 0); info->BIN_FLAG = flag; kfree(buf); @@ -2306,21 +2308,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb) static int ene_transport(struct scsi_cmnd *srb, struct us_data *us) { - int result = 0; + int result = USB_STOR_XFER_GOOD; struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra); /*US_DEBUG(usb_stor_show_command(us, srb)); */ scsi_set_resid(srb, 0); - if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) { + if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) result = ene_init(us); - } else { + if (result == USB_STOR_XFER_GOOD) { + result = USB_STOR_TRANSPORT_ERROR; if (info->SD_Status.Ready) result = sd_scsi_irp(us, srb); if (info->MS_Status.Ready) result = ms_scsi_irp(us, srb); } - return 0; + return result; } static struct scsi_host_template ene_ub6250_host_template; diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index de7214ae4fed..6cac8f26b97a 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -1052,7 +1052,7 @@ static int uas_post_reset(struct usb_interface *intf) return 0; err = uas_configure_endpoints(devinfo); - if (err && err != ENODEV) + if (err && err != -ENODEV) shost_printk(KERN_ERR, shost, "%s: alloc streams error %d after reset", __func__, err); diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index c10eceb76c39..1a34d2a89de6 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -2142,6 +2142,13 @@ UNUSUAL_DEV( 0x22b8, 0x3010, 0x0001, 0x0001, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_CAPACITY | US_FL_IGNORE_RESIDUE ), +/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */ +UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117, + "JMicron", + "USB to ATA/ATAPI Bridge", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_BROKEN_FUA ), + /* Reported-by George Cherian <george.cherian@cavium.com> */ UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, "JMicron", diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index ad2146a9ab2d..675819a1af37 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -173,8 +173,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file) if (mask) vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask); if (mask & POLLERR) { - if (poll->wqh) - remove_wait_queue(poll->wqh, &poll->wait); + vhost_poll_stop(poll); ret = -EINVAL; } diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 517f565b65d7..598ec7545e84 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c @@ -409,7 +409,10 @@ static const char *vgacon_startup(void) vga_video_port_val = VGA_CRT_DM; if ((screen_info.orig_video_ega_bx & 0xff) != 0x10) { static struct resource ega_console_resource = - { .name = "ega", .start = 0x3B0, .end = 0x3BF }; + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_EGAM; vga_vram_size = 0x8000; display_desc = "EGA+"; @@ -417,9 +420,15 @@ static const 
char *vgacon_startup(void) &ega_console_resource); } else { static struct resource mda1_console_resource = - { .name = "mda", .start = 0x3B0, .end = 0x3BB }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3B0, + .end = 0x3BB }; static struct resource mda2_console_resource = - { .name = "mda", .start = 0x3BF, .end = 0x3BF }; + { .name = "mda", + .flags = IORESOURCE_IO, + .start = 0x3BF, + .end = 0x3BF }; vga_video_type = VIDEO_TYPE_MDA; vga_vram_size = 0x2000; display_desc = "*MDA"; @@ -441,15 +450,21 @@ static const char *vgacon_startup(void) vga_vram_size = 0x8000; if (!screen_info.orig_video_isVGA) { - static struct resource ega_console_resource - = { .name = "ega", .start = 0x3C0, .end = 0x3DF }; + static struct resource ega_console_resource = + { .name = "ega", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_EGAC; display_desc = "EGA"; request_resource(&ioport_resource, &ega_console_resource); } else { - static struct resource vga_console_resource - = { .name = "vga+", .start = 0x3C0, .end = 0x3DF }; + static struct resource vga_console_resource = + { .name = "vga+", + .flags = IORESOURCE_IO, + .start = 0x3C0, + .end = 0x3DF }; vga_video_type = VIDEO_TYPE_VGAC; display_desc = "VGA+"; request_resource(&ioport_resource, @@ -493,7 +508,10 @@ static const char *vgacon_startup(void) } } else { static struct resource cga_console_resource = - { .name = "cga", .start = 0x3D4, .end = 0x3D5 }; + { .name = "cga", + .flags = IORESOURCE_IO, + .start = 0x3D4, + .end = 0x3D5 }; vga_video_type = VIDEO_TYPE_CGA; vga_vram_size = 0x2000; display_desc = "*CGA"; diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 9362424c2340..924b3d6c3e9b 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -759,8 +759,8 @@ static int clcdfb_of_dma_setup(struct clcd_fb *fb) if (err) return err; - framesize = fb->panel->mode.xres * fb->panel->mode.yres * - fb->panel->bpp / 8; + framesize = PAGE_ALIGN(fb->panel->mode.xres * fb->panel->mode.yres * + fb->panel->bpp / 8); fb->fb.screen_base = dma_alloc_coherent(&fb->dev->dev, framesize, &dma, GFP_KERNEL); if (!fb->fb.screen_base) diff --git a/drivers/video/fbdev/goldfishfb.c b/drivers/video/fbdev/goldfishfb.c index 1e56b50e4082..88adb2970b44 100644 --- a/drivers/video/fbdev/goldfishfb.c +++ b/drivers/video/fbdev/goldfishfb.c @@ -38,11 +38,58 @@ enum { FB_SET_BLANK = 0x18, FB_GET_PHYS_WIDTH = 0x1c, FB_GET_PHYS_HEIGHT = 0x20, + FB_GET_FORMAT = 0x24, FB_INT_VSYNC = 1U << 0, FB_INT_BASE_UPDATE_DONE = 1U << 1 }; +/* These values *must* match the platform definitions found under + * <system/graphics.h> + */ +enum { + HAL_PIXEL_FORMAT_RGBA_8888 = 1, + HAL_PIXEL_FORMAT_RGBX_8888 = 2, + HAL_PIXEL_FORMAT_RGB_888 = 3, + HAL_PIXEL_FORMAT_RGB_565 = 4, + HAL_PIXEL_FORMAT_BGRA_8888 = 5, +}; + +struct framebuffer_config { + u8 bytes_per_pixel; + u8 red_offset; + u8 red_length; + u8 green_offset; + u8 green_length; + u8 blue_offset; + u8 blue_length; + u8 transp_offset; + u8 transp_length; +}; + +enum { + CHAR_BIT = 8 +}; + +static const struct framebuffer_config *get_fb_config_from_format(int format) +{ + static const struct framebuffer_config fb_configs[] = { + { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Invalid, assume RGB_565 */ + { 4, 0, 8, 8, 8, 16, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_RGBA_8888 */ + { 4, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGBX_8888 */ + { 3, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGB_888 */ + { 2, 11, 5, 5, 6, 0, 5, 0, 0 }, /* 
HAL_PIXEL_FORMAT_RGB_565 */ + { 4, 16, 8, 8, 8, 0, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_BGRA_8888 */ + }; + + if (format > 0 && + format < sizeof(fb_configs) / sizeof(struct framebuffer_config)) { + return &fb_configs[format]; + } + + return &fb_configs[HAL_PIXEL_FORMAT_RGB_565]; /* legacy default */ +} + struct goldfish_fb { void __iomem *reg_base; int irq; @@ -125,8 +172,10 @@ static int goldfish_fb_check_var(struct fb_var_screeninfo *var, static int goldfish_fb_set_par(struct fb_info *info) { struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb); + if (fb->rotation != fb->fb.var.rotate) { - info->fix.line_length = info->var.xres * 2; + info->fix.line_length = info->var.xres * + (fb->fb.var.bits_per_pixel / CHAR_BIT); fb->rotation = fb->fb.var.rotate; writel(fb->rotation, fb->reg_base + FB_SET_ROTATION); } @@ -143,19 +192,24 @@ static int goldfish_fb_pan_display(struct fb_var_screeninfo *var, spin_lock_irqsave(&fb->lock, irq_flags); base_update_count = fb->base_update_count; - writel(fb->fb.fix.smem_start + fb->fb.var.xres * 2 * var->yoffset, - fb->reg_base + FB_SET_BASE); + writel(fb->fb.fix.smem_start + + fb->fb.var.xres * + (fb->fb.var.bits_per_pixel / CHAR_BIT) * + var->yoffset, + fb->reg_base + FB_SET_BASE); spin_unlock_irqrestore(&fb->lock, irq_flags); wait_event_timeout(fb->wait, fb->base_update_count != base_update_count, HZ / 15); if (fb->base_update_count == base_update_count) - pr_err("goldfish_fb_pan_display: timeout waiting for base update\n"); + pr_err("goldfish_fb_pan_display: timeout waiting for " + "base update\n"); return 0; } static int goldfish_fb_blank(int blank, struct fb_info *info) { struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb); + switch (blank) { case FB_BLANK_NORMAL: writel(1, fb->reg_base + FB_SET_BLANK); @@ -186,8 +240,10 @@ static int goldfish_fb_probe(struct platform_device *pdev) struct resource *r; struct goldfish_fb *fb; size_t framesize; - u32 width, height; + u32 width, height, format; + int bytes_per_pixel; dma_addr_t fbpaddr; + const struct framebuffer_config *fb_config; fb = kzalloc(sizeof(*fb), GFP_KERNEL); if (fb == NULL) { @@ -217,13 +273,20 @@ static int goldfish_fb_probe(struct platform_device *pdev) width = readl(fb->reg_base + FB_GET_WIDTH); height = readl(fb->reg_base + FB_GET_HEIGHT); + format = readl(fb->reg_base + FB_GET_FORMAT); + fb_config = get_fb_config_from_format(format); + if (!fb_config) { + ret = -EINVAL; + goto err_no_irq; + } + bytes_per_pixel = fb_config->bytes_per_pixel; fb->fb.fbops = &goldfish_fb_ops; fb->fb.flags = FBINFO_FLAG_DEFAULT; fb->fb.pseudo_palette = fb->cmap; fb->fb.fix.type = FB_TYPE_PACKED_PIXELS; fb->fb.fix.visual = FB_VISUAL_TRUECOLOR; - fb->fb.fix.line_length = width * 2; + fb->fb.fix.line_length = width * bytes_per_pixel; fb->fb.fix.accel = FB_ACCEL_NONE; fb->fb.fix.ypanstep = 1; @@ -231,20 +294,22 @@ static int goldfish_fb_probe(struct platform_device *pdev) fb->fb.var.yres = height; fb->fb.var.xres_virtual = width; fb->fb.var.yres_virtual = height * 2; - fb->fb.var.bits_per_pixel = 16; + fb->fb.var.bits_per_pixel = bytes_per_pixel * CHAR_BIT; fb->fb.var.activate = FB_ACTIVATE_NOW; fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT); fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH); fb->fb.var.pixclock = 0; - fb->fb.var.red.offset = 11; - fb->fb.var.red.length = 5; - fb->fb.var.green.offset = 5; - fb->fb.var.green.length = 6; - fb->fb.var.blue.offset = 0; - fb->fb.var.blue.length = 5; + fb->fb.var.red.offset = fb_config->red_offset; + fb->fb.var.red.length 
= fb_config->red_length; + fb->fb.var.green.offset = fb_config->green_offset; + fb->fb.var.green.length = fb_config->green_length; + fb->fb.var.blue.offset = fb_config->blue_offset; + fb->fb.var.blue.length = fb_config->blue_length; + fb->fb.var.transp.offset = fb_config->transp_offset; + fb->fb.var.transp.length = fb_config->transp_length; - framesize = width * height * 2 * 2; + framesize = width * height * 2 * bytes_per_pixel; fb->fb.screen_base = (char __force __iomem *)dma_alloc_coherent( &pdev->dev, framesize, &fbpaddr, GFP_KERNEL); @@ -295,7 +360,8 @@ static int goldfish_fb_remove(struct platform_device *pdev) size_t framesize; struct goldfish_fb *fb = platform_get_drvdata(pdev); - framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * 2; + framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * + (fb->fb.var.bits_per_pixel / CHAR_BIT); unregister_framebuffer(&fb->fb); free_irq(fb->irq, fb); diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c index d357a616b05e..dff8b63eea64 100644 --- a/drivers/video/fbdev/msm/mdp3_ctrl.c +++ b/drivers/video/fbdev/msm/mdp3_ctrl.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved. +/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -1563,9 +1563,10 @@ static int mdp3_get_metadata(struct msm_fb_data_type *mfd, } break; case metadata_op_get_ion_fd: - if (mfd->fb_ion_handle) { + if (mfd->fb_ion_handle && mfd->fb_ion_client) { metadata->data.fbmem_ionfd = - dma_buf_fd(mfd->fbmem_buf, 0); + ion_share_dma_buf_fd(mfd->fb_ion_client, + mfd->fb_ion_handle); if (metadata->data.fbmem_ionfd < 0) pr_err("fd allocation failed. 
fd = %d\n", metadata->data.fbmem_ionfd); diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index d0a4e2f79a57..d215faacce04 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1600,6 +1600,7 @@ static int sm501fb_start(struct sm501fb_info *info, info->fbmem = ioremap(res->start, resource_size(res)); if (info->fbmem == NULL) { dev_err(dev, "cannot remap framebuffer\n"); + ret = -ENXIO; goto err_mem_res; } diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c index 53326badfb61..2add8def83be 100644 --- a/drivers/video/fbdev/udlfb.c +++ b/drivers/video/fbdev/udlfb.c @@ -1487,15 +1487,25 @@ static struct device_attribute fb_device_attrs[] = { static int dlfb_select_std_channel(struct dlfb_data *dev) { int ret; - u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7, + void *buf; + static const u8 set_def_chn[] = { + 0x57, 0xCD, 0xDC, 0xA7, 0x1C, 0x88, 0x5E, 0x15, 0x60, 0xFE, 0xC6, 0x97, 0x16, 0x3D, 0x47, 0xF2 }; + buf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL); + + if (!buf) + return -ENOMEM; + ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), NR_USB_REQUEST_CHANNEL, (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0, - set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + buf, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT); + + kfree(buf); + return ret; } diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c index 728cb6b23c42..7d8dfc7f1269 100644 --- a/drivers/video/fbdev/vfb.c +++ b/drivers/video/fbdev/vfb.c @@ -298,8 +298,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var, */ static int vfb_set_par(struct fb_info *info) { + switch (info->var.bits_per_pixel) { + case 1: + info->fix.visual = FB_VISUAL_MONO01; + break; + case 8: + info->fix.visual = FB_VISUAL_PSEUDOCOLOR; + break; + case 16: + case 24: + case 32: + info->fix.visual = FB_VISUAL_TRUECOLOR; + break; + } + info->fix.line_length = get_line_length(info->var.xres_virtual, info->var.bits_per_pixel); + return 0; } @@ -540,6 +555,8 @@ static int vfb_probe(struct platform_device *dev) goto err2; platform_set_drvdata(dev, info); + vfb_set_par(info); + fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n", videomemorysize >> 10); return 0; diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c index 1cf907ecded4..111a0ab6280a 100644 --- a/drivers/video/hdmi.c +++ b/drivers/video/hdmi.c @@ -321,6 +321,17 @@ int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame) } EXPORT_SYMBOL(hdmi_vendor_infoframe_init); +static int hdmi_vendor_infoframe_length(const struct hdmi_vendor_infoframe *frame) +{ + /* for side by side (half) we also need to provide 3D_Ext_Data */ + if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) + return 6; + else if (frame->vic != 0 || frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) + return 5; + else + return 4; +} + /** * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer * @frame: HDMI infoframe @@ -341,19 +352,11 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, u8 *ptr = buffer; size_t length; - /* empty info frame */ - if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID) - return -EINVAL; - /* only one of those can be supplied */ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) return -EINVAL; - /* for side by side (half) we also need to provide 3D_Ext_Data */ - if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) - frame->length = 6; - else - frame->length = 5; + 
frame->length = hdmi_vendor_infoframe_length(frame); length = HDMI_INFOFRAME_HEADER_SIZE + frame->length; @@ -372,14 +375,16 @@ ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame, ptr[5] = 0x0c; ptr[6] = 0x00; - if (frame->vic) { - ptr[7] = 0x1 << 5; /* video format */ - ptr[8] = frame->vic; - } else { + if (frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID) { ptr[7] = 0x2 << 5; /* video format */ ptr[8] = (frame->s3d_struct & 0xf) << 4; if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) ptr[9] = (frame->s3d_ext_data & 0xf) << 4; + } else if (frame->vic) { + ptr[7] = 0x1 << 5; /* video format */ + ptr[8] = frame->vic; + } else { + ptr[7] = 0x0 << 5; /* video format */ } hdmi_infoframe_set_checksum(buffer, length); @@ -1165,7 +1170,7 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame, if (ptr[0] != HDMI_INFOFRAME_TYPE_VENDOR || ptr[1] != 1 || - (ptr[2] != 5 && ptr[2] != 6)) + (ptr[2] != 4 && ptr[2] != 5 && ptr[2] != 6)) return -EINVAL; length = ptr[2]; @@ -1193,16 +1198,22 @@ hdmi_vendor_any_infoframe_unpack(union hdmi_vendor_any_infoframe *frame, hvf->length = length; - if (hdmi_video_format == 0x1) { - hvf->vic = ptr[4]; - } else if (hdmi_video_format == 0x2) { + if (hdmi_video_format == 0x2) { + if (length != 5 && length != 6) + return -EINVAL; hvf->s3d_struct = ptr[4] >> 4; if (hvf->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF) { - if (length == 6) - hvf->s3d_ext_data = ptr[5] >> 4; - else + if (length != 6) return -EINVAL; + hvf->s3d_ext_data = ptr[5] >> 4; } + } else if (hdmi_video_format == 0x1) { + if (length != 5) + return -EINVAL; + hvf->vic = ptr[4]; + } else { + if (length != 4) + return -EINVAL; } return 0; diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 286369d4f0f5..be99112fad00 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -51,6 +51,7 @@ static char expect_release; static unsigned long hpwdt_is_open; static void __iomem *pci_mem_addr; /* the PCI-memory address */ +static unsigned long __iomem *hpwdt_nmistat; static unsigned long __iomem *hpwdt_timer_reg; static unsigned long __iomem *hpwdt_timer_con; @@ -474,6 +475,11 @@ static int hpwdt_time_left(void) } #ifdef CONFIG_HPWDT_NMI_DECODING +static int hpwdt_my_nmi(void) +{ + return ioread8(hpwdt_nmistat) & 0x6; +} + /* * NMI Handler */ @@ -485,6 +491,9 @@ static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) if (!hpwdt_nmi_decoding) goto out; + if ((ulReason == NMI_UNKNOWN) && !hpwdt_my_nmi()) + return NMI_DONE; + spin_lock_irqsave(&rom_lock, rom_pl); if (!die_nmi_called && !is_icru && !is_uefi) asminline_call(&cmn_regs, cru_rom_addr); @@ -700,7 +709,7 @@ static void dmi_find_icru(const struct dmi_header *dm, void *dummy) smbios_proliant_ptr = (struct smbios_proliant_info *) dm; if (smbios_proliant_ptr->misc_features & 0x01) is_icru = 1; - if (smbios_proliant_ptr->misc_features & 0x408) + if (smbios_proliant_ptr->misc_features & 0x1400) is_uefi = 1; } } @@ -840,6 +849,7 @@ static int hpwdt_init_one(struct pci_dev *dev, retval = -ENOMEM; goto error_pci_iomap; } + hpwdt_nmistat = pci_mem_addr + 0x6e; hpwdt_timer_reg = pci_mem_addr + 0x70; hpwdt_timer_con = pci_mem_addr + 0x72; |
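The hpwdt.c change above stops the watchdog from claiming every unknown NMI: it reads the iLO status byte it now maps at offset 0x6e and, for NMI_UNKNOWN, returns NMI_DONE when the 0x6 bits are clear so unrelated NMIs keep propagating. A sketch of that claim check, assuming the x86 <asm/nmi.h> constants; only the mask and the return convention follow the hunk, the sample_* names are made up:

#include <linux/io.h>
#include <linux/nmi.h>
#include <linux/ptrace.h>
#include <asm/nmi.h>

static void __iomem *sample_nmistat;	/* mapped device status byte */

static int sample_claims_nmi(void)
{
	return ioread8(sample_nmistat) & 0x6;
}

static int sample_nmi_handler(unsigned int reason, struct pt_regs *regs)
{
	/* Unknown-source NMIs are only swallowed if the device raised them. */
	if (reason == NMI_UNKNOWN && !sample_claims_nmi())
		return NMI_DONE;

	/* ... device-specific pre-timeout handling would go here ... */
	return NMI_HANDLED;
}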

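The mon_text.c rewrite further up keeps whatever part of the last formatted record did not fit the caller's buffer (the new printf_offset/printf_togo fields) and hands it out on the next read(), so small reads such as the dd bs=10 case named in the old comment no longer drop data. A sketch of just the carry-over step, with an illustrative struct standing in for mon_reader_text:

#include <linux/kernel.h>
#include <linux/uaccess.h>

struct text_reader {
	char *buf;	/* one fully formatted record */
	size_t offset;	/* how much of it was already returned */
	size_t togo;	/* how much is still pending */
};

/* Hand out the pending tail; the caller formats a new record only when togo == 0. */
static ssize_t copy_leftover(struct text_reader *r, char __user *ubuf, size_t nbytes)
{
	size_t n = min(nbytes, r->togo);

	if (copy_to_user(ubuf, r->buf + r->offset, n))
		return -EFAULT;
	r->offset += n;
	r->togo -= n;
	return n;
}

As in the patch, this only works if the whole sequence, formatting included, runs under one mutex (printf_lock there); otherwise two readers could interleave their offsets.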