Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/acpi/acpi_lpss.c                               |   2
 -rw-r--r--  drivers/base/cpu.c                                     |   8
 -rw-r--r--  drivers/char/tpm/tpm-dev.c                             |  43
 -rw-r--r--  drivers/i2c/busses/i2c-imx.c                           |   3
 -rw-r--r--  drivers/infiniband/core/umem.c                         |  11
 -rw-r--r--  drivers/infiniband/hw/mlx4/mr.c                        |  50
 -rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_stats.c            |   2
 -rw-r--r--  drivers/iommu/arm-smmu.c                               | 155
 -rw-r--r--  drivers/media/platform/msm/ais/sensor/csid/msm_csid.c  |  12
 -rw-r--r--  drivers/net/xen-netfront.c                             |   8
 -rw-r--r--  drivers/pci/pci-acpi.c                                 |   2
 -rw-r--r--  drivers/scsi/qla2xxx/qla_init.c                        |   7
 -rw-r--r--  drivers/scsi/qla2xxx/qla_os.c                          |   5
 -rw-r--r--  drivers/scsi/sr.c                                      |  29
 -rw-r--r--  drivers/soc/qcom/hab/khab_test.c                       |   6
 -rw-r--r--  drivers/soc/qcom/icnss.c                               |   1
 -rw-r--r--  drivers/usb/phy/class-dual-role.c                      |  21
17 files changed, 283 insertions(+), 82 deletions(-)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f9e0d09f7c66..8a0f77fb5181 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -154,10 +154,12 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
 
 static const struct lpss_device_desc byt_pwm_dev_desc = {
 	.flags = LPSS_SAVE_CTX,
+	.prv_offset = 0x800,
 };
 
 static const struct lpss_device_desc bsw_pwm_dev_desc = {
 	.flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
+	.prv_offset = 0x800,
 };
 
 static const struct lpss_device_desc byt_uart_dev_desc = {
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 21044b1f29bc..fb2a1e605c86 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -699,16 +699,24 @@ ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
 	return sprintf(buf, "Not affected\n");
 }
 
+ssize_t __weak cpu_show_l1tf(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "Not affected\n");
+}
+
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
 static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
+static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
 	&dev_attr_spectre_v1.attr,
 	&dev_attr_spectre_v2.attr,
 	&dev_attr_spec_store_bypass.attr,
+	&dev_attr_l1tf.attr,
 	NULL
 };
 
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 912ad30be585..4719aa781bf2 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -25,7 +25,7 @@ struct file_priv {
 	struct tpm_chip *chip;
 
 	/* Data passed to and from the tpm via the read/write calls */
-	atomic_t data_pending;
+	size_t data_pending;
 	struct mutex buffer_mutex;
 
 	struct timer_list user_read_timer;      /* user needs to claim result */
@@ -46,7 +46,7 @@ static void timeout_work(struct work_struct *work)
 	struct file_priv *priv = container_of(work, struct file_priv, work);
 
 	mutex_lock(&priv->buffer_mutex);
-	atomic_set(&priv->data_pending, 0);
+	priv->data_pending = 0;
 	memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
 	mutex_unlock(&priv->buffer_mutex);
 }
@@ -72,7 +72,6 @@ static int tpm_open(struct inode *inode, struct file *file)
 	}
 
 	priv->chip = chip;
-	atomic_set(&priv->data_pending, 0);
 	mutex_init(&priv->buffer_mutex);
 	setup_timer(&priv->user_read_timer, user_reader_timeout,
 			(unsigned long)priv);
@@ -86,28 +85,24 @@ static ssize_t tpm_read(struct file *file, char __user *buf,
 			size_t size, loff_t *off)
 {
 	struct file_priv *priv = file->private_data;
-	ssize_t ret_size;
+	ssize_t ret_size = 0;
 	int rc;
 
 	del_singleshot_timer_sync(&priv->user_read_timer);
 	flush_work(&priv->work);
-	ret_size = atomic_read(&priv->data_pending);
-	if (ret_size > 0) {	/* relay data */
-		ssize_t orig_ret_size = ret_size;
-		if (size < ret_size)
-			ret_size = size;
+	mutex_lock(&priv->buffer_mutex);
 
-		mutex_lock(&priv->buffer_mutex);
+	if (priv->data_pending) {
+		ret_size = min_t(ssize_t, size, priv->data_pending);
 		rc = copy_to_user(buf, priv->data_buffer, ret_size);
-		memset(priv->data_buffer, 0, orig_ret_size);
+		memset(priv->data_buffer, 0, priv->data_pending);
 		if (rc)
 			ret_size = -EFAULT;
 
-		mutex_unlock(&priv->buffer_mutex);
+		priv->data_pending = 0;
 	}
 
-	atomic_set(&priv->data_pending, 0);
-
+	mutex_unlock(&priv->buffer_mutex);
 	return ret_size;
 }
 
@@ -118,18 +113,20 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
 	size_t in_size = size;
 	ssize_t out_size;
 
-	/* cannot perform a write until the read has cleared
-	   either via tpm_read or a user_read_timer timeout.
-	   This also prevents splitted buffered writes from blocking here.
-	*/
-	if (atomic_read(&priv->data_pending) != 0)
-		return -EBUSY;
-
 	if (in_size > TPM_BUFSIZE)
 		return -E2BIG;
 
 	mutex_lock(&priv->buffer_mutex);
 
+	/* Cannot perform a write until the read has cleared either via
+	 * tpm_read or a user_read_timer timeout. This also prevents split
+	 * buffered writes from blocking here.
+	 */
+	if (priv->data_pending != 0) {
+		mutex_unlock(&priv->buffer_mutex);
+		return -EBUSY;
+	}
+
 	if (copy_from_user
 	    (priv->data_buffer, (void __user *) buf, in_size)) {
 		mutex_unlock(&priv->buffer_mutex);
@@ -153,7 +150,7 @@ static ssize_t tpm_write(struct file *file, const char __user *buf,
 		return out_size;
 	}
 
-	atomic_set(&priv->data_pending, out_size);
+	priv->data_pending = out_size;
 	mutex_unlock(&priv->buffer_mutex);
 
 	/* Set a timeout by which the reader must come claim the result */
@@ -172,7 +169,7 @@ static int tpm_release(struct inode *inode, struct file *file)
 	del_singleshot_timer_sync(&priv->user_read_timer);
 	flush_work(&priv->work);
 	file->private_data = NULL;
-	atomic_set(&priv->data_pending, 0);
+	priv->data_pending = 0;
 	clear_bit(0, &priv->chip->is_open);
 	kfree(priv);
 	return 0;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index d4d853680ae4..a4abf7dc9576 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -382,6 +382,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -631,7 +632,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -690,7 +690,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6790ebb366dd..98fd9a594841 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -122,16 +122,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
 	umem->address = addr;
 	umem->page_size = PAGE_SIZE;
 	umem->pid = get_task_pid(current, PIDTYPE_PID);
-	/*
-	 * We ask for writable memory if any of the following
-	 * access flags are set. "Local write" and "remote write"
-	 * obviously require write access. "Remote atomic" can do
-	 * things like fetch and add, which will modify memory, and
-	 * "MW bind" can change permissions by binding a window.
-	 */
-	umem->writable = !!(access &
-		(IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
-		 IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
+	umem->writable = ib_access_writable(access);
 
 	if (access & IB_ACCESS_ON_DEMAND) {
 		put_pid(umem->pid);
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ce87e9cc7eff..bf52e35dd506 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -130,6 +130,40 @@ out:
 	return err;
 }
 
+static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
+					u64 length, u64 virt_addr,
+					int access_flags)
+{
+	/*
+	 * Force registering the memory as writable if the underlying pages
+	 * are writable. This is so rereg can change the access permissions
+	 * from readable to writable without having to run through ib_umem_get
+	 * again
+	 */
+	if (!ib_access_writable(access_flags)) {
+		struct vm_area_struct *vma;
+
+		down_read(&current->mm->mmap_sem);
+		/*
+		 * FIXME: Ideally this would iterate over all the vmas that
+		 * cover the memory, but for now it requires a single vma to
+		 * entirely cover the MR to support RO mappings.
+		 */
+		vma = find_vma(current->mm, start);
+		if (vma && vma->vm_end >= start + length &&
+		    vma->vm_start <= start) {
+			if (vma->vm_flags & VM_WRITE)
+				access_flags |= IB_ACCESS_LOCAL_WRITE;
+		} else {
+			access_flags |= IB_ACCESS_LOCAL_WRITE;
+		}
+
+		up_read(&current->mm->mmap_sem);
+	}
+
+	return ib_umem_get(context, start, length, access_flags, 0);
+}
+
 struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int access_flags,
 				  struct ib_udata *udata)
@@ -144,10 +178,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	if (!mr)
 		return ERR_PTR(-ENOMEM);
 
-	/* Force registering the memory as writable. */
-	/* Used for memory re-registeration. HCA protects the access */
-	mr->umem = ib_umem_get(pd->uobject->context, start, length,
-			       access_flags | IB_ACCESS_LOCAL_WRITE, 0);
+	mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
+				    virt_addr, access_flags);
 	if (IS_ERR(mr->umem)) {
 		err = PTR_ERR(mr->umem);
 		goto err_free;
@@ -214,6 +246,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 	}
 
 	if (flags & IB_MR_REREG_ACCESS) {
+		if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+			return -EPERM;
+
 		err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
 					       convert_access(mr_access_flags));
@@ -227,10 +262,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
 		mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
 		ib_umem_release(mmr->umem);
-		mmr->umem = ib_umem_get(mr->uobject->context, start, length,
-					mr_access_flags |
-					IB_ACCESS_LOCAL_WRITE,
-					0);
+		mmr->umem =
+			mlx4_get_umem_mr(mr->uobject->context, start, length,
+					 virt_addr, mr_access_flags);
 		if (IS_ERR(mmr->umem)) {
 			err = PTR_ERR(mmr->umem);
 			/* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 748b63b86cbc..40242ead096f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -643,7 +643,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
 	struct ocrdma_stats *pstats = filp->private_data;
 	struct ocrdma_dev *dev = pstats->dev;
 
-	if (count > 32)
+	if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
 		goto err;
 
 	if (copy_from_user(tmp_str, buffer, count))
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 56f2980adc28..90306a0e2164 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -287,6 +287,22 @@ FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
 
 #define FSYNR0_WNR			(1 << 4)
 
+#define MAX_GLOBAL_REG_SAVE_ENTRIES (2 * ARM_SMMU_MAX_SMRS + 1)
+
+enum arm_smmu_save_ctx {
+	SAVE_ARM_SMMU_CB_SCTLR,
+	SAVE_ARM_SMMU_CB_ACTLR,
+	SAVE_ARM_SMMU_CB_TTBCR2,
+	SAVE_ARM_SMMU_CB_TTBR0,
+	SAVE_ARM_SMMU_CB_TTBR1,
+	SAVE_ARM_SMMU_CB_TTBCR,
+	SAVE_ARM_SMMU_CB_CONTEXTIDR,
+	SAVE_ARM_SMMU_CB_S1_MAIR0,
+	SAVE_ARM_SMMU_CB_S1_MAIR1,
+	SAVE_ARM_SMMU_GR1_CBA2R,
+	SAVE_ARM_SMMU_GR1_CBAR,
+	SAVE_ARM_SMMU_MAX_CNT,
+};
 
 static int force_stage;
 module_param_named(force_stage, force_stage, int, S_IRUGO);
@@ -407,6 +423,8 @@ struct arm_smmu_device {
 
 	enum tz_smmu_device_id		sec_id;
 	int				regulator_defer;
+	u64				regs[ARM_SMMU_MAX_CBS*(SAVE_ARM_SMMU_MAX_CNT)];
+	u64				reg_global[MAX_GLOBAL_REG_SAVE_ENTRIES];
 };
 
 struct arm_smmu_cfg {
@@ -4170,6 +4188,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 
 	idr_init(&smmu->asid_idr);
 
+	platform_set_drvdata(pdev, smmu);
+
 	err = register_regulator_notifier(smmu);
 	if (err)
 		goto out_free_irqs;
@@ -4295,10 +4315,145 @@ release_memory:
 	return -ENOMEM;
 }
 
+#if CONFIG_PM
+static int arm_smmu_pm_suspend(struct device *dev)
+{
+	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+	u64 *regs, *reg_global;
+	int j, k = 0;
+	u32 cb_count = 0;
+	void __iomem *base, *gr0_base, *gr1_base;
+
+	if (!smmu)
+		return -ENODEV;
+
+	if (!smmu->attach_count)
+		return 0;
+
+	if (arm_smmu_enable_clocks(smmu)) {
+		dev_err(smmu->dev, "failed to enable clocks for smmu");
+		return -EINVAL;
+	}
+
+	regs = &smmu->regs[0];
+	reg_global = &smmu->reg_global[0];
+	cb_count = smmu->num_context_banks;
+
+	gr0_base = ARM_SMMU_GR0(smmu);
+	gr1_base = ARM_SMMU_GR1(smmu);
+
+	for (j = 0; j < cb_count; j++) {
+		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, j);
+
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_SCTLR);
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_ACTLR);
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_TTBCR2);
+		regs[k++] = readq_relaxed(base + ARM_SMMU_CB_TTBR0);
+		regs[k++] = readq_relaxed(base + ARM_SMMU_CB_TTBR1);
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_TTBCR);
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_CONTEXTIDR);
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_S1_MAIR0);
+		regs[k++] = readl_relaxed(base + ARM_SMMU_CB_S1_MAIR1);
+		regs[k++] = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBA2R(j));
+		regs[k++] = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBAR(j));
+	}
+
+	for (j = 0, k = 0; j < smmu->num_mapping_groups; j++) {
+		reg_global[k++] = readl_relaxed(
+					gr0_base + ARM_SMMU_GR0_S2CR(j));
+		reg_global[k++] = readl_relaxed(
+					gr0_base + ARM_SMMU_GR0_SMR(j));
+	}
+	reg_global[k++] = readl_relaxed(ARM_SMMU_GR0_NS(smmu) +
+					ARM_SMMU_GR0_sCR0);
+
+	arm_smmu_disable_clocks(smmu);
+
+	return 0;
+}
+static int arm_smmu_pm_resume(struct device *dev)
+{
+	struct arm_smmu_device *smmu = dev_get_drvdata(dev);
+	u64 *regs, *reg_global;
+	int j, k = 0;
+	u32 cb_count = 0;
+	void __iomem *base, *gr0_base, *gr1_base;
+
+	if (!smmu)
+		return -ENODEV;
+
+	if (!smmu->attach_count)
+		return 0;
+
+	if (arm_smmu_enable_clocks(smmu)) {
+		dev_err(smmu->dev, "failed to enable clocks for smmu");
+		return -EINVAL;
+	}
+
+	regs = &smmu->regs[0];
+	reg_global = &smmu->reg_global[0];
+	cb_count = smmu->num_context_banks;
+
+	gr0_base = ARM_SMMU_GR0(smmu);
+	gr1_base = ARM_SMMU_GR1(smmu);
+
+	for (j = 0; j < cb_count; j++) {
+		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, j);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_SCTLR);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_ACTLR);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_TTBCR2);
+		writeq_relaxed(regs[k++], base + ARM_SMMU_CB_TTBR0);
+		writeq_relaxed(regs[k++], base + ARM_SMMU_CB_TTBR1);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_TTBCR);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_CONTEXTIDR);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_S1_MAIR0);
+		writel_relaxed(regs[k++], base + ARM_SMMU_CB_S1_MAIR1);
+		writel_relaxed(regs[k++], gr1_base + ARM_SMMU_GR1_CBA2R(j));
+		writel_relaxed(regs[k++], gr1_base + ARM_SMMU_GR1_CBAR(j));
+	}
+
+	for (j = 0, k = 0; j < smmu->num_mapping_groups; j++) {
+		writel_relaxed(reg_global[k++],
+				gr0_base + ARM_SMMU_GR0_S2CR(j));
+		writel_relaxed(reg_global[k++],
+				gr0_base + ARM_SMMU_GR0_SMR(j));
+	}
+	writel_relaxed(reg_global[k++],
+			ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+
+	/* Do a tlb flush */
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+	__arm_smmu_tlb_sync(smmu);
+
+	arm_smmu_disable_clocks(smmu);
+
+	return 0;
+}
+#else
+static inline int arm_smmu_pm_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static inline int arm_smmu_pm_resume(struct device *dev)
+{
+	return 0;
+}
+#endif
+
+static const struct dev_pm_ops arm_smmu_pm_ops = {
+#ifdef CONFIG_PM
+	.freeze_late = arm_smmu_pm_suspend,
+	.thaw_early = arm_smmu_pm_resume,
+	.restore_early = arm_smmu_pm_resume,
+#endif
+};
+
 static struct platform_driver arm_smmu_driver = {
 	.driver	= {
 		.name		= "arm-smmu",
 		.of_match_table	= of_match_ptr(arm_smmu_of_match),
+		.pm		= &arm_smmu_pm_ops,
 	},
 	.probe	= arm_smmu_device_dt_probe,
 	.remove	= arm_smmu_device_remove,
diff --git a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
index b820aa45136a..c62f6227e2be 100644
--- a/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
+++ b/drivers/media/platform/msm/ais/sensor/csid/msm_csid.c
@@ -812,8 +812,10 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg)
 			unsigned char cid = csid_params.lut_params.vc_cfg_a[i].
 				cid;
 
-			csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
-				csid_params.lut_params.vc_cfg_a[i];
+			if (cid < MAX_CID)
+				csid_dev->current_csid_params.lut_params.
+					vc_cfg_a[cid] = csid_params.lut_params.
+					vc_cfg_a[i];
 
 			CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
 				csid_params.lut_params.vc_cfg_a[i].cid,
@@ -851,8 +853,10 @@ static int32_t msm_csid_cmd(struct csid_device *csid_dev, void *arg)
 			unsigned char cid = csid_params.lut_params.vc_cfg_a[i].
 				cid;
 
-			csid_dev->current_csid_params.lut_params.vc_cfg_a[cid] =
-				csid_params.lut_params.vc_cfg_a[i];
+			if (cid < MAX_CID)
+				csid_dev->current_csid_params.lut_params.
+					vc_cfg_a[cid] = csid_params.lut_params.
+					vc_cfg_a[i];
 
 			CDBG("vc_cfg_a[%d] : dt=%d, decode_fmt=%d",
 				csid_params.lut_params.vc_cfg_a[i].cid,
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index bec9f099573b..68d0a5c9d437 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -879,7 +879,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -888,15 +887,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
 				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index a32ba753e413..afaf13474796 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -543,7 +543,7 @@ void acpi_pci_add_bus(struct pci_bus *bus)
 	union acpi_object *obj;
 	struct pci_host_bridge *bridge;
 
-	if (acpi_pci_disabled || !bus->bridge)
+	if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge))
 		return;
 
 	acpi_pci_slot_enumerate(bus);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index a9eb3cd453be..41a646696bab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -325,11 +325,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
 
 	wait_for_completion(&tm_iocb->u.tmf.comp);
 
-	rval = tm_iocb->u.tmf.comp_status == CS_COMPLETE ?
-	    QLA_SUCCESS : QLA_FUNCTION_FAILED;
+	rval = tm_iocb->u.tmf.data;
 
-	if ((rval != QLA_SUCCESS) || tm_iocb->u.tmf.data) {
-		ql_dbg(ql_dbg_taskm, vha, 0x8030,
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x8030,
 		    "TM IOCB failed (%x).\n", rval);
 	}
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 5cbf20ab94aa..18b19744398a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4938,8 +4938,9 @@ qla2x00_do_dpc(void *data)
 			}
 		}
 
-		if (test_and_clear_bit(ISP_ABORT_NEEDED,
-						&base_vha->dpc_flags)) {
+		if (test_and_clear_bit
+		    (ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
+		    !test_bit(UNLOADING, &base_vha->dpc_flags)) {
 
 			ql_dbg(ql_dbg_dpc, base_vha, 0x4007,
 			    "ISP abort scheduled.\n");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index de53c9694b68..5dc288fecace 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -520,18 +520,26 @@ static int sr_init_command(struct scsi_cmnd *SCpnt)
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
 {
 	struct scsi_cd *cd;
+	struct scsi_device *sdev;
 	int ret = -ENXIO;
 
+	cd = scsi_cd_get(bdev->bd_disk);
+	if (!cd)
+		goto out;
+
+	sdev = cd->device;
+	scsi_autopm_get_device(sdev);
 	check_disk_change(bdev);
 
 	mutex_lock(&sr_mutex);
-	cd = scsi_cd_get(bdev->bd_disk);
-	if (cd) {
-		ret = cdrom_open(&cd->cdi, bdev, mode);
-		if (ret)
-			scsi_cd_put(cd);
-	}
+	ret = cdrom_open(&cd->cdi, bdev, mode);
 	mutex_unlock(&sr_mutex);
+
+	scsi_autopm_put_device(sdev);
+	if (ret)
+		scsi_cd_put(cd);
+
+out:
 	return ret;
 }
 
@@ -559,6 +567,8 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	if (ret)
 		goto out;
 
+	scsi_autopm_get_device(sdev);
+
 	/*
 	 * Send SCSI addressing ioctls directly to mid level, send other
 	 * ioctls to cdrom/block level.
@@ -567,15 +577,18 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 	case SCSI_IOCTL_GET_IDLUN:
 	case SCSI_IOCTL_GET_BUS_NUMBER:
 		ret = scsi_ioctl(sdev, cmd, argp);
-		goto out;
+		goto put;
 	}
 
 	ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg);
 	if (ret != -ENOSYS)
-		goto out;
+		goto put;
 
 	ret = scsi_ioctl(sdev, cmd, argp);
 
+put:
+	scsi_autopm_put_device(sdev);
+
 out:
 	mutex_unlock(&sr_mutex);
 	return ret;
diff --git a/drivers/soc/qcom/hab/khab_test.c b/drivers/soc/qcom/hab/khab_test.c
index bb04815ad35e..6add7af0489f 100644
--- a/drivers/soc/qcom/hab/khab_test.c
+++ b/drivers/soc/qcom/hab/khab_test.c
@@ -278,7 +278,7 @@ static ssize_t vchan_show(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 static ssize_t vchan_store(struct kobject *kobj, struct kobj_attribute *attr,
-		char *buf, size_t count)
+		const char *buf, size_t count)
 {
 	int ret;
 
@@ -297,7 +297,7 @@ static ssize_t ctx_show(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 static ssize_t ctx_store(struct kobject *kobj, struct kobj_attribute *attr,
-		char *buf, size_t count)
+		const char *buf, size_t count)
 {
 	int ret;
 
@@ -316,7 +316,7 @@ static ssize_t expimp_show(struct kobject *kobj, struct kobj_attribute *attr,
 }
 
 static ssize_t expimp_store(struct kobject *kobj, struct kobj_attribute *attr,
-		char *buf, size_t count)
+		const char *buf, size_t count)
 {
 	int ret;
 
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index d2dd714f762f..1fc891b06016 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -2097,6 +2097,7 @@ static int icnss_driver_event_server_arrive(void *data)
 
 err_setup_msa:
 	icnss_assign_msa_perm_all(penv, ICNSS_MSA_PERM_HLOS_ALL);
+	clear_bit(ICNSS_MSA0_ASSIGNED, &penv->state);
 err_power_on:
 	icnss_hw_power_off(penv);
 fail:
diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c
index 9ef889593ef5..51fcb545a9d5 100644
--- a/drivers/usb/phy/class-dual-role.c
+++ b/drivers/usb/phy/class-dual-role.c
@@ -70,7 +70,15 @@ static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper)
 	return ret;
 }
 
-static void dual_role_changed_work(struct work_struct *work);
+static void dual_role_changed_work(struct work_struct *work)
+{
+	struct dual_role_phy_instance *dual_role =
+		container_of(work, struct dual_role_phy_instance,
+			     changed_work);
+
+	dev_dbg(&dual_role->dev, "%s\n", __func__);
+	kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
+}
 
 void dual_role_instance_changed(struct dual_role_phy_instance *dual_role)
 {
@@ -497,17 +505,6 @@ out:
 	return ret;
 }
 
-static void dual_role_changed_work(struct work_struct *work)
-{
-	struct dual_role_phy_instance *dual_role =
-		container_of(work, struct dual_role_phy_instance,
-			     changed_work);
-
-	dev_dbg(&dual_role->dev, "%s\n", __func__);
-	sysfs_update_group(&dual_role->dev.kobj, &dual_role_attr_group);
-	kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
-}
-
 /******************* Module Init ***********************************/
 
 static int __init dual_role_class_init(void)
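
Note on the tpm-dev.c hunks above: they replace an atomic_t flag with a plain size_t that is only read and written while buffer_mutex is held, so the "is a result still pending?" check and the action taken on it can no longer race against a concurrent reader or the timeout worker. A minimal user-space sketch of that check-under-lock pattern follows (pthread-based and simplified; the buffer size, function names and return codes are illustrative stand-ins, not part of the patch):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define BUF_SIZE 4096                 /* stand-in for TPM_BUFSIZE */

struct dev_state {
	pthread_mutex_t lock;         /* plays the role of buffer_mutex */
	size_t data_pending;          /* bytes of an unread result, 0 = none */
	char buf[BUF_SIZE];
};

/* Reject a new command while an unread result is pending.  The check and
 * the later update of data_pending happen under the same lock, which is
 * the property the old atomic_t-based check could not guarantee. */
static int dev_write(struct dev_state *s, const char *data, size_t len)
{
	if (len > BUF_SIZE)
		return -E2BIG;

	pthread_mutex_lock(&s->lock);
	if (s->data_pending != 0) {
		pthread_mutex_unlock(&s->lock);
		return -EBUSY;
	}
	memcpy(s->buf, data, len);
	s->data_pending = len;        /* still under the lock */
	pthread_mutex_unlock(&s->lock);
	return (int)len;
}

/* Consume and clear the pending result under the same lock. */
static ssize_t dev_read(struct dev_state *s, char *out, size_t len)
{
	ssize_t n = 0;

	pthread_mutex_lock(&s->lock);
	if (s->data_pending) {
		n = len < s->data_pending ? (ssize_t)len
					  : (ssize_t)s->data_pending;
		memcpy(out, s->buf, (size_t)n);
		memset(s->buf, 0, s->data_pending);
		s->data_pending = 0;
	}
	pthread_mutex_unlock(&s->lock);
	return n;
}

int main(void)
{
	struct dev_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };
	char out[BUF_SIZE];

	printf("write: %d\n", dev_write(&s, "cmd", 3));          /* 3 */
	printf("write: %d\n", dev_write(&s, "cmd", 3));          /* -EBUSY */
	printf("read:  %zd\n", dev_read(&s, out, sizeof(out)));  /* 3 */
	printf("write: %d\n", dev_write(&s, "cmd", 3));          /* 3 again */
	return 0;
}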
