Diffstat (limited to 'drivers/gpu/msm')
 -rw-r--r--   drivers/gpu/msm/a5xx_reg.h            |  2
 -rw-r--r--   drivers/gpu/msm/adreno-gpulist.h      |  2
 -rw-r--r--   drivers/gpu/msm/adreno.c              | 15
 -rw-r--r--   drivers/gpu/msm/adreno.h              |  2
 -rw-r--r--   drivers/gpu/msm/adreno_a3xx.c         | 40
 -rw-r--r--   drivers/gpu/msm/adreno_a4xx_preempt.c |  2
 -rw-r--r--   drivers/gpu/msm/adreno_a5xx.c         | 26
 -rw-r--r--   drivers/gpu/msm/adreno_perfcounter.c  | 32
 -rw-r--r--   drivers/gpu/msm/kgsl.c                | 43
 -rw-r--r--   drivers/gpu/msm/kgsl.h                |  3
 -rw-r--r--   drivers/gpu/msm/kgsl_debugfs.c        | 93
 -rw-r--r--   drivers/gpu/msm/kgsl_mmu.c            | 20
 -rw-r--r--   drivers/gpu/msm/kgsl_sharedmem.c      |  7
13 files changed, 249 insertions, 38 deletions
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index f3b4e6622043..436b6949c414 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -608,6 +608,7 @@
 #define A5XX_PC_PERFCTR_PC_SEL_7 0xD17
 
 /* HLSQ registers */
+#define A5XX_HLSQ_DBG_ECO_CNTL 0xE04
 #define A5XX_HLSQ_ADDR_MODE_CNTL 0xE05
 #define A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0xE10
 #define A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0xE11
@@ -632,6 +633,7 @@
 #define A5XX_VFD_PERFCTR_VFD_SEL_7 0xE57
 
 /* VPC registers */
+#define A5XX_VPC_DBG_ECO_CNTL 0xE60
 #define A5XX_VPC_ADDR_MODE_CNTL 0xE61
 #define A5XX_VPC_PERFCTR_VPC_SEL_0 0xE64
 #define A5XX_VPC_PERFCTR_VPC_SEL_1 0xE65
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 3615be45b6d9..a02ed40ba9d5 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -269,7 +269,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
        .patchid = ANY_ID,
        .features = ADRENO_PREEMPTION | ADRENO_64BIT |
            ADRENO_CONTENT_PROTECTION |
-           ADRENO_GPMU | ADRENO_SPTP_PC,
+           ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM,
        .pm4fw_name = "a530_pm4.fw",
        .pfpfw_name = "a530_pfp.fw",
        .zap_name = "a540_zap",
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 6160aa567fbf..11226472d801 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -843,6 +843,8 @@ static struct {
     { ADRENO_QUIRK_FAULT_DETECT_MASK, "qcom,gpu-quirk-fault-detect-mask" },
     { ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING,
            "qcom,gpu-quirk-dp2clockgating-disable" },
+    { ADRENO_QUIRK_DISABLE_LMLOADKILL,
+           "qcom,gpu-quirk-lmloadkill-disable" },
 };
 
 static int adreno_of_get_power(struct adreno_device *adreno_dev,
@@ -2109,8 +2111,6 @@ static int adreno_soft_reset(struct kgsl_device *device)
        adreno_support_64bit(adreno_dev))
        gpudev->enable_64bit(adreno_dev);
 
-    /* Restore physical performance counter values after soft reset */
-    adreno_perfcounter_restore(adreno_dev);
 
     /* Reinitialize the GPU */
     gpudev->start(adreno_dev);
@@ -2137,6 +2137,9 @@ static int adreno_soft_reset(struct kgsl_device *device)
        set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);
     }
 
+    /* Restore physical performance counter values after soft reset */
+    adreno_perfcounter_restore(adreno_dev);
+
     return ret;
 }
 
@@ -2291,9 +2294,9 @@ static void adreno_read(struct kgsl_device *device,
        void __iomem *base,
        unsigned int mem_len)
 {
-    unsigned int __iomem *reg;
+    void __iomem *reg;
     BUG_ON(offsetwords*sizeof(uint32_t) >= mem_len);
-    reg = (unsigned int __iomem *)(base + (offsetwords << 2));
+    reg = (base + (offsetwords << 2));
 
     if (!in_interrupt())
        kgsl_pre_hwaccess(device);
@@ -2333,7 +2336,7 @@ static void adreno_regwrite(struct kgsl_device *device,
        unsigned int offsetwords,
        unsigned int value)
 {
-    unsigned int __iomem *reg;
+    void __iomem *reg;
 
     BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);
 
@@ -2343,7 +2346,7 @@ static void adreno_regwrite(struct kgsl_device *device,
     trace_kgsl_regwrite(device, offsetwords, value);
     kgsl_cffdump_regwrite(device, offsetwords << 2, value);
 
-    reg = (unsigned int __iomem *)(device->reg_virt + (offsetwords << 2));
+    reg = (device->reg_virt + (offsetwords << 2));
 
     /*ensure previous writes post before this one,
      * i.e. act like normal writel() */
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index a2af26c81f50..d81142db5b58 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -123,6 +123,8 @@
 #define ADRENO_QUIRK_FAULT_DETECT_MASK BIT(3)
 /* Disable RB sampler datapath clock gating optimization */
 #define ADRENO_QUIRK_DISABLE_RB_DP2CLOCKGATING BIT(4)
+/* Disable local memory(LM) feature to avoid corner case error */
+#define ADRENO_QUIRK_DISABLE_LMLOADKILL BIT(5)
 
 /* Flags to control command packet settings */
 #define KGSL_CMD_FLAGS_NONE 0
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 3f5a9c6318f6..423071811b43 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -151,6 +151,43 @@ static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
     0x00000000, 0x03000000, 0x00000000, 0x00000000,
 };
 
+static void a3xx_efuse_speed_bin(struct adreno_device *adreno_dev)
+{
+    unsigned int val;
+    unsigned int speed_bin[3];
+    struct kgsl_device *device = &adreno_dev->dev;
+
+    if (of_property_read_u32_array(device->pdev->dev.of_node,
+        "qcom,gpu-speed-bin", speed_bin, 3))
+        return;
+
+    adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
+
+    adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+static const struct {
+    int (*check)(struct adreno_device *adreno_dev);
+    void (*func)(struct adreno_device *adreno_dev);
+} a3xx_efuse_funcs[] = {
+    { adreno_is_a306a, a3xx_efuse_speed_bin },
+};
+
+static void a3xx_check_features(struct adreno_device *adreno_dev)
+{
+    unsigned int i;
+
+    if (adreno_efuse_map(adreno_dev))
+        return;
+
+    for (i = 0; i < ARRAY_SIZE(a3xx_efuse_funcs); i++) {
+        if (a3xx_efuse_funcs[i].check(adreno_dev))
+            a3xx_efuse_funcs[i].func(adreno_dev);
+    }
+
+    adreno_efuse_unmap(adreno_dev);
+}
+
 /**
  * _a3xx_pwron_fixup() - Initialize a special command buffer to run a
  * post-power collapse shader workaround
@@ -604,6 +641,9 @@ static void a3xx_platform_setup(struct adreno_device *adreno_dev)
        gpudev->vbif_xin_halt_ctrl0_mask =
            A30X_VBIF_XIN_HALT_CTRL0_MASK;
     }
+
+    /* Check efuse bits for various capabilties */
+    a3xx_check_features(adreno_dev);
 }
 
 static int a3xx_send_me_init(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a4xx_preempt.c b/drivers/gpu/msm/adreno_a4xx_preempt.c
index 4087ac60c89e..ef837dc4b7ea 100644
--- a/drivers/gpu/msm/adreno_a4xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a4xx_preempt.c
@@ -146,6 +146,8 @@ static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
            &ptname, PT_INFO_OFFSET(current_rb_ptname));
        pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
            ptname);
+       if (IS_ERR_OR_NULL(pt))
+           return (pt == NULL) ? -ENOENT : PTR_ERR(pt);
        /* set the ringbuffer for incoming RB */
        pt_switch_sizedwords =
            adreno_iommu_set_pt_generate_cmds(incoming_rb,
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 860f6d2925f1..f652e955b07c 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -196,6 +196,8 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev)
        /* A510 has 3 XIN ports in VBIF */
        gpudev->vbif_xin_halt_ctrl0_mask =
            A510_VBIF_XIN_HALT_CTRL0_MASK;
+    } else if (adreno_is_a540(adreno_dev)) {
+       gpudev->snapshot_data->sect_sizes->cp_merciu = 1024;
     }
 
     /* Calculate SP local and private mem addresses */
@@ -1534,12 +1536,12 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
     const char *name, struct clk *clk)
 {
     if (adreno_is_a540(adreno_dev)) {
-       if (!strcmp(name, "mem_iface_clk"))
-           clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
-           clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
-       if (!strcmp(name, "core_clk")) {
+       if (!strcmp(name, "mem_iface_clk")) {
            clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
            clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+       } else if (!strcmp(name, "core_clk")) {
+           clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
+           clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
        }
     }
 }
@@ -1781,11 +1783,11 @@ static void a5xx_start(struct adreno_device *adreno_dev)
        set_bit(ADRENO_DEVICE_HANG_INTR, &adreno_dev->priv);
        gpudev->irq->mask |= (1 << A5XX_INT_MISC_HANG_DETECT);
        /*
-        * Set hang detection threshold to 1 million cycles
-        * (0xFFFF*16)
+        * Set hang detection threshold to 4 million cycles
+        * (0x3FFFF*16)
         */
        kgsl_regwrite(device, A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
                (1 << 30) | 0x3FFFF);
     }
 
 
@@ -1944,6 +1946,16 @@ static void a5xx_start(struct adreno_device *adreno_dev)
     }
 
+    /*
+     * VPC corner case with local memory load kill leads to corrupt
+     * internal state. Normal Disable does not work for all a5x chips.
+     * So do the following setting to disable it.
+     */
+    if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_DISABLE_LMLOADKILL)) {
+       kgsl_regrmw(device, A5XX_VPC_DBG_ECO_CNTL, 0, 0x1 << 23);
+       kgsl_regrmw(device, A5XX_HLSQ_DBG_ECO_CNTL, 0x1 << 18, 0);
+    }
+
     a5xx_preemption_start(adreno_dev);
     a5xx_protect_init(adreno_dev);
 }
 
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 42f8119ad8b4..f5f99c3ebb4a 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -522,12 +522,18 @@ int adreno_perfcounter_get(struct adreno_device *adreno_dev,
     if (empty == -1)
        return -EBUSY;
 
+    /* initialize the new counter */
+    group->regs[empty].countable = countable;
+
     /* enable the new counter */
     ret = adreno_perfcounter_enable(adreno_dev, groupid, empty, countable);
-    if (ret)
+    if (ret) {
+       /* Put back the perfcounter */
+       if (!(group->flags & ADRENO_PERFCOUNTER_GROUP_FIXED))
+           group->regs[empty].countable =
+               KGSL_PERFCOUNTER_NOT_USED;
        return ret;
-    /* initialize the new counter */
-    group->regs[empty].countable = countable;
+    }
 
     /* set initial kernel and user count */
     if (flags & PERFCOUNTER_FLAG_KERNEL) {
@@ -720,10 +726,22 @@ static int _perfcounter_enable_default(struct adreno_device *adreno_dev,
        /* wait for the above commands submitted to complete */
        ret = adreno_ringbuffer_waittimestamp(rb, rb->timestamp,
            ADRENO_IDLE_TIMEOUT);
-       if (ret)
-           KGSL_DRV_ERR(device,
-               "Perfcounter %u/%u/%u start via commands failed %d\n",
-               group, counter, countable, ret);
+       if (ret) {
+           /*
+            * If we were woken up because of cancelling rb events
+            * either due to soft reset or adreno_stop, ignore the
+            * error and return 0 here. The perfcounter is already
+            * set up in software and it will be programmed in
+            * hardware when we wake up or come up after soft reset,
+            * by adreno_perfcounter_restore.
+            */
+           if (ret == -EAGAIN)
+               ret = 0;
+           else
+               KGSL_DRV_ERR(device,
+                   "Perfcounter %u/%u/%u start via commands failed %d\n",
+                   group, counter, countable, ret);
+       }
     } else {
        /* Select the desired perfcounter */
        kgsl_regwrite(device, reg->select, countable);
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index fe0715629825..554eb2dffae4 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2507,6 +2507,8 @@ static int kgsl_setup_dma_buf(struct kgsl_device *device,
     meta->dmabuf = dmabuf;
     meta->attach = attach;
 
+    attach->priv = entry;
+
     entry->priv_data = meta;
     entry->memdesc.pagetable = pagetable;
     entry->memdesc.size = 0;
@@ -2557,6 +2559,45 @@ out:
 }
 #endif
 
+#ifdef CONFIG_DMA_SHARED_BUFFER
+void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
+        int *egl_surface_count, int *egl_image_count)
+{
+    struct kgsl_dma_buf_meta *meta = entry->priv_data;
+    struct dma_buf *dmabuf = meta->dmabuf;
+    struct dma_buf_attachment *mem_entry_buf_attachment = meta->attach;
+    struct device *buf_attachment_dev = mem_entry_buf_attachment->dev;
+    struct dma_buf_attachment *attachment = NULL;
+
+    mutex_lock(&dmabuf->lock);
+    list_for_each_entry(attachment, &dmabuf->attachments, node) {
+        struct kgsl_mem_entry *scan_mem_entry = NULL;
+
+        if (attachment->dev != buf_attachment_dev)
+            continue;
+
+        scan_mem_entry = attachment->priv;
+        if (!scan_mem_entry)
+            continue;
+
+        switch (kgsl_memdesc_get_memtype(&scan_mem_entry->memdesc)) {
+        case KGSL_MEMTYPE_EGL_SURFACE:
+            (*egl_surface_count)++;
+            break;
+        case KGSL_MEMTYPE_EGL_IMAGE:
+            (*egl_image_count)++;
+            break;
+        }
+    }
+    mutex_unlock(&dmabuf->lock);
+}
+#else
+void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
+        int *egl_surface_count, int *egl_image_count)
+{
+}
+#endif
+
 long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
                unsigned int cmd, void *data)
 {
@@ -3891,7 +3932,7 @@ kgsl_mmap_memstore(struct kgsl_device *device, struct vm_area_struct *vma)
        return -EINVAL;
     }
 
-    vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
     result = remap_pfn_range(vma, vma->vm_start,
                device->memstore.physaddr >> PAGE_SHIFT,
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 826c4edb3582..fbf9197b6d1b 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -435,6 +435,9 @@ long kgsl_ioctl_sparse_unbind(struct kgsl_device_private *dev_priv,
 
 void kgsl_mem_entry_destroy(struct kref *kref);
 
+void kgsl_get_egl_counts(struct kgsl_mem_entry *entry,
+        int *egl_surface_count, int *egl_image_count);
+
 struct kgsl_mem_entry * __must_check
 kgsl_sharedmem_find(struct kgsl_process_private *private, uint64_t gpuaddr);
 
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 2f293e4da398..7758fc956055 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -125,13 +125,15 @@ static char get_cacheflag(const struct kgsl_memdesc *m)
 }
 
-static int print_mem_entry(int id, void *ptr, void *data)
+static int print_mem_entry(void *data, void *ptr)
 {
     struct seq_file *s = data;
     struct kgsl_mem_entry *entry = ptr;
     char flags[10];
     char usage[16];
     struct kgsl_memdesc *m = &entry->memdesc;
+    unsigned int usermem_type = kgsl_memdesc_usermem_type(m);
+    int egl_surface_count = 0, egl_image_count = 0;
 
     if (m->flags & KGSL_MEMFLAGS_SPARSE_VIRT)
        return 0;
 
@@ -149,12 +151,17 @@ static int print_mem_entry(int id, void *ptr, void *data)
 
     kgsl_get_memory_usage(usage, sizeof(usage), m->flags);
 
-    seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu",
+    if (usermem_type == KGSL_MEM_ENTRY_ION)
+       kgsl_get_egl_counts(entry, &egl_surface_count,
+               &egl_image_count);
+
+    seq_printf(s, "%pK %pK %16llu %5d %9s %10s %16s %5d %16llu %6d %6d",
            (uint64_t *)(uintptr_t) m->gpuaddr,
            (unsigned long *) m->useraddr,
            m->size, entry->id, flags,
-           memtype_str(kgsl_memdesc_usermem_type(m)),
-           usage, (m->sgt ? m->sgt->nents : 0), m->mapsize);
+           memtype_str(usermem_type),
+           usage, (m->sgt ? m->sgt->nents : 0), m->mapsize,
+           egl_surface_count, egl_image_count);
 
     if (entry->metadata[0] != 0)
        seq_printf(s, " %s", entry->metadata);
 
@@ -164,25 +171,83 @@ static int print_mem_entry(int id, void *ptr, void *data)
     return 0;
 }
 
-static int process_mem_print(struct seq_file *s, void *unused)
+static struct kgsl_mem_entry *process_mem_seq_find(struct seq_file *s,
+        void *ptr, loff_t pos)
 {
+    struct kgsl_mem_entry *entry = ptr;
     struct kgsl_process_private *private = s->private;
+    int id = 0;
+    loff_t temp_pos = 1;
 
-    seq_printf(s, "%16s %16s %16s %5s %9s %10s %16s %5s %16s\n",
-       "gpuaddr", "useraddr", "size", "id", "flags", "type",
-       "usage", "sglen", "mapsize");
+    if (entry != SEQ_START_TOKEN)
+        id = entry->id + 1;
 
     spin_lock(&private->mem_lock);
-    idr_for_each(&private->mem_idr, print_mem_entry, s);
+    for (entry = idr_get_next(&private->mem_idr, &id); entry;
+        id++, entry = idr_get_next(&private->mem_idr, &id),
+        temp_pos++) {
+        if (temp_pos == pos && kgsl_mem_entry_get(entry)) {
+            spin_unlock(&private->mem_lock);
+            goto found;
+        }
+    }
     spin_unlock(&private->mem_lock);
 
-    return 0;
+    entry = NULL;
+found:
+    if (ptr != SEQ_START_TOKEN)
+        kgsl_mem_entry_put(ptr);
+
+    return entry;
+}
+
+static void *process_mem_seq_start(struct seq_file *s, loff_t *pos)
+{
+    loff_t seq_file_offset = *pos;
+
+    if (seq_file_offset == 0)
+        return SEQ_START_TOKEN;
+    else
+        return process_mem_seq_find(s, SEQ_START_TOKEN,
+                seq_file_offset);
+}
+
+static void process_mem_seq_stop(struct seq_file *s, void *ptr)
+{
+    if (ptr && ptr != SEQ_START_TOKEN)
+        kgsl_mem_entry_put(ptr);
 }
 
+static void *process_mem_seq_next(struct seq_file *s, void *ptr,
+        loff_t *pos)
+{
+    ++*pos;
+    return process_mem_seq_find(s, ptr, 1);
+}
+
+static int process_mem_seq_show(struct seq_file *s, void *ptr)
+{
+    if (ptr == SEQ_START_TOKEN) {
+        seq_printf(s, "%16s %16s %16s %5s %9s %10s %16s %5s %16s %6s %6s\n",
+            "gpuaddr", "useraddr", "size", "id", "flags", "type",
+            "usage", "sglen", "mapsize", "eglsrf", "eglimg");
+        return 0;
+    } else
+        return print_mem_entry(s, ptr);
+}
+
+static const struct seq_operations process_mem_seq_fops = {
+    .start = process_mem_seq_start,
+    .stop = process_mem_seq_stop,
+    .next = process_mem_seq_next,
+    .show = process_mem_seq_show,
+};
+
 static int process_mem_open(struct inode *inode, struct file *file)
 {
     int ret;
     pid_t pid = (pid_t) (unsigned long) inode->i_private;
+    struct seq_file *s = NULL;
     struct kgsl_process_private *private = NULL;
 
     private = kgsl_process_private_find(pid);
@@ -190,9 +255,13 @@ static int process_mem_open(struct inode *inode, struct file *file)
     if (!private)
        return -ENODEV;
 
-    ret = single_open(file, process_mem_print, private);
+    ret = seq_open(file, &process_mem_seq_fops);
     if (ret)
        kgsl_process_private_put(private);
+    else {
+       s = file->private_data;
+       s->private = private;
+    }
 
     return ret;
 }
@@ -205,7 +274,7 @@ static int process_mem_release(struct inode *inode, struct file *file)
     if (private)
        kgsl_process_private_put(private);
 
-    return single_release(inode, file);
+    return seq_release(inode, file);
 }
 
 static const struct file_operations process_mem_fops = {
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index ba564b2851f9..f516b7cd245a 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -390,6 +390,13 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
     if (!memdesc->gpuaddr)
        return -EINVAL;
 
+    if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
+               KGSL_MEMFLAGS_SPARSE_PHYS))) {
+       /* Only global mappings should be mapped multiple times */
+       if (!kgsl_memdesc_is_global(memdesc) &&
+           (KGSL_MEMDESC_MAPPED & memdesc->priv))
+           return -EINVAL;
+    }
 
     size = kgsl_memdesc_footprint(memdesc);
 
@@ -403,6 +410,9 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
        atomic_inc(&pagetable->stats.entries);
        KGSL_STATS_ADD(size, &pagetable->stats.mapped,
            &pagetable->stats.max_mapped);
+
+       /* This is needed for non-sparse mappings */
+       memdesc->priv |= KGSL_MEMDESC_MAPPED;
     }
 
     return 0;
@@ -455,6 +465,13 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
     if (memdesc->size == 0)
        return -EINVAL;
 
+    if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
+               KGSL_MEMFLAGS_SPARSE_PHYS))) {
+       /* Only global mappings should be mapped multiple times */
+       if (!(KGSL_MEMDESC_MAPPED & memdesc->priv))
+           return -EINVAL;
+    }
+
     if (PT_OP_VALID(pagetable, mmu_unmap)) {
        uint64_t size;
 
@@ -464,6 +481,9 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
 
        atomic_dec(&pagetable->stats.entries);
        atomic_long_sub(size, &pagetable->stats.mapped);
+
+       if (!kgsl_memdesc_is_global(memdesc))
+           memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
     }
 
     return ret;
diff --git a/drivers/gpu/msm/kgsl_sharedmem.c b/drivers/gpu/msm/kgsl_sharedmem.c
index 72895c18119f..618e9e9a33a3 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.c
+++ b/drivers/gpu/msm/kgsl_sharedmem.c
@@ -574,12 +574,11 @@ int kgsl_cache_range_op(struct kgsl_memdesc *memdesc, uint64_t offset,
     void *addr = (memdesc->hostptr) ?
        memdesc->hostptr : (void *) memdesc->useraddr;
 
-    /* Make sure that size is non-zero */
-    if (!size)
+    if (size == 0 || size > UINT_MAX)
        return -EINVAL;
 
-    /* Make sure that the offset + size isn't bigger than we can handle */
-    if ((offset + size) > ULONG_MAX)
+    /* Make sure that the offset + size does not overflow */
+    if ((offset + size < offset) || (offset + size < size))
        return -ERANGE;
 
     /* Make sure the offset + size do not overflow the address */
