author     Jordan Crouse <jcrouse@codeaurora.org>   2015-12-03 08:38:11 -0700
committer  David Keitel <dkeitel@codeaurora.org>    2016-03-23 21:16:32 -0700
commit     531fca9ffc17f68c49be65beafc0fc5138a21d2d
tree       d2d481cd63bb2bc0d0b10809ba013c4bf885fc59
parent     7359adc0bfd3d94e4aa8f66b7f18aa276bffa598
msm: kgsl: Move global pagetable entries to the IOMMU driver
Global pagetable entries are exclusively for IOMMU and per-process
pagetables. Move all the code out of the generic driver and into
the IOMMU driver and clean up a bunch of stuff along the way.
Change-Id: Ic0dedbadbb368bb2a289ba4393f729d7e6066a17
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
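
The heart of the patch is the small allocator that kgsl_iommu.c now keeps for global entries: a fixed array of memdesc pointers plus a bump pointer into the MMU-implementation-specific global region. Below is a condensed, userspace-compilable sketch of that scheme for readers who want the idea outside the diff; struct memdesc and the error returns stand in for struct kgsl_memdesc and the kernel's BUG_ON(), and the sizes in main() are made up purely for illustration.

/*
 * Sketch of the "global" entry bookkeeping this patch moves into
 * kgsl_iommu.c: a fixed-size array plus a sequential (bump) allocator.
 */
#include <stdint.h>
#include <stdio.h>

#define GLOBAL_PT_ENTRIES		32
#define KGSL_IOMMU_GLOBAL_MEM_BASE	0xf8000000ULL
#define KGSL_IOMMU_GLOBAL_MEM_SIZE	(8ULL << 20)	/* SZ_8M */

struct memdesc {			/* stand-in for struct kgsl_memdesc */
	uint64_t gpuaddr;		/* 0 means "not yet placed" */
	uint64_t size;
};

static struct memdesc *global_pt_entries[GLOBAL_PT_ENTRIES];
static int global_pt_count;
static uint64_t global_pt_alloc;

/* Hand out the next address in the global region; addresses are assigned
 * sequentially and never reused, so pagetables that already exist never
 * have to be reprogrammed. */
static int add_global(struct memdesc *m)
{
	if (m->gpuaddr != 0)
		return 0;			/* already placed */

	if (global_pt_count >= GLOBAL_PT_ENTRIES ||
	    global_pt_alloc + m->size >= KGSL_IOMMU_GLOBAL_MEM_SIZE)
		return -1;			/* the kernel code BUG()s instead */

	m->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
	global_pt_alloc += m->size;
	global_pt_entries[global_pt_count++] = m;
	return 0;
}

int main(void)
{
	struct memdesc setstate = { .size = 4096 };	/* illustrative sizes */
	struct memdesc memstore = { .size = 32768 };

	add_global(&setstate);
	add_global(&memstore);

	/* setstate lands at the base of the region, memstore right behind it */
	printf("setstate 0x%llx memstore 0x%llx\n",
	       (unsigned long long)setstate.gpuaddr,
	       (unsigned long long)memstore.gpuaddr);
	return 0;
}

Every pagetable created afterwards walks the same array and maps each entry (kgsl_iommu_map_globals() in the diff), which is why the addresses must never be recycled.
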
-rw-r--r-- | drivers/gpu/msm/adreno.c               |   9
-rw-r--r-- | drivers/gpu/msm/adreno_a5xx_snapshot.c |   4
-rw-r--r-- | drivers/gpu/msm/adreno_profile.c       |   2
-rw-r--r-- | drivers/gpu/msm/adreno_ringbuffer.c    |   8
-rw-r--r-- | drivers/gpu/msm/adreno_snapshot.c      |  31
-rw-r--r-- | drivers/gpu/msm/kgsl.c                 |   2
-rw-r--r-- | drivers/gpu/msm/kgsl_device.h          |   3
-rw-r--r-- | drivers/gpu/msm/kgsl_iommu.c           | 104
-rw-r--r-- | drivers/gpu/msm/kgsl_iommu.h           |  11
-rw-r--r-- | drivers/gpu/msm/kgsl_mmu.c             | 252
-rw-r--r-- | drivers/gpu/msm/kgsl_mmu.h             |  19
-rw-r--r-- | drivers/gpu/msm/kgsl_sharedmem.h       |  13
12 files changed, 178 insertions, 280 deletions
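
One consequence of dropping the driver-wide global list shows up in the adreno_snapshot.c hunks below: instead of asking kgsl_search_global_pt_entries() whether an IB belongs to any global buffer, the snapshot code now tests the two globals it actually cares about (the IOMMU setstate buffer and the power-on fixup) with a plain containment check. A minimal sketch of that check, shaped like the kgsl_gpuaddr_in_memdesc() helper the hunks rely on (the real helper lives in the KGSL headers and may guard additional edge cases):

#include <stdbool.h>
#include <stdint.h>

struct memdesc {			/* stand-in for struct kgsl_memdesc */
	uint64_t gpuaddr;
	uint64_t size;
};

/* True when [gpuaddr, gpuaddr + size) falls entirely inside the buffer;
 * on a hit the snapshot code skips parsing the IB. */
static bool gpuaddr_in_memdesc(const struct memdesc *m,
			uint64_t gpuaddr, uint64_t size)
{
	return gpuaddr >= m->gpuaddr &&
	       gpuaddr + size <= m->gpuaddr + m->size;
}
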
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 9d8dc05e61c5..0692c9f2608d 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1139,8 +1139,9 @@ out:
 static void _adreno_free_memories(struct adreno_device *adreno_dev)
 {
+	struct kgsl_device *device = &adreno_dev->dev;
 	if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
-		kgsl_free_global(&adreno_dev->cmdbatch_profile_buffer);
+		kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);

 	/* Free local copies of firmware and other command streams */
 	kfree(adreno_dev->pfp_fw);
@@ -1152,8 +1153,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
 	kfree(adreno_dev->gpmu_cmds);
 	adreno_dev->gpmu_cmds = NULL;

-	kgsl_free_global(&adreno_dev->pm4);
-	kgsl_free_global(&adreno_dev->pfp);
+	kgsl_free_global(device, &adreno_dev->pm4);
+	kgsl_free_global(device, &adreno_dev->pfp);
 }

 static int adreno_remove(struct platform_device *pdev)
@@ -1197,7 +1198,7 @@ static int adreno_remove(struct platform_device *pdev)
 	kgsl_device_platform_remove(device);

 	if (test_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv)) {
-		kgsl_free_global(&adreno_dev->pwron_fixup);
+		kgsl_free_global(device, &adreno_dev->pwron_fixup);
 		clear_bit(ADRENO_DEVICE_PWRON_FIXUP, &adreno_dev->priv);
 	}
 	clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index 95f9198a330a..62eb4f7b24f4 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -905,8 +905,10 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev)
 	count = count_registers();

 	if (kgsl_allocate_global(device, &registers,
-		count * sizeof(unsigned int), 0, 0))
+		count * sizeof(unsigned int), 0, 0)) {
+		kgsl_free_global(device, &capturescript);
 		return;
+	}

 	/* Build the crash script */
diff --git a/drivers/gpu/msm/adreno_profile.c b/drivers/gpu/msm/adreno_profile.c
index 5d5eedcaa798..1f73db223a0a 100644
--- a/drivers/gpu/msm/adreno_profile.c
+++ b/drivers/gpu/msm/adreno_profile.c
@@ -1108,7 +1108,7 @@ void adreno_profile_close(struct adreno_device *adreno_dev)
 	profile->log_tail = NULL;
 	profile->shared_head = 0;
 	profile->shared_tail = 0;
-	kgsl_free_global(&profile->shared_buffer);
+	kgsl_free_global(&adreno_dev->dev, &profile->shared_buffer);
 	profile->shared_size = 0;
 	profile->assignment_count = 0;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index a80707385e3b..6d577e4045cf 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -424,12 +424,14 @@ int adreno_ringbuffer_init(struct adreno_device *adreno_dev, bool nopreempt)

 static void _adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
 {
-	kgsl_free_global(&rb->pagetable_desc);
-	kgsl_free_global(&rb->preemption_desc);
+	struct kgsl_device *device = rb->device;
+
+	kgsl_free_global(device, &rb->pagetable_desc);
+	kgsl_free_global(device, &rb->preemption_desc);
 	memset(&rb->pt_update_desc, 0, sizeof(struct kgsl_memdesc));
-	kgsl_free_global(&rb->buffer_desc);
+	kgsl_free_global(device, &rb->buffer_desc);
 	kgsl_del_event_group(&rb->events);
 	memset(rb, 0, sizeof(struct adreno_ringbuffer));
 }
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index c0b3b9f0352c..ab12399ba236 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -231,6 +231,19 @@ static inline void parse_ib(struct kgsl_device *device,
 }

+static inline bool iommu_is_setstate_addr(struct kgsl_device *device,
+		uint64_t gpuaddr, uint64_t size)
+{
+	struct kgsl_iommu *iommu = device->mmu.priv;
+
+	if (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU ||
+			iommu == NULL)
+		return false;
+
+	return kgsl_gpuaddr_in_memdesc(&iommu->setstate, gpuaddr,
+			size);
+}
+
 /**
  * snapshot_rb_ibs() - Dump rb data and capture the IB's in the RB as well
  * @rb: The RB to dump
@@ -352,14 +365,16 @@ static void snapshot_rb_ibs(struct adreno_ringbuffer *rb,
 			ibsize = rbptr[index + 3];
 		}

-		/*
-		 * Sometimes the kernel generates IBs in global
-		 * memory. We dump the interesting global buffers,
-		 * so there's no need to parse these IBs.
-		 */
-		if (!kgsl_search_global_pt_entries(ibaddr, ibsize))
-			parse_ib(device, snapshot, snapshot->process,
-				ibaddr, ibsize);
+		/* Don't parse known global IBs */
+		if (iommu_is_setstate_addr(device, ibaddr, ibsize))
+			continue;
+
+		if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup,
+				ibaddr, ibsize))
+			continue;
+
+		parse_ib(device, snapshot, snapshot->process,
+			ibaddr, ibsize);
 	}

 	index = index + 1;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index b5b12296c6fd..0149d1bf49c3 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -3970,7 +3970,7 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
 	idr_destroy(&device->context_idr);

-	kgsl_free_global(&device->memstore);
+	kgsl_free_global(device, &device->memstore);

 	kgsl_mmu_close(device);
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 71047a727f1b..ee2a269ebb75 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -632,9 +632,6 @@ long kgsl_ioctl_copy_in(unsigned int kernel_cmd, unsigned int user_cmd,
 long kgsl_ioctl_copy_out(unsigned int kernel_cmd, unsigned int user_cmd,
 	unsigned long, unsigned char *ptr);

-int kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
-	struct kgsl_device_private *dev_priv);
-
 /**
  * kgsl_context_put() - Release context reference count
  * @context: Pointer to the KGSL context to be released
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 0235fd5ebaff..835583a6a0fb 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -67,6 +67,89 @@ struct kgsl_iommu_addr_entry {

 static struct kmem_cache *addr_entry_cache;

+/*
+ * There are certain memory allocations (ringbuffer, memstore, etc) that need to
+ * be present at the same address in every pagetable. We call these "global"
+ * pagetable entries. There are relatively few of these and they are mostly
+ * stable (defined at init time) but the actual number of globals can differ
+ * slight depending on the target and implementation.
+ *
+ * Here we define an array and a simple allocator to keep track of the currently
+ * active global entries. Each entry is assigned a unique address inside of a
+ * MMU implementation specific "global" region. The addresses are assigned
+ * sequentially and never re-used to avoid having to go back and reprogram
+ * existing pagetables. The entire list of active entries are mapped and
+ * unmapped into every new pagetable as it is created and destroyed.
+ *
+ * Because there are relatively few entries and they are defined at boot time we
+ * don't need to go over the top to define a dynamic allocation scheme. It will
+ * be less wasteful to pick a static number with a little bit of growth
+ * potential.
+ */
+
+#define GLOBAL_PT_ENTRIES 32
+
+static struct kgsl_memdesc *global_pt_entries[GLOBAL_PT_ENTRIES];
+static int global_pt_count;
+uint64_t global_pt_alloc;
+
+static void kgsl_iommu_unmap_globals(struct kgsl_pagetable *pagetable)
+{
+	unsigned int i;
+
+	for (i = 0; i < global_pt_count; i++) {
+		if (global_pt_entries[i] != NULL)
+			kgsl_mmu_unmap(pagetable, global_pt_entries[i]);
+	}
+}
+
+static void kgsl_iommu_map_globals(struct kgsl_pagetable *pagetable)
+{
+	unsigned int i;
+
+	for (i = 0; i < global_pt_count; i++) {
+		if (global_pt_entries[i] != NULL) {
+			int ret = kgsl_mmu_map(pagetable, global_pt_entries[i]);
+
+			BUG_ON(ret);
+		}
+	}
+}
+
+static void kgsl_iommu_remove_global(struct kgsl_mmu *mmu,
+		struct kgsl_memdesc *memdesc)
+{
+	int i;
+
+	if (memdesc->gpuaddr == 0 || !(memdesc->priv & KGSL_MEMDESC_GLOBAL))
+		return;
+
+	for (i = 0; i < global_pt_count; i++) {
+		if (global_pt_entries[i] == memdesc) {
+			memdesc->gpuaddr = 0;
+			memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
+			global_pt_entries[i] = NULL;
+			return;
+		}
+	}
+}
+
+static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
+		struct kgsl_memdesc *memdesc)
+{
+	if (memdesc->gpuaddr != 0)
+		return;
+
+	BUG_ON(global_pt_count >= GLOBAL_PT_ENTRIES);
+	BUG_ON((global_pt_alloc + memdesc->size) >= KGSL_IOMMU_GLOBAL_MEM_SIZE);
+
+	memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
+	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
+	global_pt_alloc += memdesc->size;
+
+	global_pt_entries[global_pt_count++] = memdesc;
+}
+
 static inline void _iommu_sync_mmu_pc(bool lock)
 {
 	if (need_iommu_sync == false)
@@ -609,8 +692,10 @@ static void kgsl_iommu_destroy_pagetable(struct kgsl_pagetable *pt)
 	if (KGSL_MMU_SECURE_PT == pt->name)
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
-	else
+	else {
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
+		kgsl_iommu_unmap_globals(pt);
+	}

 	if (iommu_pt->domain) {
 		trace_kgsl_pagetable_destroy(iommu_pt->ttbr0, pt->name);
@@ -669,7 +754,7 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
 		}
 	} else {
 		pt->va_start = KGSL_IOMMU_SVM_BASE32;
-		pt->va_end = KGSL_MMU_GLOBAL_MEM_BASE;
+		pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
 		pt->compat_va_start = pt->va_start;
 		pt->compat_va_end = pt->va_end;
 	}
@@ -792,6 +877,8 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 		goto done;
 	}

+	kgsl_iommu_map_globals(pt);
+
 done:
 	if (ret)
 		_free_pt(ctx, pt);
@@ -913,6 +1000,8 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
 		goto done;
 	}

+	kgsl_iommu_map_globals(pt);
+
 done:
 	if (ret)
 		_free_pt(ctx, pt);
@@ -1037,7 +1126,7 @@ static int kgsl_iommu_init(struct kgsl_mmu *mmu)
 		}
 	}

-	kgsl_add_global_pt_entry(KGSL_MMU_DEVICE(mmu), &iommu->setstate);
+	kgsl_iommu_add_global(mmu, &iommu->setstate);

 done:
 	if (status)
@@ -1364,7 +1453,8 @@ static void kgsl_iommu_close(struct kgsl_mmu *mmu)
 		kgsl_guard_page = NULL;
 	}

-	kgsl_free_global(&iommu->setstate);
+	kgsl_iommu_remove_global(mmu, &iommu->setstate);
+	kgsl_sharedmem_free(&iommu->setstate);
 }

 static u64
@@ -1735,8 +1825,8 @@ static uint64_t kgsl_iommu_find_svm_region(struct kgsl_pagetable *pagetable,
 }

 #define ADDR_IN_GLOBAL(_a) \
-	(((_a) >= KGSL_MMU_GLOBAL_MEM_BASE) && \
-	 ((_a) < (KGSL_MMU_GLOBAL_MEM_BASE + KGSL_MMU_GLOBAL_MEM_SIZE)))
+	(((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
+	 ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))

 static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
 		uint64_t gpuaddr, uint64_t size)
@@ -1881,6 +1971,8 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
 	.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
 	.mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
 	.mmu_init_pt = kgsl_iommu_init_pt,
+	.mmu_add_global = kgsl_iommu_add_global,
+	.mmu_remove_global = kgsl_iommu_remove_global,
 };

 static struct kgsl_mmu_pt_ops iommu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 4c256aadb87e..c8875bb975a9 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -19,10 +19,17 @@
 #include <linux/of.h>
 #include "kgsl.h"

+/*
+ * These defines control the address range for allocations that
+ * are mapped into all pagetables.
+ */
+#define KGSL_IOMMU_GLOBAL_MEM_SIZE	SZ_8M
+#define KGSL_IOMMU_GLOBAL_MEM_BASE	0xf8000000
+
 #define KGSL_IOMMU_SECURE_SIZE SZ_256M
-#define KGSL_IOMMU_SECURE_END KGSL_MMU_GLOBAL_MEM_BASE
+#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE
 #define KGSL_IOMMU_SECURE_BASE	\
-	(KGSL_MMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)
+	(KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)

 #define KGSL_IOMMU_SVM_BASE32		0x300000
 #define KGSL_IOMMU_SVM_END32		(0xC0000000 - SZ_16M)
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index eaea2625e030..1983a863c28e 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -29,233 +29,6 @@
 static enum kgsl_mmutype kgsl_mmu_type = KGSL_MMU_TYPE_NONE;

 static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

-/*
- * There are certain memory allocations (ringbuffer, memstore, etc) that need to
- * be present at the same address in every pagetable. We call these "global"
- * pagetable entries. There are relatively few of these and they are mostly
- * stable (defined at init time) but the actual number of globals can differ
- * slight depending on the target and implementation.
- *
- * Here we define an array and a simple allocator to keep track of the currently
- * active global entries. Each entry is assigned a unique address inside of a
- * MMU implementation specific "global" region. The addresses are assigned
- * sequentially and never re-used to avoid having to go back and reprogram
- * existing pagetables. The entire list of active entries are mapped and
- * unmapped into every new pagetable as it is created and destroyed.
- *
- * Because there are relatively few entries and they are defined at boot time we
- * don't need to go over the top to define a dynamic allocation scheme. It will
- * be less wasteful to pick a static number with a little bit of growth
- * potential.
- */
-
-#define KGSL_MAX_GLOBAL_PT_ENTRIES 32
-
-/**
- * struct kgsl_global_pt_entries - Collection of global pagetable entries
- * @offset - offset into the global PT space to be assigned to then next
- * allocation
- * @entries: Array of assigned memdesc entries
- * @count: Number of currently assigned entries
- *
- * Maintain a list of global pagetable entries. Pagetables are shared between
- * devices so the global pt entry list needs to be driver wide too
- */
-static struct kgsl_global_pt_entries {
-	unsigned int offset;
-	struct kgsl_memdesc *entries[KGSL_MAX_GLOBAL_PT_ENTRIES];
-	int count;
-} kgsl_global_pt_entries;
-
-/**
- * kgsl_search_global_pt_entries() - Check to see if the given GPU address
- * belongs to any of the global PT entries
- * @gpuaddr: GPU address to search for
- * @size: Size of the region to search for
- *
- * Search all the global pagetable entries for the GPU address and size and
- * return the memory descriptor
- */
-struct kgsl_memdesc *kgsl_search_global_pt_entries(unsigned int gpuaddr,
-		unsigned int size)
-{
-	int i;
-
-	for (i = 0; i < KGSL_MAX_GLOBAL_PT_ENTRIES; i++) {
-		struct kgsl_memdesc *memdesc =
-			kgsl_global_pt_entries.entries[i];
-
-		if (memdesc && kgsl_gpuaddr_in_memdesc(memdesc, gpuaddr, size))
-			return memdesc;
-	}
-
-	return NULL;
-}
-EXPORT_SYMBOL(kgsl_search_global_pt_entries);
-
-/**
- * kgsl_unmap_global_pt_entries() - Unmap all global entries from the given
- * pagetable
- * @pagetable: Pointer to a kgsl_pagetable structure
- *
- * Unmap all the current active global entries from the specified pagetable
- */
-static void kgsl_unmap_global_pt_entries(struct kgsl_pagetable *pagetable)
-{
-	int i;
-	unsigned long flags;
-
-	BUG_ON(pagetable->name == KGSL_MMU_GLOBAL_PT);
-
-	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
-	if (pagetable->globals_mapped == false) {
-		spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
-	for (i = 0; i < KGSL_MAX_GLOBAL_PT_ENTRIES; i++) {
-		struct kgsl_memdesc *entry = kgsl_global_pt_entries.entries[i];
-		if (entry != NULL)
-			kgsl_mmu_unmap(pagetable, entry);
-	}
-
-	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
-	pagetable->globals_mapped = false;
-	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-}
-
-/**
- * kgsl_map_global_pt_entries() - Map all active global entries into the given
- * pagetable
- * @pagetable: Pointer to a kgsl_pagetable structure
- *
- * Map all the current global PT entries into the specified pagetable.
- */
-void kgsl_map_global_pt_entries(struct kgsl_pagetable *pagetable)
-{
-	int i, ret = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
-	if (pagetable->globals_mapped == true) {
-		spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-		return;
-	}
-	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-
-	for (i = 0; !ret && i < KGSL_MAX_GLOBAL_PT_ENTRIES; i++) {
-		struct kgsl_memdesc *entry = kgsl_global_pt_entries.entries[i];
-
-		if (entry != NULL) {
-			ret = kgsl_mmu_map(pagetable, entry);
-			BUG_ON(ret);
-		}
-	}
-
-	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
-	pagetable->globals_mapped = true;
-	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
-}
-EXPORT_SYMBOL(kgsl_map_global_pt_entries);
-
-/**
- * kgsl_remove_global_pt_entry() - Remove a memory descriptor from the global PT
- * entry list
- * @memdesc: Pointer to the kgsl memory descriptor to remove
- *
- * Remove the specified memory descriptor from the current list of global
- * pagetable entries
- */
-void kgsl_remove_global_pt_entry(struct kgsl_memdesc *memdesc)
-{
-	int i, j;
-
-	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
-		return;
-
-	if (memdesc->gpuaddr == 0)
-		return;
-
-	for (i = 0; i < kgsl_global_pt_entries.count; i++) {
-		if (kgsl_global_pt_entries.entries[i] == memdesc) {
-			memdesc->gpuaddr = 0;
-			memdesc->priv &= ~KGSL_MEMDESC_GLOBAL;
-			for (j = i; j < kgsl_global_pt_entries.count; j++)
-				kgsl_global_pt_entries.entries[j] =
-					kgsl_global_pt_entries.entries[j + 1];
-			kgsl_global_pt_entries.entries[j - 1] = NULL;
-			kgsl_global_pt_entries.count--;
-			break;
-		}
-	}
-}
-EXPORT_SYMBOL(kgsl_remove_global_pt_entry);
-
-/**
- * kgsl_add_global_pt_entry() - Add a new global PT entry to the active list
- * @mmu: Pointer to a kgsl_mmu structure for the active MMU implementation
- * @memdesc: Pointer to the kgsl memory descriptor to add
- *
- * Add a memory descriptor to the list of global pagetable entries.
- */
-int kgsl_add_global_pt_entry(struct kgsl_device *device,
-		struct kgsl_memdesc *memdesc)
-{
-	int i;
-	int index = 0;
-	uint64_t gaddr = KGSL_MMU_GLOBAL_MEM_BASE;
-	uint64_t size = ALIGN(memdesc->size, PAGE_SIZE);
-
-	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
-		memdesc->gpuaddr = (uint64_t) memdesc->physaddr;
-		return 0;
-	}
-
-	/* do we already have a mapping? */
-	if (memdesc->gpuaddr != 0)
-		return 0;
-
-	if (kgsl_global_pt_entries.count == KGSL_MAX_GLOBAL_PT_ENTRIES)
-		return -ENOMEM;
-
-	/*
-	 * search for the first free slot by going through all valid entries
-	 * and checking for overlap. All entries are in increasing order of
-	 * gpuaddr
-	 */
-	for (i = 0; i < kgsl_global_pt_entries.count; i++) {
-		if (kgsl_addr_range_overlap(gaddr, size,
-			kgsl_global_pt_entries.entries[i]->gpuaddr,
-			kgsl_global_pt_entries.entries[i]->size))
-			/* On a clash set gaddr to end of clashing entry */
-			gaddr = kgsl_global_pt_entries.entries[i]->gpuaddr +
-				kgsl_global_pt_entries.entries[i]->size;
-		else
-			break;
-	}
-	index = i;
-	if ((gaddr + size) >=
-		(KGSL_MMU_GLOBAL_MEM_BASE + KGSL_MMU_GLOBAL_MEM_SIZE))
-		return -ENOMEM;
-
-	memdesc->gpuaddr = gaddr;
-
-	memdesc->priv |= KGSL_MEMDESC_GLOBAL;
-	/*
-	 * Move the entries from index till the last entry 1 slot right leaving
-	 * the slot at index empty for the newcomer
-	 */
-	for (i = kgsl_global_pt_entries.count - 1; i >= index; i--)
-		kgsl_global_pt_entries.entries[i + 1] =
-			kgsl_global_pt_entries.entries[i];
-	kgsl_global_pt_entries.entries[index] = memdesc;
-	kgsl_global_pt_entries.count++;
-
-	return 0;
-}
-EXPORT_SYMBOL(kgsl_add_global_pt_entry);
-
 static void kgsl_destroy_pagetable(struct kref *kref)
 {
 	struct kgsl_pagetable *pagetable = container_of(kref,
@@ -263,8 +36,6 @@ static void kgsl_destroy_pagetable(struct kref *kref)
 	kgsl_mmu_detach_pagetable(pagetable);

-	kgsl_unmap_global_pt_entries(pagetable);
-
 	if (PT_OP_VALID(pagetable, mmu_destroy_pagetable))
 		pagetable->pt_ops->mmu_destroy_pagetable(pagetable);

@@ -586,9 +357,6 @@ kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu,
 		goto err;
 	}

-	if (KGSL_MMU_SECURE_PT != name)
-		kgsl_map_global_pt_entries(pagetable);
-
 	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
 	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
 	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
@@ -833,6 +601,26 @@ void kgsl_mmu_close(struct kgsl_device *device)
 }
 EXPORT_SYMBOL(kgsl_mmu_close);

+void kgsl_mmu_remove_global(struct kgsl_device *device,
+		struct kgsl_memdesc *memdesc)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (MMU_OP_VALID(mmu, mmu_remove_global))
+		mmu->mmu_ops->mmu_remove_global(mmu, memdesc);
+}
+EXPORT_SYMBOL(kgsl_mmu_remove_global);
+
+void kgsl_mmu_add_global(struct kgsl_device *device,
+		struct kgsl_memdesc *memdesc)
+{
+	struct kgsl_mmu *mmu = &device->mmu;
+
+	if (MMU_OP_VALID(mmu, mmu_add_global))
+		mmu->mmu_ops->mmu_add_global(mmu, memdesc);
+}
+EXPORT_SYMBOL(kgsl_mmu_add_global);
+
 int kgsl_mmu_enabled(void)
 {
 	if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 2d0570df9283..dcc0d00deb5b 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -14,12 +14,6 @@
 #define __KGSL_MMU_H

 #include "kgsl_iommu.h"

-/*
- * These defines control the address range for allocations that
- * are mapped into all pagetables.
- */
-#define KGSL_MMU_GLOBAL_MEM_SIZE	SZ_8M
-#define KGSL_MMU_GLOBAL_MEM_BASE	0xf8000000

 /* Identifier for the global page table */
 /* Per process page tables will probably pass in the thread group
@@ -50,7 +44,6 @@ struct kgsl_pagetable {
 	unsigned int fault_addr;
 	void *priv;
 	struct kgsl_mmu *mmu;
-	bool globals_mapped;
 };

 struct kgsl_mmu;
@@ -73,6 +66,10 @@ struct kgsl_mmu_ops {
 	struct kgsl_protected_registers *(*mmu_get_prot_regs)
 			(struct kgsl_mmu *mmu);
 	int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *);
+	void (*mmu_add_global)(struct kgsl_mmu *mmu,
+		struct kgsl_memdesc *memdesc);
+	void (*mmu_remove_global)(struct kgsl_mmu *mmu,
+		struct kgsl_memdesc *memdesc);
 };

 struct kgsl_mmu_pt_ops {
@@ -166,13 +163,11 @@ int kgsl_mmu_find_region(struct kgsl_pagetable *pagetable,
 		uint64_t region_start, uint64_t region_end,
 		uint64_t *gpuaddr, uint64_t size, unsigned int align);

-int kgsl_add_global_pt_entry(struct kgsl_device *device,
+void kgsl_mmu_add_global(struct kgsl_device *device,
 		struct kgsl_memdesc *memdesc);
-void kgsl_remove_global_pt_entry(struct kgsl_memdesc *memdesc);
-void kgsl_map_global_pt_entries(struct kgsl_pagetable *pagetable);
+void kgsl_mmu_remove_global(struct kgsl_device *device,
+		struct kgsl_memdesc *memdesc);

-struct kgsl_memdesc *kgsl_search_global_pt_entries(unsigned int gpuaddr,
-		unsigned int size);
 struct kgsl_pagetable *kgsl_mmu_get_pt_from_ptname(struct kgsl_mmu *mmu,
 		int ptname);

diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index 151217523759..5093ebd6e51a 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -267,11 +267,8 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
 	memdesc->priv = priv;

 	ret = kgsl_sharedmem_alloc_contig(device, memdesc, NULL, (size_t) size);
-	if (!ret) {
-		ret = kgsl_add_global_pt_entry(device, memdesc);
-		if (ret)
-			kgsl_sharedmem_free(memdesc);
-	}
+	if (ret == 0)
+		kgsl_mmu_add_global(device, memdesc);

 	return ret;
 }
@@ -280,14 +277,16 @@ static inline int kgsl_allocate_global(struct kgsl_device *device,
  * kgsl_free_global() - Free a device wide GPU allocation and remove it from the
  * global pagetable entry list
  *
+ * @device: Pointer to the device
  * @memdesc: Pointer to the GPU memory descriptor to free
  *
  * Remove the specific memory descriptor from the global pagetable entry list
  * and free it
  */
-static inline void kgsl_free_global(struct kgsl_memdesc *memdesc)
+static inline void kgsl_free_global(struct kgsl_device *device,
+		struct kgsl_memdesc *memdesc)
 {
-	kgsl_remove_global_pt_entry(memdesc);
+	kgsl_mmu_remove_global(device, memdesc);
 	kgsl_sharedmem_free(memdesc);
 }
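
After the patch, the generic layer keeps only a thin dispatcher: kgsl_allocate_global() and kgsl_free_global() call kgsl_mmu_add_global()/kgsl_mmu_remove_global(), which forward to the backend only when it implements the new mmu_add_global/mmu_remove_global hooks. A compressed, compilable sketch of that indirection follows; the structs are trimmed to just the fields the example needs, and the explicit NULL checks play the role of the driver's MMU_OP_VALID() macro.

#include <stddef.h>
#include <stdio.h>

struct kgsl_memdesc;		/* opaque for this sketch */
struct kgsl_mmu;

/* Only the two hooks added by this patch are shown. */
struct kgsl_mmu_ops {
	void (*mmu_add_global)(struct kgsl_mmu *mmu, struct kgsl_memdesc *md);
	void (*mmu_remove_global)(struct kgsl_mmu *mmu, struct kgsl_memdesc *md);
};

struct kgsl_mmu {
	const struct kgsl_mmu_ops *mmu_ops;
};

struct kgsl_device {
	struct kgsl_mmu mmu;
};

/* Generic layer: forward to the backend if (and only if) it provides the
 * hook, so a "nommu" configuration simply does nothing. */
static void kgsl_mmu_add_global(struct kgsl_device *device,
				struct kgsl_memdesc *md)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->mmu_ops && mmu->mmu_ops->mmu_add_global)
		mmu->mmu_ops->mmu_add_global(mmu, md);
}

/* Backend stub standing in for kgsl_iommu_add_global(). */
static void iommu_add_global(struct kgsl_mmu *mmu, struct kgsl_memdesc *md)
{
	(void)mmu; (void)md;
	puts("iommu backend placed a global entry");
}

static const struct kgsl_mmu_ops iommu_ops = {
	.mmu_add_global = iommu_add_global,
};

int main(void)
{
	struct kgsl_device dev = { .mmu = { .mmu_ops = &iommu_ops } };

	kgsl_mmu_add_global(&dev, NULL);	/* dispatches to the backend */
	return 0;
}

Keeping the global-entry policy behind these two hooks is what lets all of the list management leave kgsl_mmu.c: the generic code no longer needs to know how addresses are chosen or when pagetables pick the entries up.
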