| author | Dave Hansen <dave.hansen@linux.intel.com> | 2017-08-30 16:23:00 -0700 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2018-01-05 15:44:23 +0100 |
| commit | bed9bb7f3e6d4045013d2bb9e4004896de57f02b | |
| tree | 25284cd383867826b0303949ed3bd1ecdee87a68 /arch/x86/mm/pgtable.c | |
| parent | 8a43ddfb93a0c6ae1a6e1f5c25705ec5d1843c40 | |
kaiser: merged update
Merged fixes and cleanups, rebased to 4.4.89 tree (no 5-level paging).
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/x86/mm/pgtable.c')
| -rw-r--r-- | arch/x86/mm/pgtable.c | 40 |
1 file changed, 13 insertions(+), 27 deletions(-)
```diff
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 087d3e1e7125..e2bd5c81279e 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -340,40 +340,26 @@ static inline void _pgd_free(pgd_t *pgd)
 		kmem_cache_free(pgd_cache, pgd);
 }
 #else
-static inline pgd_t *_pgd_alloc(void)
-{
+
 #ifdef CONFIG_KAISER
-	// Instead of one PML4, we aquire two PML4s and, thus, an 8kb-aligned memory
-	// block. Therefore, we have to allocate at least 3 pages. However, the
-	// __get_free_pages returns us 4 pages. Hence, we store the base pointer at
-	// the beginning of the page of our 8kb-aligned memory block in order to
-	// correctly free it afterwars.
-
-	unsigned long pages = __get_free_pages(PGALLOC_GFP, get_order(4*PAGE_SIZE));
-
-	if(native_get_normal_pgd((pgd_t*) pages) == (pgd_t*) pages)
-	{
-		*((unsigned long*)(pages + 2 * PAGE_SIZE)) = pages;
-		return (pgd_t *) pages;
-	}
-	else
-	{
-		*((unsigned long*)(pages + 3 * PAGE_SIZE)) = pages;
-		return (pgd_t *) (pages + PAGE_SIZE);
-	}
+/*
+ * Instead of one pmd, we aquire two pmds. Being order-1, it is
+ * both 8k in size and 8k-aligned. That lets us just flip bit 12
+ * in a pointer to swap between the two 4k halves.
+ */
+#define PGD_ALLOCATION_ORDER 1
 #else
-	return (pgd_t *)__get_free_page(PGALLOC_GFP);
+#define PGD_ALLOCATION_ORDER 0
 #endif
+
+static inline pgd_t *_pgd_alloc(void)
+{
+	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
 }
 
 static inline void _pgd_free(pgd_t *pgd)
 {
-#ifdef CONFIG_KAISER
-	unsigned long pages = *((unsigned long*) ((char*) pgd + 2 * PAGE_SIZE));
-	free_pages(pages, get_order(4*PAGE_SIZE));
-#else
-	free_page((unsigned long)pgd);
-#endif
+	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
 }
 #endif /* CONFIG_X86_PAE */
```
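The point of the new comment in the diff is that an order-1, 8k-aligned allocation puts the kernel pgd in the first 4k half and the KAISER shadow (user) pgd in the second, so switching between the two is just toggling bit 12 of the pointer. Below is a minimal sketch of that pointer arithmetic; the helper names here are hypothetical (the kernel's own helpers, such as native_get_normal_pgd() seen in the removed code, are not shown in this hunk), and PAGE_SIZE is assumed to be 4k.

```c
/*
 * Sketch only: how a bit-12 flip selects between the two 4k halves of an
 * order-1, 8k-aligned pgd allocation. Helper names are illustrative, not
 * the kernel's.
 */
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096UL	/* assumes 4k pages */

/* First 4k half -> second 4k half (kernel pgd -> shadow/user pgd). */
static inline void *pgd_to_shadow(void *pgd)
{
	return (void *)((uintptr_t)pgd | SKETCH_PAGE_SIZE);	/* set bit 12 */
}

/* Second 4k half -> first 4k half (shadow/user pgd -> kernel pgd). */
static inline void *pgd_to_normal(void *pgd)
{
	return (void *)((uintptr_t)pgd & ~SKETCH_PAGE_SIZE);	/* clear bit 12 */
}
```

Compared with the removed 4-page allocation, which had to stash the base pointer two pages in so it could be freed later, the order-1 allocation needs no bookkeeping: _pgd_free() simply hands the same PGD_ALLOCATION_ORDER back to free_pages().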
