| field | value | when |
|---|---|---|
| author | Srinivasarao P <spathi@codeaurora.org> | 2018-08-16 10:31:30 +0530 |
| committer | Srinivasarao P <spathi@codeaurora.org> | 2018-08-24 00:07:01 +0530 |
| commit | 79de04d8065db03fb4a0cf9d2bf1916b092cabcc (patch) | |
| tree | 54733763f20f975ad1b37d26d56690012c0b6786 /mm/memory.c | |
| parent | 4bef50d041e80243d279ed1bccda5297b81ba306 (diff) | |
| parent | f057ff937754efc42d56bee825187b2ce6c36958 (diff) | |
Merge android-4.4.148 (f057ff9) into msm-4.4
* refs/heads/tmp-f057ff9
Linux 4.4.148
x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED architectures
x86/init: fix build with CONFIG_SWAP=n
x86/speculation/l1tf: Fix up CPU feature flags
x86/mm/kmmio: Make the tracer robust against L1TF
x86/mm/pat: Make set_memory_np() L1TF safe
x86/speculation/l1tf: Make pmd/pud_mknotpresent() invert
x86/speculation/l1tf: Invert all not present mappings
x86/speculation/l1tf: Fix up pte->pfn conversion for PAE
x86/speculation/l1tf: Protect PAE swap entries against L1TF
x86/cpufeatures: Add detection of L1D cache flush support.
x86/speculation/l1tf: Extend 64bit swap file size limit
x86/bugs: Move the l1tf function and define pr_fmt properly
x86/speculation/l1tf: Limit swap file size to MAX_PA/2
x86/speculation/l1tf: Disallow non privileged high MMIO PROT_NONE mappings
mm: fix cache mode tracking in vm_insert_mixed()
mm: Add vm_insert_pfn_prot()
x86/speculation/l1tf: Add sysfs reporting for l1tf
x86/speculation/l1tf: Make sure the first page is always reserved
x86/speculation/l1tf: Protect PROT_NONE PTEs against speculation
x86/speculation/l1tf: Protect swap entries against L1TF
x86/speculation/l1tf: Change order of offset/type in swap entry
mm: x86: move _PAGE_SWP_SOFT_DIRTY from bit 7 to bit 1
x86/mm: Fix swap entry comment and macro
x86/mm: Move swap offset/type up in PTE to work around erratum
x86/speculation/l1tf: Increase 32bit PAE __PHYSICAL_PAGE_SHIFT
x86/irqflags: Provide a declaration for native_save_fl
kprobes/x86: Fix %p uses in error messages
x86/speculation: Protect against userspace-userspace spectreRSB
x86/paravirt: Fix spectre-v2 mitigations for paravirt guests
ARM: dts: imx6sx: fix irq for pcie bridge
IB/ocrdma: fix out of bounds access to local buffer
IB/mlx4: Mark user MR as writable if actual virtual memory is writable
IB/core: Make testing MR flags for writability a static inline function
fix __legitimize_mnt()/mntput() race
fix mntput/mntput race
root dentries need RCU-delayed freeing
scsi: sr: Avoid that opening a CD-ROM hangs with runtime power management enabled
ACPI / LPSS: Add missing prv_offset setting for byt/cht PWM devices
xen/netfront: don't cache skb_shinfo()
parisc: Define mb() and add memory barriers to assembler unlock sequences
parisc: Enable CONFIG_MLONGCALLS by default
fork: unconditionally clear stack on fork
ipv4+ipv6: Make INET*_ESP select CRYPTO_ECHAINIV
tpm: fix race condition in tpm_common_write()
ext4: fix check to prevent initializing reserved inodes
Linux 4.4.147
jfs: Fix inconsistency between memory allocation and ea_buf->max_size
i2c: imx: Fix reinit_completion() use
ring_buffer: tracing: Inherit the tracing setting to next ring buffer
ACPI / PCI: Bail early in acpi_pci_add_bus() if there is no ACPI handle
ext4: fix false negatives *and* false positives in ext4_check_descriptors()
netlink: Don't shift on 64 for ngroups
netlink: Don't shift with UB on nlk->ngroups
netlink: Do not subscribe to non-existent groups
nohz: Fix local_timer_softirq_pending()
genirq: Make force irq threading setup more robust
scsi: qla2xxx: Return error when TMF returns
scsi: qla2xxx: Fix ISP recovery on unload
Conflicts:
include/linux/swapfile.h
Removed CONFIG_CRYPTO_ECHAINIV from the defconfig files since this upmerge
adds this config to the Kconfig file.
Change-Id: Ide96c29f919d76590c2bdccf356d1d464a892fd7
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'mm/memory.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | mm/memory.c | 62 |

1 file changed, 51 insertions, 11 deletions
diff --git a/mm/memory.c b/mm/memory.c
index 78ab57141731..ab4214e214fe 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1605,8 +1605,29 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	return vm_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_pfn);
+
+/**
+ * vm_insert_pfn_prot - insert single pfn into user vma with specified pgprot
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ * @pgprot: pgprot flags for the inserted page
+ *
+ * This is exactly like vm_insert_pfn, except that it allows drivers to
+ * to override pgprot on a per-page basis.
+ *
+ * This only makes sense for IO mappings, and it makes no sense for
+ * cow mappings.  In general, using multiple vmas is preferable;
+ * vm_insert_pfn_prot should only be used if using multiple VMAs is
+ * impractical.
+ */
+int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+			unsigned long pfn, pgprot_t pgprot)
+{
 	int ret;
-	pgprot_t pgprot = vma->vm_page_prot;
 	/*
 	 * Technically, architectures with pte_special can avoid all these
 	 * restrictions (same for remap_pfn_range).  However we would like
@@ -1624,19 +1645,29 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 	if (track_pfn_insert(vma, &pgprot, pfn))
 		return -EINVAL;
 
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
+
 	ret = insert_pfn(vma, addr, pfn, pgprot);
 
 	return ret;
 }
-EXPORT_SYMBOL(vm_insert_pfn);
+EXPORT_SYMBOL(vm_insert_pfn_prot);
 
 int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 			unsigned long pfn)
 {
+	pgprot_t pgprot = vma->vm_page_prot;
+
 	BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
 
 	if (addr < vma->vm_start || addr >= vma->vm_end)
 		return -EFAULT;
 
+	if (track_pfn_insert(vma, &pgprot, pfn))
+		return -EINVAL;
+
+	if (!pfn_modify_allowed(pfn, pgprot))
+		return -EACCES;
 	/*
 	 * If we don't have pte special, then we have to use the pfn_valid()
@@ -1649,9 +1680,9 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 		struct page *page;
 
 		page = pfn_to_page(pfn);
-		return insert_page(vma, addr, page, vma->vm_page_prot);
+		return insert_page(vma, addr, page, pgprot);
 	}
-	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+	return insert_pfn(vma, addr, pfn, pgprot);
 }
 EXPORT_SYMBOL(vm_insert_mixed);
 
@@ -1666,6 +1697,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
 	pte_t *pte;
 	spinlock_t *ptl;
+	int err = 0;
 
 	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
@@ -1673,12 +1705,16 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
 	arch_enter_lazy_mmu_mode();
 	do {
 		BUG_ON(!pte_none(*pte));
+		if (!pfn_modify_allowed(pfn, prot)) {
+			err = -EACCES;
+			break;
+		}
 		set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	arch_leave_lazy_mmu_mode();
 	pte_unmap_unlock(pte - 1, ptl);
-	return 0;
+	return err;
 }
 
 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
@@ -1687,6 +1723,7 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 {
 	pmd_t *pmd;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pmd = pmd_alloc(mm, pud, addr);
@@ -1695,9 +1732,10 @@ static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
 	VM_BUG_ON(pmd_trans_huge(*pmd));
 	do {
 		next = pmd_addr_end(addr, end);
-		if (remap_pte_range(mm, pmd, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pte_range(mm, pmd, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pmd++, addr = next, addr != end);
 	return 0;
 }
@@ -1708,6 +1746,7 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 {
 	pud_t *pud;
 	unsigned long next;
+	int err;
 
 	pfn -= addr >> PAGE_SHIFT;
 	pud = pud_alloc(mm, pgd, addr);
@@ -1715,9 +1754,10 @@ static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
 		return -ENOMEM;
 	do {
 		next = pud_addr_end(addr, end);
-		if (remap_pmd_range(mm, pud, addr, next,
-				pfn + (addr >> PAGE_SHIFT), prot))
-			return -ENOMEM;
+		err = remap_pmd_range(mm, pud, addr, next,
+				pfn + (addr >> PAGE_SHIFT), prot);
+		if (err)
+			return err;
 	} while (pud++, addr = next, addr != end);
 	return 0;
 }
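For readers of the diff above: vm_insert_pfn_prot() (brought in here by the "mm: Add vm_insert_pfn_prot()" backport) lets a driver override the pgprot for a single page instead of changing vma->vm_page_prot for the whole VMA. Below is a minimal sketch of how a 4.4-era fault handler might call it; the example_dev structure, its mmio_base field, and example_dev_fault() are hypothetical illustrations, while vm_insert_pfn_prot() and its error codes come from the diff and the rest uses standard 4.4 mm interfaces.

```c
#include <linux/mm.h>

/* Hypothetical device state -- illustration only, not part of this commit. */
struct example_dev {
	phys_addr_t mmio_base;		/* start of the device's MMIO aperture */
};

/* 4.4-era ->fault signature: the VMA is passed alongside the vm_fault. */
static int example_dev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct example_dev *edev = vma->vm_private_data;
	unsigned long pfn = (edev->mmio_base >> PAGE_SHIFT) + vmf->pgoff;
	int ret;

	/*
	 * Map just this page uncached while leaving vma->vm_page_prot alone
	 * for the rest of the VMA -- the per-page override that
	 * vm_insert_pfn_prot() exists for.
	 */
	ret = vm_insert_pfn_prot(vma, (unsigned long)vmf->virtual_address,
				 pfn, pgprot_noncached(vma->vm_page_prot));

	/*
	 * After this series the insert can also fail with -EACCES when
	 * pfn_modify_allowed() vetoes the PFN/pgprot combination (L1TF).
	 */
	if (ret == -ENOMEM)
		return VM_FAULT_OOM;
	if (ret)	/* -EFAULT, -EINVAL, -EBUSY, -EACCES, ... */
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
```

How the return codes map to VM_FAULT_* values is the driver's choice; the point of the sketch is that, after this series, -EACCES from pfn_modify_allowed() is a possible outcome that older vm_insert_pfn() callers never had to consider.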
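The pfn_modify_allowed() calls added throughout the diff are an architecture opt-in hook from the L1TF patches listed in the commit message. The sketch below shows the permissive generic fallback and summarizes the x86 policy in comments; the guard and helper names (__HAVE_ARCH_PFN_MODIFY_ALLOWED, arch_has_pfn_modify_check(), l1tf_pfn_limit()) are recalled from the upstream series and the exact conditions should be treated as approximate.

```c
/*
 * Approximate shape of the asm-generic fallback added by
 * "x86/speculation/l1tf: Unbreak !__HAVE_ARCH_PFN_MODIFY_ALLOWED
 * architectures": architectures that do not define the hook allow every
 * PFN/pgprot combination, so the new -EACCES paths in mm/memory.c are
 * never taken there.
 */
#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	return true;
}

static inline bool arch_has_pfn_modify_check(void)
{
	return false;
}
#endif

/*
 * On x86 the hook (roughly) refuses the mapping only when all of the
 * following hold, per "x86/speculation/l1tf: Disallow non privileged
 * high MMIO PROT_NONE mappings":
 *   - the CPU is affected by L1TF,
 *   - the protection is one whose PFN gets inverted (e.g. PROT_NONE),
 *   - the PFN is not valid RAM, and
 *   - it lies above l1tf_pfn_limit() and the caller lacks CAP_SYS_ADMIN.
 * Everything else is allowed, which keeps ordinary driver mmaps working.
 */
```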