wok diff linux/stuff/linux-CVE-2016-5195.u @ rev 19473
Add svgcleaner
|          |                                             |
|----------|---------------------------------------------|
| author   | Aleksej Bobylev <al.bobylev@gmail.com>      |
| date     | Tue Nov 01 02:51:21 2016 +0200 (2016-11-01) |
| parents  |                                             |
| children |                                             |
line diff
```diff
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/linux/stuff/linux-CVE-2016-5195.u	Tue Nov 01 02:51:21 2016 +0200
@@ -0,0 +1,87 @@
+https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2016-5195
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1611,6 +1611,7 @@ struct page *follow_page(struct vm_area_struct *, unsigned long address,
+ #define FOLL_MLOCK	0x40	/* mark page as mlocked */
+ #define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
+ #define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
++#define FOLL_COW	0x4000	/* internal GUP flag */
+ 
+ typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ 			void *data);
+diff --git a/mm/memory.c b/mm/memory.c
+index 675b211296fd..2917e9b2e4d4 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1427,6 +1427,24 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ }
+ EXPORT_SYMBOL_GPL(zap_vma_ptes);
+ 
++static inline bool can_follow_write_pte(pte_t pte, struct page *page,
++					unsigned int flags)
++{
++	if (pte_write(pte))
++		return true;
++
++	/*
++	 * Make sure that we are really following CoWed page. We do not really
++	 * have to care about exclusiveness of the page because we only want
++	 * to ensure that once COWed page hasn't disappeared in the meantime
++	 * or it hasn't been merged to a KSM page.
++	 */
++	if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
++		return page && PageAnon(page) && !PageKsm(page);
++
++	return false;
++}
++
+ /**
+  * follow_page - look up a page descriptor from a user-virtual address
+  * @vma: vm_area_struct mapping @address
+@@ -1509,10 +1527,13 @@ split_fallthrough:
+ 	pte = *ptep;
+ 	if (!pte_present(pte))
+ 		goto no_page;
+-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+-		goto unlock;
+ 
+ 	page = vm_normal_page(vma, address, pte);
++	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) {
++		pte_unmap_unlock(ptep, ptl);
++		return NULL;
++	}
++
+ 	if (unlikely(!page)) {
+ 		if ((flags & FOLL_DUMP) ||
+ 		    !is_zero_pfn(pte_pfn(pte)))
+@@ -1555,7 +1576,7 @@ split_fallthrough:
+ 			unlock_page(page);
+ 		}
+ 	}
+-unlock:
++
+ 	pte_unmap_unlock(ptep, ptl);
+ out:
+ 	return page;
+@@ -1789,17 +1810,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ 				 * The VM_FAULT_WRITE bit tells us that
+ 				 * do_wp_page has broken COW when necessary,
+ 				 * even if maybe_mkwrite decided not to set
+-				 * pte_write. We can thus safely do subsequent
+-				 * page lookups as if they were reads. But only
+-				 * do so when looping for pte_write is futile:
+-				 * in some cases userspace may also be wanting
+-				 * to write to the gotten user page, which a
+-				 * read fault here might prevent (a readonly
+-				 * page might get reCOWed by userspace write).
++				 * pte_write. We cannot simply drop FOLL_WRITE
++				 * here because the COWed page might be gone by
++				 * the time we do the subsequent page lookups.
+ 				 */
+ 				if ((ret & VM_FAULT_WRITE) &&
+ 				    !(vma->vm_flags & VM_WRITE))
+-					foll_flags &= ~FOLL_WRITE;
++					foll_flags |= FOLL_COW;
+ 
+ 				cond_resched();
+ 			}
```
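This is the upstream "Dirty COW" fix carried as a SliTaz kernel patch. The old retry logic in __get_user_pages() dropped FOLL_WRITE once do_wp_page() had broken COW, so a racing madvise(MADV_DONTNEED) could discard the COWed copy, and the now read-only lookup would hand back the original page, which the forced write then dirtied. The patched code keeps FOLL_WRITE and sets FOLL_COW instead; can_follow_write_pte() then permits the forced write only while the PTE still points at the anonymous COWed copy (PageAnon and not PageKsm).

The code path being patched is reachable from ordinary userspace whenever a write is forced into a read-only private mapping, most simply through /proc/self/mem. Below is a minimal sketch of that trigger path, assuming Linux with a writable /proc/pid/mem; it only demonstrates the benign FOLL_FORCE copy-on-write break, not the CVE-2016-5195 race itself:

```c
/* Sketch: force a write into a read-only private mapping via
 * /proc/self/mem (assumption: Linux, writable /proc/pid/mem). */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* A private, read-only page: a direct store here would SIGSEGV. */
	char *map = mmap(NULL, 4096, PROT_READ,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Writes through /proc/self/mem are serviced by get_user_pages()
	 * with FOLL_FORCE | FOLL_WRITE; because this VMA lacks VM_WRITE,
	 * the kernel takes exactly the retry branch shown in the patch. */
	int fd = open("/proc/self/mem", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	const char msg[] = "written via FOLL_FORCE";
	if (pwrite(fd, msg, sizeof(msg), (off_t)(uintptr_t)map) < 0)
		perror("pwrite");
	close(fd);

	/* The store landed in a private COWed copy of the page. */
	printf("mapping now contains: %s\n", map);
	return 0;
}
```

On a patched kernel this still succeeds: FOLL_COW does not forbid the forced write, it only makes the retried lookup verify, via can_follow_write_pte(), that the page about to be handed out is still the anonymous COWed copy rather than whatever replaced it.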