wok-stable diff linux/stuff/linux-CVE-2016-5195.u @ rev 12465

Up e2fsprogs (1.44.2)
author Pascal Bellard <pascal.bellard@slitaz.org>
date Mon Mar 04 18:42:23 2019 +0100 (2019-03-04)
line diff
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/linux/stuff/linux-CVE-2016-5195.u	Mon Mar 04 18:42:23 2019 +0100
     1.3 @@ -0,0 +1,84 @@
     1.4 +--- linux-2.6.37/include/linux/mm.h
     1.5 ++++ linux-2.6.37/include/linux/mm.h
     1.6 +@@ -1415,6 +1415,7 @@
     1.7 + #define FOLL_GET	0x04	/* do get_page on page */
     1.8 + #define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
     1.9 + #define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
    1.10 ++#define FOLL_COW	0x4000	/* internal GUP flag */
    1.11 + 
    1.12 + typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
    1.13 + 			void *data);
    1.14 +--- linux-2.6.37/mm/memory.c
    1.15 ++++ linux-2.6.37/mm/memory.c
    1.16 +@@ -1225,6 +1225,24 @@
    1.17 + }
    1.18 + EXPORT_SYMBOL_GPL(zap_vma_ptes);
    1.19 + 
    1.20 ++static inline bool can_follow_write_pte(pte_t pte, struct page *page,
    1.21 ++					unsigned int flags)
    1.22 ++{
    1.23 ++	if (pte_write(pte))
    1.24 ++		return true;
    1.25 ++
    1.26 ++	/*
     1.27 ++	 * Make sure that we are really following a COWed page. We do not
     1.28 ++	 * have to care about exclusiveness of the page; we only want to
     1.29 ++	 * ensure that the COWed page has not disappeared in the meantime
     1.30 ++	 * and has not been merged into a KSM page.
    1.31 ++	 */
    1.32 ++	if ((flags & FOLL_FORCE) && (flags & FOLL_COW))
    1.33 ++		return page && PageAnon(page) && !PageKsm(page);
    1.34 ++
    1.35 ++	return false;
    1.36 ++}
    1.37 ++
    1.38 + /**
    1.39 +  * follow_page - look up a page descriptor from a user-virtual address
    1.40 +  * @vma: vm_area_struct mapping @address
    1.41 +@@ -1286,10 +1304,13 @@
    1.42 + 	pte = *ptep;
    1.43 + 	if (!pte_present(pte))
    1.44 + 		goto no_page;
    1.45 +-	if ((flags & FOLL_WRITE) && !pte_write(pte))
    1.46 +-		goto unlock;
    1.47 + 
    1.48 + 	page = vm_normal_page(vma, address, pte);
    1.49 ++	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, page, flags)) {
    1.50 ++		pte_unmap_unlock(ptep, ptl);
    1.51 ++		return NULL;
    1.52 ++	}
    1.53 ++
    1.54 + 	if (unlikely(!page)) {
    1.55 + 		if ((flags & FOLL_DUMP) ||
    1.56 + 		    !is_zero_pfn(pte_pfn(pte)))
    1.57 +@@ -1310,7 +1331,7 @@
    1.58 + 		 */
    1.59 + 		mark_page_accessed(page);
    1.60 + 	}
    1.61 +-unlock:
    1.62 ++
    1.63 + 	pte_unmap_unlock(ptep, ptl);
    1.64 + out:
    1.65 + 	return page;
    1.66 +@@ -1464,17 +1485,13 @@
    1.67 + 				 * The VM_FAULT_WRITE bit tells us that
    1.68 + 				 * do_wp_page has broken COW when necessary,
    1.69 + 				 * even if maybe_mkwrite decided not to set
    1.70 +-				 * pte_write. We can thus safely do subsequent
    1.71 +-				 * page lookups as if they were reads. But only
    1.72 +-				 * do so when looping for pte_write is futile:
    1.73 +-				 * in some cases userspace may also be wanting
    1.74 +-				 * to write to the gotten user page, which a
    1.75 +-				 * read fault here might prevent (a readonly
    1.76 +-				 * page might get reCOWed by userspace write).
    1.77 ++				 * pte_write. We cannot simply drop FOLL_WRITE
    1.78 ++				 * here because the COWed page might be gone by
    1.79 ++				 * the time we do the subsequent page lookups.
    1.80 + 				 */
    1.81 + 				if ((ret & VM_FAULT_WRITE) &&
    1.82 + 				    !(vma->vm_flags & VM_WRITE))
    1.83 +-					foll_flags &= ~FOLL_WRITE;
    1.84 ++					foll_flags |= FOLL_COW;
    1.85 + 
    1.86 + 				cond_resched();
    1.87 + 			}
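
For context: the get_user_pages() path that this patch gates with FOLL_COW is the one used by ptrace-style accesses, where FOLL_FORCE lets a tracer store into a read-only private mapping by breaking COW. Below is a minimal, hypothetical userspace sketch of that legitimate forced-write path (it is not part of the patch); the file name and poke value are arbitrary assumptions.

/*
 * Hypothetical demo, not part of the patch: PTRACE_POKEDATA into a child's
 * read-only MAP_PRIVATE mapping goes through get_user_pages() with
 * FOLL_WRITE|FOLL_FORCE, so the kernel breaks COW and the store lands in
 * the child's private anonymous copy, never in the underlying file.
 * "/etc/hostname" and 0x41414141 are arbitrary assumptions.
 */
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);  /* any readable, non-empty file */
	if (fd < 0)
		return 1;

	/* Private, read-only mapping: a direct store here would SIGSEGV. */
	long *map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	pid_t child = fork();
	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);                 /* wait for the parent's poke */
		printf("child now sees 0x%lx\n", *map);
		return 0;
	}

	waitpid(child, NULL, 0);                /* child stopped itself */
	/* Forced write into the read-only mapping: the kernel breaks COW
	 * for the child instead of failing the access. */
	ptrace(PTRACE_POKEDATA, child, map, (void *)0x41414141L);
	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}

With the fix applied, that forced write still works: after do_wp_page() breaks COW, the get_user_pages() loop sets FOLL_COW instead of dropping FOLL_WRITE, and can_follow_write_pte() only lets a later lookup reuse a non-writable pte while it still maps the anonymous, non-KSM COW copy; if that copy has gone away, the fault is retried rather than handing back the original file-backed page, closing the CVE-2016-5195 (Dirty COW) window.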