-rw-r--r-- | include/linux/hmm.h | 46
-rw-r--r-- | mm/hmm.c            | 26
2 files changed, 34 insertions(+), 38 deletions(-)
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index dd907f614dfe..54d684fe3b90 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -80,8 +80,6 @@ struct hmm;
 
 /*
- * hmm_pfn_t - HMM uses its own pfn type to keep several flags per page
- *
  * Flags:
  * HMM_PFN_VALID: pfn is valid. It has, at least, read permission.
  * HMM_PFN_WRITE: CPU page table has write permission set
@@ -93,8 +91,6 @@ struct hmm;
  * set and the pfn value is undefined.
  * HMM_PFN_DEVICE_UNADDRESSABLE: unaddressable device memory (ZONE_DEVICE)
  */
-typedef unsigned long hmm_pfn_t;
-
 #define HMM_PFN_VALID (1 << 0)
 #define HMM_PFN_WRITE (1 << 1)
 #define HMM_PFN_ERROR (1 << 2)
@@ -104,14 +100,14 @@ typedef unsigned long hmm_pfn_t;
 #define HMM_PFN_SHIFT 6
 
 /*
- * hmm_pfn_t_to_page() - return struct page pointed to by a valid hmm_pfn_t
- * @pfn: hmm_pfn_t to convert to struct page
- * Returns: struct page pointer if pfn is a valid hmm_pfn_t, NULL otherwise
+ * hmm_pfn_to_page() - return struct page pointed to by a valid HMM pfn
+ * @pfn: HMM pfn value to get corresponding struct page from
+ * Returns: struct page pointer if pfn is a valid HMM pfn, NULL otherwise
  *
- * If the hmm_pfn_t is valid (ie valid flag set) then return the struct page
- * matching the pfn value stored in the hmm_pfn_t. Otherwise return NULL.
+ * If the HMM pfn is valid (ie valid flag set) then return the struct page
+ * matching the pfn value stored in the HMM pfn. Otherwise return NULL.
  */
-static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
+static inline struct page *hmm_pfn_to_page(uint64_t pfn)
 {
 	if (!(pfn & HMM_PFN_VALID))
 		return NULL;
@@ -119,11 +115,11 @@ static inline struct page *hmm_pfn_t_to_page(hmm_pfn_t pfn)
 }
 
 /*
- * hmm_pfn_t_to_pfn() - return pfn value store in a hmm_pfn_t
- * @pfn: hmm_pfn_t to extract pfn from
- * Returns: pfn value if hmm_pfn_t is valid, -1UL otherwise
+ * hmm_pfn_to_pfn() - return pfn value stored in a HMM pfn
+ * @pfn: HMM pfn value to extract pfn from
+ * Returns: pfn value if HMM pfn is valid, -1UL otherwise
  */
-static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
+static inline unsigned long hmm_pfn_to_pfn(uint64_t pfn)
 {
 	if (!(pfn & HMM_PFN_VALID))
 		return -1UL;
@@ -131,21 +127,21 @@ static inline unsigned long hmm_pfn_t_to_pfn(hmm_pfn_t pfn)
 }
 
 /*
- * hmm_pfn_t_from_page() - create a valid hmm_pfn_t value from struct page
- * @page: struct page pointer for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the page
+ * hmm_pfn_from_page() - create a valid HMM pfn value from struct page
+ * @page: struct page pointer for which to create the HMM pfn
+ * Returns: valid HMM pfn for the page
  */
-static inline hmm_pfn_t hmm_pfn_t_from_page(struct page *page)
+static inline uint64_t hmm_pfn_from_page(struct page *page)
 {
 	return (page_to_pfn(page) << HMM_PFN_SHIFT) | HMM_PFN_VALID;
 }
 
 /*
- * hmm_pfn_t_from_pfn() - create a valid hmm_pfn_t value from pfn
- * @pfn: pfn value for which to create the hmm_pfn_t
- * Returns: valid hmm_pfn_t for the pfn
+ * hmm_pfn_from_pfn() - create a valid HMM pfn value from pfn
+ * @pfn: pfn value for which to create the HMM pfn
+ * Returns: valid HMM pfn for the pfn
  */
-static inline hmm_pfn_t hmm_pfn_t_from_pfn(unsigned long pfn)
+static inline uint64_t hmm_pfn_from_pfn(unsigned long pfn)
 {
 	return (pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
 }
@@ -284,7 +280,7 @@ struct hmm_range {
 	struct list_head	list;
 	unsigned long		start;
 	unsigned long		end;
-	hmm_pfn_t		*pfns;
+	uint64_t		*pfns;
 	bool			valid;
 };
 
@@ -307,7 +303,7 @@ bool hmm_vma_range_done(struct hmm_range *range);
 
 /*
  * Fault memory on behalf of device driver. Unlike handle_mm_fault(), this will
- * not migrate any device memory back to system memory. The hmm_pfn_t array will
+ * not migrate any device memory back to system memory. The HMM pfn array will
  * be updated with the fault result and current snapshot of the CPU page table
  * for the range.
  *
@@ -316,7 +312,7 @@ bool hmm_vma_range_done(struct hmm_range *range);
  * function returns -EAGAIN.
  *
  * Return value does not reflect if the fault was successful for every single
- * address or not. Therefore, the caller must to inspect the hmm_pfn_t array to
+ * address or not. Therefore, the caller must inspect the HMM pfn array to
  * determine fault status for each address.
  *
  * Trying to fault inside an invalid vma will result in -EINVAL.
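For reference, the encoding behind the renamed helpers above: the page frame number lives above HMM_PFN_SHIFT and the flag bits live below it. Here is a minimal standalone sketch of that round trip, using only the flag values and shift shown in this header. It is an illustration that compiles in user space, not kernel code; the struct page variants are omitted because they need kernel infrastructure.

#include <stdint.h>
#include <stdio.h>

/* Flag bits and shift copied from include/linux/hmm.h above. */
#define HMM_PFN_VALID (1 << 0)
#define HMM_PFN_WRITE (1 << 1)
#define HMM_PFN_ERROR (1 << 2)
#define HMM_PFN_SHIFT 6

/* Mirrors hmm_pfn_from_pfn(): pfn in the high bits, valid flag set. */
static uint64_t hmm_pfn_from_pfn(unsigned long pfn)
{
	return ((uint64_t)pfn << HMM_PFN_SHIFT) | HMM_PFN_VALID;
}

/* Mirrors hmm_pfn_to_pfn(): returns -1UL when the valid flag is clear. */
static unsigned long hmm_pfn_to_pfn(uint64_t pfn)
{
	if (!(pfn & HMM_PFN_VALID))
		return -1UL;
	return (unsigned long)(pfn >> HMM_PFN_SHIFT);
}

int main(void)
{
	uint64_t entry = hmm_pfn_from_pfn(0x1234) | HMM_PFN_WRITE;

	/* Prints: pfn=0x1234 write=1 */
	printf("pfn=0x%lx write=%d\n", hmm_pfn_to_pfn(entry),
	       !!(entry & HMM_PFN_WRITE));
	return 0;
}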
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -304,7 +304,7 @@ struct hmm_vma_walk {
 
 static int hmm_vma_do_fault(struct mm_walk *walk,
 			    unsigned long addr,
-			    hmm_pfn_t *pfn)
+			    uint64_t *pfn)
 {
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_REMOTE;
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
@@ -324,7 +324,7 @@ static int hmm_vma_do_fault(struct mm_walk *walk,
 	return -EAGAIN;
 }
 
-static void hmm_pfns_special(hmm_pfn_t *pfns,
+static void hmm_pfns_special(uint64_t *pfns,
 			     unsigned long addr,
 			     unsigned long end)
 {
@@ -338,7 +338,7 @@ static int hmm_pfns_bad(unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	i = (addr - range->start) >> PAGE_SHIFT;
@@ -348,7 +348,7 @@ static int hmm_pfns_bad(unsigned long addr,
 	return 0;
 }
 
-static void hmm_pfns_clear(hmm_pfn_t *pfns,
+static void hmm_pfns_clear(uint64_t *pfns,
 			   unsigned long addr,
 			   unsigned long end)
 {
@@ -362,7 +362,7 @@ static int hmm_vma_walk_hole(unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	hmm_vma_walk->last = addr;
@@ -387,7 +387,7 @@ static int hmm_vma_walk_clear(unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	hmm_vma_walk->last = addr;
@@ -414,7 +414,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
 	struct vm_area_struct *vma = walk->vma;
-	hmm_pfn_t *pfns = range->pfns;
+	uint64_t *pfns = range->pfns;
 	unsigned long addr = start, i;
 	bool write_fault;
 	pte_t *ptep;
@@ -431,7 +431,7 @@ again:
 
 	if (pmd_devmap(*pmdp) || pmd_trans_huge(*pmdp)) {
 		unsigned long pfn;
-		hmm_pfn_t flag = 0;
+		uint64_t flag = 0;
 		pmd_t pmd;
 
 		/*
@@ -456,7 +456,7 @@ again:
 		pfn = pmd_pfn(pmd) + pte_index(addr);
 		flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
 		for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
-			pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
+			pfns[i] = hmm_pfn_from_pfn(pfn) | flag;
 		return 0;
 	}
 
@@ -490,7 +490,7 @@ again:
 			 * device and report anything else as error.
 			 */
 			if (is_device_private_entry(entry)) {
-				pfns[i] = hmm_pfn_t_from_pfn(swp_offset(entry));
+				pfns[i] = hmm_pfn_from_pfn(swp_offset(entry));
 				if (is_write_device_private_entry(entry)) {
 					pfns[i] |= HMM_PFN_WRITE;
 				} else if (write_fault)
@@ -515,7 +515,7 @@ again:
 		if (write_fault && !pte_write(pte))
 			goto fault;
 
-		pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte));
+		pfns[i] = hmm_pfn_from_pfn(pte_pfn(pte));
 		pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
 		continue;
 
@@ -678,8 +678,8 @@ EXPORT_SYMBOL(hmm_vma_range_done);
  * This is similar to a regular CPU page fault except that it will not trigger
  * any memory migration if the memory being faulted is not accessible by CPUs.
  *
- * On error, for one virtual address in the range, the function will set the
- * hmm_pfn_t error flag for the corresponding pfn entry.
+ * On error, for one virtual address in the range, the function will mark the
+ * corresponding HMM pfn entry with an error flag.
  *
  * Expected use pattern:
  * retry:
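The excerpt ends mid-way through the "Expected use pattern" comment above. For orientation, here is a hedged driver-side sketch of the retry loop that comment introduces, assuming the hmm_vma_fault(range, write, block) signature this series uses alongside hmm_vma_range_done(); my_device, dev->mutex and my_device_update_page_table() are hypothetical placeholders, not part of the HMM API.

#include <linux/hmm.h>
#include <linux/mutex.h>

/* Hypothetical driver state; only the lock matters for this sketch. */
struct my_device {
	struct mutex mutex;	/* serializes device page table updates */
};

/* Hypothetical: push the snapshotted range into the device page table. */
static int my_device_update_page_table(struct my_device *dev,
				       struct hmm_range *range);

static int my_device_mirror_range(struct my_device *dev,
				  struct hmm_range *range, bool write)
{
	int ret;

	/* Caller holds the mirrored mm's mmap_sem for read. */
again:
	ret = hmm_vma_fault(range, write, true /* block */);
	if (ret)
		return ret;

	mutex_lock(&dev->mutex);
	if (!hmm_vma_range_done(range)) {
		/* CPU page table changed; the snapshot is stale, retry. */
		mutex_unlock(&dev->mutex);
		goto again;
	}

	/* range->pfns[] now holds one uint64_t HMM pfn per page: decode
	 * with hmm_pfn_to_pfn()/hmm_pfn_to_page(), honor HMM_PFN_WRITE,
	 * and check each entry for HMM_PFN_ERROR. */
	ret = my_device_update_page_table(dev, range);
	mutex_unlock(&dev->mutex);
	return ret;
}

The design point the pattern encodes: hmm_vma_range_done() is called under the same lock that serializes device page table updates, so a concurrent CPU page table invalidation cannot race with the device mirroring a stale snapshot.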