From: Rafael J. Wysocki <rjw@sisk.pl>
Subject: Re: [PATCH 33/35] x86, 64bit, mm: hibernate use generic mapping_init

On Thursday, January 24, 2013 12:20:14 PM Yinghai Lu wrote:
> We should set up mappings only for usable memory ranges under max_pfn.
> Otherwise we reintroduce the problem that was fixed by
>
> x86, mm: Only direct map addresses that are marked as E820_RAM
>
> Make it map only the ranges in the pfn_mapped array.
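
For reference, the two pieces this switches to are the pfn_mapped[] ranges
tracked in arch/x86/mm/init.c and the generic ident-mapping helper added
earlier in this series. Roughly (reconstructed from how the patch uses them
rather than copied from the tree, so treat the exact declarations as
approximate):

	/* arch/x86/mm/init.c: PFN ranges actually added to the direct
	 * mapping; only E820_RAM below max_pfn ends up in here. */
	struct range pfn_mapped[E820_X_MAX];
	int nr_pfn_mapped;

	/* arch/x86/include/asm/init.h: descriptor consumed by the generic
	 * helper; the field names below match the usage in the patch. */
	struct x86_mapping_info {
		void *(*alloc_pgt_page)(void *);  /* allocate a page-table page */
		void *context;                    /* cookie passed to the allocator */
		unsigned long pmd_flag;           /* flags for the 2M PMD entries */
		bool kernel_mapping;              /* map at the direct-map offset rather than strictly 1:1 */
	};

	/* Builds page tables covering the physical range [addr, end) into
	 * pgd_page, allocating intermediate tables via info->alloc_pgt_page(). */
	int kernel_ident_mapping_init(struct x86_mapping_info *info,
				      pgd_t *pgd_page,
				      unsigned long addr, unsigned long end);

The upshot is that the hibernate code no longer carries its own pud/pmd
walker; it simply replays whatever ranges the early boot code actually put
into the direct mapping.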

Well.

While I don't have fundamental objections, I can't really ACK the patch,
because I haven't been following arch/x86 development for several months
and I can't really say how this is supposed to work after the change.

Thanks,
Rafael


> Signed-off-by: Yinghai Lu <yinghai@kernel.org>
> Cc: Pavel Machek <pavel@ucw.cz>
> Cc: Rafael J. Wysocki <rjw@sisk.pl>
> Cc: linux-pm@vger.kernel.org
> ---
>  arch/x86/power/hibernate_64.c | 66 ++++++++++++++---------------------------
>  1 file changed, 22 insertions(+), 44 deletions(-)
>
> diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
> index 460f314..a0fde91 100644
> --- a/arch/x86/power/hibernate_64.c
> +++ b/arch/x86/power/hibernate_64.c
> @@ -11,6 +11,8 @@
>  #include <linux/gfp.h>
>  #include <linux/smp.h>
>  #include <linux/suspend.h>
> +
> +#include <asm/init.h>
>  #include <asm/proto.h>
>  #include <asm/page.h>
>  #include <asm/pgtable.h>
> @@ -39,41 +41,21 @@ pgd_t *temp_level4_pgt;
>
>  void *relocated_restore_code;
>
> -static int res_phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
> +static void *alloc_pgt_page(void *context)
>  {
> -	long i, j;
> -
> -	i = pud_index(address);
> -	pud = pud + i;
> -	for (; i < PTRS_PER_PUD; pud++, i++) {
> -		unsigned long paddr;
> -		pmd_t *pmd;
> -
> -		paddr = address + i*PUD_SIZE;
> -		if (paddr >= end)
> -			break;
> -
> -		pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
> -		if (!pmd)
> -			return -ENOMEM;
> -		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
> -		for (j = 0; j < PTRS_PER_PMD; pmd++, j++, paddr += PMD_SIZE) {
> -			unsigned long pe;
> -
> -			if (paddr >= end)
> -				break;
> -			pe = __PAGE_KERNEL_LARGE_EXEC | paddr;
> -			pe &= __supported_pte_mask;
> -			set_pmd(pmd, __pmd(pe));
> -		}
> -	}
> -	return 0;
> +	return (void *)get_safe_page(GFP_ATOMIC);
>  }
>
>  static int set_up_temporary_mappings(void)
>  {
> -	unsigned long start, end, next;
> -	int error;
> +	struct x86_mapping_info info = {
> +		.alloc_pgt_page = alloc_pgt_page,
> +		.pmd_flag = __PAGE_KERNEL_LARGE_EXEC,
> +		.kernel_mapping = true,
> +	};
> +	unsigned long mstart, mend;
> +	int result;
> +	int i;
>
>  	temp_level4_pgt = (pgd_t *)get_safe_page(GFP_ATOMIC);
>  	if (!temp_level4_pgt)
> @@ -84,21 +66,17 @@ static int set_up_temporary_mappings(void)
>  		init_level4_pgt[pgd_index(__START_KERNEL_map)]);
>
>  	/* Set up the direct mapping from scratch */
> -	start = (unsigned long)pfn_to_kaddr(0);
> -	end = (unsigned long)pfn_to_kaddr(max_pfn);
> -
> -	for (; start < end; start = next) {
> -		pud_t *pud = (pud_t *)get_safe_page(GFP_ATOMIC);
> -		if (!pud)
> -			return -ENOMEM;
> -		next = start + PGDIR_SIZE;
> -		if (next > end)
> -			next = end;
> -		if ((error = res_phys_pud_init(pud, __pa(start), __pa(next))))
> -			return error;
> -		set_pgd(temp_level4_pgt + pgd_index(start),
> -			mk_kernel_pgd(__pa(pud)));
> +	for (i = 0; i < nr_pfn_mapped; i++) {
> +		mstart = pfn_mapped[i].start << PAGE_SHIFT;
> +		mend = pfn_mapped[i].end << PAGE_SHIFT;
> +
> +		result = kernel_ident_mapping_init(&info, temp_level4_pgt,
> +						   mstart, mend);
> +
> +		if (result)
> +			return result;
>  	}
> +
>  	return 0;
>  }
>
>
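
For completeness, the only caller of set_up_temporary_mappings() is
swsusp_arch_resume() in the same file, which this patch leaves untouched.
From memory it looks roughly like the following (paraphrased, not quoted),
so the page tables built above are what restore_image() switches to:

	int swsusp_arch_resume(void)
	{
		int error;

		/* Build the temporary page tables used while the image
		 * pages are copied back into place. */
		error = set_up_temporary_mappings();
		if (error)
			return error;

		/* Move the low-level restore code to a safe page and jump
		 * into it; restore_image() loads temp_level4_pgt and does
		 * the actual copy. */
		relocated_restore_code = (void *)get_safe_page(GFP_ATOMIC);
		if (!relocated_restore_code)
			return -ENOMEM;
		memcpy(relocated_restore_code, &core_restore_code,
		       &restore_registers - &core_restore_code);

		restore_image();
		return 0;
	}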
--
I speak only for myself.
Rafael J. Wysocki, Intel Open Source Technology Center.

