Subject: Re: [PATCH V2 2/2] arm64: errata: Workaround possible Cortex-A715 [ESR|FAR]_ELx corruption

On 11/15/22 19:32, Catalin Marinas wrote:
> On Sun, Nov 13, 2022 at 06:56:45AM +0530, Anshuman Khandual wrote:
>> diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
>> index 35e9a468d13e..6552947ca7fa 100644
>> --- a/arch/arm64/mm/hugetlbpage.c
>> +++ b/arch/arm64/mm/hugetlbpage.c
>> @@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
>>  {
>>  	return __hugetlb_valid_size(size);
>>  }
>> +
>> +pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
>> +{
>> +	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198)) {
>> +		pte_t pte = READ_ONCE(*ptep);
>
> Not sure whether the generated code would be any different but we should
> probably add the check for the CPU capability in the 'if' condition
> above, before the READ_ONCE (which expands to some asm volatile):

Sure, will do.

>
> 	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
> 	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
> 		pte_t pte = ...
> 		...
> 	}

The local variable 'pte_t pte' can be dropped as well.
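
For reference, a minimal user-space sketch of that point (not kernel code;
my_read_once() and cap_is_set() are illustrative stand-ins for READ_ONCE()
and cpus_have_const_cap()): the volatile load has to be emitted whenever it
is reached, so keeping the cheap compile-time and capability checks in front
of it lets the whole block, including the load, disappear on unaffected
configurations.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) */
#define WORKAROUND_ENABLED	1
/* READ_ONCE()-like volatile access the compiler may not elide */
#define my_read_once(x)		(*(const volatile typeof(x) *)&(x))

/* Stand-in for cpus_have_const_cap(ARM64_WORKAROUND_2645198) */
static bool cap_is_set(void)
{
	return true;
}

static unsigned long pteval = 0x1234;

int main(void)
{
	if (WORKAROUND_ENABLED && cap_is_set()) {
		/* The volatile load is only reached when both checks pass */
		unsigned long pte = my_read_once(pteval);

		printf("pte = %#lx\n", pte);
	}
	return 0;
}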

>
>> +		/*
>> +		 * Break-before-make (BBM) is required for all user space mappings
>> +		 * when the permission changes from executable to non-executable
>> +		 * in cases where cpu is affected with errata #2645198.
>> +		 */
>> +		if (pte_user_exec(pte) && cpus_have_const_cap(ARM64_WORKAROUND_2645198))
>> +			return huge_ptep_clear_flush(vma, addr, ptep);
>> +	}
>> +	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
>> +}
>> +
>> +void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
>> + pte_t old_pte, pte_t pte)
>> +{
>> + set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
>> +}
>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
>> index 9a7c38965154..c1fb0ce1473c 100644
>> --- a/arch/arm64/mm/mmu.c
>> +++ b/arch/arm64/mm/mmu.c
>> @@ -1702,3 +1702,24 @@ static int __init prevent_bootmem_remove_init(void)
>>  }
>>  early_initcall(prevent_bootmem_remove_init);
>>  #endif
>> +
>> +pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
>> +{
>> +	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198)) {
>> +		pte_t pte = READ_ONCE(*ptep);
>> +		/*
>> +		 * Break-before-make (BBM) is required for all user space mappings
>> +		 * when the permission changes from executable to non-executable
>> +		 * in cases where cpu is affected with errata #2645198.
>> +		 */
>> +		if (pte_user_exec(pte) && cpus_have_const_cap(ARM64_WORKAROUND_2645198))
>> +			return ptep_clear_flush(vma, addr, ptep);
>> +	}
>
> Same here.

Sure, will do.

>
>> +	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
>> +}
>> +
>> +void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
>> + pte_t old_pte, pte_t pte)
>> +{
>> + __set_pte_at(vma->vm_mm, addr, ptep, pte);
>> +}
>
Planning to apply the following change after this patch.

diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 6552947ca7fa..cd8d96e1fa1a 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -562,14 +562,14 @@ bool __init arch_hugetlb_valid_size(unsigned long size)

 pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
-	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198)) {
-		pte_t pte = READ_ONCE(*ptep);
+	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
 		/*
 		 * Break-before-make (BBM) is required for all user space mappings
 		 * when the permission changes from executable to non-executable
 		 * in cases where cpu is affected with errata #2645198.
 		 */
-		if (pte_user_exec(pte) && cpus_have_const_cap(ARM64_WORKAROUND_2645198))
+		if (pte_user_exec(READ_ONCE(*ptep)))
 			return huge_ptep_clear_flush(vma, addr, ptep);
 	}
 	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index c1fb0ce1473c..ec305ea3942c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1705,14 +1705,14 @@ early_initcall(prevent_bootmem_remove_init);

 pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
-	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198)) {
-		pte_t pte = READ_ONCE(*ptep);
+	if (IS_ENABLED(CONFIG_ARM64_WORKAROUND_2645198) &&
+	    cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
 		/*
 		 * Break-before-make (BBM) is required for all user space mappings
 		 * when the permission changes from executable to non-executable
 		 * in cases where cpu is affected with errata #2645198.
 		 */
-		if (pte_user_exec(pte) && cpus_have_const_cap(ARM64_WORKAROUND_2645198))
+		if (pte_user_exec(READ_ONCE(*ptep)))
 			return ptep_clear_flush(vma, addr, ptep);
 	}
 	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
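
For anyone following the thread, a rough sketch of the contract these hooks
implement (this is not the actual mm code; change_one_pte() is a hypothetical
helper, the real callers live in mm/mprotect.c and its hugetlb counterpart):
the start hook clears the old entry, with a TLB flush on affected CPUs so the
full break-before-make sequence happens before an executable mapping loses
its permissions, and the commit hook installs the new entry.

#include <linux/mm.h>
#include <linux/pgtable.h>

static void change_one_pte(struct vm_area_struct *vma, unsigned long addr,
			   pte_t *ptep, pgprot_t newprot)
{
	pte_t old_pte, pte;

	/* Clears *ptep; also flushes the TLB when the erratum applies */
	old_pte = ptep_modify_prot_start(vma, addr, ptep);
	pte = pte_modify(old_pte, newprot);
	/* Installs the new entry, completing break-before-make */
	ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
}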