Date: Thu, 18 Mar 2021 20:10:52 -0700
Message-Id: <13d725cb8e741950fb9d6e64b2cd9bd54ff7c3f9.1616123271.git.pcc@google.com>
Subject: [PATCH v8 1/3] arm64: mte: make the per-task SCTLR_EL1 field usable elsewhere
From: Peter Collingbourne
To: Catalin Marinas, Evgenii Stepanov, Kostya Serebryany, Vincenzo Frascino,
	Dave Martin, Szabolcs Nagy, Florian Weimer
Cc: Peter Collingbourne, Linux ARM, Kevin Brodsky, Andrey Konovalov,
	Will Deacon, linux-api@vger.kernel.org, libc-alpha@sourceware.org

In an upcoming change we are going to introduce per-task SCTLR_EL1 bits
for PAC. Move the existing per-task SCTLR_EL1 field out of the
MTE-specific code so that we will be able to use it from both the PAC
and MTE code paths and make the task switching code more efficient.
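
For reference, the user-facing entry point into set_mte_ctrl() is the
tagged address ABI prctl(). A minimal userspace sketch of selecting
synchronous tag checking (illustrative only; the PR_* constants come
from <linux/prctl.h>, with fallback definitions in case the installed
uapi headers predate MTE support):

  #include <sys/prctl.h>

  #ifndef PR_SET_TAGGED_ADDR_CTRL
  #define PR_SET_TAGGED_ADDR_CTRL 55
  #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
  #endif
  #ifndef PR_MTE_TCF_SYNC
  #define PR_MTE_TCF_SYNC         (1UL << 1)
  #define PR_MTE_TAG_SHIFT        3
  #endif

  /* Request synchronous tag checking and include tags 1-15 in the
   * random tag generation mask. In the kernel this reaches
   * set_mte_ctrl(), which now folds the TCF0 choice into sctlr_user. */
  static int enable_sync_mte(void)
  {
          unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                               (0xfffeUL << PR_MTE_TAG_SHIFT);

          return prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0);
  }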
Signed-off-by: Peter Collingbourne
Link: https://linux-review.googlesource.com/id/Ic65fac78a7926168fa68f9e8da591c9e04ff7278
---
v8:
- rebase to 5.12-rc3

v7:
- drop CONFIG_ARM64_NEED_SCTLR_USER

 arch/arm64/include/asm/mte.h       |  4 ---
 arch/arm64/include/asm/processor.h |  6 +++-
 arch/arm64/kernel/mte.c            | 47 ++++++------------------------
 arch/arm64/kernel/process.c        | 30 +++++++++++++++----
 4 files changed, 38 insertions(+), 49 deletions(-)

diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 9b557a457f24..1a4909b44297 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -40,7 +40,6 @@ void mte_free_tag_storage(char *storage);
 void mte_sync_tags(pte_t *ptep, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
 void flush_mte_state(void);
-void mte_thread_switch(struct task_struct *next);
 void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
@@ -63,9 +62,6 @@ static inline void mte_copy_page_tags(void *kto, const void *kfrom)
 static inline void flush_mte_state(void)
 {
 }
-static inline void mte_thread_switch(struct task_struct *next)
-{
-}
 static inline void mte_suspend_exit(void)
 {
 }
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75d3286..80895bb30490 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -151,11 +151,13 @@ struct thread_struct {
 	struct ptrauth_keys_kernel	keys_kernel;
 #endif
 #ifdef CONFIG_ARM64_MTE
-	u64			sctlr_tcf0;
 	u64			gcr_user_excl;
 #endif
+	u64			sctlr_user;
 };
 
+#define SCTLR_USER_MASK SCTLR_EL1_TCF0_MASK
+
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
 						unsigned long *size)
 {
@@ -247,6 +249,8 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+void set_task_sctlr_el1(u64 sctlr);
+
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index b3c70a612c7a..ea8a90259e7f 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -124,26 +124,6 @@ bool mte_report_once(void)
 	return READ_ONCE(report_fault_once);
 }
 
-static void update_sctlr_el1_tcf0(u64 tcf0)
-{
-	/* ISB required for the kernel uaccess routines */
-	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
-	isb();
-}
-
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
-	/*
-	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
-	 * optimisation. Disable preemption so that it does not see
-	 * the variable update before the SCTLR_EL1.TCF0 one.
-	 */
-	preempt_disable();
-	current->thread.sctlr_tcf0 = tcf0;
-	update_sctlr_el1_tcf0(tcf0);
-	preempt_enable();
-}
-
 static void update_gcr_el1_excl(u64 excl)
 {
 
@@ -176,21 +156,12 @@ void flush_mte_state(void)
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking */
-	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+	set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+			   SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
 	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
-void mte_thread_switch(struct task_struct *next)
-{
-	if (!system_supports_mte())
-		return;
-
-	/* avoid expensive SCTLR_EL1 accesses if no change */
-	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
-		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-}
-
 void mte_suspend_exit(void)
 {
 	if (!system_supports_mte())
@@ -201,7 +172,7 @@ void mte_suspend_exit(void)
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-	u64 tcf0;
+	u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
 	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
 		       SYS_GCR_EL1_EXCL_MASK;
 
@@ -210,23 +181,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	switch (arg & PR_MTE_TCF_MASK) {
 	case PR_MTE_TCF_NONE:
-		tcf0 = SCTLR_EL1_TCF0_NONE;
+		sctlr |= SCTLR_EL1_TCF0_NONE;
 		break;
 	case PR_MTE_TCF_SYNC:
-		tcf0 = SCTLR_EL1_TCF0_SYNC;
+		sctlr |= SCTLR_EL1_TCF0_SYNC;
 		break;
 	case PR_MTE_TCF_ASYNC:
-		tcf0 = SCTLR_EL1_TCF0_ASYNC;
+		sctlr |= SCTLR_EL1_TCF0_ASYNC;
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	if (task != current) {
-		task->thread.sctlr_tcf0 = tcf0;
+		task->thread.sctlr_user = sctlr;
 		task->thread.gcr_user_excl = gcr_excl;
 	} else {
-		set_sctlr_el1_tcf0(tcf0);
+		set_task_sctlr_el1(sctlr);
 		set_gcr_el1_excl(gcr_excl);
 	}
 
@@ -243,7 +214,7 @@ long get_mte_ctrl(struct task_struct *task)
 
 	ret = incl << PR_MTE_TAG_SHIFT;
 
-	switch (task->thread.sctlr_tcf0) {
+	switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
 	case SCTLR_EL1_TCF0_NONE:
 		ret |= PR_MTE_TCF_NONE;
 		break;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b1a24d..bb89f6208a20 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -529,6 +529,27 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }
 
+static void update_sctlr_el1(u64 sctlr)
+{
+	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK, sctlr);
+
+	/* ISB required for the kernel uaccess routines when setting TCF0. */
+	isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+	/*
+	 * __switch_to() checks current->thread.sctlr_user as an
+	 * optimisation. Disable preemption so that it does not see
+	 * the variable update before the SCTLR_EL1 one.
+	 */
+	preempt_disable();
+	current->thread.sctlr_user = sctlr;
+	update_sctlr_el1(sctlr);
+	preempt_enable();
+}
+
 /*
  * Thread switching.
  */
@@ -553,12 +574,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	 */
 	dsb(ish);
 
-	/*
-	 * MTE thread switching must happen after the DSB above to ensure that
-	 * any asynchronous tag check faults have been logged in the TFSR*_EL1
-	 * registers.
-	 */
-	mte_thread_switch(next);
+	/* avoid expensive SCTLR_EL1 accesses if no change */
+	if (prev->thread.sctlr_user != next->thread.sctlr_user)
+		update_sctlr_el1(next->thread.sctlr_user);
 
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
-- 
2.31.0.rc2.261.g7f71774620-goog
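
As context for the upcoming PAC change mentioned in the commit message,
a hypothetical sketch of how a non-MTE caller might use the new helper.
SCTLR_ELx_ENIA (the "enable instruction address key A" bit from
<asm/sysreg.h>) is a real SCTLR_EL1 bit, but this caller is purely
illustrative, and SCTLR_USER_MASK would first have to be extended to
cover the bit before update_sctlr_el1() would actually write it:

  /* Hypothetical caller (sketch only): disable the PAC IA key for the
   * current task through the new helper, keeping thread.sctlr_user and
   * the live SCTLR_EL1 value in sync across preemption. */
  static void example_disable_ia_key(void)
  {
          set_task_sctlr_el1(current->thread.sctlr_user & ~SCTLR_ELx_ENIA);
  }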