Contents of /home/newsbmcs.com/public_html/static/img/logo/mmu_context.h.tar

usr/src/linux-headers-5.15.0-133/arch/h8300/include/asm/mmu_context.h:

#ifndef _ASM_H8300_MMU_CONTEXT_H
#define _ASM_H8300_MMU_CONTEXT_H

#include <asm-generic/nommu_context.h>

#endif /* _ASM_H8300_MMU_CONTEXT_H */


usr/src/linux-headers-5.15.0-133/arch/sh/include/asm/mmu_context.h:

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2007 Paul Mundt
 *
 * ASID handling idea taken from MIPS implementation.
 */
#ifndef __ASM_SH_MMU_CONTEXT_H
#define __ASM_SH_MMU_CONTEXT_H

#include <cpu/mmu_context.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <linux/mm_types.h>
#include <asm/io.h>
#include <asm-generic/mm_hooks.h>

/*
 * The MMU "context" consists of two things:
 *    (a) TLB cache version (or round, cycle whatever expression you like)
 *    (b) ASID (Address Space IDentifier)
 */
#ifdef CONFIG_CPU_HAS_PTEAEX
#define MMU_CONTEXT_ASID_MASK		0x0000ffff
#else
#define MMU_CONTEXT_ASID_MASK		0x000000ff
#endif

#define MMU_CONTEXT_VERSION_MASK	(~0UL & ~MMU_CONTEXT_ASID_MASK)
#define MMU_CONTEXT_FIRST_VERSION	(MMU_CONTEXT_ASID_MASK + 1)

/* Impossible ASID value, to differentiate from NO_CONTEXT. */
#define MMU_NO_ASID			MMU_CONTEXT_FIRST_VERSION
#define NO_CONTEXT			0UL

#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)

#ifdef CONFIG_MMU
#define cpu_context(cpu, mm)	((mm)->context.id[cpu])

#define cpu_asid(cpu, mm)	\
	(cpu_context((cpu), (mm)) & MMU_CONTEXT_ASID_MASK)

/*
 * Virtual Page Number mask
 */
#define MMU_VPN_MASK	0xfffff000

#include <asm/mmu_context_32.h>

/*
 * Get MMU context if needed.
 */
static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
{
	unsigned long asid = asid_cache(cpu);

	/* Check if we have old version of context. */
	if (((cpu_context(cpu, mm) ^ asid) & MMU_CONTEXT_VERSION_MASK) == 0)
		/* It's up to date, do nothing */
		return;

	/* It's old, we need to get new context with new version. */
	if (!(++asid & MMU_CONTEXT_ASID_MASK)) {
		/*
		 * We exhaust ASID of this version.
		 * Flush all TLB and start new cycle.
		 */
		local_flush_tlb_all();

		/*
		 * Fix version; Note that we avoid version #0
		 * to distinguish NO_CONTEXT.
		 */
		if (!asid)
			asid = MMU_CONTEXT_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	int i;

	for_each_online_cpu(i)
		cpu_context(i, mm) = NO_CONTEXT;

	return 0;
}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
{
	get_mmu_context(mm, cpu);
	set_asid(cpu_asid(cpu, mm));
}

static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned int cpu = smp_processor_id();

	if (likely(prev != next)) {
		cpumask_set_cpu(cpu, mm_cpumask(next));
		set_TTB(next->pgd);
		activate_context(next, cpu);
	} else if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)))
		activate_context(next, cpu);
}

#include <asm-generic/mmu_context.h>

#else

#define set_asid(asid)			do { } while (0)
#define get_asid()			(0)
#define cpu_asid(cpu, mm)		({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid)	(0)
#define set_TTB(pgd)			do { } while (0)
#define get_TTB()			(0)

#include <asm-generic/nommu_context.h>

#endif /* CONFIG_MMU */

#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
/*
 * If this processor has an MMU, we need methods to turn it off/on ..
 * paging_init() will also have to be updated for the processor in
 * question.
 */
static inline void enable_mmu(void)
{
	unsigned int cpu = smp_processor_id();

	/* Enable MMU */
	__raw_writel(MMU_CONTROL_INIT, MMUCR);
	ctrl_barrier();

	if (asid_cache(cpu) == NO_CONTEXT)
		asid_cache(cpu) = MMU_CONTEXT_FIRST_VERSION;

	set_asid(asid_cache(cpu) & MMU_CONTEXT_ASID_MASK);
}

static inline void disable_mmu(void)
{
	unsigned long cr;

	cr = __raw_readl(MMUCR);
	cr &= ~MMU_CONTROL_INIT;
	__raw_writel(cr, MMUCR);

	ctrl_barrier();
}
#else
/*
 * MMU control handlers for processors lacking memory
 * management hardware.
 */
#define enable_mmu()	do { } while (0)
#define disable_mmu()	do { } while (0)
#endif

#endif /* __ASM_SH_MMU_CONTEXT_H */
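
Editor's note: the version/ASID split above is the core of the SH scheme. The low bits of the per-CPU asid_cache hold the hardware ASID and the high bits hold a "generation" counter, so an mm's context is stale whenever its generation no longer matches the CPU's. Below is a minimal standalone sketch of that arithmetic, assuming the 8-bit ASID configuration (CONFIG_CPU_HAS_PTEAEX unset) and plain unsigned longs instead of kernel types; the names sim_get_mmu_context() and the ASID_MASK/VERSION_MASK copies are illustrative only, not part of the header.

#include <stdio.h>

/* Illustrative copies of the 8-bit-ASID constants defined in the header above. */
#define ASID_MASK	0x000000ffUL
#define VERSION_MASK	(~0UL & ~ASID_MASK)
#define FIRST_VERSION	(ASID_MASK + 1)
#define NO_CONTEXT	0UL

/*
 * Userspace model of get_mmu_context(): *ctx is the mm's cpu_context and
 * *cache is the per-CPU asid_cache.  Returns the ASID the mm would run with.
 * The real kernel also flushes the TLB on rollover (modelled as a printf).
 */
static unsigned long sim_get_mmu_context(unsigned long *ctx, unsigned long *cache)
{
	unsigned long asid = *cache;

	/* Same generation: context is still valid, reuse it. */
	if (((*ctx ^ asid) & VERSION_MASK) == 0)
		return *ctx & ASID_MASK;

	/* Stale (or NO_CONTEXT): hand out the next ASID of this generation. */
	if (!(++asid & ASID_MASK)) {
		printf("ASIDs exhausted: flush all TLBs, start new generation\n");
		if (!asid)
			asid = FIRST_VERSION;	/* never produce generation 0 */
	}

	*ctx = *cache = asid;
	return asid & ASID_MASK;
}

int main(void)
{
	unsigned long cache = FIRST_VERSION;	/* CPU starts in generation 1 */
	unsigned long mm_a = NO_CONTEXT, mm_b = NO_CONTEXT;

	printf("mm_a gets ASID %lu\n", sim_get_mmu_context(&mm_a, &cache));
	printf("mm_b gets ASID %lu\n", sim_get_mmu_context(&mm_b, &cache));
	/* mm_a still matches the current generation, so its ASID is reused. */
	printf("mm_a again: ASID %lu\n", sim_get_mmu_context(&mm_a, &cache));
	return 0;
}
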

usr/src/linux-headers-5.15.0-142/arch/h8300/include/asm/mmu_context.h:

#ifndef _ASM_H8300_MMU_CONTEXT_H
#define _ASM_H8300_MMU_CONTEXT_H

#include <asm-generic/nommu_context.h>

#endif /* _ASM_H8300_MMU_CONTEXT_H */
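
Editor's note: both h8300 headers in this archive (5.15.0-133 and 5.15.0-142, identical content) simply pull in <asm-generic/nommu_context.h>. Roughly, that generic header stubs out the context-switch hooks, since a no-MMU CPU has no address-space state to install. The sketch below is an approximation of its shape written from memory, not a verbatim copy; the include-guard name is a guess.

/* Approximate shape of <asm-generic/nommu_context.h> (illustrative, not verbatim). */
#ifndef __ASM_GENERIC_NOMMU_CONTEXT_H	/* guard name is a guess */
#define __ASM_GENERIC_NOMMU_CONTEXT_H

/* The generic MMU-context header supplies no-op defaults for the other hooks. */
#include <asm-generic/mmu_context.h>

/* With no MMU there is nothing to do when switching address spaces. */
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
}

static inline void activate_mm(struct mm_struct *prev_mm,
			       struct mm_struct *next_mm)
{
}

#endif
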

usr/src/linux-headers-5.15.0-133/arch/arm64/include/asm/mmu_context.h:

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/mmu_context.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_MMU_CONTEXT_H
#define __ASM_MMU_CONTEXT_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/sched/hotplug.h>
#include <linux/mm_types.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputype.h>
#include <asm/sysreg.h>
#include <asm/tlbflush.h>

extern bool rodata_full;

static inline void contextidr_thread_switch(struct task_struct *next)
{
	if (!IS_ENABLED(CONFIG_PID_IN_CONTEXTIDR))
		return;

	write_sysreg(task_pid_nr(next), contextidr_el1);
	isb();
}

/*
 * Set TTBR0 to reserved_pg_dir. No translations will be possible via TTBR0.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));

	write_sysreg(ttbr, ttbr0_el1);
	isb();
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUG_ON(pgd == swapper_pg_dir);
	cpu_set_reserved_ttbr0();
	cpu_do_switch_mm(virt_to_phys(pgd), mm);
}

/*
 * TCR.T0SZ value to use when the ID map is active. Usually equals
 * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in
 * physical memory, in which case it will be smaller.
 */
extern u64 idmap_t0sz;
extern u64 idmap_ptrs_per_pgd;

/*
 * Ensure TCR.T0SZ is set to the provided value.
 */
static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
{
	unsigned long tcr = read_sysreg(tcr_el1);

	if ((tcr & TCR_T0SZ_MASK) >> TCR_T0SZ_OFFSET == t0sz)
		return;

	tcr &= ~TCR_T0SZ_MASK;
	tcr |= t0sz << TCR_T0SZ_OFFSET;
	write_sysreg(tcr, tcr_el1);
	isb();
}

#define cpu_set_default_tcr_t0sz()	__cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual))
#define cpu_set_idmap_tcr_t0sz()	__cpu_set_tcr_t0sz(idmap_t0sz)

/*
 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
 *
 * The idmap lives in the same VA range as userspace, but uses global entries
 * and may use a different TCR_EL1.T0SZ. To avoid issues resulting from
 * speculative TLB fetches, we must temporarily install the reserved page
 * tables while we invalidate the TLBs and set up the correct TCR_EL1.T0SZ.
 *
 * If current is a not a user task, the mm covers the TTBR1_EL1 page tables,
 * which should not be installed in TTBR0_EL1. In this case we can leave the
 * reserved page tables in place.
 */
static inline void cpu_uninstall_idmap(void)
{
	struct mm_struct *mm = current->active_mm;

	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	if (mm != &init_mm && !system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

static inline void cpu_install_idmap(void)
{
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();

	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
}

/*
 * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
 * avoiding the possibility of conflicting TLB entries being allocated.
 */
static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
{
	typedef void (ttbr_replace_func)(phys_addr_t);
	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
	ttbr_replace_func *replace_phys;

	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));

	if (system_supports_cnp() &&
	    !WARN_ON(pgdp != lm_alias(swapper_pg_dir))) {
		/*
		 * cpu_replace_ttbr1() is used when there's a boot CPU
		 * up (i.e. cpufeature framework is not up yet) and
		 * latter only when we enable CNP via cpufeature's
		 * enable() callback.
		 * Also we rely on the cpu_hwcap bit being set before
		 * calling the enable() function.
		 */
		ttbr1 |= TTBR_CNP_BIT;
	}

	replace_phys = (void *)__pa_symbol(function_nocfi(idmap_cpu_replace_ttbr1));

	cpu_install_idmap();
	replace_phys(ttbr1);
	cpu_uninstall_idmap();
}
/*
 * It would be nice to return ASIDs back to the allocator, but unfortunately
 * that introduces a race with a generation rollover where we could erroneously
 * free an ASID allocated in a future generation. We could workaround this by
 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
 * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
 * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
 * take CPU migration into account.
 */
void check_and_switch_context(struct mm_struct *mm);

#define init_new_context(tsk, mm) init_new_context(tsk, mm)
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	refcount_set(&mm->context.pinned, 0);
	return 0;
}

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
	u64 ttbr;

	if (!system_uses_ttbr0_pan())
		return;

	if (mm == &init_mm)
		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
	else
		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;

	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
}
#else
static inline void update_saved_ttbr0(struct task_struct *tsk,
				      struct mm_struct *mm)
{
}
#endif

#define enter_lazy_tlb enter_lazy_tlb
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/*
	 * We don't actually care about the ttbr0 mapping, so point it at the
	 * zero page.
	 */
	update_saved_ttbr0(tsk, &init_mm);
}

static inline void __switch_mm(struct mm_struct *next)
{
	/*
	 * init_mm.pgd does not contain any user mappings and it is always
	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
	 */
	if (next == &init_mm) {
		cpu_set_reserved_ttbr0();
		return;
	}

	check_and_switch_context(next);
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	if (prev != next)
		__switch_mm(next);

	/*
	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
	 * value may have not been initialised yet (activate_mm caller) or the
	 * ASID has changed since the last run (following the context switch
	 * of another thread of the same process).
	 */
	update_saved_ttbr0(tsk, next);
}

static inline const struct cpumask *
task_cpu_possible_mask(struct task_struct *p)
{
	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
		return cpu_possible_mask;

	if (!is_compat_thread(task_thread_info(p)))
		return cpu_possible_mask;

	return system_32bit_el0_cpumask();
}
#define task_cpu_possible_mask	task_cpu_possible_mask

void verify_cpu_asid_bits(void);
void post_ttbr_update_workaround(void);

unsigned long arm64_mm_context_get(struct mm_struct *mm);
void arm64_mm_context_put(struct mm_struct *mm);

#include <asm-generic/mmu_context.h>

#endif /* !__ASSEMBLY__ */

#endif /* !__ASM_MMU_CONTEXT_H */
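
Editor's note: update_saved_ttbr0() above packs the mm's ASID into bits [63:48] of the saved TTBR0 value alongside the page-table base address. Below is a small standalone sketch of that packing and of unpacking it again, using plain integers. The macro and function names (pack_ttbr0, TTBR_ASID_SHIFT, TTBR_BADDR_MASK) are local to the sketch, and phys_to_ttbr() is modelled as the identity on the physical address (an assumption: the real macro also massages 52-bit PA layouts).

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define TTBR_ASID_SHIFT	48
#define TTBR_BADDR_MASK	((UINT64_C(1) << TTBR_ASID_SHIFT) - 1)

/*
 * Model of the packing in update_saved_ttbr0():
 *   ttbr = <table base address> | <asid> << 48
 * The "table base address" here is simply the physical address of the pgd.
 */
static uint64_t pack_ttbr0(uint64_t pgd_phys, uint16_t asid)
{
	return (pgd_phys & TTBR_BADDR_MASK) | ((uint64_t)asid << TTBR_ASID_SHIFT);
}

int main(void)
{
	uint64_t ttbr = pack_ttbr0(0x40a7c000, 0x0123);

	printf("ttbr0 = 0x%016" PRIx64 "\n", ttbr);
	printf("baddr = 0x%" PRIx64 "\n", ttbr & TTBR_BADDR_MASK);
	printf("asid  = 0x%" PRIx64 "\n", ttbr >> TTBR_ASID_SHIFT);
	return 0;
}
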

usr/src/linux-headers-5.15.0-142/arch/nios2/include/asm/mmu_context.h:

/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * based on MIPS asm/mmu_context.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_MMU_CONTEXT_H
#define _ASM_NIOS2_MMU_CONTEXT_H

#include <linux/mm_types.h>

#include <asm-generic/mm_hooks.h>

extern void mmu_context_init(void);
extern unsigned long get_pid_from_context(mm_context_t *ctx);

/*
 * For the fast tlb miss handlers, we keep a pointer to the current pgd.
 * processor.
 */
extern pgd_t *pgd_current;

/*
 * Initialize the context related info for a new mm_struct instance.
 *
 * Set all new contexts to 0, that way the generation will never match
 * the currently running generation when this context is switched in.
 */
#define init_new_context init_new_context
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk);

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm activate_mm
void activate_mm(struct mm_struct *prev, struct mm_struct *next);

#include <asm-generic/mmu_context.h>

#endif /* _ASM_NIOS2_MMU_CONTEXT_H */
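
Editor's note: across all four architectures in this archive the same small set of hooks recurs: init_new_context() when an mm is created, switch_mm() on a context switch, and activate_mm() when a task adopts a new mm, with <asm-generic/mmu_context.h> supplying no-op defaults for anything an architecture leaves out. The sketch below is a heavily simplified, illustrative view of where the core kernel calls these hooks; the sketch_* wrappers and their shapes are invented for illustration, not real kernel functions.

/*
 * Illustrative only: a stripped-down model of the call sites for the hooks
 * declared in the headers above.  The real paths live in kernel/fork.c,
 * kernel/sched/core.c and fs/exec.c and carry far more state.
 */
struct mm_struct;
struct task_struct;

/* Provided per-architecture (or by the asm-generic defaults). */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	       struct task_struct *tsk);
void activate_mm(struct mm_struct *prev, struct mm_struct *next);

/* fork()/exec(): a fresh mm starts with a context that matches no live ASID. */
static inline int sketch_mm_init(struct mm_struct *mm, struct task_struct *tsk)
{
	return init_new_context(tsk, mm);
}

/* Scheduler: install the incoming task's address space if it differs. */
static inline void sketch_context_switch(struct mm_struct *prev_mm,
					 struct mm_struct *next_mm,
					 struct task_struct *next_task)
{
	if (prev_mm != next_mm)
		switch_mm(prev_mm, next_mm, next_task);
}

/* exec() or kthread_use_mm(): adopt a new mm outside the normal switch path. */
static inline void sketch_adopt_mm(struct mm_struct *old_mm,
				   struct mm_struct *new_mm)
{
	activate_mm(old_mm, new_mm);
}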