usr/src/linux-headers-5.15.0-133/include/trace/events/iommu.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * iommu trace points
 *
 * Copyright (C) 2013 Shuah Khan <shuah.kh@samsung.com>
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iommu

#if !defined(_TRACE_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_IOMMU_H

#include <linux/tracepoint.h>

struct device;

DECLARE_EVENT_CLASS(iommu_group_event,
	TP_PROTO(int group_id, struct device *dev),
	TP_ARGS(group_id, dev),
	TP_STRUCT__entry(
		__field(int, gid)
		__string(device, dev_name(dev))
	),
	TP_fast_assign(
		__entry->gid = group_id;
		__assign_str(device, dev_name(dev));
	),
	TP_printk("IOMMU: groupID=%d device=%s",
		  __entry->gid, __get_str(device))
);

DEFINE_EVENT(iommu_group_event, add_device_to_group,
	TP_PROTO(int group_id, struct device *dev),
	TP_ARGS(group_id, dev)
);

DEFINE_EVENT(iommu_group_event, remove_device_from_group,
	TP_PROTO(int group_id, struct device *dev),
	TP_ARGS(group_id, dev)
);

DECLARE_EVENT_CLASS(iommu_device_event,
	TP_PROTO(struct device *dev),
	TP_ARGS(dev),
	TP_STRUCT__entry(
		__string(device, dev_name(dev))
	),
	TP_fast_assign(
		__assign_str(device, dev_name(dev));
	),
	TP_printk("IOMMU: device=%s", __get_str(device))
);

DEFINE_EVENT(iommu_device_event, attach_device_to_domain,
	TP_PROTO(struct device *dev),
	TP_ARGS(dev)
);

DEFINE_EVENT(iommu_device_event, detach_device_from_domain,
	TP_PROTO(struct device *dev),
	TP_ARGS(dev)
);

TRACE_EVENT(map,
	TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
	TP_ARGS(iova, paddr, size),
	TP_STRUCT__entry(
		__field(u64, iova)
		__field(u64, paddr)
		__field(size_t, size)
	),
	TP_fast_assign(
		__entry->iova = iova;
		__entry->paddr = paddr;
		__entry->size = size;
	),
	TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
		  __entry->iova, __entry->paddr, __entry->size)
);

TRACE_EVENT(unmap,
	TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
	TP_ARGS(iova, size, unmapped_size),
	TP_STRUCT__entry(
		__field(u64, iova)
		__field(size_t, size)
		__field(size_t, unmapped_size)
	),
	TP_fast_assign(
		__entry->iova = iova;
		__entry->size = size;
		__entry->unmapped_size = unmapped_size;
	),
	TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
		  __entry->iova, __entry->size, __entry->unmapped_size)
);

DECLARE_EVENT_CLASS(iommu_error,
	TP_PROTO(struct device *dev, unsigned long iova, int flags),
	TP_ARGS(dev, iova, flags),
	TP_STRUCT__entry(
		__string(device, dev_name(dev))
		__string(driver, dev_driver_string(dev))
		__field(u64, iova)
		__field(int, flags)
	),
	TP_fast_assign(
		__assign_str(device, dev_name(dev));
		__assign_str(driver, dev_driver_string(dev));
		__entry->iova = iova;
		__entry->flags = flags;
	),
	TP_printk("IOMMU:%s %s iova=0x%016llx flags=0x%04x",
		  __get_str(driver), __get_str(device),
		  __entry->iova, __entry->flags)
);

DEFINE_EVENT(iommu_error, io_page_fault,
	TP_PROTO(struct device *dev, unsigned long iova, int flags),
	TP_ARGS(dev, iova, flags)
);

#endif /* _TRACE_IOMMU_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

usr/src/linux-headers-5.15.0-142/include/trace/events/iommu.h (identical to the copy above)
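/*
 * Usage sketch (not part of the header above): TRACE_EVENT(map, ...) makes
 * the kernel generate a trace_map() inline with the TP_PROTO signature, so a
 * driver emits the event by calling it after a successful mapping. The names
 * example_iommu_map() and hw_do_map() below are hypothetical, for
 * illustration only. Once compiled in, the events appear under
 * /sys/kernel/tracing/events/iommu/ and can be enabled individually.
 */
#define CREATE_TRACE_POINTS	/* expand the definitions in exactly one unit */
#include <trace/events/iommu.h>

static int example_iommu_map(unsigned long iova, phys_addr_t paddr,
			     size_t size)
{
	int ret = hw_do_map(iova, paddr, size);	/* hypothetical HW call */

	if (!ret)
		trace_map(iova, paddr, size);	/* generated by TRACE_EVENT(map, ...) */
	return ret;
}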
usr/include/linux/iommu.h

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * IOMMU user API definitions
 */

#ifndef _IOMMU_H
#define _IOMMU_H

#include <linux/types.h>

#define IOMMU_FAULT_PERM_READ	(1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE	(1 << 1) /* write */
#define IOMMU_FAULT_PERM_EXEC	(1 << 2) /* exec */
#define IOMMU_FAULT_PERM_PRIV	(1 << 3) /* privileged */

/* Generic fault types; can be expanded, e.g. for IRQ remapping faults */
enum iommu_fault_type {
	IOMMU_FAULT_DMA_UNRECOV = 1,	/* unrecoverable fault */
	IOMMU_FAULT_PAGE_REQ,		/* page request fault */
};

enum iommu_fault_reason {
	IOMMU_FAULT_REASON_UNKNOWN = 0,

	/* Could not access the PASID table (fetch caused external abort) */
	IOMMU_FAULT_REASON_PASID_FETCH,

	/* PASID entry is invalid or has configuration errors */
	IOMMU_FAULT_REASON_BAD_PASID_ENTRY,

	/*
	 * PASID is out of range (e.g. exceeds the maximum PASID
	 * supported by the IOMMU) or disabled.
	 */
	IOMMU_FAULT_REASON_PASID_INVALID,

	/*
	 * An external abort occurred fetching (or updating) a translation
	 * table descriptor
	 */
	IOMMU_FAULT_REASON_WALK_EABT,

	/*
	 * Could not access the page table entry (bad address),
	 * actual translation fault
	 */
	IOMMU_FAULT_REASON_PTE_FETCH,

	/* Protection flag check failed */
	IOMMU_FAULT_REASON_PERMISSION,

	/* Access flag check failed */
	IOMMU_FAULT_REASON_ACCESS,

	/* Output address of a translation stage caused an Address Size fault */
	IOMMU_FAULT_REASON_OOR_ADDRESS,
};

/**
 * struct iommu_fault_unrecoverable - Unrecoverable fault data
 * @reason: reason of the fault, from &enum iommu_fault_reason
 * @flags: parameters of this fault (IOMMU_FAULT_UNRECOV_* values)
 * @pasid: Process Address Space ID
 * @perm: permissions requested by the incoming transaction
 *        (IOMMU_FAULT_PERM_* values)
 * @addr: offending page address
 * @fetch_addr: address that caused a fetch abort, if any
 */
struct iommu_fault_unrecoverable {
	__u32	reason;
#define IOMMU_FAULT_UNRECOV_PASID_VALID		(1 << 0)
#define IOMMU_FAULT_UNRECOV_ADDR_VALID		(1 << 1)
#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID	(1 << 2)
	__u32	flags;
	__u32	pasid;
	__u32	perm;
	__u64	addr;
	__u64	fetch_addr;
};

/**
 * struct iommu_fault_page_request - Page Request data
 * @flags: encodes whether the corresponding fields are valid and whether this
 *         is the last page in group (IOMMU_FAULT_PAGE_REQUEST_* values).
 *         When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
 *         must have the same PASID value as the page request. When it is clear,
 *         the page response should not have a PASID.
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
 * @addr: page address
 * @private_data: device-specific private information
 */
struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_PASID_VALID	(1 << 0)
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE	(1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA	(1 << 2)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID	(1 << 3)
	__u32	flags;
	__u32	pasid;
	__u32	grpid;
	__u32	perm;
	__u64	addr;
	__u64	private_data[2];
};

/**
 * struct iommu_fault - Generic fault data
 * @type: fault type from &enum iommu_fault_type
 * @padding: reserved for future use (should be zero)
 * @event: fault event, when @type is %IOMMU_FAULT_DMA_UNRECOV
 * @prm: Page Request message, when @type is %IOMMU_FAULT_PAGE_REQ
 * @padding2: sets the fault size to allow for future extensions
 */
struct iommu_fault {
	__u32	type;
	__u32	padding;
	union {
		struct iommu_fault_unrecoverable event;
		struct iommu_fault_page_request prm;
		__u8 padding2[56];
	};
};
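/*
 * Standalone userspace sketch (not part of the header): decoding a received
 * struct iommu_fault. Only the layout above is assumed; how the record is
 * delivered (e.g. a VFIO fault queue) is outside this header, and
 * dump_iommu_fault() is a hypothetical helper name.
 */
#include <stdio.h>
#include <linux/iommu.h>

static void dump_iommu_fault(const struct iommu_fault *f)
{
	switch (f->type) {
	case IOMMU_FAULT_DMA_UNRECOV:
		printf("unrecoverable: reason=%u addr=0x%llx\n",
		       f->event.reason, (unsigned long long)f->event.addr);
		if (f->event.flags & IOMMU_FAULT_UNRECOV_PASID_VALID)
			printf("  pasid=%u\n", f->event.pasid);
		break;
	case IOMMU_FAULT_PAGE_REQ:
		printf("page request: addr=0x%llx perm=0x%x grpid=%u\n",
		       (unsigned long long)f->prm.addr,
		       f->prm.perm, f->prm.grpid);
		if (f->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)
			printf("  pasid=%u\n", f->prm.pasid);
		break;
	}
}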
/**
 * enum iommu_page_response_code - Return status of fault handlers
 * @IOMMU_PAGE_RESP_SUCCESS: Fault has been handled and the page tables
 *	populated, retry the access. This is "Success" in PCI PRI.
 * @IOMMU_PAGE_RESP_FAILURE: General error. Drop all subsequent faults from
 *	this device if possible. This is "Response Failure" in PCI PRI.
 * @IOMMU_PAGE_RESP_INVALID: Could not handle this fault, don't retry the
 *	access. This is "Invalid Request" in PCI PRI.
 */
enum iommu_page_response_code {
	IOMMU_PAGE_RESP_SUCCESS = 0,
	IOMMU_PAGE_RESP_INVALID,
	IOMMU_PAGE_RESP_FAILURE,
};

/**
 * struct iommu_page_response - Generic page response information
 * @argsz: User filled size of this data
 * @version: API version of this structure
 * @flags: encodes whether the corresponding fields are valid
 *         (IOMMU_FAULT_PAGE_RESPONSE_* values)
 * @pasid: Process Address Space ID
 * @grpid: Page Request Group Index
 * @code: response code from &enum iommu_page_response_code
 */
struct iommu_page_response {
	__u32	argsz;
#define IOMMU_PAGE_RESP_VERSION_1	1
	__u32	version;
#define IOMMU_PAGE_RESP_PASID_VALID	(1 << 0)
	__u32	flags;
	__u32	pasid;
	__u32	grpid;
	__u32	code;
};
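/*
 * Sketch continuing the fault example (not part of the header): answering a
 * handled page request with a struct iommu_page_response. The response must
 * echo the PASID when IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID was set in the
 * request, per the field documentation above. make_page_response() is a
 * hypothetical helper; the delivery path back to the kernel is assumed.
 */
static struct iommu_page_response
make_page_response(const struct iommu_fault_page_request *req, __u32 code)
{
	struct iommu_page_response resp = {
		.argsz   = sizeof(resp),
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.grpid   = req->grpid,
		.code    = code,	/* e.g. IOMMU_PAGE_RESP_SUCCESS */
	};

	if (req->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = req->pasid;
	}
	return resp;
}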
/* defines the granularity of the invalidation */
enum iommu_inv_granularity {
	IOMMU_INV_GRANU_DOMAIN,	/* domain-selective invalidation */
	IOMMU_INV_GRANU_PASID,	/* PASID-selective invalidation */
	IOMMU_INV_GRANU_ADDR,	/* page-selective invalidation */
	IOMMU_INV_GRANU_NR,	/* number of invalidation granularities */
};

/**
 * struct iommu_inv_addr_info - Address Selective Invalidation Structure
 *
 * @flags: indicates the granularity of the address-selective invalidation
 * - If the PASID bit is set, the @pasid field is populated and the invalidation
 *   relates to cache entries tagged with this PASID and matching the address
 *   range.
 * - If the ARCHID bit is set, @archid is populated and the invalidation relates
 *   to cache entries tagged with this architecture-specific ID and matching
 *   the address range.
 * - Both PASID and ARCHID can be set, as they may tag different caches.
 * - If neither PASID nor ARCHID is set, global address invalidation applies.
 * - The LEAF flag indicates whether only the leaf PTE caching needs to be
 *   invalidated and other paging structure caches can be preserved.
 * @pasid: process address space ID
 * @archid: architecture-specific ID
 * @addr: first stage/level input address
 * @granule_size: page/block size of the mapping in bytes
 * @nb_granules: number of contiguous granules to be invalidated
 */
struct iommu_inv_addr_info {
#define IOMMU_INV_ADDR_FLAGS_PASID	(1 << 0)
#define IOMMU_INV_ADDR_FLAGS_ARCHID	(1 << 1)
#define IOMMU_INV_ADDR_FLAGS_LEAF	(1 << 2)
	__u32	flags;
	__u32	archid;
	__u64	pasid;
	__u64	addr;
	__u64	granule_size;
	__u64	nb_granules;
};

/**
 * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
 *
 * @flags: indicates the granularity of the PASID-selective invalidation
 * - If the PASID bit is set, the @pasid field is populated and the invalidation
 *   relates to cache entries tagged with this PASID and matching the address
 *   range.
 * - If the ARCHID bit is set, @archid is populated and the invalidation
 *   relates to cache entries tagged with this architecture-specific ID and
 *   matching the address range.
 * - Both PASID and ARCHID can be set, as they may tag different caches.
 * - At least one of PASID or ARCHID must be set.
 * @pasid: process address space ID
 * @archid: architecture-specific ID
 */
struct iommu_inv_pasid_info {
#define IOMMU_INV_PASID_FLAGS_PASID	(1 << 0)
#define IOMMU_INV_PASID_FLAGS_ARCHID	(1 << 1)
	__u32	flags;
	__u32	archid;
	__u64	pasid;
};

/**
 * struct iommu_cache_invalidate_info - First level/stage invalidation
 *	information
 * @argsz: User filled size of this data
 * @version: API version of this structure
 * @cache: bitfield that selects which caches to invalidate
 * @granularity: defines the lowest granularity used for the invalidation:
 *	domain > PASID > addr
 * @padding: reserved for future use (should be zero)
 * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
 * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
 *
 * Not all combinations of cache type and granularity are valid:
 *
 * +--------------+---------------+---------------+---------------+
 * | type /       |   DEV_IOTLB   |     IOTLB     |     PASID     |
 * | granularity  |               |               |     cache     |
 * +==============+===============+===============+===============+
 * | DOMAIN       |      N/A      |       Y       |       Y       |
 * +--------------+---------------+---------------+---------------+
 * | PASID        |       Y       |       Y       |       Y       |
 * +--------------+---------------+---------------+---------------+
 * | ADDR         |       Y       |       Y       |      N/A      |
 * +--------------+---------------+---------------+---------------+
 *
 * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
 * @version and @cache.
 *
 * If multiple cache types are invalidated simultaneously, they all
 * must support the used granularity.
 */
struct iommu_cache_invalidate_info {
	__u32	argsz;
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
	__u32	version;
/* IOMMU paging structure cache */
#define IOMMU_CACHE_INV_TYPE_IOTLB	(1 << 0) /* IOMMU IOTLB */
#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB	(1 << 1) /* Device IOTLB */
#define IOMMU_CACHE_INV_TYPE_PASID	(1 << 2) /* PASID cache */
#define IOMMU_CACHE_INV_TYPE_NR		(3)
	__u8	cache;
	__u8	granularity;
	__u8	padding[6];
	union {
		struct iommu_inv_pasid_info pasid_info;
		struct iommu_inv_addr_info addr_info;
	} granu;
};
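/*
 * Sketch (not part of the header; compile against <linux/iommu.h> as above):
 * populating an address-selective invalidation for a PASID-tagged IOVA range.
 * Per the table above, ADDR granularity is valid for the IOTLB and device
 * IOTLB but not the PASID cache. make_range_inval() is a hypothetical helper;
 * how the structure is submitted to the kernel is assumed.
 */
static struct iommu_cache_invalidate_info
make_range_inval(__u64 pasid, __u64 iova, __u64 granule_size, __u64 nr)
{
	struct iommu_cache_invalidate_info inv = {
		.argsz       = sizeof(inv),
		.version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1,
		.cache       = IOMMU_CACHE_INV_TYPE_IOTLB |
			       IOMMU_CACHE_INV_TYPE_DEV_IOTLB,
		.granularity = IOMMU_INV_GRANU_ADDR,
	};

	inv.granu.addr_info.flags        = IOMMU_INV_ADDR_FLAGS_PASID;
	inv.granu.addr_info.pasid        = pasid;
	inv.granu.addr_info.addr         = iova;
	inv.granu.addr_info.granule_size = granule_size;
	inv.granu.addr_info.nb_granules  = nr;
	return inv;
}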
/**
 * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and
 *	guest SVA binding.
 *
 * @flags:	VT-d PASID table entry attributes
 * @pat:	Page attribute table data to compute effective memory type
 * @emt:	Extended memory type
 *
 * Only guest vIOMMU selectable and effective options are passed down to
 * the host IOMMU.
 */
struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_SRE	(1 << 0) /* supervisor request */
#define IOMMU_SVA_VTD_GPASID_EAFE	(1 << 1) /* extended access enable */
#define IOMMU_SVA_VTD_GPASID_PCD	(1 << 2) /* page-level cache disable */
#define IOMMU_SVA_VTD_GPASID_PWT	(1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE	(1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD		(1 << 5) /* PASID-level cache disable */
#define IOMMU_SVA_VTD_GPASID_WPE	(1 << 6) /* write protect enable */
#define IOMMU_SVA_VTD_GPASID_LAST	(1 << 7)
	__u64 flags;
	__u32 pat;
	__u32 emt;
};

#define IOMMU_SVA_VTD_GPASID_MTS_MASK	(IOMMU_SVA_VTD_GPASID_CD | \
					 IOMMU_SVA_VTD_GPASID_EMTE | \
					 IOMMU_SVA_VTD_GPASID_PCD |  \
					 IOMMU_SVA_VTD_GPASID_PWT)

/**
 * struct iommu_gpasid_bind_data - Information about device and guest PASID
 *	binding
 * @argsz:	User filled size of this data
 * @version:	Version of this data structure
 * @format:	PASID table entry format
 * @flags:	Additional information on guest bind request
 * @gpgd:	Guest page directory base of the guest mm to bind
 * @hpasid:	Process address space ID used for the guest mm in host IOMMU
 * @gpasid:	Process address space ID used for the guest mm in guest IOMMU
 * @addr_width:	Guest virtual address width
 * @padding:	Reserved for future use (should be zero)
 * @vtd:	Intel VT-d specific data
 *
 * Guest-to-host PASID mapping can be identity or non-identity, where the
 * guest has its own PASID space. For non-identity mapping, a guest-to-host
 * PASID lookup is needed when the VM programs a guest PASID into an assigned
 * device. The VMM may trap such PASID programming and then request the host
 * IOMMU driver to convert the guest PASID to a host PASID based on this bind
 * data.
 */
struct iommu_gpasid_bind_data {
	__u32 argsz;
#define IOMMU_GPASID_BIND_VERSION_1	1
	__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD	1
#define IOMMU_PASID_FORMAT_LAST		2
	__u32 format;
	__u32 addr_width;
#define IOMMU_SVA_GPASID_VAL	(1 << 0) /* guest PASID valid */
	__u64 flags;
	__u64 gpgd;
	__u64 hpasid;
	__u64 gpasid;
	__u8  padding[8];
	/* Vendor specific data */
	union {
		struct iommu_gpasid_bind_data_vtd vtd;
	} vendor;
};

#endif /* _IOMMU_H */
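/*
 * Sketch (not part of the header; compile against <linux/iommu.h> as above):
 * filling the bind descriptor for a guest whose PASID space differs from the
 * host's, using the VT-d format defined above. IOMMU_SVA_GPASID_VAL marks
 * @gpasid as valid. make_gpasid_bind() is a hypothetical VMM helper; the
 * ioctl or call that consumes the structure is assumed.
 */
static struct iommu_gpasid_bind_data
make_gpasid_bind(__u64 gpgd, __u64 hpasid, __u64 gpasid, __u32 addr_width)
{
	struct iommu_gpasid_bind_data bind = {
		.argsz      = sizeof(bind),
		.version    = IOMMU_GPASID_BIND_VERSION_1,
		.format     = IOMMU_PASID_FORMAT_INTEL_VTD,
		.addr_width = addr_width,
		.flags      = IOMMU_SVA_GPASID_VAL,
		.gpgd       = gpgd,
		.hpasid     = hpasid,
		.gpasid     = gpasid,
	};
	return bind;
}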
usr/src/linux-headers-5.15.0-141/include/trace/events/iommu.h (identical to the 5.15.0-133 copy above)
usr/src/linux-headers-5.15.0-142/arch/x86/include/asm/iommu.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_IOMMU_H
#define _ASM_X86_IOMMU_H

#include <linux/acpi.h>

#include <asm/e820/api.h>

extern int force_iommu, no_iommu;
extern int iommu_detected;

/* 10 seconds */
#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000)

static inline int __init
arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
{
	u64 start = rmrr->base_address;
	u64 end = rmrr->end_address + 1;
	int entry_type;

	entry_type = e820__get_entry_type(start, end);
	if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
		return 0;

	pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
	       start, end - 1);
	return -EINVAL;
}

#endif /* _ASM_X86_IOMMU_H */