mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 03:44:45 +01:00
- A set of commits that introduces cxl_memdev_attach and pave way for soft reserved handling, type2 accelerator enabling, and LSA 2.0 enabling. All these series require the endpoint driver to settle before continuing the memdev driver probe. dax/hmem, e820, resource: Defer Soft Reserved insertion until hmem is ready cxl/mem: Introduce cxl_memdev_attach for CXL-dependent operation cxl/mem: Drop @host argument to devm_cxl_add_memdev() cxl/mem: Convert devm_cxl_add_memdev() to scope-based-cleanup cxl/port: Arrange for always synchronous endpoint attach cxl/mem: Arrange for always-synchronous memdev attach cxl/mem: Fix devm_cxl_memdev_edac_release() confusion - A set to address CXL port error protocol handling and reporting. The large patch series was split into 3 parts. Part 1 and 2 are included here with part 3 coming later. Part 1 consists of a series of code refactoring to PCI AER sub-system that addresses CXL and also CXL RAS code to prepare for port error handling. Part 2 refactors the CXL code to move management of component registers to cxl_port objects to allow all CXL AER errors to be handled through the cxl_port hierarchy. 
Part 2: cxl/port: Move endpoint component register management to cxl_port cxl/port: Map Port RAS registers cxl/port: Move dport RAS setup to dport add time cxl/port: Move dport probe operations to a driver event cxl/port: Move decoder setup before dport creation cxl/port: Cleanup dport removal with a devres group cxl/port: Reduce number of @dport variables in cxl_port_add_dport() cxl/port: Cleanup handling of the nr_dports 0 -> 1 transition Part 1: cxl: Update RAS handler interfaces to also support CXL Ports cxl/mem: Clarify @host for devm_cxl_add_nvdimm() PCI/AER: Update struct aer_err_info with kernel-doc formatting PCI/AER: Report CXL or PCIe bus type in AER trace logging PCI/AER: Use guard() in cxl_rch_handle_error_iter() PCI/AER: Move CXL RCH error handling to aer_cxl_rch.c PCI/AER: Update is_internal_error() to be non-static is_aer_internal_error() PCI/AER: Export pci_aer_unmask_internal_errors() cxl/pci: Move CXL driver's RCH error handling into core/ras_rch.c PCI/AER: Replace PCIEAER_CXL symbol with CXL_RAS cxl/pci: Remove CXL VH handling in CONFIG_PCIEAER_CXL conditional blocks from core/pci.c PCI: Replace cxl_error_is_native() with pcie_aer_is_native() cxl/pci: Remove unnecessary CXL RCH handling helper functions cxl/pci: Remove unnecessary CXL Endpoint handling helper functions PCI: Introduce pcie_is_cxl() PCI: Update CXL DVSEC definitions PCI: Move CXL DVSEC definitions into uapi/linux/pci_regs.h - A set of patches to provide AMD Zen5 platform address translation for CXL using ACPI PRMT. Set includes a conventions document to explain why this is needed and how it's implemented. 
cxl: Disable HPA/SPA translation handlers for Normalized Addressing cxl/region: Factor out code into cxl_region_setup_poison() cxl/atl: Lock decoders that need address translation cxl: Enable AMD Zen5 address translation using ACPI PRMT cxl/acpi: Prepare use of EFI runtime services cxl: Introduce callback for HPA address ranges translation cxl/region: Use region data to get the root decoder cxl/region: Add @hpa_range argument to function cxl_calc_interleave_pos() cxl/region: Separate region parameter setup and region construction cxl: Simplify cxl_root_ops allocation and handling cxl/region: Store HPA range in struct cxl_region cxl/region: Store root decoder in struct cxl_region cxl/region: Rename misleading variable name @hpa to @hpa_range Documentation/driver-api/cxl: ACPI PRM Address Translation Support and AMD Zen5 enablement cxl, doc: Moving conventions in separate files cxl, doc: Remove isonum.txt inclusion - A set of misc CXL patches of fixes, cleanups, and updates. Including CXL address translation for unaligned MOD3 regions. 
cxl: Fix premature commit_end increment on decoder commit failure cxl/region: Use do_div() for 64-bit modulo operation cxl/region: Translate HPA to DPA and memdev in unaligned regions cxl/region: Translate DPA->HPA in unaligned MOD3 regions cxl/core: Fix cxl_dport debugfs EINJ entries cxl/acpi: Remove cxl_acpi_set_cache_size() cxl/hdm: Fix newline character in dev_err() messages cxl/pci: Remove outdated FIXME comment and BUILD_BUG_ON Documentation/driver-api/cxl: device hotplug section Documentation/driver-api/cxl: BIOS/EFI expectation update -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEE5DAy15EJMCV1R6v9YGjFFmlTOEoFAmmOFXcACgkQYGjFFmlT OEojaxAApQJFLyX1MkPbhtm6j6GRzzEAEWTBX2XsmliZf1JhfahsNMWI69kO33rm LddF+nyZNEl/foyHgUaxVzlQwqWuihyp7Qk2djXnMzLsuCAsWhPbB9j0RgJUN8h5 N4U76AmOdmhLlXH4CCqoW2jNy0OjxNdgp1FtTHv7VO7RxgRE9MFJRkLulKxB03wy t6lRZXPofEFcHen40DlYRtW26vy1BYUO0dng2f16DxWrb1ztdACH/zVqCJJtdoFc FAT5EaQCeRYZ9Yz4dONw3DcUjYlG6NcRN9FWNiptBn1Pb7pUX55Le8lfD3qZg0an m3lWRs1T/lGz7pWmz4GPUKDwGFCEqLqd4oSz5v+dFR3JJxjJpRzKa19y5TfqK/LF diqNZsDD9gCXE1HXzNr1YcbllpU2cPRPf58gWG9bLmG5xUUmScib8LoTMfgcCJW5 SlC6kf7BFLkJfDTcFaILc/UANeZaLGhrV0vyJntfGyT5EqKOcfjQEvrZvofA8mef bdxt0IRDW4D+7kkcuR33OipTVUFG3ban8yYq4zXD64dmeHF76gwdJm3nyXsqdtpc IYIIhz0W6pbTKjJ2fy1rZcTac1ZaALstyaF4bYWIjyF3NylPM8tDi48DFr+DGgeX xkFs2B9p5vY5Cq73gCmSWsi3PBPTjWzeRp7YZrV6VoBd9uqewUs= =blFQ -----END PGP SIGNATURE----- Merge tag 'cxl-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl Pull CXL updates from Dave Jiang: - Introduce cxl_memdev_attach and pave way for soft reserved handling, type2 accelerator enabling, and LSA 2.0 enabling. All these series require the endpoint driver to settle before continuing the memdev driver probe. - Address CXL port error protocol handling and reporting. The large patch series was split into three parts. The first two parts are included here with the final part coming later. 
The first part consists of a series of code refactoring to PCI AER sub-system that addresses CXL and also CXL RAS code to prepare for port error handling. The second part refactors the CXL code to move management of component registers to cxl_port objects to allow all CXL AER errors to be handled through the cxl_port hierarchy. - Provide AMD Zen5 platform address translation for CXL using ACPI PRMT. This includes a conventions document to explain why this is needed and how it's implemented. - Misc CXL patches of fixes, cleanups, and updates. Including CXL address translation for unaligned MOD3 regions. [ TLA service: CXL is "Compute Express Link" ] * tag 'cxl-for-7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (59 commits) cxl: Disable HPA/SPA translation handlers for Normalized Addressing cxl/region: Factor out code into cxl_region_setup_poison() cxl/atl: Lock decoders that need address translation cxl: Enable AMD Zen5 address translation using ACPI PRMT cxl/acpi: Prepare use of EFI runtime services cxl: Introduce callback for HPA address ranges translation cxl/region: Use region data to get the root decoder cxl/region: Add @hpa_range argument to function cxl_calc_interleave_pos() cxl/region: Separate region parameter setup and region construction cxl: Simplify cxl_root_ops allocation and handling cxl/region: Store HPA range in struct cxl_region cxl/region: Store root decoder in struct cxl_region cxl/region: Rename misleading variable name @hpa to @hpa_range Documentation/driver-api/cxl: ACPI PRM Address Translation Support and AMD Zen5 enablement cxl, doc: Moving conventions in separate files cxl, doc: Remove isonum.txt inclusion cxl/port: Unify endpoint and switch port lookup cxl/port: Move endpoint component register management to cxl_port cxl/port: Map Port RAS registers cxl/port: Move dport RAS setup to dport add time ...
449 lines
16 KiB
C
449 lines
16 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* ioport.h Definitions of routines for detecting, reserving and
|
|
* allocating system resources.
|
|
*
|
|
* Authors: Linus Torvalds
|
|
*/
|
|
|
|
#ifndef _LINUX_IOPORT_H
|
|
#define _LINUX_IOPORT_H
|
|
|
|
#ifndef __ASSEMBLY__
|
|
#include <linux/args.h>
|
|
#include <linux/bits.h>
|
|
#include <linux/compiler.h>
|
|
#include <linux/minmax.h>
|
|
#include <linux/types.h>
|
|
/*
 * Resources are tree-like, allowing
 * nesting etc..
 */
struct resource {
	resource_size_t start;	/* first address of the range */
	resource_size_t end;	/* last address, inclusive (see resource_size()) */
	const char *name;	/* human-readable owner/label for the range */
	unsigned long flags;	/* IORESOURCE_* type and attribute bits */
	unsigned long desc;	/* IORES_DESC_* descriptor, IORES_DESC_NONE if unused */
	/* Tree links; parent is NULL while the resource is detached. */
	struct resource *parent, *sibling, *child;
};
|
|
|
|
/*
|
|
* IO resources have these defined flags.
|
|
*
|
|
* PCI devices expose these flags to userspace in the "resource" sysfs file,
|
|
* so don't move them.
|
|
*/
|
|
#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
|
|
|
|
#define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */
|
|
#define IORESOURCE_IO 0x00000100 /* PCI/ISA I/O ports */
|
|
#define IORESOURCE_MEM 0x00000200
|
|
#define IORESOURCE_REG 0x00000300 /* Register offsets */
|
|
#define IORESOURCE_IRQ 0x00000400
|
|
#define IORESOURCE_DMA 0x00000800
|
|
#define IORESOURCE_BUS 0x00001000
|
|
|
|
#define IORESOURCE_PREFETCH 0x00002000 /* No side effects */
|
|
#define IORESOURCE_READONLY 0x00004000
|
|
#define IORESOURCE_CACHEABLE 0x00008000
|
|
#define IORESOURCE_RANGELENGTH 0x00010000
|
|
#define IORESOURCE_SHADOWABLE 0x00020000
|
|
|
|
#define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */
|
|
#define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */
|
|
|
|
#define IORESOURCE_MEM_64 0x00100000
|
|
#define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */
|
|
#define IORESOURCE_MUXED 0x00400000 /* Resource is software muxed */
|
|
|
|
#define IORESOURCE_EXT_TYPE_BITS 0x01000000 /* Resource extended types */
|
|
#define IORESOURCE_SYSRAM 0x01000000 /* System RAM (modifier) */
|
|
|
|
/* IORESOURCE_SYSRAM specific bits. */
|
|
#define IORESOURCE_SYSRAM_DRIVER_MANAGED 0x02000000 /* Always detected via a driver. */
|
|
#define IORESOURCE_SYSRAM_MERGEABLE 0x04000000 /* Resource can be merged. */
|
|
|
|
#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
|
|
|
|
#define IORESOURCE_DISABLED 0x10000000
|
|
#define IORESOURCE_UNSET 0x20000000 /* No address assigned yet */
|
|
#define IORESOURCE_AUTO 0x40000000
|
|
#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
|
|
|
|
/* I/O resource extended types */
|
|
#define IORESOURCE_SYSTEM_RAM (IORESOURCE_MEM|IORESOURCE_SYSRAM)
|
|
|
|
/* PnP IRQ specific bits (IORESOURCE_BITS) */
|
|
#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
|
|
#define IORESOURCE_IRQ_LOWEDGE (1<<1)
|
|
#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
|
|
#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
|
|
#define IORESOURCE_IRQ_SHAREABLE (1<<4)
|
|
#define IORESOURCE_IRQ_OPTIONAL (1<<5)
|
|
#define IORESOURCE_IRQ_WAKECAPABLE (1<<6)
|
|
|
|
/* PnP DMA specific bits (IORESOURCE_BITS) */
|
|
#define IORESOURCE_DMA_TYPE_MASK (3<<0)
|
|
#define IORESOURCE_DMA_8BIT (0<<0)
|
|
#define IORESOURCE_DMA_8AND16BIT (1<<0)
|
|
#define IORESOURCE_DMA_16BIT (2<<0)
|
|
|
|
#define IORESOURCE_DMA_MASTER (1<<2)
|
|
#define IORESOURCE_DMA_BYTE (1<<3)
|
|
#define IORESOURCE_DMA_WORD (1<<4)
|
|
|
|
#define IORESOURCE_DMA_SPEED_MASK (3<<6)
|
|
#define IORESOURCE_DMA_COMPATIBLE (0<<6)
|
|
#define IORESOURCE_DMA_TYPEA (1<<6)
|
|
#define IORESOURCE_DMA_TYPEB (2<<6)
|
|
#define IORESOURCE_DMA_TYPEF (3<<6)
|
|
|
|
/* PnP memory I/O specific bits (IORESOURCE_BITS) */
|
|
#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
|
|
#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
|
|
#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
|
|
#define IORESOURCE_MEM_TYPE_MASK (3<<3)
|
|
#define IORESOURCE_MEM_8BIT (0<<3)
|
|
#define IORESOURCE_MEM_16BIT (1<<3)
|
|
#define IORESOURCE_MEM_8AND16BIT (2<<3)
|
|
#define IORESOURCE_MEM_32BIT (3<<3)
|
|
#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
|
|
#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
|
|
#define IORESOURCE_MEM_NONPOSTED (1<<7)
|
|
|
|
/* PnP I/O specific bits (IORESOURCE_BITS) */
|
|
#define IORESOURCE_IO_16BIT_ADDR (1<<0)
|
|
#define IORESOURCE_IO_FIXED (1<<1)
|
|
#define IORESOURCE_IO_SPARSE (1<<2)
|
|
|
|
/* PCI ROM control bits (IORESOURCE_BITS) */
|
|
#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
|
|
#define IORESOURCE_ROM_SHADOW (1<<1) /* Use RAM image, not ROM BAR */
|
|
|
|
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
|
|
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
|
|
#define IORESOURCE_PCI_EA_BEI (1<<5) /* BAR Equivalent Indicator */
|
|
|
|
/*
 * I/O Resource Descriptors
 *
 * Descriptors are used by walk_iomem_res_desc() and region_intersects()
 * for searching a specific resource range in the iomem table. Assign
 * a new descriptor when a resource range supports the search interfaces.
 * Otherwise, resource.desc must be set to IORES_DESC_NONE (0).
 *
 * Values are stored in struct resource::desc. Do not renumber existing
 * entries; append new descriptors at the end.
 */
enum {
	IORES_DESC_NONE				= 0,	/* not searchable by descriptor */
	IORES_DESC_CRASH_KERNEL			= 1,
	IORES_DESC_ACPI_TABLES			= 2,
	IORES_DESC_ACPI_NV_STORAGE		= 3,
	IORES_DESC_PERSISTENT_MEMORY		= 4,
	IORES_DESC_PERSISTENT_MEMORY_LEGACY	= 5,
	IORES_DESC_DEVICE_PRIVATE_MEMORY	= 6,
	IORES_DESC_RESERVED			= 7,
	/* see soft_reserve_resource / walk_soft_reserve_res() below */
	IORES_DESC_SOFT_RESERVED		= 8,
	IORES_DESC_CXL				= 9,
};
|
|
|
|
/*
 * Flags controlling ioremap() behavior.
 */
enum {
	IORES_MAP_SYSTEM_RAM	= BIT(0),	/* range lies in System RAM */
	/* presumably: map the range as encrypted memory; semantics are
	 * defined by the arch ioremap implementation — confirm at call sites */
	IORES_MAP_ENCRYPTED	= BIT(1),
};
|
|
|
|
/* helpers to define resources */

/*
 * Build a struct resource compound literal covering
 * [_start, _start + _size - 1] with the given name, flags and
 * IORES_DESC_* descriptor.
 */
#define DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, _desc)	\
(struct resource) {							\
	.start = (_start),						\
	.end = (_start) + (_size) - 1,					\
	.name = (_name),						\
	.flags = (_flags),						\
	.desc = (_desc),						\
}

/* As DEFINE_RES_NAMED_DESC(), with the descriptor defaulted to none. */
#define DEFINE_RES_NAMED(_start, _size, _name, _flags)			\
	DEFINE_RES_NAMED_DESC(_start, _size, _name, _flags, IORES_DESC_NONE)
/* DEFINE_RES() with zero arguments: an unset, empty resource. */
#define __DEFINE_RES0()							\
	DEFINE_RES_NAMED(0, 0, NULL, IORESOURCE_UNSET)
/* DEFINE_RES() with three arguments: unnamed resource with given flags. */
#define __DEFINE_RES3(_start, _size, _flags)				\
	DEFINE_RES_NAMED(_start, _size, NULL, _flags)
/* Dispatch to __DEFINE_RES0 or __DEFINE_RES3 based on argument count. */
#define DEFINE_RES(...)							\
	CONCATENATE(__DEFINE_RES, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
|
|
|
|
/* I/O port space resource covering [_start, _start + _size - 1]. */
#define DEFINE_RES_IO_NAMED(_start, _size, _name)			\
	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_IO)
#define DEFINE_RES_IO(_start, _size)					\
	DEFINE_RES_IO_NAMED((_start), (_size), NULL)

/* Memory-mapped resource covering [_start, _start + _size - 1]. */
#define DEFINE_RES_MEM_NAMED(_start, _size, _name)			\
	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_MEM)
#define DEFINE_RES_MEM(_start, _size)					\
	DEFINE_RES_MEM_NAMED((_start), (_size), NULL)

/* Register-offset resource (IORESOURCE_REG). */
#define DEFINE_RES_REG_NAMED(_start, _size, _name)			\
	DEFINE_RES_NAMED((_start), (_size), (_name), IORESOURCE_REG)
#define DEFINE_RES_REG(_start, _size)					\
	DEFINE_RES_REG_NAMED((_start), (_size), NULL)

/* Single interrupt line resource (size 1). */
#define DEFINE_RES_IRQ_NAMED(_irq, _name)				\
	DEFINE_RES_NAMED((_irq), 1, (_name), IORESOURCE_IRQ)
#define DEFINE_RES_IRQ(_irq)						\
	DEFINE_RES_IRQ_NAMED((_irq), NULL)

/* Single DMA channel resource (size 1). */
#define DEFINE_RES_DMA_NAMED(_dma, _name)				\
	DEFINE_RES_NAMED((_dma), 1, (_name), IORESOURCE_DMA)
#define DEFINE_RES_DMA(_dma)						\
	DEFINE_RES_DMA_NAMED((_dma), NULL)
|
|
|
|
/**
|
|
* typedef resource_alignf - Resource alignment callback
|
|
* @data: Private data used by the callback
|
|
* @res: Resource candidate range (an empty resource space)
|
|
* @size: The minimum size of the empty space
|
|
* @align: Alignment from the constraints
|
|
*
|
|
* Callback allows calculating resource placement and alignment beyond min,
|
|
* max, and align fields in the struct resource_constraint.
|
|
*
|
|
* Return: Start address for the resource.
|
|
*/
|
|
typedef resource_size_t (*resource_alignf)(void *data,
|
|
const struct resource *res,
|
|
resource_size_t size,
|
|
resource_size_t align);
|
|
|
|
/**
 * struct resource_constraint - constraints to be met while searching empty
 *				resource space
 * @min: The minimum address for the memory range
 * @max: The maximum address for the memory range
 * @align: Alignment for the start address of the empty space
 * @alignf: Additional alignment constraints callback
 * @alignf_data: Data provided for @alignf callback
 *
 * Contains the range and alignment constraints that have to be met during
 * find_resource_space(). @alignf can be NULL indicating no alignment beyond
 * @align is necessary.
 */
struct resource_constraint {
	resource_size_t min, max, align;	/* hard range and start alignment */
	resource_alignf alignf;			/* optional placement callback, may be NULL */
	void *alignf_data;			/* opaque data passed to @alignf */
};
|
|
|
|
/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
|
|
extern struct resource ioport_resource;
|
|
extern struct resource iomem_resource;
|
|
extern struct resource soft_reserve_resource;
|
|
|
|
extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
|
|
extern int request_resource(struct resource *root, struct resource *new);
|
|
extern int release_resource(struct resource *new);
|
|
void release_child_resources(struct resource *new);
|
|
extern void reserve_region_with_split(struct resource *root,
|
|
resource_size_t start, resource_size_t end,
|
|
const char *name);
|
|
extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
|
|
extern int insert_resource(struct resource *parent, struct resource *new);
|
|
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
|
|
extern int remove_resource(struct resource *old);
|
|
extern void arch_remove_reservations(struct resource *avail);
|
|
extern int allocate_resource(struct resource *root, struct resource *new,
|
|
resource_size_t size, resource_size_t min,
|
|
resource_size_t max, resource_size_t align,
|
|
resource_alignf alignf,
|
|
void *alignf_data);
|
|
struct resource *lookup_resource(struct resource *root, resource_size_t start);
|
|
int adjust_resource(struct resource *res, resource_size_t start,
|
|
resource_size_t size);
|
|
resource_size_t resource_alignment(struct resource *res);
|
|
|
|
/**
|
|
* resource_set_size - Calculate resource end address from size and start
|
|
* @res: Resource descriptor
|
|
* @size: Size of the resource
|
|
*
|
|
* Calculate the end address for @res based on @size.
|
|
*
|
|
* Note: The start address of @res must be set when calling this function.
|
|
* Prefer resource_set_range() if setting both the start address and @size.
|
|
*/
|
|
static inline void resource_set_size(struct resource *res, resource_size_t size)
|
|
{
|
|
res->end = res->start + size - 1;
|
|
}
|
|
|
|
/**
|
|
* resource_set_range - Set resource start and end addresses
|
|
* @res: Resource descriptor
|
|
* @start: Start address for the resource
|
|
* @size: Size of the resource
|
|
*
|
|
* Set @res start address and calculate the end address based on @size.
|
|
*/
|
|
static inline void resource_set_range(struct resource *res,
|
|
resource_size_t start,
|
|
resource_size_t size)
|
|
{
|
|
res->start = start;
|
|
resource_set_size(res, size);
|
|
}
|
|
|
|
static inline resource_size_t resource_size(const struct resource *res)
|
|
{
|
|
return res->end - res->start + 1;
|
|
}
|
|
static inline unsigned long resource_type(const struct resource *res)
|
|
{
|
|
return res->flags & IORESOURCE_TYPE_BITS;
|
|
}
|
|
static inline unsigned long resource_ext_type(const struct resource *res)
|
|
{
|
|
return res->flags & IORESOURCE_EXT_TYPE_BITS;
|
|
}
|
|
/* True iff r1 completely contains r2 */
|
|
static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
|
|
{
|
|
if (resource_type(r1) != resource_type(r2))
|
|
return false;
|
|
if (r1->flags & IORESOURCE_UNSET || r2->flags & IORESOURCE_UNSET)
|
|
return false;
|
|
return r1->start <= r2->start && r1->end >= r2->end;
|
|
}
|
|
|
|
/* True if any part of r1 overlaps r2 */
|
|
static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
|
|
{
|
|
return r1->start <= r2->end && r1->end >= r2->start;
|
|
}
|
|
|
|
static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
|
|
struct resource *r)
|
|
{
|
|
if (!resource_overlaps(r1, r2))
|
|
return false;
|
|
r->start = max(r1->start, r2->start);
|
|
r->end = min(r1->end, r2->end);
|
|
return true;
|
|
}
|
|
|
|
static inline bool resource_union(const struct resource *r1, const struct resource *r2,
|
|
struct resource *r)
|
|
{
|
|
if (!resource_overlaps(r1, r2))
|
|
return false;
|
|
r->start = min(r1->start, r2->start);
|
|
r->end = max(r1->end, r2->end);
|
|
return true;
|
|
}
|
|
|
|
/*
|
|
* Check if this resource is added to a resource tree or detached. Caller is
|
|
* responsible for not racing assignment.
|
|
*/
|
|
static inline bool resource_assigned(const struct resource *res)
|
|
{
|
|
return res->parent;
|
|
}
|
|
|
|
int find_resource_space(struct resource *root, struct resource *new,
|
|
resource_size_t size, struct resource_constraint *constraint);
|
|
|
|
/* Convenience shorthand with allocation */

/* Claim [start, start + n - 1] in the I/O port space. */
#define request_region(start,n,name)		__request_region(&ioport_resource, (start), (n), (name), 0)
/* As request_region(), but the region may be shared via software muxing. */
#define request_muxed_region(start,n,name)	__request_region(&ioport_resource, (start), (n), (name), IORESOURCE_MUXED)
#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
/* Claim [start, start + n - 1] in the iomem space. */
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
#define request_mem_region_muxed(start, n, name) \
	__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_MUXED)
/* Claim an iomem region that userland may not map (IORESOURCE_EXCLUSIVE). */
#define request_mem_region_exclusive(start,n,name) \
	__request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
/* Relabel an already-claimed region in place. */
#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
|
|
|
|
extern struct resource * __request_region(struct resource *,
|
|
resource_size_t start,
|
|
resource_size_t n,
|
|
const char *name, int flags);
|
|
|
|
/* Compatibility cruft */
|
|
#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
|
|
#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
|
|
|
|
extern void __release_region(struct resource *, resource_size_t,
|
|
resource_size_t);
|
|
#ifdef CONFIG_MEMORY_HOTREMOVE
|
|
extern void release_mem_region_adjustable(resource_size_t, resource_size_t);
|
|
#endif
|
|
#ifdef CONFIG_MEMORY_HOTPLUG
|
|
extern void merge_system_ram_resource(struct resource *res);
|
|
#endif
|
|
|
|
/* Wrappers for managed devices */
|
|
struct device;
|
|
|
|
extern int devm_request_resource(struct device *dev, struct resource *root,
|
|
struct resource *new);
|
|
extern void devm_release_resource(struct device *dev, struct resource *new);
|
|
|
|
#define devm_request_region(dev,start,n,name) \
|
|
__devm_request_region(dev, &ioport_resource, (start), (n), (name))
|
|
#define devm_request_mem_region(dev,start,n,name) \
|
|
__devm_request_region(dev, &iomem_resource, (start), (n), (name))
|
|
|
|
extern struct resource * __devm_request_region(struct device *dev,
|
|
struct resource *parent, resource_size_t start,
|
|
resource_size_t n, const char *name);
|
|
|
|
#define devm_release_region(dev, start, n) \
|
|
__devm_release_region(dev, &ioport_resource, (start), (n))
|
|
#define devm_release_mem_region(dev, start, n) \
|
|
__devm_release_region(dev, &iomem_resource, (start), (n))
|
|
|
|
extern void __devm_release_region(struct device *dev, struct resource *parent,
|
|
resource_size_t start, resource_size_t n);
|
|
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
|
|
extern bool iomem_is_exclusive(u64 addr);
|
|
extern bool resource_is_exclusive(struct resource *resource, u64 addr,
|
|
resource_size_t size);
|
|
|
|
extern int
|
|
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
|
|
void *arg, int (*func)(unsigned long, unsigned long, void *));
|
|
extern int
|
|
walk_mem_res(u64 start, u64 end, void *arg,
|
|
int (*func)(struct resource *, void *));
|
|
extern int
|
|
walk_system_ram_res(u64 start, u64 end, void *arg,
|
|
int (*func)(struct resource *, void *));
|
|
extern int
|
|
walk_system_ram_res_rev(u64 start, u64 end, void *arg,
|
|
int (*func)(struct resource *, void *));
|
|
extern int
|
|
walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, u64 end,
|
|
void *arg, int (*func)(struct resource *, void *));
|
|
extern int walk_soft_reserve_res(u64 start, u64 end, void *arg,
|
|
int (*func)(struct resource *, void *));
|
|
extern int
|
|
region_intersects_soft_reserve(resource_size_t start, size_t size);
|
|
|
|
struct resource *devm_request_free_mem_region(struct device *dev,
|
|
struct resource *base, unsigned long size);
|
|
struct resource *request_free_mem_region(struct resource *base,
|
|
unsigned long size, const char *name);
|
|
struct resource *alloc_free_mem_region(struct resource *base,
|
|
unsigned long size, unsigned long align, const char *name);
|
|
|
|
/* Mark @res as a disabled, unassigned IRQ resource for interrupt @irq. */
static inline void irqresource_disabled(struct resource *res, u32 irq)
{
	res->start = res->end = irq;
	res->flags |= IORESOURCE_IRQ | IORESOURCE_DISABLED | IORESOURCE_UNSET;
}
|
|
|
|
extern struct address_space *iomem_get_mapping(void);
|
|
|
|
#endif /* __ASSEMBLY__ */
|
|
#endif /* _LINUX_IOPORT_H */
|