| // SPDX-License-Identifier: GPL-2.0 |
| /* |
| * PCI Bus Services, see include/linux/pci.h for further explanation. |
| * |
| * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, |
| * David Mosberger-Tang |
| * |
| * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz> |
| */ |
| |
| #include <linux/acpi.h> |
| #include <linux/kernel.h> |
| #include <linux/delay.h> |
| #include <linux/dmi.h> |
| #include <linux/init.h> |
| #include <linux/msi.h> |
| #include <linux/of.h> |
| #include <linux/pci.h> |
| #include <linux/pm.h> |
| #include <linux/slab.h> |
| #include <linux/module.h> |
| #include <linux/spinlock.h> |
| #include <linux/string.h> |
| #include <linux/log2.h> |
| #include <linux/logic_pio.h> |
| #include <linux/pm_wakeup.h> |
| #include <linux/interrupt.h> |
| #include <linux/device.h> |
| #include <linux/pm_runtime.h> |
| #include <linux/pci_hotplug.h> |
| #include <linux/vmalloc.h> |
| #include <asm/dma.h> |
| #include <linux/aer.h> |
| #include <linux/bitfield.h> |
| #include "pci.h" |
| |
| DEFINE_MUTEX(pci_slot_mutex); |
| |
| const char *pci_power_names[] = { |
| "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", |
| }; |
| EXPORT_SYMBOL_GPL(pci_power_names); |
| |
| #ifdef CONFIG_X86_32 |
| int isa_dma_bridge_buggy; |
| EXPORT_SYMBOL(isa_dma_bridge_buggy); |
| #endif |
| |
| int pci_pci_problems; |
| EXPORT_SYMBOL(pci_pci_problems); |
| |
| unsigned int pci_pm_d3hot_delay; |
| |
| static void pci_pme_list_scan(struct work_struct *work); |
| |
| static LIST_HEAD(pci_pme_list); |
| static DEFINE_MUTEX(pci_pme_list_mutex); |
| static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); |
| |
| struct pci_pme_device { |
| struct list_head list; |
| struct pci_dev *dev; |
| }; |
| |
| #define PME_TIMEOUT 1000 /* msecs between PME checks */ |
| |
| static void pci_dev_d3_sleep(struct pci_dev *dev) |
| { |
| unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay); |
| unsigned int upper; |
| |
| if (delay_ms) { |
| /* Use a 20% upper bound, 1ms minimum */ |
| upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U); |
| usleep_range(delay_ms * USEC_PER_MSEC, |
| (delay_ms + upper) * USEC_PER_MSEC); |
| } |
| } |
| |
| bool pci_reset_supported(struct pci_dev *dev) |
| { |
| return dev->reset_methods[0] != 0; |
| } |
| |
| #ifdef CONFIG_PCI_DOMAINS |
| int pci_domains_supported = 1; |
| #endif |
| |
| #define DEFAULT_CARDBUS_IO_SIZE (256) |
| #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024) |
| /* pci=cbmemsize=nnM,cbiosize=nn can override this */ |
| unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; |
| unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; |
| |
| #define DEFAULT_HOTPLUG_IO_SIZE (256) |
| #define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024) |
| #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024) |
| /* pci=hpiosize=nn can override this */ |
| unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
| /* |
| * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size, |
| * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size; |
| * pci=hpmemsize=nnM overrides both |
| */ |
| unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE; |
| unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE; |
| |
| #define DEFAULT_HOTPLUG_BUS_SIZE 1 |
| unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; |
| |
| |
| /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */ |
| #ifdef CONFIG_PCIE_BUS_TUNE_OFF |
| enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; |
| #elif defined CONFIG_PCIE_BUS_SAFE |
| enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; |
| #elif defined CONFIG_PCIE_BUS_PERFORMANCE |
| enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; |
| #elif defined CONFIG_PCIE_BUS_PEER2PEER |
| enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER; |
| #else |
| enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; |
| #endif |
| |
| /* |
| * The default CLS is used if the arch didn't set CLS explicitly and |
| * not all PCI devices agree on the same value. The arch can override |
| * either the default or the actual value as it sees fit. Note that |
| * CLS is measured in 32-bit words, not bytes. |
| */ |
| u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2; |
| u8 pci_cache_line_size; |
| |
| /* |
| * If we set up a device for bus mastering, we need to check the latency |
| * timer as certain BIOSes forget to set it properly. |
| */ |
| unsigned int pcibios_max_latency = 255; |
| |
| /* If set, the PCIe ARI capability will not be used. */ |
| static bool pcie_ari_disabled; |
| |
| /* If set, the PCIe ATS capability will not be used. */ |
| static bool pcie_ats_disabled; |
| |
| /* If set, the PCI config space of each device is printed during boot. */ |
| bool pci_early_dump; |
| |
| bool pci_ats_disabled(void) |
| { |
| return pcie_ats_disabled; |
| } |
| EXPORT_SYMBOL_GPL(pci_ats_disabled); |
| |
| /* Disable bridge_d3 for all PCIe ports */ |
| static bool pci_bridge_d3_disable; |
| /* Force bridge_d3 for all PCIe ports */ |
| static bool pci_bridge_d3_force; |
| |
| static int __init pcie_port_pm_setup(char *str) |
| { |
| if (!strcmp(str, "off")) |
| pci_bridge_d3_disable = true; |
| else if (!strcmp(str, "force")) |
| pci_bridge_d3_force = true; |
| return 1; |
| } |
| __setup("pcie_port_pm=", pcie_port_pm_setup); |
| |
| /** |
| * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children |
| * @bus: pointer to PCI bus structure to search |
| * |
| * Given a PCI bus, returns the highest PCI bus number present in the set |
| * including the given PCI bus and its list of child PCI buses. |
| */ |
| unsigned char pci_bus_max_busnr(struct pci_bus *bus) |
| { |
| struct pci_bus *tmp; |
| unsigned char max, n; |
| |
| max = bus->busn_res.end; |
| list_for_each_entry(tmp, &bus->children, node) { |
| n = pci_bus_max_busnr(tmp); |
| if (n > max) |
| max = n; |
| } |
| return max; |
| } |
| EXPORT_SYMBOL_GPL(pci_bus_max_busnr); |
| |
| /** |
| * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS |
| * @pdev: the PCI device |
| * |
| * Returns error bits set in PCI_STATUS and clears them. |
| */ |
| int pci_status_get_and_clear_errors(struct pci_dev *pdev) |
| { |
| u16 status; |
| int ret; |
| |
| ret = pci_read_config_word(pdev, PCI_STATUS, &status); |
| if (ret != PCIBIOS_SUCCESSFUL) |
| return -EIO; |
| |
| status &= PCI_STATUS_ERROR_BITS; |
| if (status) |
| pci_write_config_word(pdev, PCI_STATUS, status); |
| |
| return status; |
| } |
| EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors); |
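| |
| /* |
| * Example: a driver's error handler snapshotting and acknowledging the |
| * PCI_STATUS error bits (an illustrative sketch, not a complete handler): |
| * |
| *	int status = pci_status_get_and_clear_errors(pdev); |
| * |
| *	if (status > 0 && (status & PCI_STATUS_DETECTED_PARITY)) |
| *		pci_warn(pdev, "detected parity error\n"); |
| */ |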
| |
| #ifdef CONFIG_HAS_IOMEM |
| static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar, |
| bool write_combine) |
| { |
| struct resource *res = &pdev->resource[bar]; |
| resource_size_t start = res->start; |
| resource_size_t size = resource_size(res); |
| |
| /* |
| * Make sure the BAR is actually a memory resource, not an IO resource |
| */ |
| if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) { |
| pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res); |
| return NULL; |
| } |
| |
| if (write_combine) |
| return ioremap_wc(start, size); |
| |
| return ioremap(start, size); |
| } |
| |
| void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) |
| { |
| return __pci_ioremap_resource(pdev, bar, false); |
| } |
| EXPORT_SYMBOL_GPL(pci_ioremap_bar); |
| |
| void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar) |
| { |
| return __pci_ioremap_resource(pdev, bar, true); |
| } |
| EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar); |
| #endif |
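| |
| /* |
| * Example: mapping a memory BAR in a driver's probe path (a minimal |
| * sketch assuming BAR 0 of the hypothetical device is a memory BAR): |
| * |
| *	void __iomem *regs = pci_ioremap_bar(pdev, 0); |
| * |
| *	if (!regs) |
| *		return -ENOMEM; |
| */ |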
| |
| /** |
| * pci_dev_str_match_path - test if a path string matches a device |
| * @dev: the PCI device to test |
| * @path: string to match the device against |
| * @endptr: pointer to the string after the match |
| * |
| * Test if a string (typically from a kernel parameter) formatted as a |
| * path of device/function addresses matches a PCI device. The string must |
| * be of the form: |
| * |
| * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* |
| * |
| * A path for a device can be obtained using 'lspci -t'. Using a path |
| * is more robust against bus renumbering than using only a single bus, |
| * device and function address. |
| * |
| * Returns 1 if the string matches the device, 0 if it does not and |
| * a negative error code if it fails to parse the string. |
| */ |
| static int pci_dev_str_match_path(struct pci_dev *dev, const char *path, |
| const char **endptr) |
| { |
| int ret; |
| unsigned int seg, bus, slot, func; |
| char *wpath, *p; |
| char end; |
| |
| *endptr = strchrnul(path, ';'); |
| |
| wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); |
| if (!wpath) |
| return -ENOMEM; |
| |
| while (1) { |
| p = strrchr(wpath, '/'); |
| if (!p) |
| break; |
| ret = sscanf(p, "/%x.%x%c", &slot, &func, &end); |
| if (ret != 2) { |
| ret = -EINVAL; |
| goto free_and_exit; |
| } |
| |
| if (dev->devfn != PCI_DEVFN(slot, func)) { |
| ret = 0; |
| goto free_and_exit; |
| } |
| |
| /* |
| * Note: we don't need to get a reference to the upstream |
| * bridge because we hold a reference to the top level |
| * device which should hold a reference to the bridge, |
| * and so on. |
| */ |
| dev = pci_upstream_bridge(dev); |
| if (!dev) { |
| ret = 0; |
| goto free_and_exit; |
| } |
| |
| *p = 0; |
| } |
| |
| ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot, |
| &func, &end); |
| if (ret != 4) { |
| seg = 0; |
| ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end); |
| if (ret != 3) { |
| ret = -EINVAL; |
| goto free_and_exit; |
| } |
| } |
| |
| ret = (seg == pci_domain_nr(dev->bus) && |
| bus == dev->bus->number && |
| dev->devfn == PCI_DEVFN(slot, func)); |
| |
| free_and_exit: |
| kfree(wpath); |
| return ret; |
| } |
| |
| /** |
| * pci_dev_str_match - test if a string matches a device |
| * @dev: the PCI device to test |
| * @p: string to match the device against |
| * @endptr: pointer to the string after the match |
| * |
| * Test if a string (typically from a kernel parameter) matches a specified |
| * PCI device. The string may be of one of the following formats: |
| * |
| * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* |
| * pci:<vendor>:<device>[:<subvendor>:<subdevice>] |
| * |
| * The first format specifies a PCI bus/device/function address which |
| * may change if new hardware is inserted, if motherboard firmware changes, |
| * or due to changes in kernel parameters. If the domain is |
| * left unspecified, it is taken to be 0. In order to be robust against |
| * bus renumbering issues, a path of PCI device/function numbers may be used |
| * to address the specific device. The path for a device can be determined |
| * through the use of 'lspci -t'. |
| * |
| * The second format matches devices using IDs in the configuration |
| * space which may match multiple devices in the system. A value of 0 |
| * for any field will match all devices. (Note: this differs from |
| * in-kernel code that uses PCI_ANY_ID which is ~0; this is for |
| * legacy reasons and convenience so users don't have to specify |
| * FFFFFFFFs on the command line.) |
| * |
| * Returns 1 if the string matches the device, 0 if it does not and |
| * a negative error code if the string cannot be parsed. |
| */ |
| static int pci_dev_str_match(struct pci_dev *dev, const char *p, |
| const char **endptr) |
| { |
| int ret; |
| int count; |
| unsigned short vendor, device, subsystem_vendor, subsystem_device; |
| |
| if (strncmp(p, "pci:", 4) == 0) { |
| /* PCI vendor/device (subvendor/subdevice) IDs are specified */ |
| p += 4; |
| ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device, |
| &subsystem_vendor, &subsystem_device, &count); |
| if (ret != 4) { |
| ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count); |
| if (ret != 2) |
| return -EINVAL; |
| |
| subsystem_vendor = 0; |
| subsystem_device = 0; |
| } |
| |
| p += count; |
| |
| if ((!vendor || vendor == dev->vendor) && |
| (!device || device == dev->device) && |
| (!subsystem_vendor || |
| subsystem_vendor == dev->subsystem_vendor) && |
| (!subsystem_device || |
| subsystem_device == dev->subsystem_device)) |
| goto found; |
| } else { |
| /* |
| * PCI Bus, Device, Function IDs are specified |
| * (optionally, may include a path of devfns following it) |
| */ |
| ret = pci_dev_str_match_path(dev, p, &p); |
| if (ret < 0) |
| return ret; |
| else if (ret) |
| goto found; |
| } |
| |
| *endptr = p; |
| return 0; |
| |
| found: |
| *endptr = p; |
| return 1; |
| } |
| |
| static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn, |
| u8 pos, int cap, int *ttl) |
| { |
| u8 id; |
| u16 ent; |
| |
| pci_bus_read_config_byte(bus, devfn, pos, &pos); |
| |
| while ((*ttl)--) { |
| if (pos < 0x40) |
| break; |
| pos &= ~3; |
| pci_bus_read_config_word(bus, devfn, pos, &ent); |
| |
| id = ent & 0xff; |
| if (id == 0xff) |
| break; |
| if (id == cap) |
| return pos; |
| pos = (ent >> 8); |
| } |
| return 0; |
| } |
| |
| static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, |
| u8 pos, int cap) |
| { |
| int ttl = PCI_FIND_CAP_TTL; |
| |
| return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); |
| } |
| |
| u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) |
| { |
| return __pci_find_next_cap(dev->bus, dev->devfn, |
| pos + PCI_CAP_LIST_NEXT, cap); |
| } |
| EXPORT_SYMBOL_GPL(pci_find_next_capability); |
| |
| static u8 __pci_bus_find_cap_start(struct pci_bus *bus, |
| unsigned int devfn, u8 hdr_type) |
| { |
| u16 status; |
| |
| pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); |
| if (!(status & PCI_STATUS_CAP_LIST)) |
| return 0; |
| |
| switch (hdr_type) { |
| case PCI_HEADER_TYPE_NORMAL: |
| case PCI_HEADER_TYPE_BRIDGE: |
| return PCI_CAPABILITY_LIST; |
| case PCI_HEADER_TYPE_CARDBUS: |
| return PCI_CB_CAPABILITY_LIST; |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * pci_find_capability - query for a device's capabilities |
| * @dev: PCI device to query |
| * @cap: capability code |
| * |
| * Tell if a device supports a given PCI capability. |
| * Returns the address of the requested capability structure within the |
| * device's PCI configuration space or 0 in case the device does not |
| * support it. Possible values for @cap include: |
| * |
| * %PCI_CAP_ID_PM Power Management |
| * %PCI_CAP_ID_AGP Accelerated Graphics Port |
| * %PCI_CAP_ID_VPD Vital Product Data |
| * %PCI_CAP_ID_SLOTID Slot Identification |
| * %PCI_CAP_ID_MSI Message Signalled Interrupts |
| * %PCI_CAP_ID_CHSWP CompactPCI HotSwap |
| * %PCI_CAP_ID_PCIX PCI-X |
| * %PCI_CAP_ID_EXP PCI Express |
| */ |
| u8 pci_find_capability(struct pci_dev *dev, int cap) |
| { |
| u8 pos; |
| |
| pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); |
| if (pos) |
| pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); |
| |
| return pos; |
| } |
| EXPORT_SYMBOL(pci_find_capability); |
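| |
| /* |
| * Example: locating the Power Management capability and reading its PMC |
| * register (a minimal sketch; error handling is omitted): |
| * |
| *	u16 pmc; |
| *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM); |
| * |
| *	if (pm) |
| *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc); |
| */ |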
| |
| /** |
| * pci_bus_find_capability - query for a device's capabilities |
| * @bus: the PCI bus to query |
| * @devfn: PCI device to query |
| * @cap: capability code |
| * |
| * Like pci_find_capability() but works for PCI devices that do not have a |
| * pci_dev structure set up yet. |
| * |
| * Returns the address of the requested capability structure within the |
| * device's PCI configuration space or 0 in case the device does not |
| * support it. |
| */ |
| u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) |
| { |
| u8 hdr_type, pos; |
| |
| pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); |
| |
| pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & PCI_HEADER_TYPE_MASK); |
| if (pos) |
| pos = __pci_find_next_cap(bus, devfn, pos, cap); |
| |
| return pos; |
| } |
| EXPORT_SYMBOL(pci_bus_find_capability); |
| |
| /** |
| * pci_find_next_ext_capability - Find an extended capability |
| * @dev: PCI device to query |
| * @start: address at which to start looking (0 to start at beginning of list) |
| * @cap: capability code |
| * |
| * Returns the address of the next matching extended capability structure |
| * within the device's PCI configuration space or 0 if the device does |
| * not support it. Some capabilities can occur several times, e.g., the |
| * vendor-specific capability, and this provides a way to find them all. |
| */ |
| u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap) |
| { |
| u32 header; |
| int ttl; |
| u16 pos = PCI_CFG_SPACE_SIZE; |
| |
| /* minimum 8 bytes per capability */ |
| ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; |
| |
| if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) |
| return 0; |
| |
| if (start) |
| pos = start; |
| |
| if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) |
| return 0; |
| |
| /* |
| * If we have no capabilities, this is indicated by cap ID, |
| * cap version and next pointer all being 0. |
| */ |
| if (header == 0) |
| return 0; |
| |
| while (ttl-- > 0) { |
| if (PCI_EXT_CAP_ID(header) == cap && pos != start) |
| return pos; |
| |
| pos = PCI_EXT_CAP_NEXT(header); |
| if (pos < PCI_CFG_SPACE_SIZE) |
| break; |
| |
| if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) |
| break; |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); |
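| |
| /* |
| * Example: walking every instance of the Vendor-Specific Extended |
| * Capability, which may appear several times per device (a sketch; |
| * pci_find_vsec_capability() below does much the same thing): |
| * |
| *	u16 pos = 0; |
| * |
| *	while ((pos = pci_find_next_ext_capability(pdev, pos, |
| *						   PCI_EXT_CAP_ID_VNDR))) |
| *		pci_info(pdev, "VSEC at %#x\n", pos); |
| */ |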
| |
| /** |
| * pci_find_ext_capability - Find an extended capability |
| * @dev: PCI device to query |
| * @cap: capability code |
| * |
| * Returns the address of the requested extended capability structure |
| * within the device's PCI configuration space or 0 if the device does |
| * not support it. Possible values for @cap include: |
| * |
| * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting |
| * %PCI_EXT_CAP_ID_VC Virtual Channel |
| * %PCI_EXT_CAP_ID_DSN Device Serial Number |
| * %PCI_EXT_CAP_ID_PWR Power Budgeting |
| */ |
| u16 pci_find_ext_capability(struct pci_dev *dev, int cap) |
| { |
| return pci_find_next_ext_capability(dev, 0, cap); |
| } |
| EXPORT_SYMBOL_GPL(pci_find_ext_capability); |
| |
| /** |
| * pci_get_dsn - Read and return the 8-byte Device Serial Number |
| * @dev: PCI device to query |
| * |
| * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial |
| * Number. |
| * |
| * Returns the DSN, or zero if the capability does not exist. |
| */ |
| u64 pci_get_dsn(struct pci_dev *dev) |
| { |
| u32 dword; |
| u64 dsn; |
| int pos; |
| |
| pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); |
| if (!pos) |
| return 0; |
| |
| /* |
| * The Device Serial Number is two dwords offset 4 bytes from the |
| * capability position. The specification says that the first dword is |
| * the lower half, and the second dword is the upper half. |
| */ |
| pos += 4; |
| pci_read_config_dword(dev, pos, &dword); |
| dsn = (u64)dword; |
| pci_read_config_dword(dev, pos + 4, &dword); |
| dsn |= ((u64)dword) << 32; |
| |
| return dsn; |
| } |
| EXPORT_SYMBOL_GPL(pci_get_dsn); |
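| |
| /* |
| * Example (sketch): reporting the DSN, if the device exposes one: |
| * |
| *	u64 dsn = pci_get_dsn(pdev); |
| * |
| *	if (dsn) |
| *		pci_info(pdev, "serial number %016llx\n", dsn); |
| */ |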
| |
| static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap) |
| { |
| int rc, ttl = PCI_FIND_CAP_TTL; |
| u8 cap, mask; |
| |
| if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) |
| mask = HT_3BIT_CAP_MASK; |
| else |
| mask = HT_5BIT_CAP_MASK; |
| |
| pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, |
| PCI_CAP_ID_HT, &ttl); |
| while (pos) { |
| rc = pci_read_config_byte(dev, pos + 3, &cap); |
| if (rc != PCIBIOS_SUCCESSFUL) |
| return 0; |
| |
| if ((cap & mask) == ht_cap) |
| return pos; |
| |
| pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, |
| pos + PCI_CAP_LIST_NEXT, |
| PCI_CAP_ID_HT, &ttl); |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * pci_find_next_ht_capability - query a device's HyperTransport capabilities |
| * @dev: PCI device to query |
| * @pos: Position from which to continue searching |
| * @ht_cap: HyperTransport capability code |
| * |
| * To be used in conjunction with pci_find_ht_capability() to search for |
| * all capabilities matching @ht_cap. @pos should always be a value returned |
| * from pci_find_ht_capability(). |
| * |
| * NB. To be 100% safe against broken PCI devices, the caller should take |
| * steps to avoid an infinite loop. |
| */ |
| u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap) |
| { |
| return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); |
| } |
| EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); |
| |
| /** |
| * pci_find_ht_capability - query a device's HyperTransport capabilities |
| * @dev: PCI device to query |
| * @ht_cap: HyperTransport capability code |
| * |
| * Tell if a device supports a given HyperTransport capability. |
| * Returns an address within the device's PCI configuration space |
| * or 0 in case the device does not support the requested capability. |
| * The address points to the PCI capability, of type PCI_CAP_ID_HT, |
| * which has a HyperTransport capability matching @ht_cap. |
| */ |
| u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) |
| { |
| u8 pos; |
| |
| pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); |
| if (pos) |
| pos = __pci_find_next_ht_cap(dev, pos, ht_cap); |
| |
| return pos; |
| } |
| EXPORT_SYMBOL_GPL(pci_find_ht_capability); |
| |
| /** |
| * pci_find_vsec_capability - Find a vendor-specific extended capability |
| * @dev: PCI device to query |
| * @vendor: Vendor ID for which capability is defined |
| * @cap: Vendor-specific capability ID |
| * |
| * If @dev has Vendor ID @vendor, search for a VSEC capability with |
| * VSEC ID @cap. If found, return the capability offset in |
| * config space; otherwise return 0. |
| */ |
| u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) |
| { |
| u16 vsec = 0; |
| u32 header; |
| |
| if (vendor != dev->vendor) |
| return 0; |
| |
| while ((vsec = pci_find_next_ext_capability(dev, vsec, |
| PCI_EXT_CAP_ID_VNDR))) { |
| if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, |
| &header) == PCIBIOS_SUCCESSFUL && |
| PCI_VNDR_HEADER_ID(header) == cap) |
| return vsec; |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(pci_find_vsec_capability); |
| |
| /** |
| * pci_find_dvsec_capability - Find DVSEC for vendor |
| * @dev: PCI device to query |
| * @vendor: Vendor ID to match for the DVSEC |
| * @dvsec: Designated Vendor-specific capability ID |
| * |
| * If the DVSEC has Vendor ID @vendor and DVSEC ID @dvsec, return the |
| * capability offset in config space; otherwise return 0. |
| */ |
| u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec) |
| { |
| int pos; |
| |
| pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC); |
| if (!pos) |
| return 0; |
| |
| while (pos) { |
| u16 v, id; |
| |
| pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v); |
| pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id); |
| if (vendor == v && dvsec == id) |
| return pos; |
| |
| pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC); |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(pci_find_dvsec_capability); |
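| |
| /* |
| * Example: a driver locating its own DVSEC, where the vendor ID and |
| * DVSEC ID below are hypothetical placeholders (a minimal sketch): |
| * |
| *	u16 pos = pci_find_dvsec_capability(pdev, 0x1234, 0x1); |
| * |
| *	if (!pos) |
| *		return -ENODEV; |
| */ |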
| |
| /** |
| * pci_find_parent_resource - return resource region of parent bus of given |
| * region |
| * @dev: PCI device structure contains resources to be searched |
| * @res: child resource record for which parent is sought |
| * |
| * For the given resource region of the given device, return the resource |
| * region of the parent bus that contains the given region. |
| */ |
| struct resource *pci_find_parent_resource(const struct pci_dev *dev, |
| struct resource *res) |
| { |
| const struct pci_bus *bus = dev->bus; |
| struct resource *r; |
| int i; |
| |
| pci_bus_for_each_resource(bus, r, i) { |
| if (!r) |
| continue; |
| if (resource_contains(r, res)) { |
| |
| /* |
| * If the window is prefetchable but the BAR is |
| * not, the allocator made a mistake. |
| */ |
| if (r->flags & IORESOURCE_PREFETCH && |
| !(res->flags & IORESOURCE_PREFETCH)) |
| return NULL; |
| |
| /* |
| * If we're below a transparent bridge, there may |
| * be both a positively-decoded aperture and a |
| * subtractively-decoded region that contain the BAR. |
| * We want the positively-decoded one, so this depends |
| * on pci_bus_for_each_resource() giving us those |
| * first. |
| */ |
| return r; |
| } |
| } |
| return NULL; |
| } |
| EXPORT_SYMBOL(pci_find_parent_resource); |
| |
| /** |
| * pci_find_resource - Return matching PCI device resource |
| * @dev: PCI device to query |
| * @res: Resource to look for |
| * |
| * Goes over standard PCI resources (BARs) and checks if the given resource |
| * is partially or fully contained in any of them. In that case the |
| * matching resource is returned, %NULL otherwise. |
| */ |
| struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) |
| { |
| int i; |
| |
| for (i = 0; i < PCI_STD_NUM_BARS; i++) { |
| struct resource *r = &dev->resource[i]; |
| |
| if (r->start && resource_contains(r, res)) |
| return r; |
| } |
| |
| return NULL; |
| } |
| EXPORT_SYMBOL(pci_find_resource); |
| |
| /** |
| * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos |
| * @dev: the PCI device to operate on |
| * @pos: config space offset of status word |
| * @mask: mask of bit(s) to care about in status word |
| * |
| * Return 1 when the mask bit(s) in the status word are clear, 0 otherwise. |
| */ |
| int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) |
| { |
| int i; |
| |
| /* Wait for the Transaction Pending bit to clear */ |
| for (i = 0; i < 4; i++) { |
| u16 status; |
| if (i) |
| msleep((1 << (i - 1)) * 100); |
| |
| pci_read_config_word(dev, pos, &status); |
| if (!(status & mask)) |
| return 1; |
| } |
| |
| return 0; |
| } |
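| |
| /* |
| * Example: waiting for a PCIe device's Transaction Pending bit to clear |
| * before resetting it (a sketch assuming @dev is a PCIe device): |
| * |
| *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, |
| *				  PCI_EXP_DEVSTA_TRPND)) |
| *		pci_err(dev, "transactions still pending\n"); |
| */ |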
| |
| static int pci_acs_enable; |
| |
| /** |
| * pci_request_acs - ask for ACS to be enabled if supported |
| */ |
| void pci_request_acs(void) |
| { |
| pci_acs_enable = 1; |
| } |
| |
| static const char *disable_acs_redir_param; |
| |
| /** |
| * pci_disable_acs_redir - disable ACS redirect capabilities |
| * @dev: the PCI device |
| * |
| * Only for devices specified via the disable_acs_redir kernel parameter. |
| */ |
| static void pci_disable_acs_redir(struct pci_dev *dev) |
| { |
| int ret = 0; |
| const char *p; |
| int pos; |
| u16 ctrl; |
| |
| if (!disable_acs_redir_param) |
| return; |
| |
| p = disable_acs_redir_param; |
| while (*p) { |
| ret = pci_dev_str_match(dev, p, &p); |
| if (ret < 0) { |
| pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", |
| disable_acs_redir_param); |
| |
| break; |
| } else if (ret == 1) { |
| /* Found a match */ |
| break; |
| } |
| |
| if (*p != ';' && *p != ',') { |
| /* End of param or invalid format */ |
| break; |
| } |
| p++; |
| } |
| |
| if (ret != 1) |
| return; |
| |
| if (!pci_dev_specific_disable_acs_redir(dev)) |
| return; |
| |
| pos = dev->acs_cap; |
| if (!pos) { |
| pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); |
| return; |
| } |
| |
| pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
| |
| /* P2P Request & Completion Redirect */ |
| ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); |
| |
| pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
| |
| pci_info(dev, "disabled ACS redirect\n"); |
| } |
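| |
| /* |
| * For example, booting with the hypothetical values |
| * "pci=disable_acs_redir=0000:00:1c.0;pci:8086:0000" disables ACS |
| * redirect on the device at 0000:00:1c.0 and on every device with |
| * vendor ID 8086, using the formats accepted by pci_dev_str_match(). |
| */ |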
| |
| /** |
| * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities |
| * @dev: the PCI device |
| */ |
| static void pci_std_enable_acs(struct pci_dev *dev) |
| { |
| int pos; |
| u16 cap; |
| u16 ctrl; |
| |
| pos = dev->acs_cap; |
| if (!pos) |
| return; |
| |
| pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); |
| pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); |
| |
| /* Source Validation */ |
| ctrl |= (cap & PCI_ACS_SV); |
| |
| /* P2P Request Redirect */ |
| ctrl |= (cap & PCI_ACS_RR); |
| |
| /* P2P Completion Redirect */ |
| ctrl |= (cap & PCI_ACS_CR); |
| |
| /* Upstream Forwarding */ |
| ctrl |= (cap & PCI_ACS_UF); |
| |
| /* Enable Translation Blocking for external devices and when ATS is disabled */ |
| if (pci_ats_disabled() || dev->external_facing || dev->untrusted) |
| ctrl |= (cap & PCI_ACS_TB); |
| |
| pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); |
| } |
| |
| /** |
| * pci_enable_acs - enable ACS if the hardware supports it |
| * @dev: the PCI device |
| */ |
| static void pci_enable_acs(struct pci_dev *dev) |
| { |
| if (!pci_acs_enable) |
| goto disable_acs_redir; |
| |
| if (!pci_dev_specific_enable_acs(dev)) |
| goto disable_acs_redir; |
| |
| pci_std_enable_acs(dev); |
| |
| disable_acs_redir: |
| /* |
| * Note: pci_disable_acs_redir() must be called even if ACS was not |
| * enabled by the kernel because it may have been enabled by |
| * platform firmware. So if we are told to disable it, we should |
| * always disable it after setting the kernel's default |
| * preferences. |
| */ |
| pci_disable_acs_redir(dev); |
| } |
| |
| /** |
| * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) |
| * @dev: PCI device to have its BARs restored |
| * |
| * Restore the BAR values for a given device, so as to make it |
| * accessible by its driver. |
| */ |
| static void pci_restore_bars(struct pci_dev *dev) |
| { |
| int i; |
| |
| for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) |
| pci_update_resource(dev, i); |
| } |
| |
| static inline bool platform_pci_power_manageable(struct pci_dev *dev) |
| { |
| if (pci_use_mid_pm()) |
| return true; |
| |
| return acpi_pci_power_manageable(dev); |
| } |
| |
| static inline int platform_pci_set_power_state(struct pci_dev *dev, |
| pci_power_t t) |
| { |
| if (pci_use_mid_pm()) |
| return mid_pci_set_power_state(dev, t); |
| |
| return acpi_pci_set_power_state(dev, t); |
| } |
| |
| static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) |
| { |
| if (pci_use_mid_pm()) |
| return mid_pci_get_power_state(dev); |
| |
| return acpi_pci_get_power_state(dev); |
| } |
| |
| static inline void platform_pci_refresh_power_state(struct pci_dev *dev) |
| { |
| if (!pci_use_mid_pm()) |
| acpi_pci_refresh_power_state(dev); |
| } |
| |
| static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) |
| { |
| if (pci_use_mid_pm()) |
| return PCI_POWER_ERROR; |
| |
| return acpi_pci_choose_state(dev); |
| } |
| |
| static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) |
| { |
| if (pci_use_mid_pm()) |
| return PCI_POWER_ERROR; |
| |
| return acpi_pci_wakeup(dev, enable); |
| } |
| |
| static inline bool platform_pci_need_resume(struct pci_dev *dev) |
| { |
| if (pci_use_mid_pm()) |
| return false; |
| |
| return acpi_pci_need_resume(dev); |
| } |
| |
| static inline bool platform_pci_bridge_d3(struct pci_dev *dev) |
| { |
| if (pci_use_mid_pm()) |
| return false; |
| |
| return acpi_pci_bridge_d3(dev); |
| } |
| |
| /** |
| * pci_update_current_state - Read power state of given device and cache it |
| * @dev: PCI device to handle. |
| * @state: State to cache in case the device doesn't have the PM capability |
| * |
| * The power state is read from the PMCSR register, which however is |
| * inaccessible in D3cold. The platform firmware is therefore queried first |
| * to detect accessibility of the register. In case the platform firmware |
| * reports an incorrect state or the device isn't power manageable by the |
| * platform at all, we try to detect D3cold by testing accessibility of the |
| * vendor ID in config space. |
| */ |
| void pci_update_current_state(struct pci_dev *dev, pci_power_t state) |
| { |
| if (platform_pci_get_power_state(dev) == PCI_D3cold) { |
| dev->current_state = PCI_D3cold; |
| } else if (dev->pm_cap) { |
| u16 pmcsr; |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| if (PCI_POSSIBLE_ERROR(pmcsr)) { |
| dev->current_state = PCI_D3cold; |
| return; |
| } |
| dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; |
| } else { |
| dev->current_state = state; |
| } |
| } |
| |
| /** |
| * pci_refresh_power_state - Refresh the given device's power state data |
| * @dev: Target PCI device. |
| * |
| * Ask the platform to refresh the device's power state information and invoke |
| * pci_update_current_state() to update its current PCI power state. |
| */ |
| void pci_refresh_power_state(struct pci_dev *dev) |
| { |
| platform_pci_refresh_power_state(dev); |
| pci_update_current_state(dev, dev->current_state); |
| } |
| |
| /** |
| * pci_platform_power_transition - Use platform to change device power state |
| * @dev: PCI device to handle. |
| * @state: State to put the device into. |
| */ |
| int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) |
| { |
| int error; |
| |
| error = platform_pci_set_power_state(dev, state); |
| if (!error) |
| pci_update_current_state(dev, state); |
| else if (!dev->pm_cap) /* Fall back to PCI_D0 */ |
| dev->current_state = PCI_D0; |
| |
| return error; |
| } |
| EXPORT_SYMBOL_GPL(pci_platform_power_transition); |
| |
| static int pci_resume_one(struct pci_dev *pci_dev, void *ign) |
| { |
| pm_request_resume(&pci_dev->dev); |
| return 0; |
| } |
| |
| /** |
| * pci_resume_bus - Walk given bus and runtime resume devices on it |
| * @bus: Top bus of the subtree to walk. |
| */ |
| void pci_resume_bus(struct pci_bus *bus) |
| { |
| if (bus) |
| pci_walk_bus(bus, pci_resume_one, NULL); |
| } |
| |
| static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) |
| { |
| int delay = 1; |
| u32 id; |
| |
| /* |
| * After reset, the device should not silently discard config |
| * requests, but it may still indicate that it needs more time by |
| * responding to them with CRS completions. The Root Port will |
| * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete |
| * the read (except when CRS SV is enabled and the read was for the |
| * Vendor ID; in that case it synthesizes 0x0001 data). |
| * |
| * Wait for the device to return a non-CRS completion. Read the |
| * Command register instead of Vendor ID so we don't have to |
| * contend with the CRS SV value. |
| */ |
| pci_read_config_dword(dev, PCI_COMMAND, &id); |
| while (PCI_POSSIBLE_ERROR(id)) { |
| if (delay > timeout) { |
| pci_warn(dev, "not ready %dms after %s; giving up\n", |
| delay - 1, reset_type); |
| return -ENOTTY; |
| } |
| |
| if (delay > PCI_RESET_WAIT) |
| pci_info(dev, "not ready %dms after %s; waiting\n", |
| delay - 1, reset_type); |
| |
| msleep(delay); |
| delay *= 2; |
| pci_read_config_dword(dev, PCI_COMMAND, &id); |
| } |
| |
| if (delay > PCI_RESET_WAIT) |
| pci_info(dev, "ready %dms after %s\n", delay - 1, |
| reset_type); |
| |
| return 0; |
| } |
| |
| /** |
| * pci_power_up - Put the given device into D0 |
| * @dev: PCI device to power up |
| * |
| * On success, return 0 or 1, depending on whether or not it is necessary to |
| * restore the device's BARs subsequently (1 is returned in that case). |
| * |
| * On failure, return a negative error code. Always return failure if @dev |
| * lacks a Power Management Capability, even if the platform was able to |
| * put the device in D0 via non-PCI means. |
| */ |
| int pci_power_up(struct pci_dev *dev) |
| { |
| bool need_restore; |
| pci_power_t state; |
| u16 pmcsr; |
| |
| platform_pci_set_power_state(dev, PCI_D0); |
| |
| if (!dev->pm_cap) { |
| state = platform_pci_get_power_state(dev); |
| if (state == PCI_UNKNOWN) |
| dev->current_state = PCI_D0; |
| else |
| dev->current_state = state; |
| |
| return -EIO; |
| } |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| if (PCI_POSSIBLE_ERROR(pmcsr)) { |
| pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n", |
| pci_power_name(dev->current_state)); |
| dev->current_state = PCI_D3cold; |
| return -EIO; |
| } |
| |
| state = pmcsr & PCI_PM_CTRL_STATE_MASK; |
| |
| need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) && |
| !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET); |
| |
| if (state == PCI_D0) |
| goto end; |
| |
| /* |
| * Force the entire word to 0. This doesn't affect PME_Status, disables |
| * PME_En, and sets PowerState to 0. |
| */ |
| pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0); |
| |
| /* Mandatory transition delays; see PCI PM 1.2. */ |
| if (state == PCI_D3hot) |
| pci_dev_d3_sleep(dev); |
| else if (state == PCI_D2) |
| udelay(PCI_PM_D2_DELAY); |
| |
| end: |
| dev->current_state = PCI_D0; |
| if (need_restore) |
| return 1; |
| |
| return 0; |
| } |
| |
| /** |
| * pci_set_full_power_state - Put a PCI device into D0 and update its state |
| * @dev: PCI device to power up |
| * |
| * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register |
| * to confirm the state change, restore its BARs if they might be lost and |
| * reconfigure ASPM in accordance with the new power state. |
| * |
| * If pci_restore_state() is going to be called right after a power state change |
| * to D0, it is more efficient to use pci_power_up() directly instead of this |
| * function. |
| */ |
| static int pci_set_full_power_state(struct pci_dev *dev) |
| { |
| u16 pmcsr; |
| int ret; |
| |
| ret = pci_power_up(dev); |
| if (ret < 0) { |
| if (dev->current_state == PCI_D0) |
| return 0; |
| |
| return ret; |
| } |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; |
| if (dev->current_state != PCI_D0) { |
| pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n", |
| pci_power_name(dev->current_state)); |
| } else if (ret > 0) { |
| /* |
| * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT |
| * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning |
| * from D3hot to D0 _may_ perform an internal reset, thereby |
| * going to "D0 Uninitialized" rather than "D0 Initialized". |
| * For example, at least some versions of the 3c905B and the |
| * 3c556B exhibit this behaviour. |
| * |
| * At least some laptop BIOSes (e.g. the Thinkpad T21) leave |
| * devices in a D3hot state at boot. Consequently, we need to |
| * restore at least the BARs so that the device will be |
| * accessible to its driver. |
| */ |
| pci_restore_bars(dev); |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * __pci_dev_set_current_state - Set current state of a PCI device |
| * @dev: Device to handle |
| * @data: pointer to state to be set |
| */ |
| static int __pci_dev_set_current_state(struct pci_dev *dev, void *data) |
| { |
| pci_power_t state = *(pci_power_t *)data; |
| |
| dev->current_state = state; |
| return 0; |
| } |
| |
| /** |
| * pci_bus_set_current_state - Walk given bus and set current state of devices |
| * @bus: Top bus of the subtree to walk. |
| * @state: state to be set |
| */ |
| void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) |
| { |
| if (bus) |
| pci_walk_bus(bus, __pci_dev_set_current_state, &state); |
| } |
| |
| /** |
| * pci_set_low_power_state - Put a PCI device into a low-power state. |
| * @dev: PCI device to handle. |
| * @state: PCI power state (D1, D2, D3hot) to put the device into. |
| * |
| * Use the device's PCI_PM_CTRL register to put it into a low-power state. |
| * |
| * RETURN VALUE: |
| * -EINVAL if the requested state is invalid. |
| * -EIO if device does not support PCI PM or its PM capabilities register has a |
| * wrong version, or device doesn't support the requested state. |
| * 0 if device already is in the requested state. |
| * 0 if device's power state has been successfully changed. |
| */ |
| static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) |
| { |
| u16 pmcsr; |
| |
| if (!dev->pm_cap) |
| return -EIO; |
| |
| /* |
| * Validate transition: We can enter D0 from any state, but if |
| * we're already in a low-power state, we can only go deeper. E.g., |
| * we can go from D1 to D3, but we can't go directly from D3 to D1; |
| * we'd have to go from D3 to D0, then to D1. |
| */ |
| if (dev->current_state <= PCI_D3cold && dev->current_state > state) { |
| pci_dbg(dev, "Invalid power transition (from %s to %s)\n", |
| pci_power_name(dev->current_state), |
| pci_power_name(state)); |
| return -EINVAL; |
| } |
| |
| /* Check if this device supports the desired state */ |
| if ((state == PCI_D1 && !dev->d1_support) |
| || (state == PCI_D2 && !dev->d2_support)) |
| return -EIO; |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| if (PCI_POSSIBLE_ERROR(pmcsr)) { |
| pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n", |
| pci_power_name(dev->current_state), |
| pci_power_name(state)); |
| dev->current_state = PCI_D3cold; |
| return -EIO; |
| } |
| |
| pmcsr &= ~PCI_PM_CTRL_STATE_MASK; |
| pmcsr |= state; |
| |
| /* Enter specified state */ |
| pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
| |
| /* Mandatory power management transition delays; see PCI PM 1.2. */ |
| if (state == PCI_D3hot) |
| pci_dev_d3_sleep(dev); |
| else if (state == PCI_D2) |
| udelay(PCI_PM_D2_DELAY); |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; |
| if (dev->current_state != state) |
| pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n", |
| pci_power_name(dev->current_state), |
| pci_power_name(state)); |
| |
| return 0; |
| } |
| |
| /** |
| * pci_set_power_state - Set the power state of a PCI device |
| * @dev: PCI device to handle. |
| * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. |
| * |
| * Transition a device to a new power state, using the platform firmware and/or |
| * the device's PCI PM registers. |
| * |
| * RETURN VALUE: |
| * -EINVAL if the requested state is invalid. |
| * -EIO if device does not support PCI PM or its PM capabilities register has a |
| * wrong version, or device doesn't support the requested state. |
| * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. |
| * 0 if device already is in the requested state. |
| * 0 if the transition is to D3 but D3 is not supported. |
| * 0 if device's power state has been successfully changed. |
| */ |
| int pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
| { |
| int error; |
| |
| /* Bound the state we're entering */ |
| if (state > PCI_D3cold) |
| state = PCI_D3cold; |
| else if (state < PCI_D0) |
| state = PCI_D0; |
| else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) |
| |
| /* |
| * If the device or the parent bridge do not support PCI |
| * PM, ignore the request if we're doing anything other |
| * than putting it into D0 (which would only happen on |
| * boot). |
| */ |
| return 0; |
| |
| /* Check if we're already there */ |
| if (dev->current_state == state) |
| return 0; |
| |
| if (state == PCI_D0) |
| return pci_set_full_power_state(dev); |
| |
| /* |
| * This device is quirked not to be put into D3, so don't put it in D3. |
| */ |
| if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) |
| return 0; |
| |
| if (state == PCI_D3cold) { |
| /* |
| * To put the device in D3cold, put it into D3hot in the native |
| * way, then put it into D3cold using platform ops. |
| */ |
| error = pci_set_low_power_state(dev, PCI_D3hot); |
| |
| if (pci_platform_power_transition(dev, PCI_D3cold)) |
| return error; |
| |
| /* Powering off a bridge may power off the whole hierarchy */ |
| if (dev->current_state == PCI_D3cold) |
| pci_bus_set_current_state(dev->subordinate, PCI_D3cold); |
| } else { |
| error = pci_set_low_power_state(dev, state); |
| |
| if (pci_platform_power_transition(dev, state)) |
| return error; |
| } |
| |
| return 0; |
| } |
| EXPORT_SYMBOL(pci_set_power_state); |
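| |
| /* |
| * Example: a driver's legacy suspend/resume paths using the helpers |
| * above (a minimal sketch; most drivers should instead rely on the PCI |
| * core's generic PM callbacks, which do this on their behalf): |
| * |
| *	suspend: |
| *		pci_save_state(pdev); |
| *		pci_set_power_state(pdev, PCI_D3hot); |
| * |
| *	resume: |
| *		pci_set_power_state(pdev, PCI_D0); |
| *		pci_restore_state(pdev); |
| */ |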
| |
| #define PCI_EXP_SAVE_REGS 7 |
| |
| static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, |
| u16 cap, bool extended) |
| { |
| struct pci_cap_saved_state *tmp; |
| |
| hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { |
| if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) |
| return tmp; |
| } |
| return NULL; |
| } |
| |
| struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap) |
| { |
| return _pci_find_saved_cap(dev, cap, false); |
| } |
| |
| struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap) |
| { |
| return _pci_find_saved_cap(dev, cap, true); |
| } |
| |
| static int pci_save_pcie_state(struct pci_dev *dev) |
| { |
| int i = 0; |
| struct pci_cap_saved_state *save_state; |
| u16 *cap; |
| |
| if (!pci_is_pcie(dev)) |
| return 0; |
| |
| save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); |
| if (!save_state) { |
| pci_err(dev, "buffer not found in %s\n", __func__); |
| return -ENOMEM; |
| } |
| |
| cap = (u16 *)&save_state->cap.data[0]; |
| pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); |
| pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); |
| pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); |
| pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]); |
| pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]); |
| pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); |
| pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); |
| |
| return 0; |
| } |
| |
| void pci_bridge_reconfigure_ltr(struct pci_dev *dev) |
| { |
| #ifdef CONFIG_PCIEASPM |
| struct pci_dev *bridge; |
| u32 ctl; |
| |
| bridge = pci_upstream_bridge(dev); |
| if (bridge && bridge->ltr_path) { |
| pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl); |
| if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) { |
| pci_dbg(bridge, "re-enabling LTR\n"); |
| pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, |
| PCI_EXP_DEVCTL2_LTR_EN); |
| } |
| } |
| #endif |
| } |
| |
| static void pci_restore_pcie_state(struct pci_dev *dev) |
| { |
| int i = 0; |
| struct pci_cap_saved_state *save_state; |
| u16 *cap; |
| |
| save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); |
| if (!save_state) |
| return; |
| |
| /* |
| * Downstream ports reset the LTR enable bit when the link goes down. |
| * Check and re-configure the bit here before restoring the device. |
| * PCIe r5.0, sec 7.5.3.16. |
| */ |
| pci_bridge_reconfigure_ltr(dev); |
| |
| cap = (u16 *)&save_state->cap.data[0]; |
| pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); |
| pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); |
| pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]); |
| pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]); |
| pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]); |
| pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]); |
| pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); |
| } |
| |
| static int pci_save_pcix_state(struct pci_dev *dev) |
| { |
| int pos; |
| struct pci_cap_saved_state *save_state; |
| |
| pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
| if (!pos) |
| return 0; |
| |
| save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); |
| if (!save_state) { |
| pci_err(dev, "buffer not found in %s\n", __func__); |
| return -ENOMEM; |
| } |
| |
| pci_read_config_word(dev, pos + PCI_X_CMD, |
| (u16 *)save_state->cap.data); |
| |
| return 0; |
| } |
| |
| static void pci_restore_pcix_state(struct pci_dev *dev) |
| { |
| int i = 0, pos; |
| struct pci_cap_saved_state *save_state; |
| u16 *cap; |
| |
| save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); |
| pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); |
| if (!save_state || !pos) |
| return; |
| cap = (u16 *)&save_state->cap.data[0]; |
| |
| pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); |
| } |
| |
| static void pci_save_ltr_state(struct pci_dev *dev) |
| { |
| int ltr; |
| struct pci_cap_saved_state *save_state; |
| u32 *cap; |
| |
| if (!pci_is_pcie(dev)) |
| return; |
| |
| ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); |
| if (!ltr) |
| return; |
| |
| save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); |
| if (!save_state) { |
| pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); |
| return; |
| } |
| |
| /* Some broken devices only support dword access to LTR */ |
| cap = &save_state->cap.data[0]; |
| pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap); |
| } |
| |
| static void pci_restore_ltr_state(struct pci_dev *dev) |
| { |
| struct pci_cap_saved_state *save_state; |
| int ltr; |
| u32 *cap; |
| |
| save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); |
| ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); |
| if (!save_state || !ltr) |
| return; |
| |
| /* Some broken devices only support dword access to LTR */ |
| cap = &save_state->cap.data[0]; |
| pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap); |
| } |
| |
| /** |
| * pci_save_state - save the PCI configuration space of a device before |
| * suspending |
| * @dev: PCI device that we're dealing with |
| */ |
| int pci_save_state(struct pci_dev *dev) |
| { |
| int i; |
| /* XXX: 100% dword access ok here? */ |
| for (i = 0; i < 16; i++) { |
| pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); |
| pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n", |
| i * 4, dev->saved_config_space[i]); |
| } |
| dev->state_saved = true; |
| |
| i = pci_save_pcie_state(dev); |
| if (i != 0) |
| return i; |
| |
| i = pci_save_pcix_state(dev); |
| if (i != 0) |
| return i; |
| |
| pci_save_ltr_state(dev); |
| pci_save_dpc_state(dev); |
| pci_save_aer_state(dev); |
| pci_save_ptm_state(dev); |
| return pci_save_vc_state(dev); |
| } |
| EXPORT_SYMBOL(pci_save_state); |
| |
| static void pci_restore_config_dword(struct pci_dev *pdev, int offset, |
| u32 saved_val, int retry, bool force) |
| { |
| u32 val; |
| |
| pci_read_config_dword(pdev, offset, &val); |
| if (!force && val == saved_val) |
| return; |
| |
| for (;;) { |
| pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n", |
| offset, val, saved_val); |
| pci_write_config_dword(pdev, offset, saved_val); |
| if (retry-- <= 0) |
| return; |
| |
| pci_read_config_dword(pdev, offset, &val); |
| if (val == saved_val) |
| return; |
| |
| mdelay(1); |
| } |
| } |
| |
| static void pci_restore_config_space_range(struct pci_dev *pdev, |
| int start, int end, int retry, |
| bool force) |
| { |
| int index; |
| |
| for (index = end; index >= start; index--) |
| pci_restore_config_dword(pdev, 4 * index, |
| pdev->saved_config_space[index], |
| retry, force); |
| } |
| |
| static void pci_restore_config_space(struct pci_dev *pdev) |
| { |
| if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { |
| pci_restore_config_space_range(pdev, 10, 15, 0, false); |
| /* Restore BARs before the command register. */ |
| pci_restore_config_space_range(pdev, 4, 9, 10, false); |
| pci_restore_config_space_range(pdev, 0, 3, 0, false); |
| } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { |
| pci_restore_config_space_range(pdev, 12, 15, 0, false); |
| |
| /* |
| * Force rewriting of prefetch registers to avoid S3 resume |
| * issues on Intel PCI bridges that occur when these |
| * registers are not explicitly written. |
| */ |
| pci_restore_config_space_range(pdev, 9, 11, 0, true); |
| pci_restore_config_space_range(pdev, 0, 8, 0, false); |
| } else { |
| pci_restore_config_space_range(pdev, 0, 15, 0, false); |
| } |
| } |
| |
| static void pci_restore_rebar_state(struct pci_dev *pdev) |
| { |
| unsigned int pos, nbars, i; |
| u32 ctrl; |
| |
| pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR); |
| if (!pos) |
| return; |
| |
| pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); |
| nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> |
| PCI_REBAR_CTRL_NBAR_SHIFT; |
| |
| for (i = 0; i < nbars; i++, pos += 8) { |
| struct resource *res; |
| int bar_idx, size; |
| |
| pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); |
| bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; |
| res = pdev->resource + bar_idx; |
| size = pci_rebar_bytes_to_size(resource_size(res)); |
| ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; |
| ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; |
| pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); |
| } |
| } |
| |
| /** |
| * pci_restore_state - Restore the saved state of a PCI device |
| * @dev: PCI device that we're dealing with |
| */ |
| void pci_restore_state(struct pci_dev *dev) |
| { |
| if (!dev->state_saved) |
| return; |
| |
| /* |
| * Restore max latencies (in the LTR capability) before enabling |
| * LTR itself (in the PCIe capability). |
| */ |
| pci_restore_ltr_state(dev); |
| |
| pci_restore_pcie_state(dev); |
| pci_restore_pasid_state(dev); |
| pci_restore_pri_state(dev); |
| pci_restore_ats_state(dev); |
| pci_restore_vc_state(dev); |
| pci_restore_rebar_state(dev); |
| pci_restore_dpc_state(dev); |
| pci_restore_ptm_state(dev); |
| |
| pci_aer_clear_status(dev); |
| pci_restore_aer_state(dev); |
| |
| pci_restore_config_space(dev); |
| |
| pci_restore_pcix_state(dev); |
| pci_restore_msi_state(dev); |
| |
| /* Restore ACS and IOV configuration state */ |
| pci_enable_acs(dev); |
| pci_restore_iov_state(dev); |
| |
| dev->state_saved = false; |
| } |
| EXPORT_SYMBOL(pci_restore_state); |
| |
| struct pci_saved_state { |
| u32 config_space[16]; |
| struct pci_cap_saved_data cap[]; |
| }; |
| |
| /** |
| * pci_store_saved_state - Allocate and return an opaque struct containing |
| * the device's saved state. |
| * @dev: PCI device that we're dealing with |
| * |
| * Return NULL if no state or error. |
| */ |
| struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) |
| { |
| struct pci_saved_state *state; |
| struct pci_cap_saved_state *tmp; |
| struct pci_cap_saved_data *cap; |
| size_t size; |
| |
| if (!dev->state_saved) |
| return NULL; |
| |
| size = sizeof(*state) + sizeof(struct pci_cap_saved_data); |
| |
| hlist_for_each_entry(tmp, &dev->saved_cap_space, next) |
| size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; |
| |
| state = kzalloc(size, GFP_KERNEL); |
| if (!state) |
| return NULL; |
| |
| memcpy(state->config_space, dev->saved_config_space, |
| sizeof(state->config_space)); |
| |
| cap = state->cap; |
| hlist_for_each_entry(tmp, &dev->saved_cap_space, next) { |
| size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; |
| memcpy(cap, &tmp->cap, len); |
| cap = (struct pci_cap_saved_data *)((u8 *)cap + len); |
| } |
| /* Empty cap_save terminates list */ |
| |
| return state; |
| } |
| EXPORT_SYMBOL_GPL(pci_store_saved_state); |
| |
| /** |
| * pci_load_saved_state - Reload the provided save state into struct pci_dev. |
| * @dev: PCI device that we're dealing with |
| * @state: Saved state returned from pci_store_saved_state() |
| */ |
| int pci_load_saved_state(struct pci_dev *dev, |
| struct pci_saved_state *state) |
| { |
| struct pci_cap_saved_data *cap; |
| |
| dev->state_saved = false; |
| |
| if (!state) |
| return 0; |
| |
| memcpy(dev->saved_config_space, state->config_space, |
| sizeof(state->config_space)); |
| |
| cap = state->cap; |
| while (cap->size) { |
| struct pci_cap_saved_state *tmp; |
| |
| tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); |
| if (!tmp || tmp->cap.size != cap->size) |
| return -EINVAL; |
| |
| memcpy(tmp->cap.data, cap->data, tmp->cap.size); |
| cap = (struct pci_cap_saved_data *)((u8 *)cap + |
| sizeof(struct pci_cap_saved_data) + cap->size); |
| } |
| |
| dev->state_saved = true; |
| return 0; |
| } |
| EXPORT_SYMBOL_GPL(pci_load_saved_state); |
| |
| /** |
| * pci_load_and_free_saved_state - Reload the saved state pointed to by @state, |
| * and free the memory allocated for it. |
| * @dev: PCI device that we're dealing with |
| * @state: Pointer to saved state returned from pci_store_saved_state() |
| */ |
| int pci_load_and_free_saved_state(struct pci_dev *dev, |
| struct pci_saved_state **state) |
| { |
| int ret = pci_load_saved_state(dev, *state); |
| kfree(*state); |
| *state = NULL; |
| return ret; |
| } |
| EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state); |
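| |
| /* |
| * Example: capturing a known-good state once and re-applying it later, |
| * e.g. around an operation that may clobber config space (a sketch): |
| * |
| *	struct pci_saved_state *state; |
| * |
| *	pci_save_state(pdev); |
| *	state = pci_store_saved_state(pdev); |
| *	... |
| *	pci_load_and_free_saved_state(pdev, &state); |
| *	pci_restore_state(pdev); |
| */ |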
| |
| int __weak pcibios_enable_device(struct pci_dev *dev, int bars) |
| { |
| return pci_enable_resources(dev, bars); |
| } |
| |
| static int do_pci_enable_device(struct pci_dev *dev, int bars) |
| { |
| int err; |
| struct pci_dev *bridge; |
| u16 cmd; |
| u8 pin; |
| |
| err = pci_set_power_state(dev, PCI_D0); |
| if (err < 0 && err != -EIO) |
| return err; |
| |
| bridge = pci_upstream_bridge(dev); |
| if (bridge) |
| pcie_aspm_powersave_config_link(bridge); |
| |
| err = pcibios_enable_device(dev, bars); |
| if (err < 0) |
| return err; |
| pci_fixup_device(pci_fixup_enable, dev); |
| |
| if (dev->msi_enabled || dev->msix_enabled) |
| return 0; |
| |
| pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
| if (pin) { |
| pci_read_config_word(dev, PCI_COMMAND, &cmd); |
| if (cmd & PCI_COMMAND_INTX_DISABLE) |
| pci_write_config_word(dev, PCI_COMMAND, |
| cmd & ~PCI_COMMAND_INTX_DISABLE); |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * pci_reenable_device - Resume abandoned device |
| * @dev: PCI device to be resumed |
| * |
| * NOTE: This function is a backend of pci_default_resume() and is not supposed |
| * to be called by normal code; write a proper resume handler and use it instead. |
| */ |
| int pci_reenable_device(struct pci_dev *dev) |
| { |
| if (pci_is_enabled(dev)) |
| return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); |
| return 0; |
| } |
| EXPORT_SYMBOL(pci_reenable_device); |
| |
| static void pci_enable_bridge(struct pci_dev *dev) |
| { |
| struct pci_dev *bridge; |
| int retval; |
| |
| bridge = pci_upstream_bridge(dev); |
| if (bridge) |
| pci_enable_bridge(bridge); |
| |
| if (pci_is_enabled(dev)) { |
| if (!dev->is_busmaster) |
| pci_set_master(dev); |
| return; |
| } |
| |
| retval = pci_enable_device(dev); |
| if (retval) |
| pci_err(dev, "Error enabling bridge (%d), continuing\n", |
| retval); |
| pci_set_master(dev); |
| } |
| |
| static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) |
| { |
| struct pci_dev *bridge; |
| int err; |
| int i, bars = 0; |
| |
| /* |
| * Power state could be unknown at this point, either due to a fresh |
| * boot or a device removal call. So get the current power state |
| * so that things like MSI message writing will behave as expected |
| * (e.g. if the device really is in D0 at enable time). |
| */ |
| pci_update_current_state(dev, dev->current_state); |
| |
| if (atomic_inc_return(&dev->enable_cnt) > 1) |
| return 0; /* already enabled */ |
| |
| bridge = pci_upstream_bridge(dev); |
| if (bridge) |
| pci_enable_bridge(bridge); |
| |
| /* Walk all resources, skipping only the SR-IOV ones */ |
| for (i = 0; i <= PCI_ROM_RESOURCE; i++) |
| if (dev->resource[i].flags & flags) |
| bars |= (1 << i); |
| for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++) |
| if (dev->resource[i].flags & flags) |
| bars |= (1 << i); |
| |
| err = do_pci_enable_device(dev, bars); |
| if (err < 0) |
| atomic_dec(&dev->enable_cnt); |
| return err; |
| } |
| |
| /** |
| * pci_enable_device_io - Initialize a device for use with IO space |
| * @dev: PCI device to be initialized |
| * |
| * Initialize device before it's used by a driver. Ask low-level code |
| * to enable I/O resources. Wake up the device if it was suspended. |
| * Beware, this function can fail. |
| */ |
| int pci_enable_device_io(struct pci_dev *dev) |
| { |
| return pci_enable_device_flags(dev, IORESOURCE_IO); |
| } |
| EXPORT_SYMBOL(pci_enable_device_io); |
| |
| /** |
| * pci_enable_device_mem - Initialize a device for use with Memory space |
| * @dev: PCI device to be initialized |
| * |
| * Initialize device before it's used by a driver. Ask low-level code |
| * to enable Memory resources. Wake up the device if it was suspended. |
| * Beware, this function can fail. |
| */ |
| int pci_enable_device_mem(struct pci_dev *dev) |
| { |
| return pci_enable_device_flags(dev, IORESOURCE_MEM); |
| } |
| EXPORT_SYMBOL(pci_enable_device_mem); |
| |
| /** |
| * pci_enable_device - Initialize device before it's used by a driver. |
| * @dev: PCI device to be initialized |
| * |
| * Initialize device before it's used by a driver. Ask low-level code |
| * to enable I/O and memory. Wake up the device if it was suspended. |
| * Beware, this function can fail. |
| * |
| * Note we don't actually enable the device many times if we call |
| * this function repeatedly (we just increment the count). |
| */ |
| int pci_enable_device(struct pci_dev *dev) |
| { |
| return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); |
| } |
| EXPORT_SYMBOL(pci_enable_device); |
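| |
| /* |
| * Typical unmanaged usage, sketched for illustration (the foo_* names are |
| * hypothetical): |
| * |
| *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
| *	{ |
| *		int rc = pci_enable_device(pdev); |
| * |
| *		if (rc) |
| *			return rc; |
| *		... |
| *	} |
| * |
| * with a matching pci_disable_device() in the remove path, so that the |
| * enable_cnt incremented here is balanced again. |
| */ |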
| |
| /* |
| * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X |
| * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so |
| * there's no need to track it separately. pci_devres is initialized |
| * when a device is enabled using the managed PCI device enable interface. |
| */ |
| struct pci_devres { |
| unsigned int enabled:1; |
| unsigned int pinned:1; |
| unsigned int orig_intx:1; |
| unsigned int restore_intx:1; |
| unsigned int mwi:1; |
| u32 region_mask; |
| }; |
| |
| static void pcim_release(struct device *gendev, void *res) |
| { |
| struct pci_dev *dev = to_pci_dev(gendev); |
| struct pci_devres *this = res; |
| int i; |
| |
| for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) |
| if (this->region_mask & (1 << i)) |
| pci_release_region(dev, i); |
| |
| if (this->mwi) |
| pci_clear_mwi(dev); |
| |
| if (this->restore_intx) |
| pci_intx(dev, this->orig_intx); |
| |
| if (this->enabled && !this->pinned) |
| pci_disable_device(dev); |
| } |
| |
| static struct pci_devres *get_pci_dr(struct pci_dev *pdev) |
| { |
| struct pci_devres *dr, *new_dr; |
| |
| dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); |
| if (dr) |
| return dr; |
| |
| new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); |
| if (!new_dr) |
| return NULL; |
| return devres_get(&pdev->dev, new_dr, NULL, NULL); |
| } |
| |
| static struct pci_devres *find_pci_dr(struct pci_dev *pdev) |
| { |
| if (pci_is_managed(pdev)) |
| return devres_find(&pdev->dev, pcim_release, NULL, NULL); |
| return NULL; |
| } |
| |
| /** |
| * pcim_enable_device - Managed pci_enable_device() |
| * @pdev: PCI device to be initialized |
| * |
| * Managed pci_enable_device(). |
| */ |
| int pcim_enable_device(struct pci_dev *pdev) |
| { |
| struct pci_devres *dr; |
| int rc; |
| |
| dr = get_pci_dr(pdev); |
| if (unlikely(!dr)) |
| return -ENOMEM; |
| if (dr->enabled) |
| return 0; |
| |
| rc = pci_enable_device(pdev); |
| if (!rc) { |
| pdev->is_managed = 1; |
| dr->enabled = 1; |
| } |
| return rc; |
| } |
| EXPORT_SYMBOL(pcim_enable_device); |
| |
| /** |
| * pcim_pin_device - Pin managed PCI device |
| * @pdev: PCI device to pin |
| * |
| * Pin managed PCI device @pdev. Pinned device won't be disabled on |
| * driver detach. @pdev must have been enabled with |
| * pcim_enable_device(). |
| */ |
| void pcim_pin_device(struct pci_dev *pdev) |
| { |
| struct pci_devres *dr; |
| |
| dr = find_pci_dr(pdev); |
| WARN_ON(!dr || !dr->enabled); |
| if (dr) |
| dr->pinned = 1; |
| } |
| EXPORT_SYMBOL(pcim_pin_device); |
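| |
| /* |
| * Managed usage sketch (illustrative): with pcim_enable_device() the device |
| * is disabled automatically on driver detach, so no pci_disable_device() is |
| * needed in the remove path.  A driver that must keep the device enabled |
| * past detach can pin it; foo_needs_device_after_detach() is a hypothetical |
| * predicate: |
| * |
| *	rc = pcim_enable_device(pdev); |
| *	if (rc) |
| *		return rc; |
| *	if (foo_needs_device_after_detach(pdev)) |
| *		pcim_pin_device(pdev); |
| */ |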
| |
| /** |
| * pcibios_device_add - provide arch specific hooks when adding device dev |
| * @dev: the PCI device being added |
| * |
| * Permits the platform to provide architecture specific functionality when |
| * devices are added. This is the default implementation. Architecture |
| * implementations can override this. |
| */ |
| int __weak pcibios_device_add(struct pci_dev *dev) |
| { |
| return 0; |
| } |
| |
| /** |
| * pcibios_release_device - provide arch specific hooks when releasing |
| * device dev |
| * @dev: the PCI device being released |
| * |
| * Permits the platform to provide architecture specific functionality when |
| * devices are released. This is the default implementation. Architecture |
| * implementations can override this. |
| */ |
| void __weak pcibios_release_device(struct pci_dev *dev) {} |
| |
| /** |
| * pcibios_disable_device - disable arch specific PCI resources for device dev |
| * @dev: the PCI device to disable |
| * |
| * Disables architecture specific PCI resources for the device. This |
| * is the default implementation. Architecture implementations can |
| * override this. |
| */ |
| void __weak pcibios_disable_device(struct pci_dev *dev) {} |
| |
| /** |
| * pcibios_penalize_isa_irq - penalize an ISA IRQ |
| * @irq: ISA IRQ to penalize |
| * @active: IRQ active or not |
| * |
| * Permits the platform to provide architecture-specific functionality when |
| * penalizing ISA IRQs. This is the default implementation. Architecture |
| * implementations can override this. |
| */ |
| void __weak pcibios_penalize_isa_irq(int irq, int active) {} |
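| |
| /* |
| * The pcibios_*() functions above are weak symbols: an architecture |
| * overrides one simply by providing a non-weak definition with the same |
| * signature in its arch code, e.g. (sketch, with the body left abstract): |
| * |
| *	int pcibios_device_add(struct pci_dev *dev) |
| *	{ |
| *		... arch-specific per-device setup ... |
| *		return 0; |
| *	} |
| */ |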
| |
| static void do_pci_disable_device(struct pci_dev *dev) |
| { |
| u16 pci_command; |
| |
| pci_read_config_word(dev, PCI_COMMAND, &pci_command); |
| if (pci_command & PCI_COMMAND_MASTER) { |
| pci_command &= ~PCI_COMMAND_MASTER; |
| pci_write_config_word(dev, PCI_COMMAND, pci_command); |
| } |
| |
| pcibios_disable_device(dev); |
| } |
| |
| /** |
| * pci_disable_enabled_device - Disable device without updating enable_cnt |
| * @dev: PCI device to disable |
| * |
| * NOTE: This function is a backend of PCI power management routines and is |
| * not supposed to be called by drivers. |
| */ |
| void pci_disable_enabled_device(struct pci_dev *dev) |
| { |
| if (pci_is_enabled(dev)) |
| do_pci_disable_device(dev); |
| } |
| |
| /** |
| * pci_disable_device - Disable PCI device after use |
| * @dev: PCI device to be disabled |
| * |
| * Signal to the system that the PCI device is not in use by the system |
| * anymore. This only involves disabling PCI bus-mastering, if active. |
| * |
| * Note we don't actually disable the device until all callers of |
| * pci_enable_device() have called pci_disable_device(). |
| */ |
| void pci_disable_device(struct pci_dev *dev) |
| { |
| struct pci_devres *dr; |
| |
| dr = find_pci_dr(dev); |
| if (dr) |
| dr->enabled = 0; |
| |
| dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0, |
| "disabling already-disabled device"); |
| |
| if (atomic_dec_return(&dev->enable_cnt) != 0) |
| return; |
| |
| do_pci_disable_device(dev); |
| |
| dev->is_busmaster = 0; |
| } |
| EXPORT_SYMBOL(pci_disable_device); |
| |
| /** |
| * pcibios_set_pcie_reset_state - set reset state for device dev |
| * @dev: the PCIe device to reset |
| * @state: Reset state to enter into |
| * |
| * Set the PCIe reset state for the device. This is the default |
| * implementation. Architecture implementations can override this. |
| */ |
| int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, |
| enum pcie_reset_state state) |
| { |
| return -EINVAL; |
| } |
| |
| /** |
| * pci_set_pcie_reset_state - set reset state for device dev |
| * @dev: the PCIe device to reset |
| * @state: Reset state to enter into |
| * |
| * Sets the PCI reset state for the device. |
| */ |
| int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) |
| { |
| return pcibios_set_pcie_reset_state(dev, state); |
| } |
| EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); |
| |
| #ifdef CONFIG_PCIEAER |
| void pcie_clear_device_status(struct pci_dev *dev) |
| { |
| u16 sta; |
| |
| pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); |
| pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); |
| } |
| #endif |
| |
| /** |
| * pcie_clear_root_pme_status - Clear root port PME interrupt status. |
| * @dev: PCIe root port or event collector. |
| */ |
| void pcie_clear_root_pme_status(struct pci_dev *dev) |
| { |
| pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME); |
| } |
| |
| /** |
| * pci_check_pme_status - Check if given device has generated PME. |
| * @dev: Device to check. |
| * |
| * Check the PME status of the device and if set, clear it and clear PME enable |
| * (if set). Return 'true' if PME status and PME enable were both set or |
| * 'false' otherwise. |
| */ |
| bool pci_check_pme_status(struct pci_dev *dev) |
| { |
| int pmcsr_pos; |
| u16 pmcsr; |
| bool ret = false; |
| |
| if (!dev->pm_cap) |
| return false; |
| |
| pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; |
| pci_read_config_word(dev, pmcsr_pos, &pmcsr); |
| if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) |
| return false; |
| |
| /* Clear PME status. */ |
| pmcsr |= PCI_PM_CTRL_PME_STATUS; |
| if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { |
| /* Disable PME to avoid interrupt flood. */ |
| pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; |
| ret = true; |
| } |
| |
| pci_write_config_word(dev, pmcsr_pos, pmcsr); |
| |
| return ret; |
| } |
| |
| /** |
| * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. |
| * @dev: Device to handle. |
| * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. |
| * |
| * Check if @dev has generated PME and queue a resume request for it in that |
| * case. |
| */ |
| static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) |
| { |
| if (pme_poll_reset && dev->pme_poll) |
| dev->pme_poll = false; |
| |
| if (pci_check_pme_status(dev)) { |
| pci_wakeup_event(dev); |
| pm_request_resume(&dev->dev); |
| } |
| return 0; |
| } |
| |
| /** |
| * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. |
| * @bus: Top bus of the subtree to walk. |
| */ |
| void pci_pme_wakeup_bus(struct pci_bus *bus) |
| { |
| if (bus) |
| pci_walk_bus(bus, pci_pme_wakeup, (void *)true); |
| } |
| |
| /** |
| * pci_pme_capable - check the capability of PCI device to generate PME# |
| * @dev: PCI device to handle. |
| * @state: PCI state from which device will issue PME#. |
| */ |
| bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) |
| { |
| if (!dev->pm_cap) |
| return false; |
| |
| return !!(dev->pme_support & (1 << state)); |
| } |
| EXPORT_SYMBOL(pci_pme_capable); |
| |
| static void pci_pme_list_scan(struct work_struct *work) |
| { |
| struct pci_pme_device *pme_dev, *n; |
| |
| mutex_lock(&pci_pme_list_mutex); |
| list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { |
| if (pme_dev->dev->pme_poll) { |
| struct pci_dev *bridge; |
| |
| bridge = pme_dev->dev->bus->self; |
| /* |
| * If the bridge is in a low-power state, the |
| * configuration space of subordinate devices |
| * may not be accessible. |
| */ |
| if (bridge && bridge->current_state != PCI_D0) |
| continue; |
| /* |
| * If the device is in D3cold it should not be |
| * polled either. |
| */ |
| if (pme_dev->dev->current_state == PCI_D3cold) |
| continue; |
| |
| pci_pme_wakeup(pme_dev->dev, NULL); |
| } else { |
| list_del(&pme_dev->list); |
| kfree(pme_dev); |
| } |
| } |
| if (!list_empty(&pci_pme_list)) |
| queue_delayed_work(system_freezable_wq, &pci_pme_work, |
| msecs_to_jiffies(PME_TIMEOUT)); |
| mutex_unlock(&pci_pme_list_mutex); |
| } |
| |
| static void __pci_pme_active(struct pci_dev *dev, bool enable) |
| { |
| u16 pmcsr; |
| |
| if (!dev->pme_support) |
| return; |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| /* Clear PME_Status by writing 1 to it and enable PME# */ |
| pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; |
| if (!enable) |
| pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; |
| |
| pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
| } |
| |
| /** |
| * pci_pme_restore - Restore PME configuration after config space restore. |
| * @dev: PCI device to update. |
| */ |
| void pci_pme_restore(struct pci_dev *dev) |
| { |
| u16 pmcsr; |
| |
| if (!dev->pme_support) |
| return; |
| |
| pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| if (dev->wakeup_prepared) { |
| pmcsr |= PCI_PM_CTRL_PME_ENABLE; |
| pmcsr &= ~PCI_PM_CTRL_PME_STATUS; |
| } else { |
| pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; |
| pmcsr |= PCI_PM_CTRL_PME_STATUS; |
| } |
| pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
| } |
| |
| /** |
| * pci_pme_active - enable or disable PCI device's PME# function |
| * @dev: PCI device to handle. |
| * @enable: 'true' to enable PME# generation; 'false' to disable it. |
| * |
| * The caller must verify that the device is capable of generating PME# before |
| * calling this function with @enable equal to 'true'. |
| */ |
| void pci_pme_active(struct pci_dev *dev, bool enable) |
| { |
| __pci_pme_active(dev, enable); |
| |
| /* |
| * PCI (as opposed to PCIe) PME requires that the device have |
| * its PME# line hooked up correctly. Not all hardware vendors |
| * do this, so the PME never gets delivered and the device |
| * remains asleep. The easiest way around this is to |
| * periodically walk the list of suspended devices and check |
| * whether any have their PME flag set. The assumption is that |
| * we'll wake up often enough anyway that this won't be a huge |
| * hit, and the power savings from the devices will still be a |
| * win. |
| * |
| * Although PCIe uses an in-band PME message instead of the PME# |
| * line to report PME, in practice PME does not work for some PCIe |
| * devices. For example, some devices set their PME status bits but |
| * never bother to send a PME message, and some PCI Express Root |
| * Ports do not bother to trigger interrupts when they receive PME |
| * messages from the devices below them. So PME polling is used for |
| * PCIe devices too. |
| */ |
| |
| if (dev->pme_poll) { |
| struct pci_pme_device *pme_dev; |
| if (enable) { |
| pme_dev = kmalloc(sizeof(struct pci_pme_device), |
| GFP_KERNEL); |
| if (!pme_dev) { |
| pci_warn(dev, "can't enable PME#\n"); |
| return; |
| } |
| pme_dev->dev = dev; |
| mutex_lock(&pci_pme_list_mutex); |
| list_add(&pme_dev->list, &pci_pme_list); |
| if (list_is_singular(&pci_pme_list)) |
| queue_delayed_work(system_freezable_wq, |
| &pci_pme_work, |
| msecs_to_jiffies(PME_TIMEOUT)); |
| mutex_unlock(&pci_pme_list_mutex); |
| } else { |
| mutex_lock(&pci_pme_list_mutex); |
| list_for_each_entry(pme_dev, &pci_pme_list, list) { |
| if (pme_dev->dev == dev) { |
| list_del(&pme_dev->list); |
| kfree(pme_dev); |
| break; |
| } |
| } |
| mutex_unlock(&pci_pme_list_mutex); |
| } |
| } |
| |
| pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled"); |
| } |
| EXPORT_SYMBOL(pci_pme_active); |
| |
| /** |
| * __pci_enable_wake - enable PCI device as wakeup event source |
| * @dev: PCI device affected |
| * @state: PCI state from which device will issue wakeup events |
| * @enable: True to enable event generation; false to disable |
| * |
| * This enables the device as a wakeup event source, or disables it. |
| * When such events involve platform-specific hooks, those hooks are |
| * called automatically by this routine. |
| * |
| * Devices with legacy power management (no standard PCI PM capabilities) |
| * always require such platform hooks. |
| * |
| * RETURN VALUE: |
| * 0 is returned on success. |
| * -EINVAL is returned if the device is not supposed to wake up the system. |
| * A platform-specific error code is returned if both the platform and the |
| * native mechanism fail to enable the generation of wake-up events. |
| */ |
| static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) |
| { |
| int ret = 0; |
| |
| /* |
| * Bridges that are not power-manageable directly only signal |
| * wakeup on behalf of subordinate devices which is set up |
| * elsewhere, so skip them. However, bridges that are |
| * power-manageable may signal wakeup for themselves (for example, |
| * on a hotplug event) and they need to be covered here. |
| */ |
| if (!pci_power_manageable(dev)) |
| return 0; |
| |
| /* Don't do the same thing twice in a row for one device. */ |
| if (!!enable == !!dev->wakeup_prepared) |
| return 0; |
| |
| /* |
| * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don |
| * Anderson we should be doing PME# wake enable followed by ACPI wake |
| * enable. To disable wake-up we call the platform first, for symmetry. |
| */ |
| |
| if (enable) { |
| int error; |
| |
| /* |
| * Enable PME signaling if the device can signal PME from |
| * D3cold regardless of whether or not it can signal PME from |
| * the current target state, because that will allow it to |
| * signal PME when the hierarchy above it goes into D3cold and |
| * the device itself ends up in D3cold as a result of that. |
| */ |
| if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) |
| pci_pme_active(dev, true); |
| else |
| ret = 1; |
| error = platform_pci_set_wakeup(dev, true); |
| if (ret) |
| ret = error; |
| if (!ret) |
| dev->wakeup_prepared = true; |
| } else { |
| platform_pci_set_wakeup(dev, false); |
| pci_pme_active(dev, false); |
| dev->wakeup_prepared = false; |
| } |
| |
| return ret; |
| } |
| |
| /** |
| * pci_enable_wake - change wakeup settings for a PCI device |
| * @pci_dev: Target device |
| * @state: PCI state from which device will issue wakeup events |
| * @enable: Whether or not to enable event generation |
| * |
| * If @enable is set, check device_may_wakeup() for the device before calling |
| * __pci_enable_wake() for it. |
| */ |
| int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) |
| { |
| if (enable && !device_may_wakeup(&pci_dev->dev)) |
| return -EINVAL; |
| |
| return __pci_enable_wake(pci_dev, state, enable); |
| } |
| EXPORT_SYMBOL(pci_enable_wake); |
| |
| /** |
| * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold |
| * @dev: PCI device to prepare |
| * @enable: True to enable wake-up event generation; false to disable |
| * |
| * Many drivers want the device to wake up the system from D3_hot or D3_cold, |
| * and this function allows them to set that up cleanly: because of PCI PM vs |
| * ACPI ordering constraints, pci_enable_wake() should not be called twice in |
| * a row to enable wake-up. |
| * |
| * This function only returns error code if the device is not allowed to wake |
| * up the system from sleep or it is not capable of generating PME# from both |
| * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. |
| */ |
| int pci_wake_from_d3(struct pci_dev *dev, bool enable) |
| { |
| return pci_pme_capable(dev, PCI_D3cold) ? |
| pci_enable_wake(dev, PCI_D3cold, enable) : |
| pci_enable_wake(dev, PCI_D3hot, enable); |
| } |
| EXPORT_SYMBOL(pci_wake_from_d3); |
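| |
| /* |
| * Suspend-path sketch (illustrative): a driver that wants its device to be |
| * able to wake the system typically does something like the following in |
| * its suspend callback: |
| * |
| *	pci_save_state(pdev); |
| *	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); |
| *	pci_set_power_state(pdev, PCI_D3hot); |
| */ |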
| |
| /** |
| * pci_target_state - find an appropriate low power state for a given PCI dev |
| * @dev: PCI device |
| * @wakeup: Whether or not wakeup functionality will be enabled for the device. |
| * |
| * Use underlying platform code to find a supported low power state for @dev. |
| * If the platform can't manage @dev, return the deepest state from which it |
| * can generate wake events, based on any available PME info. |
| */ |
| static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) |
| { |
| if (platform_pci_power_manageable(dev)) { |
| /* |
| * Call the platform to find the target state for the device. |
| */ |
| pci_power_t state = platform_pci_choose_state(dev); |
| |
| switch (state) { |
| case PCI_POWER_ERROR: |
| case PCI_UNKNOWN: |
| return PCI_D3hot; |
| |
| case PCI_D1: |
| case PCI_D2: |
| if (pci_no_d1d2(dev)) |
| return PCI_D3hot; |
| } |
| |
| return state; |
| } |
| |
| /* |
| * If the device is in D3cold even though it's not power-manageable by |
| * the platform, it may have been powered down by non-standard means. |
| * Best to let it slumber. |
| */ |
| if (dev->current_state == PCI_D3cold) |
| return PCI_D3cold; |
| else if (!dev->pm_cap) |
| return PCI_D0; |
| |
| if (wakeup && dev->pme_support) { |
| pci_power_t state = PCI_D3hot; |
| |
| /* |
| * Find the deepest state from which the device can generate |
| * PME#. |
| */ |
| while (state && !(dev->pme_support & (1 << state))) |
| state--; |
| |
| if (state) |
| return state; |
| else if (dev->pme_support & 1) |
| return PCI_D0; |
| } |
| |
| return PCI_D3hot; |
| } |
| |
| /** |
| * pci_prepare_to_sleep - prepare PCI device for system-wide transition |
| * into a sleep state |
| * @dev: Device to handle. |
| * |
| * Choose the power state appropriate for the device depending on whether |
| * it can wake up the system and/or is power manageable by the platform |
| * (PCI_D3hot is the default) and put the device into that state. |
| */ |
| int pci_prepare_to_sleep(struct pci_dev *dev) |
| { |
| bool wakeup = device_may_wakeup(&dev->dev); |
| pci_power_t target_state = pci_target_state(dev, wakeup); |
| int error; |
| |
| if (target_state == PCI_POWER_ERROR) |
| return -EIO; |
| |
| pci_enable_wake(dev, target_state, wakeup); |
| |
| error = pci_set_power_state(dev, target_state); |
| |
| if (error) |
| pci_enable_wake(dev, target_state, false); |
| |
| return error; |
| } |
| EXPORT_SYMBOL(pci_prepare_to_sleep); |
| |
| /** |
| * pci_back_from_sleep - turn PCI device on during system-wide transition |
| * into working state |
| * @dev: Device to handle. |
| * |
| * Disable device's system wake-up capability and put it into D0. |
| */ |
| int pci_back_from_sleep(struct pci_dev *dev) |
| { |
| int ret = pci_set_power_state(dev, PCI_D0); |
| |
| if (ret) |
| return ret; |
| |
| pci_enable_wake(dev, PCI_D0, false); |
| return 0; |
| } |
| EXPORT_SYMBOL(pci_back_from_sleep); |
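| |
| /* |
| * pci_prepare_to_sleep() and pci_back_from_sleep() bracket a system sleep |
| * transition.  A minimal sketch of a legacy suspend/resume pair using them |
| * (illustrative; the PCI core normally does this on a driver's behalf): |
| * |
| *	suspend:	pci_save_state(pdev); |
| *			pci_prepare_to_sleep(pdev); |
| * |
| *	resume:		pci_back_from_sleep(pdev); |
| *			pci_restore_state(pdev); |
| */ |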
| |
| /** |
| * pci_finish_runtime_suspend - Carry out the PCI-specific part of runtime |
| * suspend. |
| * @dev: PCI device being suspended. |
| * |
| * Prepare @dev to generate wake-up events at run time and put it into a low |
| * power state. |
| */ |
| int pci_finish_runtime_suspend(struct pci_dev *dev) |
| { |
| pci_power_t target_state; |
| int error; |
| |
| target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); |
| if (target_state == PCI_POWER_ERROR) |
| return -EIO; |
| |
| __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); |
| |
| error = pci_set_power_state(dev, target_state); |
| |
| if (error) |
| pci_enable_wake(dev, target_state, false); |
| |
| return error; |
| } |
| |
| /** |
| * pci_dev_run_wake - Check if device can generate run-time wake-up events. |
| * @dev: Device to check. |
| * |
| * Return true if the device itself is capable of generating wake-up events |
| * (through the platform or using the native PCIe PME) or if the device supports |
| * PME and one of its upstream bridges can generate wake-up events. |
| */ |
| bool pci_dev_run_wake(struct pci_dev *dev) |
| { |
| struct pci_bus *bus = dev->bus; |
| |
| if (!dev->pme_support) |
| return false; |
| |
| /* PME-capable in principle, but not from the target power state */ |
| if (!pci_pme_capable(dev, pci_target_state(dev, true))) |
| return false; |
| |
| if (device_can_wakeup(&dev->dev)) |
| return true; |
| |
| while (bus->parent) { |
| struct pci_dev *bridge = bus->self; |
| |
| if (device_can_wakeup(&bridge->dev)) |
| return true; |
| |
| bus = bus->parent; |
| } |
| |
| /* We have reached the root bus. */ |
| if (bus->bridge) |
| return device_can_wakeup(bus->bridge); |
| |
| return false; |
| } |
| EXPORT_SYMBOL_GPL(pci_dev_run_wake); |
| |
| /** |
| * pci_dev_need_resume - Check if it is necessary to resume the device. |
| * @pci_dev: Device to check. |
| * |
| * Return 'true' if the device is not runtime-suspended, if it has to be |
| * reconfigured due to a difference in wakeup settings between system and |
| * runtime suspend, or if its current power state is not suitable for the |
| * upcoming (system-wide) transition. |
| */ |
| bool pci_dev_need_resume(struct pci_dev *pci_dev) |
| { |
| struct device *dev = &pci_dev->dev; |
| pci_power_t target_state; |
| |
| if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) |
| return true; |
| |
| target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); |
| |
| /* |
| * If the earlier platform check has not triggered, D3cold is just power |
| * removal on top of D3hot, so no need to resume the device in that |
| * case. |
| */ |
| return target_state != pci_dev->current_state && |
| target_state != PCI_D3cold && |
| pci_dev->current_state != PCI_D3hot; |
| } |
| |
| /** |
| * pci_dev_adjust_pme - Adjust PME setting for a suspended device. |
| * @pci_dev: Device to check. |
| * |
| * If the device is suspended and it is not configured for system wakeup, |
| * disable PME for it to prevent it from waking up the system unnecessarily. |
| * |
| * Note that if the device's power state is D3cold and the platform check in |
| * pci_dev_need_resume() has not triggered, the device's configuration need not |
| * be changed. |
| */ |
| void pci_dev_adjust_pme(struct pci_dev *pci_dev) |
| { |
| struct device *dev = &pci_dev->dev; |
| |
| spin_lock_irq(&dev->power.lock); |
| |
| if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && |
| pci_dev->current_state < PCI_D3cold) |
| __pci_pme_active(pci_dev, false); |
| |
| spin_unlock_irq(&dev->power.lock); |
| } |
| |
| /** |
| * pci_dev_complete_resume - Finalize resume from system sleep for a device. |
| * @pci_dev: Device to handle. |
| * |
| * If the device is runtime suspended and wakeup-capable, enable PME for it as |
| * it might have been disabled during the prepare phase of system suspend if |
| * the device was not configured for system wakeup. |
| */ |
| void pci_dev_complete_resume(struct pci_dev *pci_dev) |
| { |
| struct device *dev = &pci_dev->dev; |
| |
| if (!pci_dev_run_wake(pci_dev)) |
| return; |
| |
| spin_lock_irq(&dev->power.lock); |
| |
| if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold) |
| __pci_pme_active(pci_dev, true); |
| |
| spin_unlock_irq(&dev->power.lock); |
| } |
| |
| /** |
| * pci_choose_state - Choose the power state of a PCI device. |
| * @dev: Target PCI device. |
| * @state: Target state for the whole system. |
| * |
| * Returns PCI power state suitable for @dev and @state. |
| */ |
| pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) |
| { |
| if (state.event == PM_EVENT_ON) |
| return PCI_D0; |
| |
| return pci_target_state(dev, false); |
| } |
| EXPORT_SYMBOL(pci_choose_state); |
| |
| void pci_config_pm_runtime_get(struct pci_dev *pdev) |
| { |
| struct device *dev = &pdev->dev; |
| struct device *parent = dev->parent; |
| |
| if (parent) |
| pm_runtime_get_sync(parent); |
| pm_runtime_get_noresume(dev); |
| /* |
| * pdev->current_state is set to PCI_D3cold while the device is suspending, |
| * so wait until the suspend completes |
| */ |
| pm_runtime_barrier(dev); |
| /* |
| * Only need to resume devices in D3cold, because config |
| * registers are still accessible for devices suspended but |
| * not in D3cold. |
| */ |
| if (pdev->current_state == PCI_D3cold) |
| pm_runtime_resume(dev); |
| } |
| |
| void pci_config_pm_runtime_put(struct pci_dev *pdev) |
| { |
| struct device *dev = &pdev->dev; |
| struct device *parent = dev->parent; |
| |
| pm_runtime_put(dev); |
| if (parent) |
| pm_runtime_put_sync(parent); |
| } |
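| |
| /* |
| * Callers bracket config space accesses with the two helpers above so that |
| * the device and its parent are powered up enough for config reads to work, |
| * e.g. (sketch, in the style of the sysfs config attribute code): |
| * |
| *	pci_config_pm_runtime_get(pdev); |
| *	pci_read_config_dword(pdev, pos, &val); |
| *	pci_config_pm_runtime_put(pdev); |
| */ |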
| |
| static const struct dmi_system_id bridge_d3_blacklist[] = { |
| #ifdef CONFIG_X86 |
| { |
| /* |
| * Gigabyte X299 root port is not marked as hotplug capable |
| * which allows Linux to power manage it. However, this |
| * confuses the BIOS SMI handler so don't power manage root |
| * ports on that system. |
| */ |
| .ident = "X299 DESIGNARE EX-CF", |
| .matches = { |
| DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), |
| DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), |
| }, |
| }, |
| { |
| /* |
| * Downstream device is not accessible after putting a root port |
| * into D3cold and back into D0 on Elo Continental Z2 board |
| */ |
| .ident = "Elo Continental Z2", |
| .matches = { |
| DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), |
| DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), |
| DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), |
| }, |
| }, |
| #endif |
| { } |
| }; |
| |
| /** |
| * pci_bridge_d3_possible - Is it possible to put the bridge into D3 |
| * @bridge: Bridge to check |
| * |
| * This function checks if it is possible to move the bridge to D3. |
| * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt. |
| */ |
| bool pci_bridge_d3_possible(struct pci_dev *bridge) |
| { |
| if (!pci_is_pcie(bridge)) |
| return false; |
| |
| switch (pci_pcie_type(bridge)) { |
| case PCI_EXP_TYPE_ROOT_PORT: |
| case PCI_EXP_TYPE_UPSTREAM: |
| case PCI_EXP_TYPE_DOWNSTREAM: |
| if (pci_bridge_d3_disable) |
| return false; |
| |
| /* |
| * Hotplug ports handled by firmware in System Management Mode |
| * may not be put into D3 by the OS (Thunderbolt on non-Macs). |
| */ |
| if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) |
| return false; |
| |
| if (pci_bridge_d3_force) |
| return true; |
| |
| /* Even the oldest 2010 Thunderbolt controller supports D3. */ |
| if (bridge->is_thunderbolt) |
| return true; |
| |
| /* Platform might know better if the bridge supports D3 */ |
| if (platform_pci_bridge_d3(bridge)) |
| return true; |
| |
| /* |
| * Hotplug ports handled natively by the OS were not validated |
| * by vendors for runtime D3 at least until 2018 because there |
| * was no OS support. |
| */ |
| if (bridge->is_hotplug_bridge) |
| return false; |
| |
| if (dmi_check_system(bridge_d3_blacklist)) |
| return false; |
| |
| /* |
| * It should be safe to put PCIe ports from 2015 or newer |
| * to D3. |
| */ |
| if (dmi_get_bios_year() >= 2015) |
| return true; |
| break; |
| } |
| |
| return false; |
| } |
| |
| static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) |
| { |
| bool *d3cold_ok = data; |
| |
| if (/* The device needs to be allowed to go D3cold ... */ |
| dev->no_d3cold || !dev->d3cold_allowed || |
| |
| /* ... and if it is wakeup capable to do so from D3cold. */ |
| (device_may_wakeup(&dev->dev) && |
| !pci_pme_capable(dev, PCI_D3cold)) || |
| |
| /* If it is a bridge it must be allowed to go to D3. */ |
| !pci_power_manageable(dev)) |
| |
| *d3cold_ok = false; |
| |
| return !*d3cold_ok; |
| } |
| |
| /** |
| * pci_bridge_d3_update - Update bridge D3 capabilities |
| * @dev: PCI device which is changed |
| * |
| * Update the upstream bridge's PM capabilities depending on whether the |
| * device's PM configuration was changed or the device is being removed. The |
| * change is also propagated upstream. |
| */ |
| void pci_bridge_d3_update(struct pci_dev *dev) |
| { |
| bool remove = !device_is_registered(&dev->dev); |
| struct pci_dev *bridge; |
| bool d3cold_ok = true; |
| |
| bridge = pci_upstream_bridge(dev); |
| if (!bridge || !pci_bridge_d3_possible(bridge)) |
| return; |
| |
| /* |
| * If D3 is currently allowed for the bridge, removing one of its |
| * children won't change that. |
| */ |
| if (remove && bridge->bridge_d3) |
| return; |
| |
| /* |
| * If D3 is currently allowed for the bridge and a child is added or |
| * changed, disallowance of D3 can only be caused by that child, so |
| * we only need to check that single device, not any of its siblings. |
| * |
| * If D3 is currently not allowed for the bridge, checking the device |
| * first may allow us to skip checking its siblings. |
| */ |
| if (!remove) |
| pci_dev_check_d3cold(dev, &d3cold_ok); |
| |
| /* |
| * If D3 is currently not allowed for the bridge, this may be caused |
| * either by the device being changed/removed or any of its siblings, |
| * so we need to go through all children to find out if one of them |
| * continues to block D3. |
| */ |
| if (d3cold_ok && !bridge->bridge_d3) |
| pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold, |
| &d3cold_ok); |
| |
| if (bridge->bridge_d3 != d3cold_ok) { |
| bridge->bridge_d3 = d3cold_ok; |
| /* Propagate change to upstream bridges */ |
| pci_bridge_d3_update(bridge); |
| } |
| } |
| |
| /** |
| * pci_d3cold_enable - Enable D3cold for device |
| * @dev: PCI device to handle |
| * |
| * This function can be used in drivers to enable D3cold for the device |
| * they handle. It also updates upstream PCI bridge PM capabilities |
| * accordingly. |
| */ |
| void pci_d3cold_enable(struct pci_dev *dev) |
| { |
| if (dev->no_d3cold) { |
| dev->no_d3cold = false; |
| pci_bridge_d3_update(dev); |
| } |
| } |
| EXPORT_SYMBOL_GPL(pci_d3cold_enable); |
| |
| /** |
| * pci_d3cold_disable - Disable D3cold for device |
| * @dev: PCI device to handle |
| * |
| * This function can be used in drivers to disable D3cold for the device |
| * they handle. It also updates upstream PCI bridge PM capabilities |
| * accordingly. |
| */ |
| void pci_d3cold_disable(struct pci_dev *dev) |
| { |
| if (!dev->no_d3cold) { |
| dev->no_d3cold = true; |
| pci_bridge_d3_update(dev); |
| } |
| } |
| EXPORT_SYMBOL_GPL(pci_d3cold_disable); |
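| |
| /* |
| * Sketch (illustrative): a driver that finds its device unreliable after a |
| * D3cold transition can opt out at runtime and opt back in later; |
| * foo_device_broken_after_d3cold() is a hypothetical check: |
| * |
| *	if (foo_device_broken_after_d3cold(pdev)) |
| *		pci_d3cold_disable(pdev); |
| *	else |
| *		pci_d3cold_enable(pdev); |
| */ |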
| |
| /** |
| * pci_pm_init - Initialize PM functions of given PCI device |
| * @dev: PCI device to handle. |
| */ |
| void pci_pm_init(struct pci_dev *dev) |
| { |
| int pm; |
| u16 status; |
| u16 pmc; |
| |
| pm_runtime_forbid(&dev->dev); |
| pm_runtime_set_active(&dev->dev); |
| pm_runtime_enable(&dev->dev); |
| device_enable_async_suspend(&dev->dev); |
| dev->wakeup_prepared = false; |
| |
| dev->pm_cap = 0; |
| dev->pme_support = 0; |
| |
| /* find PCI PM capability in list */ |
| pm = pci_find_capability(dev, PCI_CAP_ID_PM); |
| if (!pm) |
| return; |
| /* Check device's ability to generate PME# */ |
| pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); |
| |
| if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { |
| pci_err(dev, "unsupported PM cap regs version (%u)\n", |
| pmc & PCI_PM_CAP_VER_MASK); |
| return; |
| } |
| |
| dev->pm_cap = pm; |
| dev->d3hot_delay = PCI_PM_D3HOT_WAIT; |
| dev->d3cold_delay = PCI_PM_D3COLD_WAIT; |
| dev->bridge_d3 = pci_bridge_d3_possible(dev); |
| dev->d3cold_allowed = true; |
| |
| dev->d1_support = false; |
| dev->d2_support = false; |
| if (!pci_no_d1d2(dev)) { |
| if (pmc & PCI_PM_CAP_D1) |
| dev->d1_support = true; |
| if (pmc & PCI_PM_CAP_D2) |
| dev->d2_support = true; |
| |
| if (dev->d1_support || dev->d2_support) |
| pci_info(dev, "supports%s%s\n", |
| dev->d1_support ? " D1" : "", |
| dev->d2_support ? " D2" : ""); |
| } |
| |
| pmc &= PCI_PM_CAP_PME_MASK; |
| if (pmc) { |
| pci_info(dev, "PME# supported from%s%s%s%s%s\n", |
| (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", |
| (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", |
| (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", |
| (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", |
| (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); |
| dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; |
| dev->pme_poll = true; |
| /* |
| * Make device's PM flags reflect the wake-up capability, but |
| * let the user space enable it to wake up the system as needed. |
| */ |
| device_set_wakeup_capable(&dev->dev, true); |
| /* Disable the PME# generation functionality */ |
| pci_pme_active(dev, false); |
| } |
| |
| pci_read_config_word(dev, PCI_STATUS, &status); |
| if (status & PCI_STATUS_IMM_READY) |
| dev->imm_ready = 1; |
| } |
| |
| static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) |
| { |
| unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI; |
| |
| switch (prop) { |
| case PCI_EA_P_MEM: |
| case PCI_EA_P_VF_MEM: |
| flags |= IORESOURCE_MEM; |
| break; |
| case PCI_EA_P_MEM_PREFETCH: |
| case PCI_EA_P_VF_MEM_PREFETCH: |
| flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; |
| break; |
| case PCI_EA_P_IO: |
| flags |= IORESOURCE_IO; |
| break; |
| default: |
| return 0; |
| } |
| |
| return flags; |
| } |
| |
| static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, |
| u8 prop) |
| { |
| if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO) |
| return &dev->resource[bei]; |
| #ifdef CONFIG_PCI_IOV |
| else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 && |
| (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH)) |
| return &dev->resource[PCI_IOV_RESOURCES + |
| bei - PCI_EA_BEI_VF_BAR0]; |
| #endif |
| else if (bei == PCI_EA_BEI_ROM) |
| return &dev->resource[PCI_ROM_RESOURCE]; |
| else |
| return NULL; |
| } |
| |
| /* Read an Enhanced Allocation (EA) entry */ |
| static int pci_ea_read(struct pci_dev *dev, int offset) |
| { |
| struct resource *res; |
| int ent_size, ent_offset = offset; |
| resource_size_t start, end; |
| unsigned long flags; |
| u32 dw0, bei, base, max_offset; |
| u8 prop; |
| bool support_64 = (sizeof(resource_size_t) >= 8); |
| |
| pci_read_config_dword(dev, ent_offset, &dw0); |
| ent_offset += 4; |
| |
| /* Entry size field indicates DWORDs after 1st */ |
| ent_size = ((dw0 & PCI_EA_ES) + 1) << 2; |
| |
| if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */ |
| goto out; |
| |
| bei = (dw0 & PCI_EA_BEI) >> 4; |
| prop = (dw0 & PCI_EA_PP) >> 8; |
| |
| /* |
| * If the Property is in the reserved range, try the Secondary |
| * Property instead. |
| */ |
| if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED) |
| prop = (dw0 & PCI_EA_SP) >> 16; |
| if (prop > PCI_EA_P_BRIDGE_IO) |
| goto out; |
| |
| res = pci_ea_get_resource(dev, bei, prop); |
| if (!res) { |
| pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); |
| goto out; |
| } |
| |
| flags = pci_ea_flags(dev, prop); |
| if (!flags) { |
| pci_err(dev, "Unsupported EA properties: %#x\n", prop); |
| goto out; |
| } |
| |
| /* Read Base */ |
| pci_read_config_dword(dev, ent_offset, &base); |
| start = (base & PCI_EA_FIELD_MASK); |
| ent_offset += 4; |
| |
| /* Read MaxOffset */ |
| pci_read_config_dword(dev, ent_offset, &max_offset); |
| ent_offset += 4; |
| |
| /* Read Base MSBs (if 64-bit entry) */ |
| if (base & PCI_EA_IS_64) { |
| u32 base_upper; |
| |
| pci_read_config_dword(dev, ent_offset, &base_upper); |
| ent_offset += 4; |
| |
| flags |= IORESOURCE_MEM_64; |
| |
| /* entry starts above 32-bit boundary, can't use */ |
| if (!support_64 && base_upper) |
| goto out; |
| |
| if (support_64) |
| start |= ((u64)base_upper << 32); |
| } |
| |
| end = start + (max_offset | 0x03); |
| |
| /* Read MaxOffset MSBs (if 64-bit entry) */ |
| if (max_offset & PCI_EA_IS_64) { |
| u32 max_offset_upper; |
| |
| pci_read_config_dword(dev, ent_offset, &max_offset_upper); |
| ent_offset += 4; |
| |
| flags |= IORESOURCE_MEM_64; |
| |
| /* entry too big, can't use */ |
| if (!support_64 && max_offset_upper) |
| goto out; |
| |
| if (support_64) |
| end += ((u64)max_offset_upper << 32); |
| } |
| |
| if (end < start) { |
| pci_err(dev, "EA Entry crosses address boundary\n"); |
| goto out; |
| } |
| |
| if (ent_size != ent_offset - offset) { |
| pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n", |
| ent_size, ent_offset - offset); |
| goto out; |
| } |
| |
| res->name = pci_name(dev); |
| res->start = start; |
| res->end = end; |
| res->flags = flags; |
| |
| if (bei <= PCI_EA_BEI_BAR5) |
| pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
| bei, res, prop); |
| else if (bei == PCI_EA_BEI_ROM) |
| pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", |
| res, prop); |
| else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) |
|