Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c  |  27
-rw-r--r--  drivers/pci/controller/dwc/pci-imx6.c                |  23
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c    |  13
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c         |  20
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h         |   1
-rw-r--r--  drivers/pci/controller/dwc/pcie-qcom.c               |  73
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c                |  14
-rw-r--r--  drivers/pci/controller/pcie-rcar-host.c              |  25
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c                   |   5
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c                    |  21
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c                     |  12
-rw-r--r--  drivers/pci/of.c                                     |  14
-rw-r--r--  drivers/pci/pci-acpi.c                               |  53
-rw-r--r--  drivers/pci/pci.c                                    | 192
-rw-r--r--  drivers/pci/pci.h                                    |  19
-rw-r--r--  drivers/pci/pcie/aspm.c                              | 101
-rw-r--r--  drivers/pci/probe.c                                  |  12
-rw-r--r--  drivers/pci/quirks.c                                 | 111
18 files changed, 492 insertions, 244 deletions
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 940c7dd701d6..5b14f7ee3c79 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -12,6 +12,8 @@ #include "pcie-cadence.h" +#define LINK_RETRAIN_TIMEOUT HZ + static u64 bar_max_size[] = { [RP_BAR0] = _ULL(128 * SZ_2G), [RP_BAR1] = SZ_2G, @@ -77,6 +79,27 @@ static struct pci_ops cdns_pcie_host_ops = { .write = pci_generic_config_write, }; +static int cdns_pcie_host_training_complete(struct cdns_pcie *pcie) +{ + u32 pcie_cap_off = CDNS_PCIE_RP_CAP_OFFSET; + unsigned long end_jiffies; + u16 lnk_stat; + + /* Wait for link training to complete. Exit after timeout. */ + end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; + do { + lnk_stat = cdns_pcie_rp_readw(pcie, pcie_cap_off + PCI_EXP_LNKSTA); + if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) + break; + usleep_range(0, 1000); + } while (time_before(jiffies, end_jiffies)); + + if (!(lnk_stat & PCI_EXP_LNKSTA_LT)) + return 0; + + return -ETIMEDOUT; +} + static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie) { struct device *dev = pcie->dev; @@ -118,6 +141,10 @@ static int cdns_pcie_retrain(struct cdns_pcie *pcie) cdns_pcie_rp_writew(pcie, pcie_cap_off + PCI_EXP_LNKCTL, lnk_ctl); + ret = cdns_pcie_host_training_complete(pcie); + if (ret) + return ret; + ret = cdns_pcie_host_wait_for_link(pcie); } return ret; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 52906f999f2b..27aaa2a6bf39 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -80,6 +80,7 @@ struct imx6_pcie { struct clk *pcie; struct clk *pcie_aux; struct regmap *iomuxc_gpr; + u16 msi_ctrl; u32 controller_id; struct reset_control *pciephy_reset; struct reset_control *apps_reset; @@ -1178,6 +1179,26 @@ pm_turnoff_sleep: usleep_range(1000, 10000); } +static void imx6_pcie_msi_save_restore(struct imx6_pcie *imx6_pcie, bool save) +{ + u8 offset; + u16 val; + struct dw_pcie *pci = imx6_pcie->pci; + + if (pci_msi_enabled()) { + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_MSI); + if (save) { + val = dw_pcie_readw_dbi(pci, offset + PCI_MSI_FLAGS); + imx6_pcie->msi_ctrl = val; + } else { + dw_pcie_dbi_ro_wr_en(pci); + val = imx6_pcie->msi_ctrl; + dw_pcie_writew_dbi(pci, offset + PCI_MSI_FLAGS, val); + dw_pcie_dbi_ro_wr_dis(pci); + } + } +} + static int imx6_pcie_suspend_noirq(struct device *dev) { struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); @@ -1186,6 +1207,7 @@ static int imx6_pcie_suspend_noirq(struct device *dev) if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) return 0; + imx6_pcie_msi_save_restore(imx6_pcie, true); imx6_pcie_pm_turnoff(imx6_pcie); imx6_pcie_stop_link(imx6_pcie->pci); imx6_pcie_host_exit(pp); @@ -1205,6 +1227,7 @@ static int imx6_pcie_resume_noirq(struct device *dev) ret = imx6_pcie_host_init(pp); if (ret) return ret; + imx6_pcie_msi_save_restore(imx6_pcie, false); dw_pcie_setup_rc(pp); if (imx6_pcie->link_is_up) diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 9952057c8819..cf61733bf78d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -485,14 +485,19 @@ int dw_pcie_host_init(struct dw_pcie_rp *pp) if (ret) goto err_remove_edma; - if (!dw_pcie_link_up(pci)) { + if (dw_pcie_link_up(pci)) { + dw_pcie_print_link_status(pci); + } 
else { ret = dw_pcie_start_link(pci); if (ret) goto err_remove_edma; - } - /* Ignore errors, the link may come up later */ - dw_pcie_wait_for_link(pci); + if (pci->ops && pci->ops->start_link) { + ret = dw_pcie_wait_for_link(pci); + if (ret) + goto err_stop_link; + } + } bridge->sysdata = pp; diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 8e33e6e59e68..df092229e97d 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -644,9 +644,20 @@ void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index) dw_pcie_writel_atu(pci, dir, index, PCIE_ATU_REGION_CTRL2, 0); } -int dw_pcie_wait_for_link(struct dw_pcie *pci) +void dw_pcie_print_link_status(struct dw_pcie *pci) { u32 offset, val; + + offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); + + dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", + FIELD_GET(PCI_EXP_LNKSTA_CLS, val), + FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); +} + +int dw_pcie_wait_for_link(struct dw_pcie *pci) +{ int retries; /* Check if the link is up or not */ @@ -662,12 +673,7 @@ int dw_pcie_wait_for_link(struct dw_pcie *pci) return -ETIMEDOUT; } - offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); - val = dw_pcie_readw_dbi(pci, offset + PCI_EXP_LNKSTA); - - dev_info(pci->dev, "PCIe Gen.%u x%u link up\n", - FIELD_GET(PCI_EXP_LNKSTA_CLS, val), - FIELD_GET(PCI_EXP_LNKSTA_NLW, val)); + dw_pcie_print_link_status(pci); return 0; } diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 79713ce075cc..615660640801 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -429,6 +429,7 @@ void dw_pcie_setup(struct dw_pcie *pci); void dw_pcie_iatu_detect(struct dw_pcie *pci); int dw_pcie_edma_detect(struct dw_pcie *pci); void dw_pcie_edma_remove(struct dw_pcie *pci); +void dw_pcie_print_link_status(struct dw_pcie *pci); static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) { diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 4ab30892f6ef..7a87a47eb7ed 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -61,7 +61,6 @@ /* DBI registers */ #define AXI_MSTR_RESP_COMP_CTRL0 0x818 #define AXI_MSTR_RESP_COMP_CTRL1 0x81c -#define MISC_CONTROL_1_REG 0x8bc /* MHI registers */ #define PARF_DEBUG_CNT_PM_LINKST_IN_L2 0xc04 @@ -132,9 +131,6 @@ /* AXI_MSTR_RESP_COMP_CTRL1 register fields */ #define CFG_BRIDGE_SB_INIT BIT(0) -/* MISC_CONTROL_1_REG register fields */ -#define DBI_RO_WR_EN 1 - /* PCI_EXP_SLTCAP register fields */ #define PCIE_CAP_SLOT_POWER_LIMIT_VAL FIELD_PREP(PCI_EXP_SLTCAP_SPLV, 250) #define PCIE_CAP_SLOT_POWER_LIMIT_SCALE FIELD_PREP(PCI_EXP_SLTCAP_SPLS, 1) @@ -144,7 +140,6 @@ PCI_EXP_SLTCAP_AIP | \ PCI_EXP_SLTCAP_PIP | \ PCI_EXP_SLTCAP_HPS | \ - PCI_EXP_SLTCAP_HPC | \ PCI_EXP_SLTCAP_EIP | \ PCIE_CAP_SLOT_POWER_LIMIT_VAL | \ PCIE_CAP_SLOT_POWER_LIMIT_SCALE) @@ -274,6 +269,20 @@ static int qcom_pcie_start_link(struct dw_pcie *pci) return 0; } +static void qcom_pcie_clear_hpc(struct dw_pcie *pci) +{ + u16 offset = dw_pcie_find_capability(pci, PCI_CAP_ID_EXP); + u32 val; + + dw_pcie_dbi_ro_wr_en(pci); + + val = readl(pci->dbi_base + offset + PCI_EXP_SLTCAP); + val &= ~PCI_EXP_SLTCAP_HPC; + writel(val, pci->dbi_base + offset + PCI_EXP_SLTCAP); + + dw_pcie_dbi_ro_wr_dis(pci); +} + static 
void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) { u32 val; @@ -429,6 +438,8 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie) writel(CFG_BRIDGE_SB_INIT, pci->dbi_base + AXI_MSTR_RESP_COMP_CTRL1); + qcom_pcie_clear_hpc(pcie->pci); + return 0; } @@ -512,6 +523,8 @@ static int qcom_pcie_post_init_1_0_0(struct qcom_pcie *pcie) writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT); } + qcom_pcie_clear_hpc(pcie->pci); + return 0; } @@ -607,6 +620,8 @@ static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) val |= EN; writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); + qcom_pcie_clear_hpc(pcie->pci); + return 0; } @@ -692,34 +707,6 @@ static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) return 0; } -static int qcom_pcie_post_init_2_4_0(struct qcom_pcie *pcie) -{ - u32 val; - - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PARF_PHY_CTRL); - val &= ~PHY_TEST_PWR_DOWN; - writel(val, pcie->parf + PARF_PHY_CTRL); - - /* change DBI base address */ - writel(0, pcie->parf + PARF_DBI_BASE_ADDR); - - /* MAC PHY_POWERDOWN MUX DISABLE */ - val = readl(pcie->parf + PARF_SYS_CTRL); - val &= ~MAC_PHY_POWERDOWN_IN_P2_D_MUX_EN; - writel(val, pcie->parf + PARF_SYS_CTRL); - - val = readl(pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); - val |= BYPASS; - writel(val, pcie->parf + PARF_MHI_CLOCK_RESET_CTRL); - - val = readl(pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); - val |= EN; - writel(val, pcie->parf + PARF_AXI_MSTR_WR_ADDR_HALT_V2); - - return 0; -} - static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; @@ -826,7 +813,9 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) writel(0, pcie->parf + PARF_Q2A_FLUSH); writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); - writel(DBI_RO_WR_EN, pci->dbi_base + MISC_CONTROL_1_REG); + + dw_pcie_dbi_ro_wr_en(pci); + writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); @@ -836,6 +825,8 @@ static int qcom_pcie_post_init_2_3_3(struct qcom_pcie *pcie) writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + PCI_EXP_DEVCTL2); + dw_pcie_dbi_ro_wr_dis(pci); + return 0; } @@ -966,6 +957,13 @@ err_disable_regulators: return ret; } +static int qcom_pcie_post_init_2_7_0(struct qcom_pcie *pcie) +{ + qcom_pcie_clear_hpc(pcie->pci); + + return 0; +} + static void qcom_pcie_deinit_2_7_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_7_0 *res = &pcie->res.v2_7_0; @@ -1136,6 +1134,7 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) writel(0, pcie->parf + PARF_Q2A_FLUSH); dw_pcie_dbi_ro_wr_en(pci); + writel(PCIE_CAP_SLOT_VAL, pci->dbi_base + offset + PCI_EXP_SLTCAP); val = readl(pci->dbi_base + offset + PCI_EXP_LNKCAP); @@ -1145,6 +1144,8 @@ static int qcom_pcie_post_init_2_9_0(struct qcom_pcie *pcie) writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + offset + PCI_EXP_DEVCTL2); + dw_pcie_dbi_ro_wr_dis(pci); + for (i = 0; i < 256; i++) writel(0, pcie->parf + PARF_BDF_TO_SID_TABLE_N + (4 * i)); @@ -1251,7 +1252,7 @@ static const struct qcom_pcie_ops ops_2_3_2 = { static const struct qcom_pcie_ops ops_2_4_0 = { .get_resources = qcom_pcie_get_resources_2_4_0, .init = qcom_pcie_init_2_4_0, - .post_init = qcom_pcie_post_init_2_4_0, + .post_init = qcom_pcie_post_init_2_3_2, .deinit = qcom_pcie_deinit_2_4_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; @@ -1269,6 +1270,7 @@ static const struct qcom_pcie_ops ops_2_3_3 = { static const struct qcom_pcie_ops 
ops_2_7_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, + .post_init = qcom_pcie_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, }; @@ -1277,6 +1279,7 @@ static const struct qcom_pcie_ops ops_2_7_0 = { static const struct qcom_pcie_ops ops_1_9_0 = { .get_resources = qcom_pcie_get_resources_2_7_0, .init = qcom_pcie_init_2_7_0, + .post_init = qcom_pcie_post_init_2_7_0, .deinit = qcom_pcie_deinit_2_7_0, .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, .config_sid = qcom_pcie_config_sid_1_9_0, diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index ecd3009df586..6e7981d2ed5e 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -429,22 +429,12 @@ static int faraday_pci_probe(struct platform_device *pdev) p->dev = dev; /* Retrieve and enable optional clocks */ - clk = devm_clk_get(dev, "PCLK"); + clk = devm_clk_get_enabled(dev, "PCLK"); if (IS_ERR(clk)) return PTR_ERR(clk); - ret = clk_prepare_enable(clk); - if (ret) { - dev_err(dev, "could not prepare PCLK\n"); - return ret; - } - p->bus_clk = devm_clk_get(dev, "PCICLK"); + p->bus_clk = devm_clk_get_enabled(dev, "PCICLK"); if (IS_ERR(p->bus_clk)) return PTR_ERR(p->bus_clk); - ret = clk_prepare_enable(p->bus_clk); - if (ret) { - dev_err(dev, "could not prepare PCICLK\n"); - return ret; - } p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index e80e56b2a842..88975e40ee2f 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -41,21 +41,6 @@ struct rcar_msi { int irq2; }; -#ifdef CONFIG_ARM -/* - * Here we keep a static copy of the remapped PCIe controller address. - * This is only used on aarch32 systems, all of which have one single - * PCIe controller, to provide quick access to the PCIe controller in - * the L1 link state fixup function, called from the ARM fault handler. - */ -static void __iomem *pcie_base; -/* - * Static copy of PCIe device pointer, so we can check whether the - * device is runtime suspended or not. - */ -static struct device *pcie_dev; -#endif - /* Structure representing the PCIe interface */ struct rcar_pcie_host { struct rcar_pcie pcie; @@ -684,7 +669,7 @@ static void rcar_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) } static struct irq_chip rcar_msi_bottom_chip = { - .name = "Rcar MSI", + .name = "R-Car MSI", .irq_ack = rcar_msi_irq_ack, .irq_mask = rcar_msi_irq_mask, .irq_unmask = rcar_msi_irq_unmask, @@ -813,7 +798,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie_host *host) /* * Setup MSI data target using RC base address address, which - * is guaranteed to be in the low 32bit range on any RCar HW. + * is guaranteed to be in the low 32bit range on any R-Car HW. 
*/ rcar_pci_write_reg(pcie, lower_32_bits(res.start) | MSIFE, PCIEMSIALR); rcar_pci_write_reg(pcie, upper_32_bits(res.start), PCIEMSIAUR); @@ -879,12 +864,6 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host) } host->msi.irq2 = i; -#ifdef CONFIG_ARM - /* Cache static copy for L1 link state fixup hook on aarch32 */ - pcie_base = pcie->base; - pcie_dev = pcie->dev; -#endif - return 0; err_irq2: diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 5b1f271c6034..328d1e416014 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -498,7 +498,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) acpiphp_native_scan_bridge(dev); } } else { - LIST_HEAD(add_list); int max, pass; acpiphp_rescan_slot(slot); @@ -512,12 +511,10 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); - __pci_bus_size_bridges(dev->subordinate, - &add_list); } } } - __pci_bus_assign_resources(bus, &add_list, NULL); + pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 529c34808440..dcdbfcf404dd 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -166,11 +166,11 @@ void pciehp_handle_button_press(struct controller *ctrl) case ON_STATE: if (ctrl->state == ON_STATE) { ctrl->state = BLINKINGOFF_STATE; - ctrl_info(ctrl, "Slot(%s): Powering off due to button press\n", + ctrl_info(ctrl, "Slot(%s): Button press: will power off in 5 sec\n", slot_name(ctrl)); } else { ctrl->state = BLINKINGON_STATE; - ctrl_info(ctrl, "Slot(%s) Powering on due to button press\n", + ctrl_info(ctrl, "Slot(%s): Button press: will power on in 5 sec\n", slot_name(ctrl)); } /* blink power indicator and turn off attention */ @@ -185,22 +185,23 @@ void pciehp_handle_button_press(struct controller *ctrl) * press the attention again before the 5 sec. 
limit * expires to cancel hot-add or hot-remove */ - ctrl_info(ctrl, "Slot(%s): Button cancel\n", slot_name(ctrl)); cancel_delayed_work(&ctrl->button_work); if (ctrl->state == BLINKINGOFF_STATE) { ctrl->state = ON_STATE; pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_ON, PCI_EXP_SLTCTL_ATTN_IND_OFF); + ctrl_info(ctrl, "Slot(%s): Button press: canceling request to power off\n", + slot_name(ctrl)); } else { ctrl->state = OFF_STATE; pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, PCI_EXP_SLTCTL_ATTN_IND_OFF); + ctrl_info(ctrl, "Slot(%s): Button press: canceling request to power on\n", + slot_name(ctrl)); } - ctrl_info(ctrl, "Slot(%s): Action canceled due to button press\n", - slot_name(ctrl)); break; default: - ctrl_err(ctrl, "Slot(%s): Ignoring invalid state %#x\n", + ctrl_err(ctrl, "Slot(%s): Button press: ignoring invalid state %#x\n", slot_name(ctrl), ctrl->state); break; } @@ -256,6 +257,14 @@ void pciehp_handle_presence_or_link_change(struct controller *ctrl, u32 events) present = pciehp_card_present(ctrl); link_active = pciehp_check_link_active(ctrl); if (present <= 0 && link_active <= 0) { + if (ctrl->state == BLINKINGON_STATE) { + ctrl->state = OFF_STATE; + cancel_delayed_work(&ctrl->button_work); + pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF, + INDICATOR_NOOP); + ctrl_info(ctrl, "Slot(%s): Card not present\n", + slot_name(ctrl)); + } mutex_unlock(&ctrl->state_lock); return; } diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index f8c70115b691..8711325605f0 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -722,11 +722,8 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id) } /* Check Attention Button Pressed */ - if (events & PCI_EXP_SLTSTA_ABP) { - ctrl_info(ctrl, "Slot(%s): Attention button pressed\n", - slot_name(ctrl)); + if (events & PCI_EXP_SLTSTA_ABP) pciehp_handle_button_press(ctrl); - } /* Check Power Fault Detected */ if (events & PCI_EXP_SLTSTA_PFD) { @@ -984,7 +981,7 @@ static inline int pcie_hotplug_depth(struct pci_dev *dev) struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; - u32 slot_cap, slot_cap2, link_cap; + u32 slot_cap, slot_cap2; u8 poweron; struct pci_dev *pdev = dev->port; struct pci_bus *subordinate = pdev->subordinate; @@ -1030,9 +1027,6 @@ struct controller *pcie_init(struct pcie_device *dev) if (dmi_first_match(inband_presence_disabled_dmi_table)) ctrl->inband_presence_disabled = 1; - /* Check if Data Link Layer Link Active Reporting is implemented */ - pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap); - /* Clear all remaining event bits in Slot Status register. */ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | @@ -1051,7 +1045,7 @@ struct controller *pcie_init(struct pcie_device *dev) FLAG(slot_cap, PCI_EXP_SLTCAP_EIP), FLAG(slot_cap, PCI_EXP_SLTCAP_NCCS), FLAG(slot_cap2, PCI_EXP_SLTCAP2_IBPD), - FLAG(link_cap, PCI_EXP_LNKCAP_DLLLARC), + FLAG(pdev->link_active_reporting, true), pdev->broken_cmd_compl ? 
" (with Cmd Compl erratum)" : ""); /* diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 2c25f4fa0225..e51219f9f523 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -39,16 +39,14 @@ int pci_set_of_node(struct pci_dev *dev) return -ENODEV; } - dev->dev.of_node = node; - dev->dev.fwnode = &node->fwnode; + device_set_node(&dev->dev, of_fwnode_handle(node)); return 0; } void pci_release_of_node(struct pci_dev *dev) { of_node_put(dev->dev.of_node); - dev->dev.of_node = NULL; - dev->dev.fwnode = NULL; + device_set_node(&dev->dev, NULL); } void pci_set_bus_of_node(struct pci_bus *bus) @@ -63,17 +61,13 @@ void pci_set_bus_of_node(struct pci_bus *bus) bus->self->external_facing = true; } - bus->dev.of_node = node; - - if (bus->dev.of_node) - bus->dev.fwnode = &bus->dev.of_node->fwnode; + device_set_node(&bus->dev, of_fwnode_handle(node)); } void pci_release_bus_of_node(struct pci_bus *bus) { of_node_put(bus->dev.of_node); - bus->dev.of_node = NULL; - bus->dev.fwnode = NULL; + device_set_node(&bus->dev, NULL); } struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 052a611081ec..a05350a4e49c 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1043,6 +1043,16 @@ bool acpi_pci_bridge_d3(struct pci_dev *dev) return false; } +static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable) +{ + int val = enable ? ACPI_REG_CONNECT : ACPI_REG_DISCONNECT; + int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev), + ACPI_ADR_SPACE_PCI_CONFIG, val); + if (ret) + pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n", + enable ? "connect" : "disconnect", ret); +} + int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); @@ -1053,32 +1063,49 @@ int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) [PCI_D3hot] = ACPI_STATE_D3_HOT, [PCI_D3cold] = ACPI_STATE_D3_COLD, }; - int error = -EINVAL; + int error; /* If the ACPI device has _EJ0, ignore the device */ if (!adev || acpi_has_method(adev->handle, "_EJ0")) return -ENODEV; switch (state) { - case PCI_D3cold: - if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == - PM_QOS_FLAGS_ALL) { - error = -EBUSY; - break; - } - fallthrough; case PCI_D0: case PCI_D1: case PCI_D2: case PCI_D3hot: - error = acpi_device_set_power(adev, state_conv[state]); + case PCI_D3cold: + break; + default: + return -EINVAL; + } + + if (state == PCI_D3cold) { + if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == + PM_QOS_FLAGS_ALL) + return -EBUSY; + + /* Notify AML lack of PCI config space availability */ + acpi_pci_config_space_access(dev, false); } - if (!error) - pci_dbg(dev, "power state changed by ACPI to %s\n", - acpi_power_state_string(adev->power.state)); + error = acpi_device_set_power(adev, state_conv[state]); + if (error) + return error; - return error; + pci_dbg(dev, "power state changed by ACPI to %s\n", + acpi_power_state_string(adev->power.state)); + + /* + * Notify AML of PCI config space availability. Config space is + * accessible in all states except D3cold; the only transitions + * that change availability are transitions to D3cold and from + * D3cold to D0. 
+ */ + if (state == PCI_D0) + acpi_pci_config_space_access(dev, true); + + return 0; } pci_power_t acpi_pci_get_power_state(struct pci_dev *dev) diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 5ede93222bc1..60230da957e0 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -65,6 +65,13 @@ struct pci_pme_device { #define PME_TIMEOUT 1000 /* How long between PME checks */ /* + * Following exit from Conventional Reset, devices must be ready within 1 sec + * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional + * Reset (PCIe r6.0 sec 5.8). + */ +#define PCI_RESET_WAIT 1000 /* msec */ + +/* * Devices may extend the 1 sec period through Request Retry Status * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper * limit, but 60 sec ought to be enough for any device to become @@ -1156,7 +1163,14 @@ void pci_resume_bus(struct pci_bus *bus) static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) { int delay = 1; - u32 id; + bool retrain = false; + struct pci_dev *bridge; + + if (pci_is_pcie(dev)) { + bridge = pci_upstream_bridge(dev); + if (bridge) + retrain = true; + } /* * After reset, the device should not silently discard config @@ -1170,21 +1184,33 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) * Command register instead of Vendor ID so we don't have to * contend with the CRS SV value. */ - pci_read_config_dword(dev, PCI_COMMAND, &id); - while (PCI_POSSIBLE_ERROR(id)) { + for (;;) { + u32 id; + + pci_read_config_dword(dev, PCI_COMMAND, &id); + if (!PCI_POSSIBLE_ERROR(id)) + break; + if (delay > timeout) { pci_warn(dev, "not ready %dms after %s; giving up\n", delay - 1, reset_type); return -ENOTTY; } - if (delay > PCI_RESET_WAIT) + if (delay > PCI_RESET_WAIT) { + if (retrain) { + retrain = false; + if (pcie_failed_link_retrain(bridge)) { + delay = 1; + continue; + } + } pci_info(dev, "not ready %dms after %s; waiting\n", delay - 1, reset_type); + } msleep(delay); delay *= 2; - pci_read_config_dword(dev, PCI_COMMAND, &id); } if (delay > PCI_RESET_WAIT) @@ -2949,13 +2975,13 @@ static const struct dmi_system_id bridge_d3_blacklist[] = { { /* * Downstream device is not accessible after putting a root port - * into D3cold and back into D0 on Elo i2. + * into D3cold and back into D0 on Elo Continental Z2 board */ - .ident = "Elo i2", + .ident = "Elo Continental Z2", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"), - DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"), - DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"), + DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), + DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), + DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), }, }, #endif @@ -4857,6 +4883,79 @@ static int pci_pm_reset(struct pci_dev *dev, bool probe) } /** + * pcie_wait_for_link_status - Wait for link status change + * @pdev: Device whose link to wait for. + * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE. + * @active: Waiting for active or inactive? + * + * Return 0 if successful, or -ETIMEDOUT if status has not changed within + * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. + */ +static int pcie_wait_for_link_status(struct pci_dev *pdev, + bool use_lt, bool active) +{ + u16 lnksta_mask, lnksta_match; + unsigned long end_jiffies; + u16 lnksta; + + lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA; + lnksta_match = active ? 
lnksta_mask : 0; + + end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS); + do { + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); + if ((lnksta & lnksta_mask) == lnksta_match) + return 0; + msleep(1); + } while (time_before(jiffies, end_jiffies)); + + return -ETIMEDOUT; +} + +/** + * pcie_retrain_link - Request a link retrain and wait for it to complete + * @pdev: Device whose link to retrain. + * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status. + * + * Retrain completion status is retrieved from the Link Status Register + * according to @use_lt. It is not verified whether the use of the DLLLA + * bit is valid. + * + * Return 0 if successful, or -ETIMEDOUT if training has not completed + * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. + */ +int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) +{ + int rc; + u16 lnkctl; + + /* + * Ensure the updated LNKCTL parameters are used during link + * training by checking that there is no ongoing link training to + * avoid LTSSM race as recommended in Implementation Note at the + * end of PCIe r6.0.1 sec 7.5.3.7. + */ + rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); + if (rc) + return rc; + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl); + lnkctl |= PCI_EXP_LNKCTL_RL; + pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl); + if (pdev->clear_retrain_link) { + /* + * Due to an erratum in some devices the Retrain Link bit + * needs to be cleared again manually to allow the link + * training to succeed. + */ + lnkctl &= ~PCI_EXP_LNKCTL_RL; + pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, lnkctl); + } + + return pcie_wait_for_link_status(pdev, use_lt, !use_lt); +} + +/** * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? @@ -4867,16 +4966,14 @@ static int pci_pm_reset(struct pci_dev *dev, bool probe) static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay) { - int timeout = 1000; - bool ret; - u16 lnk_status; + int rc; /* * Some controllers might not implement link active reporting. In this * case, we wait for 1000 ms + any delay requested by the caller. */ if (!pdev->link_active_reporting) { - msleep(timeout + delay); + msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay); return true; } @@ -4891,20 +4988,21 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, */ if (active) msleep(20); - for (;;) { - pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status); - ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA); - if (ret == active) - break; - if (timeout <= 0) - break; - msleep(10); - timeout -= 10; - } - if (active && ret) + rc = pcie_wait_for_link_status(pdev, false, active); + if (active) { + if (rc) + rc = pcie_failed_link_retrain(pdev); + if (rc) + return false; + msleep(delay); + return true; + } + + if (rc) + return false; - return ret == active; + return true; } /** @@ -5011,11 +5109,9 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) * * However, 100 ms is the minimum and the PCIe spec says the * software must allow at least 1s before it can determine that the - * device that did not respond is a broken device. There is - * evidence that 100 ms is not always enough, for example certain - * Titan Ridge xHCI controller does not always respond to - * configuration requests if we only wait for 100 ms (see - * https://bugzilla.kernel.org/show_bug.cgi?id=203885). + * device that did not respond is a broken device. 
Also device can + * take longer than that to respond if it indicates so through Request + * Retry Status completions. * * Therefore we wait for 100 ms and check for the device presence * until the timeout expires. @@ -5024,16 +5120,36 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) return 0; if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { + u16 status; + pci_dbg(dev, "waiting %d ms for downstream link\n", delay); msleep(delay); - } else { - pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", - delay); - if (!pcie_wait_for_link_delay(dev, true, delay)) { - /* Did not train, no need to wait any further */ - pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); + + if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay)) + return 0; + + /* + * If the port supports active link reporting we now check + * whether the link is active and if not bail out early with + * the assumption that the device is not present anymore. + */ + if (!dev->link_active_reporting) return -ENOTTY; - } + + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status); + if (!(status & PCI_EXP_LNKSTA_DLLLA)) + return -ENOTTY; + + return pci_dev_wait(child, reset_type, + PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT); + } + + pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", + delay); + if (!pcie_wait_for_link_delay(dev, true, delay)) { + /* Did not train, no need to wait any further */ + pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); + return -ENOTTY; } return pci_dev_wait(child, reset_type, diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 2475098f6518..a4c397434057 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -11,6 +11,8 @@ #define PCI_VSEC_ID_INTEL_TBT 0x1234 /* Thunderbolt */ +#define PCIE_LINK_RETRAIN_TIMEOUT_MS 1000 + extern const unsigned char pcie_link_speed[]; extern bool pci_early_dump; @@ -64,13 +66,6 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, #define PCI_PM_D3HOT_WAIT 10 /* msec */ #define PCI_PM_D3COLD_WAIT 100 /* msec */ -/* - * Following exit from Conventional Reset, devices must be ready within 1 sec - * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional - * Reset (PCIe r6.0 sec 5.8). 
- */ -#define PCI_RESET_WAIT 1000 /* msec */ - void pci_update_current_state(struct pci_dev *dev, pci_power_t state); void pci_refresh_power_state(struct pci_dev *dev); int pci_power_up(struct pci_dev *dev); @@ -541,6 +536,7 @@ void pci_acs_init(struct pci_dev *dev); int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); int pci_dev_specific_disable_acs_redir(struct pci_dev *dev); +bool pcie_failed_link_retrain(struct pci_dev *dev); #else static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) @@ -555,6 +551,10 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) { return -ENOTTY; } +static inline bool pcie_failed_link_retrain(struct pci_dev *dev) +{ + return false; +} #endif /* PCI error reporting and recovery */ @@ -563,6 +563,7 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pci_ers_result_t (*reset_subordinates)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); +int pcie_retrain_link(struct pci_dev *pdev, bool use_lt); #ifdef CONFIG_PCIEASPM void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); @@ -686,6 +687,8 @@ extern const struct attribute_group aer_stats_attr_group; void pci_aer_clear_fatal_status(struct pci_dev *dev); int pci_aer_clear_status(struct pci_dev *dev); int pci_aer_raw_clear_status(struct pci_dev *dev); +void pci_save_aer_state(struct pci_dev *dev); +void pci_restore_aer_state(struct pci_dev *dev); #else static inline void pci_no_aer(void) { } static inline void pci_aer_init(struct pci_dev *d) { } @@ -693,6 +696,8 @@ static inline void pci_aer_exit(struct pci_dev *d) { } static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; } static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; } +static inline void pci_save_aer_state(struct pci_dev *dev) { } +static inline void pci_restore_aer_state(struct pci_dev *dev) { } #endif #ifdef CONFIG_ACPI diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 66d7514ca111..3dafba0b5f41 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -90,8 +90,6 @@ static const char *policy_str[] = { [POLICY_POWER_SUPERSAVE] = "powersupersave" }; -#define LINK_RETRAIN_TIMEOUT HZ - /* * The L1 PM substate capability is only implemented in function 0 in a * multi function device. @@ -193,36 +191,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) link->clkpm_disable = blacklist ? 1 : 0; } -static bool pcie_retrain_link(struct pcie_link_state *link) -{ - struct pci_dev *parent = link->pdev; - unsigned long end_jiffies; - u16 reg16; - - pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); - reg16 |= PCI_EXP_LNKCTL_RL; - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - if (parent->clear_retrain_link) { - /* - * Due to an erratum in some devices the Retrain Link bit - * needs to be cleared again manually to allow the link - * training to succeed. - */ - reg16 &= ~PCI_EXP_LNKCTL_RL; - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - } - - /* Wait for link training end. 
Break out after waiting for timeout */ - end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; - do { - pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); - if (!(reg16 & PCI_EXP_LNKSTA_LT)) - break; - msleep(1); - } while (time_before(jiffies, end_jiffies)); - return !(reg16 & PCI_EXP_LNKSTA_LT); -} - /* * pcie_aspm_configure_common_clock: check if the 2 ends of a link * could use common clock. If they are, configure them to use the @@ -289,15 +257,15 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) reg16 &= ~PCI_EXP_LNKCTL_CCC; pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - if (pcie_retrain_link(link)) - return; + if (pcie_retrain_link(link->pdev, true)) { - /* Training failed. Restore common clock configurations */ - pci_err(parent, "ASPM: Could not configure common clock\n"); - list_for_each_entry(child, &linkbus->devices, bus_list) - pcie_capability_write_word(child, PCI_EXP_LNKCTL, + /* Training failed. Restore common clock configurations */ + pci_err(parent, "ASPM: Could not configure common clock\n"); + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_reg[PCI_FUNC(child->devfn)]); - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); + } } /* Convert L0s latency encoding to ns */ @@ -337,7 +305,7 @@ static u32 calc_l1_acceptable(u32 encoding) } /* Convert L1SS T_pwr encoding to usec */ -static u32 calc_l1ss_pwron(struct pci_dev *pdev, u32 scale, u32 val) +static u32 calc_l12_pwron(struct pci_dev *pdev, u32 scale, u32 val) { switch (scale) { case 0: @@ -471,7 +439,7 @@ static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, } /* Calculate L1.2 PM substate timing parameters */ -static void aspm_calc_l1ss_info(struct pcie_link_state *link, +static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) { struct pci_dev *child = link->downstream, *parent = link->pdev; @@ -481,9 +449,6 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, u32 pctl1, pctl2, cctl1, cctl2; u32 pl1_2_enables, cl1_2_enables; - if (!(link->aspm_support & ASPM_STATE_L1_2_MASK)) - return; - /* Choose the greater of the two Port Common_Mode_Restore_Times */ val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8; @@ -495,13 +460,13 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link, val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19; scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16; - if (calc_l1ss_pwron(parent, scale1, val1) > - calc_l1ss_pwron(child, scale2, val2)) { + if (calc_l12_pwron(parent, scale1, val1) > + calc_l12_pwron(child, scale2, val2)) { ctl2 |= scale1 | (val1 << 3); - t_power_on = calc_l1ss_pwron(parent, scale1, val1); + t_power_on = calc_l12_pwron(parent, scale1, val1); } else { ctl2 |= scale2 | (val2 << 3); - t_power_on = calc_l1ss_pwron(child, scale2, val2); + t_power_on = calc_l12_pwron(child, scale2, val2); } /* @@ -616,8 +581,8 @@ static void aspm_l1ss_init(struct pcie_link_state *link) if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2) link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM; - if (link->aspm_support & ASPM_STATE_L1SS) - aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap); + if (link->aspm_support & ASPM_STATE_L1_2_MASK) + aspm_calc_l12_info(link, parent_l1ss_cap, child_l1ss_cap); } static void pcie_aspm_cap_init(struct 
pcie_link_state *link, int blacklist) @@ -1010,21 +975,24 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) down_read(&pci_bus_sem); mutex_lock(&aspm_lock); - /* - * All PCIe functions are in one slot, remove one function will remove - * the whole slot, so just wait until we are the last function left. - */ - if (!list_empty(&parent->subordinate->devices)) - goto out; link = parent->link_state; root = link->root; parent_link = link->parent; - /* All functions are removed, so just disable ASPM for the link */ + /* + * link->downstream is a pointer to the pci_dev of function 0. If + * we remove that function, the pci_dev is about to be deallocated, + * so we can't use link->downstream again. Free the link state to + * avoid this. + * + * If we're removing a non-0 function, it's possible we could + * retain the link state, but PCIe r6.0, sec 7.5.3.7, recommends + * programming the same ASPM Control value for all functions of + * multi-function devices, so disable ASPM for all of them. + */ pcie_config_aspm_link(link, 0); list_del(&link->sibling); - /* Clock PM is for endpoint device */ free_link_state(link); /* Recheck latencies and configure upstream links */ @@ -1032,7 +1000,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev) pcie_update_aspm_capable(root); pcie_config_aspm_path(parent_link); } -out: + mutex_unlock(&aspm_lock); up_read(&pci_bus_sem); } @@ -1095,8 +1063,7 @@ static int __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem) if (state & PCIE_LINK_STATE_L0S) link->aspm_disable |= ASPM_STATE_L0S; if (state & PCIE_LINK_STATE_L1) - /* L1 PM substates require L1 */ - link->aspm_disable |= ASPM_STATE_L1 | ASPM_STATE_L1SS; + link->aspm_disable |= ASPM_STATE_L1; if (state & PCIE_LINK_STATE_L1_1) link->aspm_disable |= ASPM_STATE_L1_1; if (state & PCIE_LINK_STATE_L1_2) @@ -1171,16 +1138,16 @@ int pci_enable_link_state(struct pci_dev *pdev, int state) if (state & PCIE_LINK_STATE_L0S) link->aspm_default |= ASPM_STATE_L0S; if (state & PCIE_LINK_STATE_L1) - /* L1 PM substates require L1 */ - link->aspm_default |= ASPM_STATE_L1 | ASPM_STATE_L1SS; + link->aspm_default |= ASPM_STATE_L1; + /* L1 PM substates require L1 */ if (state & PCIE_LINK_STATE_L1_1) - link->aspm_default |= ASPM_STATE_L1_1; + link->aspm_default |= ASPM_STATE_L1_1 | ASPM_STATE_L1; if (state & PCIE_LINK_STATE_L1_2) - link->aspm_default |= ASPM_STATE_L1_2; + link->aspm_default |= ASPM_STATE_L1_2 | ASPM_STATE_L1; if (state & PCIE_LINK_STATE_L1_1_PCIPM) - link->aspm_default |= ASPM_STATE_L1_1_PCIPM; + link->aspm_default |= ASPM_STATE_L1_1_PCIPM | ASPM_STATE_L1; if (state & PCIE_LINK_STATE_L1_2_PCIPM) - link->aspm_default |= ASPM_STATE_L1_2_PCIPM; + link->aspm_default |= ASPM_STATE_L1_2_PCIPM | ASPM_STATE_L1; pcie_config_aspm_link(link, policy_to_aspm_state(link)); link->clkpm_default = (state & PCIE_LINK_STATE_CLKPM) ? 
1 : 0; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 0b2826c4a832..8bac3ce02609 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -820,7 +820,6 @@ static void pci_set_bus_speed(struct pci_bus *bus) pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS]; - bridge->link_active_reporting = !!(linkcap & PCI_EXP_LNKCAP_DLLLARC); pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); pcie_update_link_speed(bus, linksta); @@ -997,8 +996,10 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge) resource_list_for_each_entry_safe(window, n, &resources) { offset = window->offset; res = window->res; - if (!res->flags && !res->start && !res->end) + if (!res->flags && !res->start && !res->end) { + release_resource(res); continue; + } list_move_tail(&window->node, &bridge->windows); @@ -1527,6 +1528,7 @@ void set_pcie_port_type(struct pci_dev *pdev) { int pos; u16 reg16; + u32 reg32; int type; struct pci_dev *parent; @@ -1540,6 +1542,10 @@ void set_pcie_port_type(struct pci_dev *pdev) pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap); pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap); + pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, ®32); + if (reg32 & PCI_EXP_LNKCAP_DLLLARC) + pdev->link_active_reporting = 1; + parent = pci_upstream_bridge(pdev); if (!parent) return; @@ -2546,6 +2552,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) dma_set_max_seg_size(&dev->dev, 65536); dma_set_seg_boundary(&dev->dev, 0xffffffff); + pcie_failed_link_retrain(dev); + /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index f4e2a88729fd..f83cc7805297 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -33,6 +33,99 @@ #include <linux/switchtec.h> #include "pci.h" +/* + * Retrain the link of a downstream PCIe port by hand if necessary. + * + * This is needed at least where a downstream port of the ASMedia ASM2824 + * Gen 3 switch is wired to the upstream port of the Pericom PI7C9X2G304 + * Gen 2 switch, and observed with the Delock Riser Card PCI Express x1 > + * 2 x PCIe x1 device, P/N 41433, plugged into the SiFive HiFive Unmatched + * board. + * + * In such a configuration the switches are supposed to negotiate the link + * speed of preferably 5.0GT/s, falling back to 2.5GT/s. However the link + * continues switching between the two speeds indefinitely and the data + * link layer never reaches the active state, with link training reported + * repeatedly active ~84% of the time. Forcing the target link speed to + * 2.5GT/s with the upstream ASM2824 device makes the two switches talk to + * each other correctly however. And more interestingly retraining with a + * higher target link speed afterwards lets the two successfully negotiate + * 5.0GT/s. + * + * With the ASM2824 we can rely on the otherwise optional Data Link Layer + * Link Active status bit and in the failed link training scenario it will + * be off along with the Link Bandwidth Management Status indicating that + * hardware has changed the link speed or width in an attempt to correct + * unreliable link operation. For a port that has been left unconnected + * both bits will be clear. So use this information to detect the problem + * rather than polling the Link Training bit and watching out for flips or + * at least the active status. 
+ * + * Since the exact nature of the problem isn't known and in principle this + * could trigger where an ASM2824 device is downstream rather upstream, + * apply this erratum workaround to any downstream ports as long as they + * support Link Active reporting and have the Link Control 2 register. + * Restrict the speed to 2.5GT/s then with the Target Link Speed field, + * request a retrain and wait 200ms for the data link to go up. + * + * If this turns out successful and we know by the Vendor:Device ID it is + * safe to do so, then lift the restriction, letting the devices negotiate + * a higher speed. Also check for a similar 2.5GT/s speed restriction the + * firmware may have already arranged and lift it with ports that already + * report their data link being up. + * + * Return TRUE if the link has been successfully retrained, otherwise FALSE. + */ +bool pcie_failed_link_retrain(struct pci_dev *dev) +{ + static const struct pci_device_id ids[] = { + { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */ + {} + }; + u16 lnksta, lnkctl2; + + if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) || + !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) + return false; + + pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == + PCI_EXP_LNKSTA_LBMS) { + pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); + + lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; + lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); + + if (pcie_retrain_link(dev, false)) { + pci_info(dev, "retraining failed\n"); + return false; + } + + pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); + } + + if ((lnksta & PCI_EXP_LNKSTA_DLLLA) && + (lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT && + pci_match_id(ids, dev)) { + u32 lnkcap; + + pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n"); + pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); + lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; + lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; + pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); + + if (pcie_retrain_link(dev, false)) { + pci_info(dev, "retraining failed\n"); + return false; + } + } + + return true; +} + static ktime_t fixup_debug_start(struct pci_dev *dev, void (*fn)(struct pci_dev *dev)) { @@ -2420,9 +2513,9 @@ static void quirk_enable_clear_retrain_link(struct pci_dev *dev) dev->clear_retrain_link = 1; pci_info(dev, "Enable PCIe Retrain Link quirk\n"); } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link); static void fixup_rev1_53c810(struct pci_dev *dev) { @@ -3993,10 +4086,11 @@ static int nvme_disable_and_flr(struct pci_dev *dev, bool probe) } /* - * Intel DC P3700 NVMe controller will timeout waiting for ready status - * to change after NVMe enable if the driver starts interacting with the - * device too soon after FLR. 
A 250ms delay after FLR has heuristically - * proven to produce reliably working results for device assignment cases. + * Some NVMe controllers such as Intel DC P3700 and Solidigm P44 Pro will + * timeout waiting for ready status to change after NVMe enable if the driver + * starts interacting with the device too soon after FLR. A 250ms delay after + * FLR has heuristically proven to produce reliably working results for device + * assignment cases. */ static int delay_250ms_after_flr(struct pci_dev *dev, bool probe) { @@ -4083,6 +4177,7 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { { PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr }, { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr }, { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr }, + { PCI_VENDOR_ID_SOLIDIGM, 0xf1ac, delay_250ms_after_flr }, { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, reset_chelsio_generic_dev }, { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF, @@ -4174,6 +4269,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9235, + quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, |
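
The pci-imx6.c hunk above saves the MSI Flags word of the MSI capability before suspend and writes it back (under the DBI read-only write enable) on resume, since the setting otherwise does not survive the controller power-down. A minimal userspace sketch of that save/restore pattern; the "DBI" below is a fake register array and dbi_read16()/dbi_write16() are invented stand-ins for dw_pcie_readw_dbi()/dw_pcie_writew_dbi(), not real APIs:

/*
 * Illustrative only: models imx6_pcie_msi_save_restore(). Offsets and
 * accessors are made up; only the save/restore flow matches the driver.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_MSI_FLAGS		2	/* flags word offset within the MSI cap */
#define PCI_MSI_FLAGS_ENABLE	0x0001

static uint16_t dbi[256];				/* fake DBI config space */
static uint16_t dbi_read16(unsigned int off)		{ return dbi[off]; }
static void dbi_write16(unsigned int off, uint16_t v)	{ dbi[off] = v; }

struct fake_imx6_pcie {
	unsigned int msi_cap;	/* offset of the MSI capability (made up) */
	uint16_t msi_ctrl;	/* saved copy of the MSI Flags word */
};

static void msi_save_restore(struct fake_imx6_pcie *p, bool save)
{
	unsigned int off = p->msi_cap + PCI_MSI_FLAGS;

	if (save)
		p->msi_ctrl = dbi_read16(off);
	else
		dbi_write16(off, p->msi_ctrl);	/* DBI RO write enable elided */
}

int main(void)
{
	struct fake_imx6_pcie p = { .msi_cap = 0x50 };

	dbi[p.msi_cap + PCI_MSI_FLAGS] = PCI_MSI_FLAGS_ENABLE;
	msi_save_restore(&p, true);			/* suspend_noirq path */
	dbi[p.msi_cap + PCI_MSI_FLAGS] = 0;		/* lost across suspend */
	msi_save_restore(&p, false);			/* resume_noirq path */
	printf("MSI flags after resume: %#06x\n",
	       dbi_read16(p.msi_cap + PCI_MSI_FLAGS));
	return 0;
}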
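
dw_pcie_print_link_status(), split out of dw_pcie_wait_for_link() above so the host-init path can also print it when the link is already up at probe time, only decodes the Current Link Speed and Negotiated Link Width fields of the Link Status register. A standalone sketch of that decode, using the standard LNKSTA field masks:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKSTA_CLS	0x000f	/* Current Link Speed, 1 = 2.5GT/s */
#define PCI_EXP_LNKSTA_NLW	0x03f0	/* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_SHIFT 4

/* Same decode as dw_pcie_print_link_status(), minus the dev_info(). */
static void print_link_status(uint16_t lnksta)
{
	unsigned int speed = lnksta & PCI_EXP_LNKSTA_CLS;
	unsigned int width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
			     PCI_EXP_LNKSTA_NLW_SHIFT;

	printf("PCIe Gen.%u x%u link up\n", speed, width);
}

int main(void)
{
	print_link_status(0x2043);	/* example value: DLLLA set, Gen3, x4 */
	return 0;
}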
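
The pciehp_ctrl.c hunks mostly reword the attention-button messages, but the behaviour behind them is a small state machine: a press in the ON or OFF state arms a blinking state plus a 5-second delayed power change, and a second press inside that window flips straight back. A toy model of just those transitions (the real driver also drives the power/attention indicators and the delayed work):

#include <stdio.h>

enum slot_state { OFF_STATE, ON_STATE, BLINKINGON_STATE, BLINKINGOFF_STATE };

static const char *name[] = { "OFF", "ON", "BLINKINGON", "BLINKINGOFF" };

/* One attention-button press: return the next state. */
static enum slot_state button_press(enum slot_state state)
{
	switch (state) {
	case ON_STATE:		/* will power off in 5 sec */
		return BLINKINGOFF_STATE;
	case OFF_STATE:		/* will power on in 5 sec */
		return BLINKINGON_STATE;
	case BLINKINGOFF_STATE:	/* second press: cancel request to power off */
		return ON_STATE;
	case BLINKINGON_STATE:	/* second press: cancel request to power on */
		return OFF_STATE;
	}
	return state;
}

int main(void)
{
	enum slot_state s = ON_STATE;

	for (int i = 0; i < 3; i++) {
		s = button_press(s);
		printf("press %d -> %s\n", i + 1, name[s]);
	}
	return 0;
}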
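
pci_dev_wait() in pci.c is restructured above into a single loop that doubles its polling delay and, once the 1-second PCI_RESET_WAIT budget is exceeded on a device behind a PCIe bridge, gives pcie_failed_link_retrain() exactly one chance to bring the link back, restarting the wait if that works. A compact model of that control flow; device_ready() and try_link_retrain() are invented placeholders for the config-space read and the retrain quirk, and the sleep is only accounted for rather than performed:

#include <stdbool.h>
#include <stdio.h>

#define PCI_RESET_WAIT		 1000	/* ms */
#define PCIE_RESET_READY_POLL_MS 60000	/* ms */

/* Invented stand-ins: device becomes ready after ~3 s of accumulated wait. */
static bool device_ready(int elapsed_ms)	{ return elapsed_ms > 3000; }
static bool try_link_retrain(void)		{ return true; }

static int dev_wait(int timeout_ms)
{
	int delay = 1, elapsed = 0;
	bool retrain = true;		/* one retrain attempt allowed */

	for (;;) {
		if (device_ready(elapsed))
			return 0;

		if (delay > timeout_ms) {
			printf("not ready %dms; giving up\n", delay - 1);
			return -1;
		}

		if (delay > PCI_RESET_WAIT) {
			if (retrain) {
				retrain = false;
				if (try_link_retrain()) {
					delay = 1;	/* link back: start over */
					continue;
				}
			}
			printf("not ready %dms; waiting\n", delay - 1);
		}

		elapsed += delay;	/* msleep(delay) in the kernel */
		delay *= 2;
	}
}

int main(void)
{
	return dev_wait(PCIE_RESET_READY_POLL_MS) ? 1 : 0;
}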
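
The new pcie_retrain_link()/pcie_wait_for_link_status() helpers first wait for the chosen status bit (Link Training clear, or Data Link Layer Link Active set) so that freshly written Link Control parameters are actually used, then set Retrain Link, clear it again on devices with the clear_retrain_link erratum, and poll the same bit against the 1-second PCIE_LINK_RETRAIN_TIMEOUT_MS deadline. A userspace approximation of that flow; read_lnksta()/write_lnkctl() are fake accessors over a simulated device, standing in for pcie_capability_read_word()/pcie_capability_write_word():

/*
 * Illustrative sketch of the retrain flow added to drivers/pci/pci.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define LNKSTA_LT		0x0800	/* PCI_EXP_LNKSTA_LT */
#define LNKSTA_DLLLA		0x2000	/* PCI_EXP_LNKSTA_DLLLA */
#define LNKCTL_RL		0x0020	/* PCI_EXP_LNKCTL_RL */
#define RETRAIN_TIMEOUT_MS	1000	/* PCIE_LINK_RETRAIN_TIMEOUT_MS */

/* Fake device: training "completes" after a handful of polls. */
static int polls;
static uint16_t lnkctl;

static uint16_t read_lnksta(void)
{
	return ++polls < 5 ? LNKSTA_LT : LNKSTA_DLLLA;
}

static void write_lnkctl(uint16_t val)
{
	lnkctl = val;
}

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Poll until (LNKSTA & mask) == match, or the 1 s deadline expires. */
static int wait_for_link_status(uint16_t mask, uint16_t match)
{
	struct timespec ms = { .tv_nsec = 1000000 };
	long deadline = now_ms() + RETRAIN_TIMEOUT_MS;

	do {
		if ((read_lnksta() & mask) == match)
			return 0;
		nanosleep(&ms, NULL);
	} while (now_ms() < deadline);

	return -1;			/* -ETIMEDOUT in the kernel */
}

static int retrain_link(bool use_lt, bool clear_retrain_link_quirk)
{
	uint16_t mask = use_lt ? LNKSTA_LT : LNKSTA_DLLLA;
	uint16_t match = use_lt ? 0 : LNKSTA_DLLLA;

	/* Don't race an in-progress training round (PCIe r6.0.1 sec 7.5.3.7) */
	if (wait_for_link_status(mask, match))
		return -1;

	write_lnkctl(lnkctl | LNKCTL_RL);
	if (clear_retrain_link_quirk)		/* Pericom erratum workaround */
		write_lnkctl(lnkctl & ~LNKCTL_RL);

	return wait_for_link_status(mask, match);
}

int main(void)
{
	printf("retrain: %s\n", retrain_link(false, false) ? "timed out" : "ok");
	return 0;
}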
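
pcie_failed_link_retrain() in quirks.c keys off two Link Status bits: Link Bandwidth Management Status set while Data Link Layer Link Active is clear means the port downtrained without ever reaching DL_Active, so the Target Link Speed is clamped to 2.5GT/s and the link retrained; once the link is up at 2.5GT/s on a device matching the ASM2824 ID, the clamp is lifted back to the speeds advertised in Link Capabilities and the link retrained again. The decision logic reduced to plain register arithmetic; retrain and config accesses are elided and the values in main() are made up for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LNKSTA_LBMS		0x4000	/* Link Bandwidth Management Status */
#define LNKSTA_DLLLA		0x2000	/* Data Link Layer Link Active */
#define LNKCTL2_TLS		0x000f	/* Target Link Speed */
#define LNKCTL2_TLS_2_5GT	0x0001
#define LNKCAP_SLS		0x0000000f /* Supported Link Speeds (max) */

static void failed_link_retrain(uint16_t lnksta, uint16_t lnkctl2,
				uint32_t lnkcap, bool asm2824)
{
	if ((lnksta & (LNKSTA_LBMS | LNKSTA_DLLLA)) == LNKSTA_LBMS) {
		/* Downtrained, never reached DL_Active: clamp and retrain */
		lnkctl2 = (lnkctl2 & ~LNKCTL2_TLS) | LNKCTL2_TLS_2_5GT;
		printf("clamp to 2.5GT/s, retrain (LNKCTL2=%#06x)\n",
		       (unsigned int)lnkctl2);
		lnksta |= LNKSTA_DLLLA;		/* assume the retrain worked */
	}

	if ((lnksta & LNKSTA_DLLLA) &&
	    (lnkctl2 & LNKCTL2_TLS) == LNKCTL2_TLS_2_5GT && asm2824) {
		/* Known-good device: lift the clamp back to LNKCAP speeds */
		lnkctl2 = (lnkctl2 & ~LNKCTL2_TLS) | (lnkcap & LNKCAP_SLS);
		printf("lift restriction, retrain (LNKCTL2=%#06x)\n",
		       (unsigned int)lnkctl2);
	}
}

int main(void)
{
	/* Downtrained Gen3-capable downstream port of an ASM2824 */
	failed_link_retrain(LNKSTA_LBMS, 0x0003, 0x3, true);
	return 0;
}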