linux/drivers/cxl/core/ras_rch.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2025 AMD Corporation. All rights reserved. */
#include <linux/types.h>
#include <linux/aer.h>
#include "cxl.h"
#include "core.h"
#include "cxlmem.h"
void cxl_dport_map_rch_aer(struct cxl_dport *dport)
{
        resource_size_t aer_phys;
        struct device *host;
        u16 aer_cap;

        aer_cap = cxl_rcrb_to_aer(dport->dport_dev, dport->rcrb.base);
        if (aer_cap) {
                host = dport->reg_map.host;
                aer_phys = aer_cap + dport->rcrb.base;
                dport->regs.dport_aer =
                        devm_cxl_iomap_block(host, aer_phys,
                                             sizeof(struct aer_capability_regs));
        }
}
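
/*
 * Clear the correctable, non-fatal, and fatal error reporting enables in
 * the AER root command register so the RCH root port does not signal
 * interrupts for these errors.
 */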
void cxl_disable_rch_root_ints(struct cxl_dport *dport)
{
        void __iomem *aer_base = dport->regs.dport_aer;
        u32 aer_cmd_mask, aer_cmd;

        if (!aer_base)
                return;

        /*
         * Disable RCH root port command interrupts.
         * CXL 3.0 12.2.1.1 - RCH Downstream Port-detected Errors
         *
         * This sequence may not be necessary. The CXL spec states that
         * disabling the root command register's interrupts is required,
         * but the PCI spec shows these are disabled by default on reset.
         */
        aer_cmd_mask = (PCI_ERR_ROOT_CMD_COR_EN |
                        PCI_ERR_ROOT_CMD_NONFATAL_EN |
                        PCI_ERR_ROOT_CMD_FATAL_EN);
        aer_cmd = readl(aer_base + PCI_ERR_ROOT_COMMAND);
        aer_cmd &= ~aer_cmd_mask;
        writel(aer_cmd, aer_base + PCI_ERR_ROOT_COMMAND);
}

/*
 * Copy the AER capability registers using 32-bit read accesses.
 * This is necessary because the RCRB AER capability is MMIO mapped. Clear
 * the status after copying.
 *
 * @aer_base: base address of the AER capability block in the RCRB
 * @aer_regs: destination for copying the AER capability
 */
static bool cxl_rch_get_aer_info(void __iomem *aer_base,
                                 struct aer_capability_regs *aer_regs)
{
        int read_cnt = sizeof(struct aer_capability_regs) / sizeof(u32);
        u32 *aer_regs_buf = (u32 *)aer_regs;
        int n;

        if (!aer_base)
                return false;
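
        /*
         * struct aer_capability_regs mirrors the AER capability layout
         * with no padding, so copying sizeof()/sizeof(u32) words through
         * a u32 pointer reproduces the whole capability image.
         */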
        /* Use readl() to guarantee 32-bit accesses */
        for (n = 0; n < read_cnt; n++)
                aer_regs_buf[n] = readl(aer_base + n * sizeof(u32));
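
        /* AER status bits are RW1C: writing back the captured values clears them */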
        writel(aer_regs->uncor_status, aer_base + PCI_ERR_UNCOR_STATUS);
        writel(aer_regs->cor_status, aer_base + PCI_ERR_COR_STATUS);

        return true;
}

/* Get AER severity. Return false if there is no unmasked error. */
static bool cxl_rch_get_aer_severity(struct aer_capability_regs *aer_regs,
                                     int *severity)
{
        if (aer_regs->uncor_status & ~aer_regs->uncor_mask) {
                if (aer_regs->uncor_status & PCI_ERR_ROOT_FATAL_RCV)
                        *severity = AER_FATAL;
                else
                        *severity = AER_NONFATAL;
                return true;
        }

        if (aer_regs->cor_status & ~aer_regs->cor_mask) {
                *severity = AER_CORRECTABLE;
                return true;
        }

        return false;
}
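
/*
 * Handle errors detected by an RCH downstream port: find the dport the
 * endpoint is connected through, snapshot and clear the RCRB AER
 * registers, derive a severity, log the error, and hand off to the CXL
 * RAS handlers.
 */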
void cxl_handle_rdport_errors(struct cxl_dev_state *cxlds)
{
        struct pci_dev *pdev = to_pci_dev(cxlds->dev);
        struct aer_capability_regs aer_regs;
        struct cxl_dport *dport;
        int severity;

        struct cxl_port *port __free(put_cxl_port) =
                cxl_pci_find_port(pdev, &dport);
        if (!port)
                return;

        if (!cxl_rch_get_aer_info(dport->regs.dport_aer, &aer_regs))
                return;

        if (!cxl_rch_get_aer_severity(&aer_regs, &severity))
                return;

        pci_print_aer(pdev, severity, &aer_regs);

        if (severity == AER_CORRECTABLE)
                cxl_handle_cor_ras(&cxlds->cxlmd->dev, dport->regs.ras);
        else
                cxl_handle_ras(&cxlds->cxlmd->dev, dport->regs.ras);
}
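
/*
 * Usage sketch (illustrative, not part of this file): callers are
 * expected to route PCI error events here when the memory device sits
 * behind a Restricted CXL Host. Modeled on the upstream CXL error
 * handlers; the exact caller names and checks may differ by kernel
 * version:
 *
 *      void cxl_cor_error_detected(struct pci_dev *pdev)
 *      {
 *              struct cxl_dev_state *cxlds = pci_get_drvdata(pdev);
 *
 *              if (cxlds->rcd)
 *                      cxl_handle_rdport_errors(cxlds);
 *              ...
 *      }
 */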