From 5dc7b745eb04e799b95e7e8d17868970a65621df Mon Sep 17 00:00:00 2001
From: David Gibson <dgibson@redhat.com>
Date: Thu, 30 May 2019 04:37:28 +0100
Subject: [PATCH 7/8] spapr: Support NVIDIA V100 GPU with NVLink2

RH-Author: David Gibson <dgibson@redhat.com>
Message-id: <20190530043728.32575-7-dgibson@redhat.com>
Patchwork-id: 88423
O-Subject: [RHEL-8.1 qemu-kvm PATCH 6/6] spapr: Support NVIDIA V100 GPU with NVLink2
Bugzilla: 1710662
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
RH-Acked-by: Auger Eric <eric.auger@redhat.com>
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>

From: Alexey Kardashevskiy <aik@ozlabs.ru>

NVIDIA V100 GPUs have on-board RAM which is mapped into the host memory
space and accessible as normal RAM via an NVLink bus. The VFIO-PCI driver
implements special regions for such GPUs and emulates an NVLink bridge.
NVLink2-enabled POWER9 CPUs also provide address translation services
which include an ATS shootdown (ATSD) register exported via the NVLink
bridge device.

This adds a quirk to VFIO to map the GPU memory and create an MR;
the new MR is stored in a PCI device as a QOM link. The sPAPR PCI uses
this to get the MR and map it to the system address space.
Another quirk does the same for ATSD.

This adds additional steps to sPAPR PHB setup:

1. Search for specific GPUs and NPUs, collect findings in
sPAPRPHBState::nvgpus, manage system address space mappings;

2. Add device-specific properties such as "ibm,npu", "ibm,gpu",
"memory-block", "link-speed" to advertise the NVLink2 function to
the guest;

3. Add "mmio-atsd" to vPHB to advertise the ATSD capability;

4. Add new memory blocks (with extra "linux,memory-usable" to prevent
the guest OS from accessing the new memory until it is onlined) and
npuphb# nodes representing an NPU unit for every vPHB as the GPU driver
uses it for link discovery.

This allocates space for GPU RAM and ATSD like we do for MMIOs by
adding 2 new parameters to the phb_placement() hook. Older machine types
set these to zero.

This puts new memory nodes in a separate NUMA node as the GPU RAM
needs to be configured equally distant from any other node in the system.
Unlike the host setup, which assigns numa ids from 255 downwards, this
adds new NUMA nodes after the user-configured nodes, or from 1 if none
were configured.

This adds a requirement similar to EEH - one IOMMU group per vPHB.
The reason for this is that ATSD registers belong to a physical NPU
so they cannot invalidate translations on GPUs attached to another NPU.
It is guaranteed by the host platform as it does not mix NVLink bridges
or GPUs from different NPUs in the same IOMMU group. If more than one
IOMMU group is detected on a vPHB, this disables ATSD support for that
vPHB and prints a warning.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: for vfio portions]
Acked-by: Alex Williamson <alex.williamson@redhat.com>
Message-Id: <20190312082103.130561-1-aik@ozlabs.ru>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
(cherry picked from commit ec132efaa81f09861a3bd6afad94827e74543b3f)

Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>

Conflicts:
	hw/ppc/spapr.c
	hw/ppc/spapr_pci.c
	hw/vfio/trace-events
	include/hw/pci-host/spapr.h
	include/hw/ppc/spapr.h

Conflicts come for several reasons:
1) Some contextual conflicts
2) Downstream tree does not have PHB hotplug, so upstream changes to
that code need to be dropped, we also need to adapt some hunks to
apply to the code as it existed before PHB hotplug was added
3) Upstream had a mass renaming of spapr types to give more
consistent CamelCasing. We don't have that change downstream, so
we need to adjust accordingly.
4) We add an explicit include of qemu/units.h, since it's not indirectly
included downstream (and it's messy to backport the patch which adds
that)

Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1710662

Signed-off-by: David Gibson <dgibson@redhat.com>
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
---
 hw/ppc/Makefile.objs        |   2 +-
 hw/ppc/spapr.c              |  31 ++-
 hw/ppc/spapr_pci.c          |  21 ++-
 hw/ppc/spapr_pci_nvlink2.c  | 450 ++++++++++++++++++++++++++++++++++++++++++++
 hw/vfio/pci-quirks.c        | 131 +++++++++++++
 hw/vfio/pci.c               |  14 ++
 hw/vfio/pci.h               |   2 +
 hw/vfio/trace-events        |   4 +
 include/hw/pci-host/spapr.h |  46 +++++
 include/hw/ppc/spapr.h      |   5 +-
 10 files changed, 697 insertions(+), 9 deletions(-)
 create mode 100644 hw/ppc/spapr_pci_nvlink2.c

diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index a46a989..d07e999 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -8,7 +8,7 @@ obj-$(CONFIG_PSERIES) += spapr_cpu_core.o spapr_ovec.o
 # IBM PowerNV
 obj-$(CONFIG_POWERNV) += pnv.o pnv_xscom.o pnv_core.o pnv_lpc.o pnv_psi.o pnv_occ.o pnv_bmc.o
 ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
-obj-y += spapr_pci_vfio.o
+obj-y += spapr_pci_vfio.o spapr_pci_nvlink2.o
 endif
 obj-$(CONFIG_PSERIES) += spapr_rtas_ddw.o
 # PowerPC 4xx boards
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index b57c0be..c72aad1 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -910,12 +910,13 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
         0, cpu_to_be32(SPAPR_MEMORY_BLOCK_SIZE),
         cpu_to_be32(max_cpus / smp_threads),
     };
+    uint32_t maxdomain = cpu_to_be32(spapr->gpu_numa_id > 1 ? 1 : 0);
     uint32_t maxdomains[] = {
         cpu_to_be32(4),
-        cpu_to_be32(0),
-        cpu_to_be32(0),
-        cpu_to_be32(0),
-        cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1),
+        maxdomain,
+        maxdomain,
+        maxdomain,
+        cpu_to_be32(spapr->gpu_numa_id),
     };
 
     _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
@@ -1515,6 +1516,16 @@ static void spapr_machine_reset(void)
         ppc_set_compat(first_ppc_cpu, spapr->max_compat_pvr, &error_fatal);
     }
 
+    /*
+     * NVLink2-connected GPU RAM needs to be placed on a separate NUMA node.
+     * We assign a new numa ID per GPU in spapr_pci_collect_nvgpu() which is
+     * called from vPHB reset handler so we initialize the counter here.
+     * If no NUMA is configured from the QEMU side, we start from 1 as GPU RAM
+     * must be equally distant from any other node.
+     * The final value of spapr->gpu_numa_id is going to be written to
+     * max-associativity-domains in spapr_build_fdt().
+     */
+    spapr->gpu_numa_id = MAX(1, nb_numa_nodes);
     qemu_devices_reset();
 
     /* DRC reset may cause a device to be unplugged. This will cause troubles
@@ -3601,7 +3612,8 @@ static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
 static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
                                 uint64_t *buid, hwaddr *pio,
                                 hwaddr *mmio32, hwaddr *mmio64,
-                                unsigned n_dma, uint32_t *liobns, Error **errp)
+                                unsigned n_dma, uint32_t *liobns,
+                                hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
 {
     /*
      * New-style PHB window placement.
@@ -3648,6 +3660,9 @@ static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
     *pio = SPAPR_PCI_BASE + index * SPAPR_PCI_IO_WIN_SIZE;
     *mmio32 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM32_WIN_SIZE;
     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
+
+    *nv2gpa = SPAPR_PCI_NV2RAM64_WIN_BASE + index * SPAPR_PCI_NV2RAM64_WIN_SIZE;
+    *nv2atsd = SPAPR_PCI_NV2ATSD_WIN_BASE + index * SPAPR_PCI_NV2ATSD_WIN_SIZE;
 }
 
 static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
@@ -4133,7 +4148,8 @@ DEFINE_SPAPR_MACHINE(2_8, "2.8", false);
 static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
                               uint64_t *buid, hwaddr *pio,
                               hwaddr *mmio32, hwaddr *mmio64,
-                              unsigned n_dma, uint32_t *liobns, Error **errp)
+                              unsigned n_dma, uint32_t *liobns,
+                              hwaddr *nv2gpa, hwaddr *nv2atsd, Error **errp)
 {
     /* Legacy PHB placement for pseries-2.7 and earlier machine types */
     const uint64_t base_buid = 0x800000020000000ULL;
@@ -4177,6 +4193,9 @@ static void phb_placement_2_7(sPAPRMachineState *spapr, uint32_t index,
      * fallback behaviour of automatically splitting a large "32-bit"
      * window into contiguous 32-bit and 64-bit windows
      */
+
+    *nv2gpa = 0;
+    *nv2atsd = 0;
 }
 
 #if 0 /* Disabled for Red Hat Enterprise Linux */
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index f936ce6..d82f957 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -1326,6 +1326,8 @@ static void spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
     if (sphb->pcie_ecs && pci_is_express(dev)) {
         _FDT(fdt_setprop_cell(fdt, offset, "ibm,pci-config-space-type", 0x1));
     }
+
+    spapr_phb_nvgpu_populate_pcidev_dt(dev, fdt, offset, sphb);
 }
 
 /* create OF node for pci device and required OF DT properties */
@@ -1559,7 +1561,9 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
     smc->phb_placement(spapr, sphb->index,
                        &sphb->buid, &sphb->io_win_addr,
                        &sphb->mem_win_addr, &sphb->mem64_win_addr,
-                       windows_supported, sphb->dma_liobn, &local_err);
+                       windows_supported, sphb->dma_liobn,
+                       &sphb->nv2_gpa_win_addr, &sphb->nv2_atsd_win_addr,
+                       &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         return;
@@ -1764,8 +1768,14 @@ void spapr_phb_dma_reset(sPAPRPHBState *sphb)
 static void spapr_phb_reset(DeviceState *qdev)
 {
     sPAPRPHBState *sphb = SPAPR_PCI_HOST_BRIDGE(qdev);
+    Error *errp = NULL;
 
     spapr_phb_dma_reset(sphb);
+    spapr_phb_nvgpu_free(sphb);
+    spapr_phb_nvgpu_setup(sphb, &errp);
+    if (errp) {
+        error_report_err(errp);
+    }
 
     /* Reset the IOMMU state */
     object_child_foreach(OBJECT(qdev), spapr_phb_children_reset, NULL);
@@ -1798,6 +1808,8 @@ static Property spapr_phb_properties[] = {
                      pre_2_8_migration, false),
     DEFINE_PROP_BOOL("pcie-extended-configuration-space", sPAPRPHBState,
                      pcie_ecs, true),
+    DEFINE_PROP_UINT64("gpa", sPAPRPHBState, nv2_gpa_win_addr, 0),
+    DEFINE_PROP_UINT64("atsd", sPAPRPHBState, nv2_atsd_win_addr, 0),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -2089,6 +2101,7 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
     sPAPRTCETable *tcet;
     PCIBus *bus = PCI_HOST_BRIDGE(phb)->bus;
     sPAPRFDT s_fdt;
+    Error *errp = NULL;
 
     /* Start populating the FDT */
     nodename = g_strdup_printf("pci@%" PRIx64, phb->buid);
@@ -2170,6 +2183,12 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
         return ret;
     }
 
+    spapr_phb_nvgpu_populate_dt(phb, fdt, bus_off, &errp);
+    if (errp) {
+        error_report_err(errp);
+    }
+    spapr_phb_nvgpu_ram_populate_dt(phb, fdt);
+
     return 0;
 }
 
diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
new file mode 100644
index 0000000..60b14d8
--- /dev/null
+++ b/hw/ppc/spapr_pci_nvlink2.c
@@ -0,0 +1,450 @@
+/*
+ * QEMU sPAPR PCI for NVLink2 pass through
+ *
+ * Copyright (c) 2019 Alexey Kardashevskiy, IBM Corporation.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu-common.h"
+#include "hw/pci/pci.h"
+#include "hw/pci-host/spapr.h"
+#include "qemu/error-report.h"
+#include "hw/ppc/fdt.h"
+#include "hw/pci/pci_bridge.h"
+
+#define PHANDLE_PCIDEV(phb, pdev)    (0x12000000 | \
+                                     (((phb)->index) << 16) | ((pdev)->devfn))
+#define PHANDLE_GPURAM(phb, n)       (0x110000FF | ((n) << 8) | \
+                                     (((phb)->index) << 16))
+#define PHANDLE_NVLINK(phb, gn, nn)  (0x00130000 | (((phb)->index) << 8) | \
+                                     ((gn) << 4) | (nn))
+
+#define SPAPR_GPU_NUMA_ID            (cpu_to_be32(1))
+
+struct spapr_phb_pci_nvgpu_config {
+    uint64_t nv2_ram_current;
+    uint64_t nv2_atsd_current;
+    int num; /* number of non empty (i.e. tgt!=0) entries in slots[] */
+    struct spapr_phb_pci_nvgpu_slot {
+        uint64_t tgt;
+        uint64_t gpa;
+        unsigned numa_id;
+        PCIDevice *gpdev;
+        int linknum;
+        struct {
+            uint64_t atsd_gpa;
+            PCIDevice *npdev;
+            uint32_t link_speed;
+        } links[NVGPU_MAX_LINKS];
+    } slots[NVGPU_MAX_NUM];
+    Error *errp;
+};
+
+static struct spapr_phb_pci_nvgpu_slot *
+spapr_nvgpu_get_slot(struct spapr_phb_pci_nvgpu_config *nvgpus, uint64_t tgt)
+{
+    int i;
+
+    /* Search for partially collected "slot" */
+    for (i = 0; i < nvgpus->num; ++i) {
+        if (nvgpus->slots[i].tgt == tgt) {
+            return &nvgpus->slots[i];
+        }
+    }
+
+    if (nvgpus->num == ARRAY_SIZE(nvgpus->slots)) {
+        return NULL;
+    }
+
+    i = nvgpus->num;
+    nvgpus->slots[i].tgt = tgt;
+    ++nvgpus->num;
+
+    return &nvgpus->slots[i];
+}
+
+static void spapr_pci_collect_nvgpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
+                                    PCIDevice *pdev, uint64_t tgt,
+                                    MemoryRegion *mr, Error **errp)
+{
+    MachineState *machine = MACHINE(qdev_get_machine());
+    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
+    struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
+
+    if (!nvslot) {
+        error_setg(errp, "Found too many GPUs per vPHB");
+        return;
+    }
+    g_assert(!nvslot->gpdev);
+    nvslot->gpdev = pdev;
+
+    nvslot->gpa = nvgpus->nv2_ram_current;
+    nvgpus->nv2_ram_current += memory_region_size(mr);
+    nvslot->numa_id = spapr->gpu_numa_id;
+    ++spapr->gpu_numa_id;
+}
+
+static void spapr_pci_collect_nvnpu(struct spapr_phb_pci_nvgpu_config *nvgpus,
+                                    PCIDevice *pdev, uint64_t tgt,
+                                    MemoryRegion *mr, Error **errp)
+{
+    struct spapr_phb_pci_nvgpu_slot *nvslot = spapr_nvgpu_get_slot(nvgpus, tgt);
+    int j;
+
+    if (!nvslot) {
+        error_setg(errp, "Found too many NVLink bridges per vPHB");
+        return;
+    }
+
+    j = nvslot->linknum;
+    if (j == ARRAY_SIZE(nvslot->links)) {
+        error_setg(errp, "Found too many NVLink bridges per GPU");
+        return;
+    }
+    ++nvslot->linknum;
+
+    g_assert(!nvslot->links[j].npdev);
+    nvslot->links[j].npdev = pdev;
+    nvslot->links[j].atsd_gpa = nvgpus->nv2_atsd_current;
+    nvgpus->nv2_atsd_current += memory_region_size(mr);
+    nvslot->links[j].link_speed =
+        object_property_get_uint(OBJECT(pdev), "nvlink2-link-speed", NULL);
+}
+
+static void spapr_phb_pci_collect_nvgpu(PCIBus *bus, PCIDevice *pdev,
+                                        void *opaque)
+{
+    PCIBus *sec_bus;
+    Object *po = OBJECT(pdev);
+    uint64_t tgt = object_property_get_uint(po, "nvlink2-tgt", NULL);
+
+    if (tgt) {
+        Error *local_err = NULL;
+        struct spapr_phb_pci_nvgpu_config *nvgpus = opaque;
+        Object *mr_gpu = object_property_get_link(po, "nvlink2-mr[0]", NULL);
+        Object *mr_npu = object_property_get_link(po, "nvlink2-atsd-mr[0]",
+                                                  NULL);
+
+        g_assert(mr_gpu || mr_npu);
+        if (mr_gpu) {
+            spapr_pci_collect_nvgpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_gpu),
+                                    &local_err);
+        } else {
+            spapr_pci_collect_nvnpu(nvgpus, pdev, tgt, MEMORY_REGION(mr_npu),
+                                    &local_err);
+        }
+        error_propagate(&nvgpus->errp, local_err);
+    }
+    if ((pci_default_read_config(pdev, PCI_HEADER_TYPE, 1) !=
+         PCI_HEADER_TYPE_BRIDGE)) {
+        return;
+    }
+
+    sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(pdev));
+    if (!sec_bus) {
+        return;
+    }
+
+    pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
+                        spapr_phb_pci_collect_nvgpu, opaque);
+}
+
+void spapr_phb_nvgpu_setup(sPAPRPHBState *sphb, Error **errp)
+{
+    int i, j, valid_gpu_num;
+    PCIBus *bus;
+
+    /* Search for GPUs and NPUs */
+    if (!sphb->nv2_gpa_win_addr || !sphb->nv2_atsd_win_addr) {
+        return;
+    }
+
+    sphb->nvgpus = g_new0(struct spapr_phb_pci_nvgpu_config, 1);
+    sphb->nvgpus->nv2_ram_current = sphb->nv2_gpa_win_addr;
+    sphb->nvgpus->nv2_atsd_current = sphb->nv2_atsd_win_addr;
+
+    bus = PCI_HOST_BRIDGE(sphb)->bus;
+    pci_for_each_device(bus, pci_bus_num(bus),
+                        spapr_phb_pci_collect_nvgpu, sphb->nvgpus);
+
+    if (sphb->nvgpus->errp) {
+        error_propagate(errp, sphb->nvgpus->errp);
+        sphb->nvgpus->errp = NULL;
+        goto cleanup_exit;
+    }
+
+    /* Add found GPU RAM and ATSD MRs if found */
+    for (i = 0, valid_gpu_num = 0; i < sphb->nvgpus->num; ++i) {
+        Object *nvmrobj;
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+
+        if (!nvslot->gpdev) {
+            continue;
+        }
+        nvmrobj = object_property_get_link(OBJECT(nvslot->gpdev),
+                                           "nvlink2-mr[0]", NULL);
+        /* ATSD is pointless without GPU RAM MR so skip those */
+        if (!nvmrobj) {
+            continue;
+        }
+
+        ++valid_gpu_num;
+        memory_region_add_subregion(get_system_memory(), nvslot->gpa,
+                                    MEMORY_REGION(nvmrobj));
+
+        for (j = 0; j < nvslot->linknum; ++j) {
+            Object *atsdmrobj;
+
+            atsdmrobj = object_property_get_link(OBJECT(nvslot->links[j].npdev),
+                                                 "nvlink2-atsd-mr[0]", NULL);
+            if (!atsdmrobj) {
+                continue;
+            }
+            memory_region_add_subregion(get_system_memory(),
+                                        nvslot->links[j].atsd_gpa,
+                                        MEMORY_REGION(atsdmrobj));
+        }
+    }
+
+    if (valid_gpu_num) {
+        return;
+    }
+    /* We did not find any interesting GPU */
+cleanup_exit:
+    g_free(sphb->nvgpus);
+    sphb->nvgpus = NULL;
+}
+
+void spapr_phb_nvgpu_free(sPAPRPHBState *sphb)
+{
+    int i, j;
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    for (i = 0; i < sphb->nvgpus->num; ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
+                                                    "nvlink2-mr[0]", NULL);
+
+        if (nv_mrobj) {
+            memory_region_del_subregion(get_system_memory(),
+                                        MEMORY_REGION(nv_mrobj));
+        }
+        for (j = 0; j < nvslot->linknum; ++j) {
+            PCIDevice *npdev = nvslot->links[j].npdev;
+            Object *atsd_mrobj;
+            atsd_mrobj = object_property_get_link(OBJECT(npdev),
+                                                  "nvlink2-atsd-mr[0]", NULL);
+            if (atsd_mrobj) {
+                memory_region_del_subregion(get_system_memory(),
+                                            MEMORY_REGION(atsd_mrobj));
+            }
+        }
+    }
+    g_free(sphb->nvgpus);
+    sphb->nvgpus = NULL;
+}
+
+void spapr_phb_nvgpu_populate_dt(sPAPRPHBState *sphb, void *fdt, int bus_off,
+                                 Error **errp)
+{
+    int i, j, atsdnum = 0;
+    uint64_t atsd[8]; /* The existing limitation of known guests */
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    for (i = 0; (i < sphb->nvgpus->num) && (atsdnum < ARRAY_SIZE(atsd)); ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+
+        if (!nvslot->gpdev) {
+            continue;
+        }
+        for (j = 0; j < nvslot->linknum; ++j) {
+            if (!nvslot->links[j].atsd_gpa) {
+                continue;
+            }
+
+            if (atsdnum == ARRAY_SIZE(atsd)) {
+                error_report("Only %"PRIuPTR" ATSD registers supported",
+                             ARRAY_SIZE(atsd));
+                break;
+            }
+            atsd[atsdnum] = cpu_to_be64(nvslot->links[j].atsd_gpa);
+            ++atsdnum;
+        }
+    }
+
+    if (!atsdnum) {
+        error_setg(errp, "No ATSD registers found");
+        return;
+    }
+
+    if (!spapr_phb_eeh_available(sphb)) {
+        /*
+         * ibm,mmio-atsd contains ATSD registers; these belong to an NPU PHB
+         * which we do not emulate as a separate device. Instead we put
+         * ibm,mmio-atsd to the vPHB with GPU and make sure that we do not
+         * put GPUs from different IOMMU groups to the same vPHB to ensure
+         * that the guest will use ATSDs from the corresponding NPU.
+         */
+        error_setg(errp, "ATSD requires separate vPHB per GPU IOMMU group");
+        return;
+    }
+
+    _FDT((fdt_setprop(fdt, bus_off, "ibm,mmio-atsd", atsd,
+                      atsdnum * sizeof(atsd[0]))));
+}
+
+void spapr_phb_nvgpu_ram_populate_dt(sPAPRPHBState *sphb, void *fdt)
+{
+    int i, j, linkidx, npuoff;
+    char *npuname;
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    npuname = g_strdup_printf("npuphb%d", sphb->index);
+    npuoff = fdt_add_subnode(fdt, 0, npuname);
+    _FDT(npuoff);
+    _FDT(fdt_setprop_cell(fdt, npuoff, "#address-cells", 1));
+    _FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0));
+    /* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */
+    _FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu")));
+    g_free(npuname);
+
+    for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) {
+        for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) {
+            char *linkname = g_strdup_printf("link@%d", linkidx);
+            int off = fdt_add_subnode(fdt, npuoff, linkname);
+
+            _FDT(off);
+            /* _FDT((fdt_setprop_cell(fdt, off, "reg", linkidx))); */
+            _FDT((fdt_setprop_string(fdt, off, "compatible",
+                                     "ibm,npu-link")));
+            _FDT((fdt_setprop_cell(fdt, off, "phandle",
+                                   PHANDLE_NVLINK(sphb, i, j))));
+            _FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx)));
+            g_free(linkname);
+            ++linkidx;
+        }
+    }
+
+    /* Add memory nodes for GPU RAM and mark them unusable */
+    for (i = 0; i < sphb->nvgpus->num; ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+        Object *nv_mrobj = object_property_get_link(OBJECT(nvslot->gpdev),
+                                                    "nvlink2-mr[0]", NULL);
+        uint32_t associativity[] = {
+            cpu_to_be32(0x4),
+            SPAPR_GPU_NUMA_ID,
+            SPAPR_GPU_NUMA_ID,
+            SPAPR_GPU_NUMA_ID,
+            cpu_to_be32(nvslot->numa_id)
+        };
+        uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
+        uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
+        char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
+        int off = fdt_add_subnode(fdt, 0, mem_name);
+
+        _FDT(off);
+        _FDT((fdt_setprop_string(fdt, off, "device_type", "memory")));
+        _FDT((fdt_setprop(fdt, off, "reg", mem_reg, sizeof(mem_reg))));
+        _FDT((fdt_setprop(fdt, off, "ibm,associativity", associativity,
+                          sizeof(associativity))));
+
+        _FDT((fdt_setprop_string(fdt, off, "compatible",
+                                 "ibm,coherent-device-memory")));
+
+        mem_reg[1] = cpu_to_be64(0);
+        _FDT((fdt_setprop(fdt, off, "linux,usable-memory", mem_reg,
+                          sizeof(mem_reg))));
+        _FDT((fdt_setprop_cell(fdt, off, "phandle",
+                               PHANDLE_GPURAM(sphb, i))));
+        g_free(mem_name);
+    }
+
+}
+
+void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
+                                        sPAPRPHBState *sphb)
+{
+    int i, j;
+
+    if (!sphb->nvgpus) {
+        return;
+    }
+
+    for (i = 0; i < sphb->nvgpus->num; ++i) {
+        struct spapr_phb_pci_nvgpu_slot *nvslot = &sphb->nvgpus->slots[i];
+
+        /* Skip "slot" without attached GPU */
+        if (!nvslot->gpdev) {
+            continue;
+        }
+        if (dev == nvslot->gpdev) {
+            uint32_t npus[nvslot->linknum];
+
+            for (j = 0; j < nvslot->linknum; ++j) {
+                PCIDevice *npdev = nvslot->links[j].npdev;
+
+                npus[j] = cpu_to_be32(PHANDLE_PCIDEV(sphb, npdev));
+            }
+            _FDT(fdt_setprop(fdt, offset, "ibm,npu", npus,
+                             j * sizeof(npus[0])));
+            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
+                                   PHANDLE_PCIDEV(sphb, dev))));
+            continue;
+        }
+
+        for (j = 0; j < nvslot->linknum; ++j) {
+            if (dev != nvslot->links[j].npdev) {
+                continue;
+            }
+
+            _FDT((fdt_setprop_cell(fdt, offset, "phandle",
+                                   PHANDLE_PCIDEV(sphb, dev))));
+            _FDT(fdt_setprop_cell(fdt, offset, "ibm,gpu",
+                                  PHANDLE_PCIDEV(sphb, nvslot->gpdev)));
+            _FDT((fdt_setprop_cell(fdt, offset, "ibm,nvlink",
+                                   PHANDLE_NVLINK(sphb, i, j))));
+            /*
+             * If we ever want to emulate GPU RAM at the same location as on
+             * the host - here is the encoding GPA->TGT:
+             *
+             * gta = ((sphb->nv2_gpa >> 42) & 0x1) << 42;
+             * gta |= ((sphb->nv2_gpa >> 45) & 0x3) << 43;
+             * gta |= ((sphb->nv2_gpa >> 49) & 0x3) << 45;
+             * gta |= sphb->nv2_gpa & ((1UL << 43) - 1);
+             */
+            _FDT(fdt_setprop_cell(fdt, offset, "memory-region",
+                                  PHANDLE_GPURAM(sphb, i)));
+            _FDT(fdt_setprop_u64(fdt, offset, "ibm,device-tgt-addr",
+                                 nvslot->tgt));
+            _FDT(fdt_setprop_cell(fdt, offset, "ibm,nvlink-speed",
+                                  nvslot->links[j].link_speed));
+        }
+    }
+}
diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c
index 92457ed..1beedca 100644
--- a/hw/vfio/pci-quirks.c
+++ b/hw/vfio/pci-quirks.c
@@ -1968,3 +1968,134 @@ int vfio_add_virt_caps(VFIOPCIDevice *vdev, Error **errp)
 
     return 0;
 }
+
+static void vfio_pci_nvlink2_get_tgt(Object *obj, Visitor *v,
+                                     const char *name,
+                                     void *opaque, Error **errp)
+{
+    uint64_t tgt = (uintptr_t) opaque;
+    visit_type_uint64(v, name, &tgt, errp);
+}
+
+static void vfio_pci_nvlink2_get_link_speed(Object *obj, Visitor *v,
+                                            const char *name,
+                                            void *opaque, Error **errp)
+{
+    uint32_t link_speed = (uint32_t)(uintptr_t) opaque;
+    visit_type_uint32(v, name, &link_speed, errp);
+}
+
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp)
+{
+    int ret;
+    void *p;
+    struct vfio_region_info *nv2reg = NULL;
+    struct vfio_info_cap_header *hdr;
+    struct vfio_region_info_cap_nvlink2_ssatgt *cap;
+    VFIOQuirk *quirk;
+
+    ret = vfio_get_dev_region_info(&vdev->vbasedev,
+                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
+                                   PCI_VENDOR_ID_NVIDIA,
+                                   VFIO_REGION_SUBTYPE_NVIDIA_NVLINK2_RAM,
+                                   &nv2reg);
+    if (ret) {
+        return ret;
+    }
+
+    hdr = vfio_get_region_info_cap(nv2reg, VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
+    if (!hdr) {
+        ret = -ENODEV;
+        goto free_exit;
+    }
+    cap = (void *) hdr;
+
+    p = mmap(NULL, nv2reg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
+             MAP_SHARED, vdev->vbasedev.fd, nv2reg->offset);
+    if (p == MAP_FAILED) {
+        ret = -errno;
+        goto free_exit;
+    }
+
+    quirk = vfio_quirk_alloc(1);
+    memory_region_init_ram_ptr(&quirk->mem[0], OBJECT(vdev), "nvlink2-mr",
+                               nv2reg->size, p);
+    QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
+
+    object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
+                        vfio_pci_nvlink2_get_tgt, NULL, NULL,
+                        (void *) (uintptr_t) cap->tgt, NULL);
+    trace_vfio_pci_nvidia_gpu_setup_quirk(vdev->vbasedev.name, cap->tgt,
+                                          nv2reg->size);
+free_exit:
+    g_free(nv2reg);
+
+    return ret;
+}
+
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp)
+{
+    int ret;
+    void *p;
+    struct vfio_region_info *atsdreg = NULL;
+    struct vfio_info_cap_header *hdr;
+    struct vfio_region_info_cap_nvlink2_ssatgt *captgt;
+    struct vfio_region_info_cap_nvlink2_lnkspd *capspeed;
+    VFIOQuirk *quirk;
+
+    ret = vfio_get_dev_region_info(&vdev->vbasedev,
+                                   VFIO_REGION_TYPE_PCI_VENDOR_TYPE |
+                                   PCI_VENDOR_ID_IBM,
+                                   VFIO_REGION_SUBTYPE_IBM_NVLINK2_ATSD,
+                                   &atsdreg);
+    if (ret) {
+        return ret;
+    }
+
+    hdr = vfio_get_region_info_cap(atsdreg,
+                                   VFIO_REGION_INFO_CAP_NVLINK2_SSATGT);
+    if (!hdr) {
+        ret = -ENODEV;
+        goto free_exit;
+    }
+    captgt = (void *) hdr;
+
+    hdr = vfio_get_region_info_cap(atsdreg,
+                                   VFIO_REGION_INFO_CAP_NVLINK2_LNKSPD);
+    if (!hdr) {
+        ret = -ENODEV;
+        goto free_exit;
+    }
+    capspeed = (void *) hdr;
+
+    /* Some NVLink bridges may not have assigned ATSD */
+    if (atsdreg->size) {
+        p = mmap(NULL, atsdreg->size, PROT_READ | PROT_WRITE | PROT_EXEC,
+                 MAP_SHARED, vdev->vbasedev.fd, atsdreg->offset);
+        if (p == MAP_FAILED) {
+            ret = -errno;
+            goto free_exit;
+        }
+
+        quirk = vfio_quirk_alloc(1);
+        memory_region_init_ram_device_ptr(&quirk->mem[0], OBJECT(vdev),
+                                          "nvlink2-atsd-mr", atsdreg->size, p);
+        QLIST_INSERT_HEAD(&vdev->bars[0].quirks, quirk, next);
+    }
+
+    object_property_add(OBJECT(vdev), "nvlink2-tgt", "uint64",
+                        vfio_pci_nvlink2_get_tgt, NULL, NULL,
+                        (void *) (uintptr_t) captgt->tgt, NULL);
+    trace_vfio_pci_nvlink2_setup_quirk_ssatgt(vdev->vbasedev.name, captgt->tgt,
+                                              atsdreg->size);
+
+    object_property_add(OBJECT(vdev), "nvlink2-link-speed", "uint32",
+                        vfio_pci_nvlink2_get_link_speed, NULL, NULL,
+                        (void *) (uintptr_t) capspeed->link_speed, NULL);
+    trace_vfio_pci_nvlink2_setup_quirk_lnkspd(vdev->vbasedev.name,
+                                              capspeed->link_speed);
+free_exit:
+    g_free(atsdreg);
+
+    return ret;
+}
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index ba3a393..735dcae 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -3078,6 +3078,20 @@ static void vfio_realize(PCIDevice *pdev, Error **errp)
         }
     }
 
+    if (vdev->vendor_id == PCI_VENDOR_ID_NVIDIA) {
+        ret = vfio_pci_nvidia_v100_ram_init(vdev, errp);
+        if (ret && ret != -ENODEV) {
+            error_report("Failed to setup NVIDIA V100 GPU RAM");
+        }
+    }
+
+    if (vdev->vendor_id == PCI_VENDOR_ID_IBM) {
+        ret = vfio_pci_nvlink2_init(vdev, errp);
+        if (ret && ret != -ENODEV) {
+            error_report("Failed to setup NVlink2 bridge");
+        }
+    }
+
     vfio_register_err_notifier(vdev);
     vfio_register_req_notifier(vdev);
     vfio_setup_resetfn_quirk(vdev);
diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h
index 629c875..bf07b43 100644
--- a/hw/vfio/pci.h
+++ b/hw/vfio/pci.h
@@ -175,6 +175,8 @@ int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
 int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
                                struct vfio_region_info *info,
                                Error **errp);
+int vfio_pci_nvidia_v100_ram_init(VFIOPCIDevice *vdev, Error **errp);
+int vfio_pci_nvlink2_init(VFIOPCIDevice *vdev, Error **errp);
 
 int vfio_display_probe(VFIOPCIDevice *vdev, Error **errp);
 void vfio_display_finalize(VFIOPCIDevice *vdev);
diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index 9487887..c9a9c14 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -84,6 +84,10 @@ vfio_pci_igd_opregion_enabled(const char *name) "%s"
 vfio_pci_igd_host_bridge_enabled(const char *name) "%s"
 vfio_pci_igd_lpc_bridge_enabled(const char *name) "%s"
 
+vfio_pci_nvidia_gpu_setup_quirk(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
+vfio_pci_nvlink2_setup_quirk_ssatgt(const char *name, uint64_t tgt, uint64_t size) "%s tgt=0x%"PRIx64" size=0x%"PRIx64
+vfio_pci_nvlink2_setup_quirk_lnkspd(const char *name, uint32_t link_speed) "%s link_speed=0x%x"
+
 # hw/vfio/common.c
 vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)"
 vfio_region_read(char *name, int index, uint64_t addr, unsigned size, uint64_t data) " (%s:region%d+0x%"PRIx64", %d) = 0x%"PRIx64
diff --git a/include/hw/pci-host/spapr.h b/include/hw/pci-host/spapr.h
index 0fae4fc..cd29c59 100644
--- a/include/hw/pci-host/spapr.h
+++ b/include/hw/pci-host/spapr.h
@@ -24,6 +24,7 @@
 #include "hw/pci/pci.h"
 #include "hw/pci/pci_host.h"
 #include "hw/ppc/xics.h"
+#include "qemu/units.h"
 
 #define TYPE_SPAPR_PCI_HOST_BRIDGE "spapr-pci-host-bridge"
 
@@ -87,6 +88,9 @@ struct sPAPRPHBState {
     uint32_t mig_liobn;
     hwaddr mig_mem_win_addr, mig_mem_win_size;
     hwaddr mig_io_win_addr, mig_io_win_size;
+    hwaddr nv2_gpa_win_addr;
+    hwaddr nv2_atsd_win_addr;
+    struct spapr_phb_pci_nvgpu_config *nvgpus;
 };
 
 #define SPAPR_PCI_MEM_WIN_BUS_OFFSET 0x80000000ULL
@@ -104,6 +108,22 @@ struct sPAPRPHBState {
 
 #define SPAPR_PCI_MSI_WINDOW         0x40000000000ULL
 
+#define SPAPR_PCI_NV2RAM64_WIN_BASE  SPAPR_PCI_LIMIT
+#define SPAPR_PCI_NV2RAM64_WIN_SIZE  (2 * TiB) /* For up to 6 GPUs 256GB each */
+
+/* Max number of these GPUs per a physical box */
+#define NVGPU_MAX_NUM                6
+/* Max number of NVLinks per GPU in any physical box */
+#define NVGPU_MAX_LINKS              3
+
+/*
+ * GPU RAM starts at 64TiB so huge DMA window to cover it all ends at 128TiB
+ * which is enough. We do not need DMA for ATSD so we put them at 128TiB.
+ */
+#define SPAPR_PCI_NV2ATSD_WIN_BASE   (128 * TiB)
+#define SPAPR_PCI_NV2ATSD_WIN_SIZE   (NVGPU_MAX_NUM * NVGPU_MAX_LINKS * \
+                                      64 * KiB)
+
 static inline qemu_irq spapr_phb_lsi_qirq(struct sPAPRPHBState *phb, int pin)
 {
     sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
@@ -135,6 +155,13 @@ int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state);
 int spapr_phb_vfio_eeh_reset(sPAPRPHBState *sphb, int option);
 int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb);
 void spapr_phb_vfio_reset(DeviceState *qdev);
+void spapr_phb_nvgpu_setup(sPAPRPHBState *sphb, Error **errp);
+void spapr_phb_nvgpu_free(sPAPRPHBState *sphb);
+void spapr_phb_nvgpu_populate_dt(sPAPRPHBState *sphb, void *fdt, int bus_off,
+                                 Error **errp);
+void spapr_phb_nvgpu_ram_populate_dt(sPAPRPHBState *sphb, void *fdt);
+void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt, int offset,
+                                        sPAPRPHBState *sphb);
 #else
 static inline bool spapr_phb_eeh_available(sPAPRPHBState *sphb)
 {
@@ -161,6 +188,25 @@ static inline int spapr_phb_vfio_eeh_configure(sPAPRPHBState *sphb)
 static inline void spapr_phb_vfio_reset(DeviceState *qdev)
 {
 }
+static inline void spapr_phb_nvgpu_setup(sPAPRPHBState *sphb, Error **errp)
+{
+}
+static inline void spapr_phb_nvgpu_free(sPAPRPHBState *sphb)
+{
+}
+static inline void spapr_phb_nvgpu_populate_dt(sPAPRPHBState *sphb, void *fdt,
+                                               int bus_off, Error **errp)
+{
+}
+static inline void spapr_phb_nvgpu_ram_populate_dt(sPAPRPHBState *sphb,
+                                                   void *fdt)
+{
+}
+static inline void spapr_phb_nvgpu_populate_pcidev_dt(PCIDevice *dev, void *fdt,
+                                                      int offset,
+                                                      sPAPRPHBState *sphb)
+{
+}
 #endif
 
 void spapr_phb_dma_reset(sPAPRPHBState *sphb);
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index beb42bc..72cfa49 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -104,7 +104,8 @@ struct sPAPRMachineClass {
     void (*phb_placement)(sPAPRMachineState *spapr, uint32_t index,
                           uint64_t *buid, hwaddr *pio,
                           hwaddr *mmio32, hwaddr *mmio64,
-                          unsigned n_dma, uint32_t *liobns, Error **errp);
+                          unsigned n_dma, uint32_t *liobns, hwaddr *nv2gpa,
+                          hwaddr *nv2atsd, Error **errp);
     sPAPRResizeHPT resize_hpt_default;
     sPAPRCapabilities default_caps;
 };
@@ -171,6 +172,8 @@ struct sPAPRMachineState {
 
     bool cmd_line_caps[SPAPR_CAP_NUM];
     sPAPRCapabilities def, eff, mig;
+
+    unsigned gpu_numa_id;
 };
 
 #define H_SUCCESS 0
-- 
1.8.3.1