Source-Changes-HG archive
[src/netbsd-9]: src/sys/dev Pull up following revision(s) (requested by riast...
details: https://anonhg.NetBSD.org/src/rev/daf112b0f377
branches: netbsd-9
changeset: 984099:daf112b0f377
user: martin <martin@NetBSD.org>
date: Mon Jun 21 17:25:48 2021 +0000
description:
Pull up following revision(s) (requested by riastradh in ticket #1305):
sys/dev/ic/nvmevar.h: revision 1.22
sys/dev/ic/nvme.c: revision 1.56
sys/dev/ic/nvme.c: revision 1.57
sys/dev/pci/nvme_pci.c: revision 1.30
nvme(4): Add suspend/resume, derived from OpenBSD.
nvme(4): Move disestablishment of admin q interrupt to nvme_detach.
Nothing re-established this after suspend/resume, so attempting
suspend/resume/suspend would crash, and presumably we would miss
interrupts after resume. This keeps the establish/disestablish more
symmetric in attach/detach.
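For readers who do not want to wade through the 270-line diff, the change condenses to the sketch below: the PCI front end registers pmf(9) suspend/resume callbacks that forward to the new bus-independent nvme_suspend()/nvme_resume() in sys/dev/ic/nvme.c. This is a hedged sketch, not the committed code; the sketch_* names are invented, and it assumes the functions sit inside sys/dev/pci/nvme_pci.c so that struct nvme_pci_softc, device_private() and the pmf(9) declarations are already in scope.

/*
 * Condensed sketch (not the committed code) of the pmf(9) wiring this
 * pull-up adds.  Assumed to live in sys/dev/pci/nvme_pci.c, where
 * struct nvme_pci_softc and the pmf(9)/autoconf types are visible.
 */
static bool
sketch_nvme_pci_suspend(device_t self, const pmf_qual_t *qual)
{
        struct nvme_pci_softc *psc = device_private(self);

        /* nvme_suspend() simply runs nvme_shutdown(), quiescing the controller */
        return nvme_suspend(&psc->psc_nvme) == 0;
}

static bool
sketch_nvme_pci_resume(device_t self, const pmf_qual_t *qual)
{
        struct nvme_pci_softc *psc = device_private(self);

        /*
         * nvme_resume() disables the controller, resets the admin queue,
         * re-enables the controller, recreates the I/O queues and then
         * unmasks interrupts (NVME_INTMC) again.
         */
        return nvme_resume(&psc->psc_nvme) == 0;
}

The two callbacks are handed to pmf_device_register() in nvme_pci_attach(), which previously registered NULL hooks, so a system suspend/resume cycle now quiesces and re-initializes the controller instead of leaving it untouched.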
diffstat:
sys/dev/ic/nvme.c | 96 ++++++++++++++++++++++++++++++++++++++++++++++---
sys/dev/ic/nvmevar.h | 5 ++-
sys/dev/pci/nvme_pci.c | 36 +++++++++++++++++-
3 files changed, 126 insertions(+), 11 deletions(-)
diffs (270 lines):
diff -r b01b7ed8b2f2 -r daf112b0f377 sys/dev/ic/nvme.c
--- a/sys/dev/ic/nvme.c Mon Jun 21 17:23:13 2021 +0000
+++ b/sys/dev/ic/nvme.c Mon Jun 21 17:25:48 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: nvme.c,v 1.44.2.5 2020/12/07 20:04:07 martin Exp $ */
+/* $NetBSD: nvme.c,v 1.44.2.6 2021/06/21 17:25:48 martin Exp $ */
/* $OpenBSD: nvme.c,v 1.49 2016/04/18 05:59:50 dlg Exp $ */
/*
@@ -18,7 +18,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.44.2.5 2020/12/07 20:04:07 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvme.c,v 1.44.2.6 2021/06/21 17:25:48 martin Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -81,6 +81,7 @@
static struct nvme_queue *
nvme_q_alloc(struct nvme_softc *, uint16_t, u_int, u_int);
static int nvme_q_create(struct nvme_softc *, struct nvme_queue *);
+static void nvme_q_reset(struct nvme_softc *, struct nvme_queue *);
static int nvme_q_delete(struct nvme_softc *, struct nvme_queue *);
static void nvme_q_submit(struct nvme_softc *, struct nvme_queue *,
struct nvme_ccb *, void (*)(struct nvme_queue *,
@@ -338,7 +339,6 @@
{
uint64_t cap;
uint32_t reg;
- u_int dstrd;
u_int mps = PAGE_SHIFT;
u_int ncq, nsq;
uint16_t adminq_entries = nvme_adminq_size;
@@ -359,7 +359,7 @@
NVME_VS_MNR(reg), NVME_VS_TER(reg));
cap = nvme_read8(sc, NVME_CAP);
- dstrd = NVME_CAP_DSTRD(cap);
+ sc->sc_dstrd = NVME_CAP_DSTRD(cap);
if (NVME_CAP_MPSMIN(cap) > PAGE_SHIFT) {
aprint_error_dev(sc->sc_dev, "NVMe minimum page size %u "
"is greater than CPU page size %u\n",
@@ -382,7 +382,8 @@
return 1;
}
- sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries, dstrd);
+ sc->sc_admin_q = nvme_q_alloc(sc, NVME_ADMIN_Q, adminq_entries,
+ sc->sc_dstrd);
if (sc->sc_admin_q == NULL) {
aprint_error_dev(sc->sc_dev,
"unable to allocate admin queue\n");
@@ -427,7 +428,8 @@
sc->sc_q = kmem_zalloc(sizeof(*sc->sc_q) * sc->sc_nq, KM_SLEEP);
for (i = 0; i < sc->sc_nq; i++) {
- sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries, dstrd);
+ sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries,
+ sc->sc_dstrd);
if (sc->sc_q[i] == NULL) {
aprint_error_dev(sc->sc_dev,
"unable to allocate io queue\n");
@@ -550,6 +552,7 @@
return error;
/* from now on we are committed to detach, following will never fail */
+ sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
for (i = 0; i < sc->sc_nq; i++)
nvme_q_free(sc, sc->sc_q[i]);
kmem_free(sc->sc_q, sizeof(*sc->sc_q) * sc->sc_nq);
@@ -558,6 +561,68 @@
return 0;
}
+int
+nvme_suspend(struct nvme_softc *sc)
+{
+
+ return nvme_shutdown(sc);
+}
+
+int
+nvme_resume(struct nvme_softc *sc)
+{
+ int ioq_entries = nvme_ioq_size;
+ uint64_t cap;
+ int i, error;
+
+ error = nvme_disable(sc);
+ if (error) {
+ device_printf(sc->sc_dev, "unable to disable controller\n");
+ return error;
+ }
+
+ nvme_q_reset(sc, sc->sc_admin_q);
+
+ error = nvme_enable(sc, ffs(sc->sc_mps) - 1);
+ if (error) {
+ device_printf(sc->sc_dev, "unable to enable controller\n");
+ return error;
+ }
+
+ for (i = 0; i < sc->sc_nq; i++) {
+ cap = nvme_read8(sc, NVME_CAP);
+ if (ioq_entries > NVME_CAP_MQES(cap))
+ ioq_entries = NVME_CAP_MQES(cap);
+ sc->sc_q[i] = nvme_q_alloc(sc, i + 1, ioq_entries,
+ sc->sc_dstrd);
+ if (sc->sc_q[i] == NULL) {
+ error = ENOMEM;
+ device_printf(sc->sc_dev, "unable to allocate io q %d"
+ "\n", i);
+ goto disable;
+ }
+ if (nvme_q_create(sc, sc->sc_q[i]) != 0) {
+ error = EIO;
+ device_printf(sc->sc_dev, "unable to create io q %d"
+ "\n", i);
+ nvme_q_free(sc, sc->sc_q[i]);
+ goto free_q;
+ }
+ }
+
+ nvme_write4(sc, NVME_INTMC, 1);
+
+ return 0;
+
+free_q:
+ while (i --> 0)
+ nvme_q_free(sc, sc->sc_q[i]);
+disable:
+ (void)nvme_disable(sc);
+
+ return error;
+}
+
static int
nvme_shutdown(struct nvme_softc *sc)
{
@@ -575,7 +640,6 @@
disabled = true;
}
}
- sc->sc_intr_disestablish(sc, NVME_ADMIN_Q);
if (disabled)
goto disable;
@@ -1820,6 +1884,24 @@
}
static void
+nvme_q_reset(struct nvme_softc *sc, struct nvme_queue *q)
+{
+
+ memset(NVME_DMA_KVA(q->q_sq_dmamem), 0, NVME_DMA_LEN(q->q_sq_dmamem));
+ memset(NVME_DMA_KVA(q->q_cq_dmamem), 0, NVME_DMA_LEN(q->q_cq_dmamem));
+
+ q->q_sqtdbl = NVME_SQTDBL(q->q_id, sc->sc_dstrd);
+ q->q_cqhdbl = NVME_CQHDBL(q->q_id, sc->sc_dstrd);
+
+ q->q_sq_tail = 0;
+ q->q_cq_head = 0;
+ q->q_cq_phase = NVME_CQE_PHASE;
+
+ nvme_dmamem_sync(sc, q->q_sq_dmamem, BUS_DMASYNC_PREWRITE);
+ nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_PREREAD);
+}
+
+static void
nvme_q_free(struct nvme_softc *sc, struct nvme_queue *q)
{
nvme_ccbs_free(q);
diff -r b01b7ed8b2f2 -r daf112b0f377 sys/dev/ic/nvmevar.h
--- a/sys/dev/ic/nvmevar.h Mon Jun 21 17:23:13 2021 +0000
+++ b/sys/dev/ic/nvmevar.h Mon Jun 21 17:25:48 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: nvmevar.h,v 1.20 2019/06/28 15:08:47 jmcneill Exp $ */
+/* $NetBSD: nvmevar.h,v 1.20.2.1 2021/06/21 17:25:48 martin Exp $ */
/* $OpenBSD: nvmevar.h,v 1.8 2016/04/14 11:18:32 dlg Exp $ */
/*
@@ -122,6 +122,7 @@
size_t sc_mps; /* memory page size */
size_t sc_mdts; /* max data trasfer size */
u_int sc_max_sgl; /* max S/G segments */
+ u_int sc_dstrd;
struct nvm_identify_controller
sc_identify;
@@ -162,6 +163,8 @@
int nvme_detach(struct nvme_softc *, int flags);
int nvme_rescan(device_t, const char *, const int *);
void nvme_childdet(device_t, device_t);
+int nvme_suspend(struct nvme_softc *);
+int nvme_resume(struct nvme_softc *);
int nvme_intr(void *);
void nvme_softintr_intx(void *);
int nvme_intr_msi(void *);
diff -r b01b7ed8b2f2 -r daf112b0f377 sys/dev/pci/nvme_pci.c
--- a/sys/dev/pci/nvme_pci.c Mon Jun 21 17:23:13 2021 +0000
+++ b/sys/dev/pci/nvme_pci.c Mon Jun 21 17:25:48 2021 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: nvme_pci.c,v 1.26 2019/01/23 06:56:19 msaitoh Exp $ */
+/* $NetBSD: nvme_pci.c,v 1.26.4.1 2021/06/21 17:25:48 martin Exp $ */
/* $OpenBSD: nvme_pci.c,v 1.3 2016/04/14 11:18:32 dlg Exp $ */
/*
@@ -43,7 +43,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.26 2019/01/23 06:56:19 msaitoh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: nvme_pci.c,v 1.26.4.1 2021/06/21 17:25:48 martin Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -82,6 +82,8 @@
static void nvme_pci_attach(device_t, device_t, void *);
static int nvme_pci_detach(device_t, int);
static int nvme_pci_rescan(device_t, const char *, const int *);
+static bool nvme_pci_suspend(device_t, const pmf_qual_t *);
+static bool nvme_pci_resume(device_t, const pmf_qual_t *);
CFATTACH_DECL3_NEW(nvme_pci, sizeof(struct nvme_pci_softc),
nvme_pci_match, nvme_pci_attach, nvme_pci_detach, NULL, nvme_pci_rescan,
@@ -230,7 +232,7 @@
goto softintr_free;
}
- if (!pmf_device_register(self, NULL, NULL))
+ if (!pmf_device_register(self, nvme_pci_suspend, nvme_pci_resume))
aprint_error_dev(self, "couldn't establish power handler\n");
SET(sc->sc_flags, NVME_F_ATTACHED);
@@ -254,6 +256,34 @@
return nvme_rescan(self, attr, flags);
}
+static bool
+nvme_pci_suspend(device_t self, const pmf_qual_t *qual)
+{
+ struct nvme_pci_softc *psc = device_private(self);
+ struct nvme_softc *sc = &psc->psc_nvme;
+ int error;
+
+ error = nvme_suspend(sc);
+ if (error)
+ return false;
+
+ return true;
+}
+
+static bool
+nvme_pci_resume(device_t self, const pmf_qual_t *qual)
+{
+ struct nvme_pci_softc *psc = device_private(self);
+ struct nvme_softc *sc = &psc->psc_nvme;
+ int error;
+
+ error = nvme_resume(sc);
+ if (error)
+ return false;
+
+ return true;
+}
+
static int
nvme_pci_detach(device_t self, int flags)
{