Source-Changes-HG archive
[src/thorpej_scsipi]: src/sys/dev/ic Adapt to scsipi API changes, and add Tag...
details: https://anonhg.NetBSD.org/src/rev/ebb050ef6d3e
branches: thorpej_scsipi
changeset: 477270:ebb050ef6d3e
user: thorpej <thorpej%NetBSD.org@localhost>
date: Tue Oct 19 17:47:01 1999 +0000
description:
Adapt to scsipi API changes, and add Tagged Queueing support.
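For context, the scsipi API change on this branch replaces the per-device scsipi_link (and its scsipi_cmd entry point) with a per-adapter scsipi_adapter plus a per-bus scsipi_channel and a single adapt_request hook. The sketch below condenses the attach-time setup visible in the diff that follows; it is illustrative only, uses the field names exactly as they appear in the diff, assumes the driver's usual headers (dev/ic/bhareg.h, dev/ic/bhavar.h), and omits the rest of bha_attach().

/*
 * Illustrative condensation of the new attach-time setup shown in
 * the diff below (thorpej_scsipi branch).  Field names are taken
 * from the diff; error handling, mailbox/CCB creation, and the
 * remainder of bha_attach() are omitted.
 */
void
bha_attach_sketch(struct bha_softc *sc)
{
	struct scsipi_adapter *adapt = &sc->sc_adapter;
	struct scsipi_channel *chan = &sc->sc_channel;

	/* Describe the adapter: one channel, request/minphys hooks. */
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = &sc->sc_dev;
	adapt->adapt_nchannels = 1;
	adapt->adapt_max_periph = sc->sc_mbox_count;
	adapt->adapt_request = bha_scsipi_request;
	adapt->adapt_minphys = bha_minphys;

	/* Describe the single SCSI bus hanging off that adapter. */
	memset(chan, 0, sizeof(*chan));
	chan->chan_adapter = adapt;
	chan->chan_bustype = &scsi_bustype;
	chan->chan_channel = 0;
	chan->chan_flags = SCSIPI_CHAN_CANGROW;
	chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
	chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
	chan->chan_id = sc->sc_scsi_id;

	/* Openings come from the CCB pool once it has been created. */
	adapt->adapt_openings = sc->sc_cur_ccbs;

	/* The mid-layer now attaches children against the channel. */
	(void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}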
diffstat:
sys/dev/ic/bha.c | 567 ++++++++++++++++++++++++++++-----------------------
sys/dev/ic/bhareg.h | 6 +-
sys/dev/ic/bhavar.h | 12 +-
3 files changed, 324 insertions(+), 261 deletions(-)
diffs (truncated from 803 to 300 lines):
diff -r 669cd0aa6a7f -r ebb050ef6d3e sys/dev/ic/bha.c
--- a/sys/dev/ic/bha.c Tue Oct 19 17:44:55 1999 +0000
+++ b/sys/dev/ic/bha.c Tue Oct 19 17:47:01 1999 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: bha.c,v 1.33 1999/10/09 22:46:20 mycroft Exp $ */
+/* $NetBSD: bha.c,v 1.33.2.1 1999/10/19 17:47:01 thorpej Exp $ */
/*-
* Copyright (c) 1997, 1998, 1999 The NetBSD Foundation, Inc.
@@ -91,9 +91,12 @@
int bha_cmd __P((bus_space_tag_t, bus_space_handle_t, struct bha_softc *,
int, u_char *, int, u_char *));
-int bha_scsi_cmd __P((struct scsipi_xfer *));
+void bha_scsipi_request __P((struct scsipi_channel *,
+ scsipi_adapter_req_t, void *));
void bha_minphys __P((struct buf *));
+void bha_get_xfer_mode __P((struct bha_softc *, struct scsipi_periph *));
+
void bha_done __P((struct bha_softc *, struct bha_ccb *));
int bha_poll __P((struct bha_softc *, struct scsipi_xfer *, int));
void bha_timeout __P((void *arg));
@@ -113,14 +116,6 @@
struct bha_ccb *bha_get_ccb __P((struct bha_softc *, int));
void bha_free_ccb __P((struct bha_softc *, struct bha_ccb *));
-/* the below structure is so we have a default dev struct for out link struct */
-struct scsipi_device bha_dev = {
- NULL, /* Use default error handler */
- NULL, /* have a queue, served by this */
- NULL, /* have no async handler */
- NULL, /* Use default 'done' routine */
-};
-
#define BHA_RESET_TIMEOUT 2000 /* time to wait for reset (mSec) */
#define BHA_ABORT_TIMEOUT 2000 /* time to wait for abort (mSec) */
@@ -166,6 +161,8 @@
struct bha_softc *sc;
struct bha_probe_data *bpd;
{
+ struct scsipi_adapter *adapt = &sc->sc_adapter;
+ struct scsipi_channel *chan = &sc->sc_channel;
int initial_ccbs;
/*
@@ -182,30 +179,31 @@
}
/*
- * Fill in the adapter.
+ * Fill in the scsipi_adapter.
*/
- sc->sc_adapter.scsipi_cmd = bha_scsi_cmd;
- sc->sc_adapter.scsipi_minphys = bha_minphys;
+ memset(adapt, 0, sizeof(*adapt));
+ adapt->adapt_dev = &sc->sc_dev;
+ adapt->adapt_nchannels = 1;
+ /* adapt_openings initialized below */
+ adapt->adapt_max_periph = sc->sc_mbox_count;
+ adapt->adapt_request = bha_scsipi_request;
+ adapt->adapt_minphys = bha_minphys;
/*
- * fill in the prototype scsipi_link.
+ * Fill in the scsipi_channel.
*/
- sc->sc_link.scsipi_scsi.channel = SCSI_CHANNEL_ONLY_ONE;
- sc->sc_link.adapter_softc = sc;
- sc->sc_link.scsipi_scsi.adapter_target = sc->sc_scsi_id;
- sc->sc_link.adapter = &sc->sc_adapter;
- sc->sc_link.device = &bha_dev;
- sc->sc_link.openings = 4;
- sc->sc_link.scsipi_scsi.max_target =
- (sc->sc_flags & BHAF_WIDE) ? 15 : 7;
- sc->sc_link.scsipi_scsi.max_lun =
- (sc->sc_flags & BHAF_WIDE_LUN) ? 31 : 7;
- sc->sc_link.type = BUS_SCSI;
+ memset(chan, 0, sizeof(*chan));
+ chan->chan_adapter = adapt;
+ chan->chan_bustype = &scsi_bustype;
+ chan->chan_channel = 0;
+ chan->chan_flags = SCSIPI_CHAN_CANGROW;
+ chan->chan_ntargets = (sc->sc_flags & BHAF_WIDE) ? 16 : 8;
+ chan->chan_nluns = (sc->sc_flags & BHAF_WIDE_LUN) ? 32 : 8;
+ chan->chan_id = sc->sc_scsi_id;
TAILQ_INIT(&sc->sc_free_ccb);
TAILQ_INIT(&sc->sc_waiting_ccb);
TAILQ_INIT(&sc->sc_allocating_ccbs);
- TAILQ_INIT(&sc->sc_queue);
if (bha_create_mailbox(sc) != 0)
return;
@@ -217,10 +215,12 @@
return;
}
+ adapt->adapt_openings = sc->sc_cur_ccbs;
+
if (bha_init(sc) != 0)
return;
- (void) config_found(&sc->sc_dev, &sc->sc_link, scsiprint);
+ (void) config_found(&sc->sc_dev, &sc->sc_channel, scsiprint);
}
/*
@@ -279,205 +279,187 @@
*****************************************************************************/
/*
- * bha_scsi_cmd:
+ * bha_scsipi_request:
*
- * Start a SCSI operation.
+ * Perform a request for the SCSIPI layer.
*/
-int
-bha_scsi_cmd(xs)
+void
+bha_scsipi_request(chan, req, arg)
+ struct scsipi_channel *chan;
+ scsipi_adapter_req_t req;
+ void *arg;
+{
+ struct scsipi_adapter *adapt = chan->chan_adapter;
+ struct bha_softc *sc = (void *)adapt->adapt_dev;
struct scsipi_xfer *xs;
-{
- struct scsipi_link *sc_link = xs->sc_link;
- struct bha_softc *sc = sc_link->adapter_softc;
+ struct scsipi_periph *periph;
bus_dma_tag_t dmat = sc->sc_dmat;
struct bha_ccb *ccb;
int error, seg, flags, s;
- int fromqueue = 0, dontqueue = 0;
SC_DEBUG(sc_link, SDEV_DB2, ("bha_scsi_cmd\n"));
- s = splbio(); /* protect the queue */
+ switch (req) {
+ case ADAPTER_REQ_RUN_XFER:
+ xs = arg;
+ periph = xs->xs_periph;
+ flags = xs->xs_control;
- /*
- * If we're running the queue from bha_done(), we've been
- * called with the first queue entry as our argument.
- */
- if (xs == TAILQ_FIRST(&sc->sc_queue)) {
- TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
- fromqueue = 1;
- goto get_ccb;
- }
-
- /* Polled requests can't be queued for later. */
- dontqueue = xs->xs_control & XS_CTL_POLL;
-
- /*
- * If there are jobs in the queue, run them first.
- */
- if (TAILQ_FIRST(&sc->sc_queue) != NULL) {
+ /* Get a CCB to use. */
+ ccb = bha_get_ccb(sc, flags);
+#ifdef DIAGNOSTIC
/*
- * If we can't queue, we have to abort, since
- * we have to preserve order.
+ * This should never happen as we track the resources
+ * in the mid-layer.
*/
- if (dontqueue) {
- splx(s);
- xs->error = XS_DRIVER_STUFFUP;
- return (TRY_AGAIN_LATER);
+ if (ccb == NULL) {
+ scsipi_printaddr(periph);
+ printf("unable to allocate ccb\n");
+ panic("bha_scsipi_request");
}
-
- /*
- * Swap with the first queue entry.
- */
- TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
- xs = TAILQ_FIRST(&sc->sc_queue);
- TAILQ_REMOVE(&sc->sc_queue, xs, adapter_q);
- fromqueue = 1;
- }
+#endif
- get_ccb:
- /*
- * get a ccb to use. If the transfer
- * is from a buf (possibly from interrupt time)
- * then we can't allow it to sleep
- */
- flags = xs->xs_control;
- if ((ccb = bha_get_ccb(sc, flags)) == NULL) {
- /*
- * If we can't queue, we lose.
- */
- if (dontqueue) {
- splx(s);
- xs->error = XS_DRIVER_STUFFUP;
- return (TRY_AGAIN_LATER);
- }
+ ccb->xs = xs;
+ ccb->timeout = xs->timeout;
/*
- * Stuff ourselves into the queue, in front
- * if we came off in the first place.
+ * Put all the arguments for the xfer in the ccb
*/
- if (fromqueue)
- TAILQ_INSERT_HEAD(&sc->sc_queue, xs, adapter_q);
- else
- TAILQ_INSERT_TAIL(&sc->sc_queue, xs, adapter_q);
- splx(s);
- return (SUCCESSFULLY_QUEUED);
- }
-
- splx(s); /* done playing with the queue */
-
- ccb->xs = xs;
- ccb->timeout = xs->timeout;
-
- /*
- * Put all the arguments for the xfer in the ccb
- */
- if (flags & XS_CTL_RESET) {
- ccb->opcode = BHA_RESET_CCB;
- ccb->scsi_cmd_length = 0;
- } else {
- /* can't use S/G if zero length */
- ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
- : BHA_INITIATOR_CCB);
- bcopy(xs->cmd, &ccb->scsi_cmd,
- ccb->scsi_cmd_length = xs->cmdlen);
- }
-
- if (xs->datalen) {
- /*
- * Map the DMA transfer.
- */
-#ifdef TFS
- if (flags & XS_CTL_DATA_UIO) {
- error = bus_dmamap_load_uio(dmat,
- ccb->dmamap_xfer, (struct uio *)xs->data,
- (flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
- BUS_DMA_WAITOK);
- } else
-#endif /* TFS */
- {
- error = bus_dmamap_load(dmat,
- ccb->dmamap_xfer, xs->data, xs->datalen, NULL,
- (flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
- BUS_DMA_WAITOK);
- }
-
- if (error) {
- if (error == EFBIG) {
- printf("%s: bha_scsi_cmd, more than %d"
- " dma segments\n",
- sc->sc_dev.dv_xname, BHA_NSEG);
- } else {
- printf("%s: bha_scsi_cmd, error %d loading"
- " dma map\n",
- sc->sc_dev.dv_xname, error);
- }
- goto bad;
+ if (flags & XS_CTL_RESET) {
+ ccb->opcode = BHA_RESET_CCB;
+ ccb->scsi_cmd_length = 0;
+ } else {
+ /* can't use S/G if zero length */
+ ccb->opcode = (xs->datalen ? BHA_INIT_SCAT_GATH_CCB
+ : BHA_INITIATOR_CCB);
+ bcopy(xs->cmd, &ccb->scsi_cmd,
+ ccb->scsi_cmd_length = xs->cmdlen);
}
- bus_dmamap_sync(dmat, ccb->dmamap_xfer, 0,
- ccb->dmamap_xfer->dm_mapsize,
- (flags & XS_CTL_DATA_IN) ? BUS_DMASYNC_PREREAD :
- BUS_DMASYNC_PREWRITE);
+ if (xs->datalen) {
+ /*
+ * Map the DMA transfer.
+ */
+#ifdef TFS
+ if (flags & XS_CTL_DATA_UIO) {
+ error = bus_dmamap_load_uio(dmat,
+ ccb->dmamap_xfer, (struct uio *)xs->data,
+ (flags & XS_CTL_NOSLEEP) ? BUS_DMA_NOWAIT :
+ BUS_DMA_WAITOK);
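The archived diff is cut off at 300 lines, so the remainder of bha_scsipi_request() is not shown here. For orientation only, the new adapt_request entry point is a single dispatcher over the request type; the skeleton below is an assumption about its overall shape, inferred from the scsipi_adapter_req_t values and the bha_get_xfer_mode()/SCSIPI_CHAN_CANGROW references visible above, not a copy of the truncated code.

/*
 * Assumed overall shape of the new single entry point; only the
 * ADAPTER_REQ_RUN_XFER case is (partially) visible in the truncated
 * diff above.
 */
void
bha_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct bha_softc *sc = (void *)chan->chan_adapter->adapt_dev;

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		/* Map and start one scsipi_xfer (shown in the diff above). */
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/* Allocate more CCBs; advertised via SCSIPI_CHAN_CANGROW. */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/* Report sync/wide/tagged modes; see bha_get_xfer_mode(). */
		break;
	}
}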