Source-Changes-HG archive
[src/netbsd-1-4]: src/sys/dev/sbus Pull up revisions 1.12, 1.14-1.18 (via patch, requested by mjacob)
details: https://anonhg.NetBSD.org/src/rev/531ced6cc2e4
branches: netbsd-1-4
changeset: 470019:531ced6cc2e4
user: he <he@NetBSD.org>
date: Sat Jan 08 22:44:56 2000 +0000
description:
Pull up revisions 1.12,1.14-1.18 (via patch, requested by mjacob):
Untangle Qlogic firmware copyright problems and update firmware.
Untangle some MD support issues for said firmware. Add support for the
1280 (Dual LVD), 1240 (Dual Ultra), and 2200 (2nd-generation Qlogic FC chipset).
Fix some synchronous negotiation problems for parallel SCSI.
Firm up some Fabric Support issues.
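The firmware untangling shows up in the diff below as compile-time fallbacks: when no
Qlogic firmware image is configured into the kernel, ISP_1000_RISC_CODE defaults to NULL
and ISP_CODE_ORG to the usual 0x1000 load origin, the idea (as inferred from the patch)
being that a NULL image pointer leaves the adapter running the RISC code resident in its
ROM. A minimal sketch of that pattern follows; isp_maybe_download() is a hypothetical
helper used only for illustration, not part of the driver:

#include <sys/types.h>

#ifndef ISP_1000_RISC_CODE
#define ISP_1000_RISC_CODE	NULL	/* no downloadable image built in */
#endif
#ifndef ISP_CODE_ORG
#define ISP_CODE_ORG		0x1000	/* default RISC code load origin */
#endif

static void
isp_maybe_download(const u_int16_t *fw)
{

	if (fw == NULL) {
		/* Nothing compiled in: keep the firmware in the card's ROM. */
		return;
	}
	/* Otherwise download fw to the RISC processor at ISP_CODE_ORG. */
}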
diffstat:
sys/dev/sbus/isp_sbus.c | 142 ++++++++++++++++++++++++++---------------------
1 files changed, 79 insertions(+), 63 deletions(-)
diffs (273 lines):
diff -r 57e03f78b5fd -r 531ced6cc2e4 sys/dev/sbus/isp_sbus.c
--- a/sys/dev/sbus/isp_sbus.c Sat Jan 08 22:44:22 2000 +0000
+++ b/sys/dev/sbus/isp_sbus.c Sat Jan 08 22:44:56 2000 +0000
@@ -1,5 +1,4 @@
-/* $NetBSD: isp_sbus.c,v 1.10.2.1 1999/04/26 15:46:47 perry Exp $ */
-/* release_03_25_99 */
+/* $NetBSD: isp_sbus.c,v 1.10.2.2 2000/01/08 22:44:56 he Exp $ */
/*
* SBus specific probe and attach routines for Qlogic ISP SCSI adapters.
*
@@ -53,10 +52,17 @@
static void isp_sbus_wr_reg __P((struct ispsoftc *, int, u_int16_t));
static int isp_sbus_mbxdma __P((struct ispsoftc *));
static int isp_sbus_dmasetup __P((struct ispsoftc *, struct scsipi_xfer *,
- ispreq_t *, u_int8_t *, u_int8_t));
+ ispreq_t *, u_int16_t *, u_int16_t));
static void isp_sbus_dmateardown __P((struct ispsoftc *, struct scsipi_xfer *,
u_int32_t));
+#ifndef ISP_1000_RISC_CODE
+#define ISP_1000_RISC_CODE NULL
+#endif
+#ifndef ISP_CODE_ORG
+#define ISP_CODE_ORG 0x1000
+#endif
+
static struct ispmdvec mdvec = {
isp_sbus_rd_reg,
isp_sbus_wr_reg,
@@ -66,12 +72,8 @@
NULL,
NULL,
NULL,
- ISP_RISC_CODE,
- ISP_CODE_LENGTH,
- ISP_CODE_ORG,
- ISP_CODE_VERSION,
- BIU_BURST_ENABLE,
- 0
+ ISP_1000_RISC_CODE, 0, ISP_CODE_ORG, 0,
+ BIU_BURST_ENABLE
};
struct isp_sbussoftc {
@@ -83,7 +85,7 @@
int sbus_node;
int sbus_pri;
struct ispmdvec sbus_mdvec;
- bus_dmamap_t sbus_dmamap[MAXISPREQUEST];
+ bus_dmamap_t *sbus_dmamap;
int16_t sbus_poff[_NREG_BLKS];
};
@@ -138,7 +140,8 @@
sbc->sbus_bustag = sa->sa_bustag;
sbc->sbus_dmatag = sa->sa_dmatag;
- sbc->sbus_pri = sa->sa_pri;
+ if (sa->sa_nintr != 0)
+ sbc->sbus_pri = sa->sa_pri;
sbc->sbus_mdvec = mdvec;
if (sa->sa_npromvaddrs != 0) {
@@ -192,6 +195,7 @@
sbc->sbus_poff[RISC_BLOCK >> _BLK_REG_SHFT] = SBUS_RISC_REGS_OFF;
sbc->sbus_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
+ isp->isp_confopts = self->dv_cfdata->cf_flags;
ISP_LOCK(isp);
isp_reset(isp);
if (isp->isp_state != ISP_RESETSTATE) {
@@ -205,8 +209,7 @@
return;
}
- for (i = 0; i < MAXISPREQUEST; i++) {
-
+ for (i = 0; i < isp->isp_maxcmds; i++) {
/* Allocate a DMA handle */
if (bus_dmamap_create(sbc->sbus_dmatag, MAXPHYS, 1, MAXPHYS, 0,
BUS_DMA_NOWAIT, &sbc->sbus_dmamap[i]) != 0) {
@@ -242,7 +245,7 @@
}
static void
-isp_sbus_wr_reg (isp, regoff, val)
+isp_sbus_wr_reg(isp, regoff, val)
struct ispsoftc *isp;
int regoff;
u_int16_t val;
@@ -259,26 +262,44 @@
{
struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
bus_dma_segment_t seg;
- size_t len, rseg;
+ int rseg;
+ size_t n;
+ bus_size_t len;
+
+ if (isp->isp_rquest_dma)
+ return (0);
- /*
- * NOTE: Since most Sun machines aren't I/O coherent,
- * map the mailboxes through kdvma space to force them
- * to be uncached.
- */
-
+ n = sizeof (ISP_SCSI_XFER_T **) * isp->isp_maxcmds;
+ isp->isp_xflist = (ISP_SCSI_XFER_T **) malloc(n, M_DEVBUF, M_WAITOK);
+ if (isp->isp_xflist == NULL) {
+ printf("%s: cannot alloc xflist array\n", isp->isp_name);
+ return (1);
+ }
+ bzero(isp->isp_xflist, n);
+ n = sizeof (bus_dmamap_t) * isp->isp_maxcmds;
+ sbc->sbus_dmamap = (bus_dmamap_t *) malloc(n, M_DEVBUF, M_WAITOK);
+ if (sbc->sbus_dmamap == NULL) {
+ free(isp->isp_xflist, M_DEVBUF);
+ printf("%s: cannot alloc dmamap array\n", isp->isp_name);
+ return (1);
+ }
/*
* Allocate and map the request queue.
*/
len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN);
if (bus_dmamem_alloc(sbc->sbus_dmatag, len, NBPG, 0,
- &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
+ &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
+ free(sbc->sbus_dmamap, M_DEVBUF);
+ free(isp->isp_xflist, M_DEVBUF);
return (1);
+ }
if (bus_dmamem_map(sbc->sbus_dmatag, &seg, rseg, len,
- (caddr_t *)&isp->isp_rquest,
- BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
+ (caddr_t *)&isp->isp_rquest, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
+ free(sbc->sbus_dmamap, M_DEVBUF);
+ free(isp->isp_xflist, M_DEVBUF);
return (1);
+ }
isp->isp_rquest_dma = seg.ds_addr;
/*
@@ -286,20 +307,24 @@
*/
len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN);
if (bus_dmamem_alloc(sbc->sbus_dmatag, len, NBPG, 0,
- &seg, 1, &rseg, BUS_DMA_NOWAIT) != 0)
+ &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
+ free(sbc->sbus_dmamap, M_DEVBUF);
+ free(isp->isp_xflist, M_DEVBUF);
return (1);
-
+ }
if (bus_dmamem_map(sbc->sbus_dmatag, &seg, rseg, len,
- (caddr_t *)&isp->isp_result,
- BUS_DMA_NOWAIT|BUS_DMA_COHERENT) != 0)
+ (caddr_t *)&isp->isp_result, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) {
+ free(sbc->sbus_dmamap, M_DEVBUF);
+ free(isp->isp_xflist, M_DEVBUF);
return (1);
+ }
isp->isp_result_dma = seg.ds_addr;
-
return (0);
}
/*
- * TODO: If kdvma_mapin fails, try using multiple smaller chunks..
+ * Map a DMA request.
+ * We're guaranteed that rq->req_handle is a value from 1 to isp->isp_maxcmds.
*/
static int
@@ -307,42 +332,32 @@
struct ispsoftc *isp;
struct scsipi_xfer *xs;
ispreq_t *rq;
- u_int8_t *iptrp;
- u_int8_t optr;
+ u_int16_t *iptrp;
+ u_int16_t optr;
{
struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
bus_dmamap_t dmamap;
int dosleep = (xs->flags & SCSI_NOSLEEP) != 0;
+ int in = (xs->flags & SCSI_DATA_IN) != 0;
if (xs->datalen == 0) {
rq->req_seg_count = 1;
- return (CMD_QUEUED);
+ goto mbxsync;
}
-
- if (rq->req_handle > RQUEST_QUEUE_LEN || rq->req_handle < 1) {
- panic("%s: bad handle (%d) in isp_sbus_dmasetup\n",
- isp->isp_name, rq->req_handle);
- /* NOTREACHED */
- }
-
+ assert(rq->req_handle != 0 && rq->req_handle <= isp->isp_maxcmds);
dmamap = sbc->sbus_dmamap[rq->req_handle - 1];
if (dmamap->dm_nsegs != 0) {
panic("%s: dma map already allocated\n", isp->isp_name);
/* NOTREACHED */
}
- if (bus_dmamap_load(sbc->sbus_dmatag, dmamap,
- xs->data, xs->datalen, NULL,
- dosleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
+ if (bus_dmamap_load(sbc->sbus_dmatag, dmamap, xs->data, xs->datalen,
+ NULL, dosleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT) != 0) {
XS_SETERR(xs, HBA_BOTCH);
return (CMD_COMPLETE);
}
- bus_dmamap_sync(sbc->sbus_dmatag, dmamap,
- dmamap->dm_segs[0].ds_addr, xs->datalen,
- (xs->flags & SCSI_DATA_IN)
- ? BUS_DMASYNC_PREREAD
- : BUS_DMASYNC_PREWRITE);
-
- if (xs->flags & SCSI_DATA_IN) {
+ bus_dmamap_sync(sbc->sbus_dmatag, dmamap, dmamap->dm_segs[0].ds_addr,
+ xs->datalen, in? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);
+ if (in) {
rq->req_flags |= REQFLAG_DATA_IN;
} else {
rq->req_flags |= REQFLAG_DATA_OUT;
@@ -350,6 +365,15 @@
rq->req_dataseg[0].ds_count = xs->datalen;
rq->req_dataseg[0].ds_base = dmamap->dm_segs[0].ds_addr;
rq->req_seg_count = 1;
+mbxsync:
+ ISP_SWIZZLE_REQUEST(isp, rq);
+#if 0
+ /*
+ * If we ever map cacheable memory, we need to do something like this.
+ */
+ bus_dmamap_sync(sbc->sbus_dmat, sbc->sbus_rquest_dmap, 0,
+ sbc->sbus_rquest_dmap->dm_mapsize, BUS_DMASYNC_PREWRITE);
+#endif
return (CMD_QUEUED);
}
@@ -361,22 +385,14 @@
{
struct isp_sbussoftc *sbc = (struct isp_sbussoftc *) isp;
bus_dmamap_t dmamap;
-
- if (handle >= RQUEST_QUEUE_LEN) {
- panic("%s: bad handle (%d) in isp_sbus_dmateardown\n",
- isp->isp_name, handle);
- /* NOTREACHED */
- }
-
- dmamap = sbc->sbus_dmamap[handle];
+ assert(handle != 0 && handle <= isp->isp_maxcmds);
+ dmamap = sbc->sbus_dmamap[handle-1];
if (dmamap->dm_nsegs == 0) {
panic("%s: dma map not already allocated\n", isp->isp_name);
/* NOTREACHED */
}
- bus_dmamap_sync(sbc->sbus_dmatag, dmamap,
- dmamap->dm_segs[0].ds_addr, xs->datalen,
- (xs->flags & SCSI_DATA_IN)
- ? BUS_DMASYNC_POSTREAD
- : BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_sync(sbc->sbus_dmatag, dmamap, dmamap->dm_segs[0].ds_addr,
+ xs->datalen, (xs->flags & SCSI_DATA_IN)?
+ BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(sbc->sbus_dmatag, dmamap);
}
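For readers following the DMA changes: the per-command map array is now allocated at
mailbox-DMA setup time and sized by isp->isp_maxcmds rather than the fixed MAXISPREQUEST,
a request's map is selected by req_handle - 1, and every transfer is bracketed by a
pre-sync at setup and a post-sync plus unload at teardown. A condensed sketch of that
lifecycle, using only bus_dma(9) calls that appear in the patch (sbc, rq, xs, dosleep
and in are borrowed from the setup routine above; error handling omitted):

	/* Setup (isp_sbus_dmasetup): one DMA map per request handle. */
	bus_dmamap_t dmamap = sbc->sbus_dmamap[rq->req_handle - 1];

	/* Load the transfer, then pre-sync in the direction of the I/O. */
	bus_dmamap_load(sbc->sbus_dmatag, dmamap, xs->data, xs->datalen,
	    NULL, dosleep ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
	bus_dmamap_sync(sbc->sbus_dmatag, dmamap, dmamap->dm_segs[0].ds_addr,
	    xs->datalen, in ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	/* Teardown (isp_sbus_dmateardown), once the command has completed. */
	bus_dmamap_sync(sbc->sbus_dmatag, dmamap, dmamap->dm_segs[0].ds_addr,
	    xs->datalen, in ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sbc->sbus_dmatag, dmamap);

Sizing the array from isp->isp_maxcmds lets the core driver decide how many commands may
be outstanding, instead of baking that limit in at compile time.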