sdmmc multi-segment DMA support
Hi all,
I would like to add multi-segment DMA support to the sdmmc layer.
Some SDIO controllers cannot handle two or more DMA segments in a
single transfer, for instance the Marvell SDIO controller and perhaps
sdhc(4) as well. I support these controllers by setting aside a bounce
buffer for DMA in the sdmmc layer.
This works on the Marvell Sheevaplug.
Could you try this patch, which also removes the 'notyet' guards so that
sdhc(4) uses DMA?
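For a DMA-capable host controller driver, the only change needed is to
advertise whether the hardware can chain DMA segments. Below is a minimal
sketch of the relevant part of an attach routine, assuming a hypothetical
softc and a can_do_scatter_gather flag (neither is part of this patch);
the chipset tag, handle, DMA tag and clock range are omitted for brevity:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>

struct hypothetical_softc {
	device_t sc_dev;
	device_t sc_sdmmc;
};

static void
hypothetical_attach_sdmmc(struct hypothetical_softc *sc,
    bool can_do_scatter_gather)
{
	struct sdmmcbus_attach_args saa;

	memset(&saa, 0, sizeof(saa));
	saa.saa_caps = SMC_CAPS_4BIT_MODE | SMC_CAPS_AUTO_STOP | SMC_CAPS_DMA;

	/*
	 * Only controllers that can chain two or more DMA segments in one
	 * transfer set SMC_CAPS_MULTI_SEG_DMA; without it the sdmmc layer
	 * splits the transfer per segment and falls back to the per-function
	 * bounce buffer added by this patch.
	 */
	if (can_do_scatter_gather)
		saa.saa_caps |= SMC_CAPS_MULTI_SEG_DMA;

	sc->sc_sdmmc = config_found(sc->sc_dev, &saa, NULL);
}

A controller that sets SMC_CAPS_DMA but not SMC_CAPS_MULTI_SEG_DMA gets its
transfers split per DMA segment by the sdmmc layer, with single sectors
bounced through the new per-function buffer when a segment does not end on
a sector boundary.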
Index: sdhc.c
===================================================================
RCS file: /cvsroot/src/sys/dev/sdmmc/sdhc.c,v
retrieving revision 1.7
diff -u -r1.7 sdhc.c
--- sdhc.c 27 Mar 2010 03:04:52 -0000 1.7
+++ sdhc.c 23 Sep 2010 12:25:04 -0000
@@ -209,7 +209,6 @@
caps = HREAD4(hp, SDHC_CAPABILITIES);
mutex_exit(&hp->host_mtx);
-#if notyet
/* Use DMA if the host system and the controller support it. */
if (ISSET(sc->sc_flags, SDHC_FLAG_FORCE_DMA)
|| ((ISSET(sc->sc_flags, SDHC_FLAG_USE_DMA)
@@ -217,7 +216,6 @@
SET(hp->flags, SHF_USE_DMA);
aprint_normal_dev(sc->sc_dev, "using DMA transfer\n");
}
-#endif
/*
* Determine the base clock frequency. (2.2.24)
@@ -291,10 +289,8 @@
saa.saa_clkmin = hp->clkbase / 256;
saa.saa_clkmax = hp->clkbase;
saa.saa_caps = SMC_CAPS_4BIT_MODE|SMC_CAPS_AUTO_STOP;
-#if notyet
if (ISSET(hp->flags, SHF_USE_DMA))
saa.saa_caps |= SMC_CAPS_DMA;
-#endif
hp->sdmmc = config_found(sc->sc_dev, &saa, NULL);
return 0;
Thanks,
--
kiyohara
The final Marvell patch, which contains Sheevaplug, will be coming soon.
And it is likely to be merged. ;-)
Index: sdmmc.c
===================================================================
RCS file: /cvsroot/src/sys/dev/sdmmc/sdmmc.c,v
retrieving revision 1.3
diff -u -r1.3 sdmmc.c
--- sdmmc.c 20 Sep 2010 09:06:03 -0000 1.3
+++ sdmmc.c 23 Sep 2010 12:06:30 -0000
@@ -61,6 +61,8 @@
#include <sys/systm.h>
#include <sys/callout.h>
+#include <machine/vmparam.h>
+
#include <dev/sdmmc/sdmmc_ioreg.h>
#include <dev/sdmmc/sdmmcchip.h>
#include <dev/sdmmc/sdmmcreg.h>
@@ -587,12 +589,58 @@
sf->cis.product = SDMMC_PRODUCT_INVALID;
sf->cis.function = SDMMC_FUNCTION_INVALID;
+ if (ISSET(sc->sc_flags, SMF_MEM_MODE) &&
+ ISSET(sc->sc_caps, SMC_CAPS_DMA) &&
+ !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
+ bus_dma_segment_t ds;
+ int rseg, error;
+
+ error = bus_dmamap_create(sc->sc_dmat, SDMMC_SECTOR_SIZE, 1,
+ SDMMC_SECTOR_SIZE, 0, BUS_DMA_WAITOK, &sf->bbuf_dmap);
+ if (error)
+ goto fail1;
+ error = bus_dmamem_alloc(sc->sc_dmat, SDMMC_SECTOR_SIZE,
+ PAGE_SIZE, 0, &ds, 1, &rseg, BUS_DMA_WAITOK);
+ if (error)
+ goto fail2;
+ error = bus_dmamem_map(sc->sc_dmat, &ds, 1, SDMMC_SECTOR_SIZE,
+ &sf->bbuf, BUS_DMA_WAITOK);
+ if (error)
+ goto fail3;
+ error = bus_dmamap_load(sc->sc_dmat, sf->bbuf_dmap,
+ sf->bbuf, SDMMC_SECTOR_SIZE, NULL,
+ BUS_DMA_WAITOK|BUS_DMA_READ|BUS_DMA_WRITE);
+ if (!error)
+ goto out;
+
+ bus_dmamem_unmap(sc->sc_dmat, sf->bbuf, SDMMC_SECTOR_SIZE);
+fail3:
+ bus_dmamem_free(sc->sc_dmat, &ds, 1);
+fail2:
+ bus_dmamap_destroy(sc->sc_dmat, sf->bbuf_dmap);
+fail1:
+ free(sf, M_DEVBUF);
+ sf = NULL;
+ }
+out:
+
return sf;
}
void
sdmmc_function_free(struct sdmmc_function *sf)
{
+ struct sdmmc_softc *sc = sf->sc;
+
+ if (ISSET(sc->sc_flags, SMF_MEM_MODE) &&
+ ISSET(sc->sc_caps, SMC_CAPS_DMA) &&
+ !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
+ bus_dmamap_unload(sc->sc_dmat, sf->bbuf_dmap);
+ bus_dmamem_unmap(sc->sc_dmat, sf->bbuf, SDMMC_SECTOR_SIZE);
+ bus_dmamem_free(sc->sc_dmat,
+ sf->bbuf_dmap->dm_segs, sf->bbuf_dmap->dm_nsegs);
+ bus_dmamap_destroy(sc->sc_dmat, sf->bbuf_dmap);
+ }
free(sf, M_DEVBUF);
}
Index: sdmmc_mem.c
===================================================================
RCS file: /cvsroot/src/sys/dev/sdmmc/sdmmc_mem.c,v
retrieving revision 1.11
diff -u -r1.11 sdmmc_mem.c
--- sdmmc_mem.c 23 Sep 2010 12:03:27 -0000 1.11
+++ sdmmc_mem.c 23 Sep 2010 12:06:31 -0000
@@ -253,6 +253,13 @@
break;
}
+ if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE))
+ /*
+ * Change ROD to Push-pull.
+ * Go to Data Transfer Mode, if possible.
+ */
+ sdmmc_chip_bus_rod(sc->sc_sct, sc->sc_sch, 0);
+
/*
* All cards are either inactive or awaiting further commands.
* Read the CSDs and decode the raw CID for each card.
@@ -934,10 +941,12 @@
sdmmc_mem_single_read_block(struct sdmmc_function *sf, uint32_t blkno,
u_char *data, size_t datalen)
{
+ struct sdmmc_softc *sc __unused = sf->sc;
int error = 0;
int i;
KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
+ KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
error = sdmmc_mem_read_block_subr(sf, blkno + i,
@@ -954,7 +963,7 @@
{
struct sdmmc_softc *sc = sf->sc;
struct sdmmc_command cmd;
- int error;
+ int error, bbuf, seg, off, len, num;
if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
error = sdmmc_select_card(sc, sf);
@@ -962,6 +971,10 @@
goto out;
}
+ bbuf = 0;
+ num = 0;
+ seg = off = len = 0;
+retry:
memset(&cmd, 0, sizeof(cmd));
cmd.c_data = data;
cmd.c_datalen = datalen;
@@ -972,8 +985,30 @@
if (!ISSET(sf->flags, SFF_SDHC))
cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
cmd.c_flags = SCF_CMD_ADTC | SCF_CMD_READ | SCF_RSP_R1 | SCF_RSP_SPI_R1;
- if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
+ if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
cmd.c_dmamap = sc->sc_dmap;
+ if (!ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
+ len = sc->sc_dmap->dm_segs[seg].ds_len - off;
+ len &= ~(SDMMC_SECTOR_SIZE - 1);
+ cmd.c_datalen = len;
+ cmd.c_dmaseg = seg;
+ cmd.c_dmaoff = off;
+ bbuf = 0;
+ if (len == 0) {
+ /* Use bounce buffer */
+ bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap,
+ 0, SDMMC_SECTOR_SIZE, BUS_DMASYNC_PREREAD);
+ cmd.c_datalen = SDMMC_SECTOR_SIZE;
+ cmd.c_dmamap = sf->bbuf_dmap;
+ cmd.c_dmaseg = 0;
+ cmd.c_dmaoff = 0;
+ bbuf = 1;
+ len = SDMMC_SECTOR_SIZE;
+ }
+ cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
+ MMC_READ_BLOCK_MULTIPLE : MMC_READ_BLOCK_SINGLE;
+ }
+ }
error = sdmmc_mmc_command(sc, &cmd);
if (error)
@@ -1005,6 +1040,34 @@
} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
}
+ if (ISSET(sc->sc_caps, SMC_CAPS_DMA) &&
+ !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
+ bus_dma_segment_t *dm_segs = sc->sc_dmap->dm_segs;
+
+ if (bbuf) {
+ bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap,
+ 0, SDMMC_SECTOR_SIZE, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, num,
+ SDMMC_SECTOR_SIZE, BUS_DMASYNC_POSTREAD);
+ memcpy(data, sf->bbuf, SDMMC_SECTOR_SIZE);
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, num,
+ SDMMC_SECTOR_SIZE, BUS_DMASYNC_PREREAD);
+ }
+ num += len;
+ data += len;
+ datalen -= len;
+ blkno += (len / SDMMC_SECTOR_SIZE);
+
+ while (off + len >= dm_segs[seg].ds_len) {
+ len -= dm_segs[seg++].ds_len;
+ off = 0;
+ }
+ off += len;
+
+ if (seg < sc->sc_dmap->dm_nsegs)
+ goto retry;
+ }
+
out:
return error;
}
@@ -1065,10 +1128,12 @@
sdmmc_mem_single_write_block(struct sdmmc_function *sf, uint32_t blkno,
u_char *data, size_t datalen)
{
+ struct sdmmc_softc *sc __unused = sf->sc;
int error = 0;
int i;
KASSERT((datalen % SDMMC_SECTOR_SIZE) == 0);
+ KASSERT(!ISSET(sc->sc_caps, SMC_CAPS_DMA));
for (i = 0; i < datalen / SDMMC_SECTOR_SIZE; i++) {
error = sdmmc_mem_write_block_subr(sf, blkno + i,
@@ -1085,7 +1150,7 @@
{
struct sdmmc_softc *sc = sf->sc;
struct sdmmc_command cmd;
- int error;
+ int error, bbuf, seg, off, len, num;
if (!ISSET(sc->sc_caps, SMC_CAPS_SPI_MODE)) {
error = sdmmc_select_card(sc, sf);
@@ -1093,6 +1158,10 @@
goto out;
}
+ bbuf = 0;
+ num = 0;
+ seg = off = len = 0;
+retry:
memset(&cmd, 0, sizeof(cmd));
cmd.c_data = data;
cmd.c_datalen = datalen;
@@ -1103,8 +1172,35 @@
if (!ISSET(sf->flags, SFF_SDHC))
cmd.c_arg <<= SDMMC_SECTOR_SIZE_SB;
cmd.c_flags = SCF_CMD_ADTC | SCF_RSP_R1;
- if (ISSET(sc->sc_caps, SMC_CAPS_DMA))
+ if (ISSET(sc->sc_caps, SMC_CAPS_DMA)) {
cmd.c_dmamap = sc->sc_dmap;
+ if (!ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
+ len = sc->sc_dmap->dm_segs[seg].ds_len - off;
+ len &= ~(SDMMC_SECTOR_SIZE - 1);
+ cmd.c_datalen = len;
+ cmd.c_dmaseg = seg;
+ cmd.c_dmaoff = off;
+ bbuf = 0;
+ if (len == 0) {
+ /* Use bounce buffer */
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, num,
+ SDMMC_SECTOR_SIZE, BUS_DMASYNC_POSTWRITE);
+ memcpy(sf->bbuf, data, SDMMC_SECTOR_SIZE);
+ bus_dmamap_sync(sc->sc_dmat, sc->sc_dmap, num,
+ SDMMC_SECTOR_SIZE, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap, 0,
+ SDMMC_SECTOR_SIZE, BUS_DMASYNC_PREWRITE);
+ cmd.c_datalen = SDMMC_SECTOR_SIZE;
+ cmd.c_dmamap = sf->bbuf_dmap;
+ cmd.c_dmaseg = 0;
+ cmd.c_dmaoff = 0;
+ bbuf = 1;
+ len = SDMMC_SECTOR_SIZE;
+ }
+ cmd.c_opcode = (cmd.c_datalen / cmd.c_blklen) > 1 ?
+ MMC_WRITE_BLOCK_MULTIPLE : MMC_WRITE_BLOCK_SINGLE;
+ }
+ }
error = sdmmc_mmc_command(sc, &cmd);
if (error)
@@ -1135,6 +1231,28 @@
} while (!ISSET(MMC_R1(cmd.c_resp), MMC_R1_READY_FOR_DATA));
}
+ if (ISSET(sc->sc_caps, SMC_CAPS_DMA) &&
+ !ISSET(sc->sc_caps, SMC_CAPS_MULTI_SEG_DMA)) {
+ bus_dma_segment_t *dm_segs = sc->sc_dmap->dm_segs;
+
+ if (bbuf)
+ bus_dmamap_sync(sc->sc_dmat, sf->bbuf_dmap,
+ 0, SDMMC_SECTOR_SIZE, BUS_DMASYNC_POSTWRITE);
+ num += len;
+ data += len;
+ datalen -= len;
+ blkno += (len / SDMMC_SECTOR_SIZE);
+
+ while (off + len >= dm_segs[seg].ds_len) {
+ len -= dm_segs[seg++].ds_len;
+ off = 0;
+ }
+ off += len;
+
+ if (seg < sc->sc_dmap->dm_nsegs)
+ goto retry;
+ }
+
out:
return error;
}
Index: sdmmcchip.h
===================================================================
RCS file: /cvsroot/src/sys/dev/sdmmc/sdmmcchip.h,v
retrieving revision 1.2
diff -u -r1.2 sdmmcchip.h
--- sdmmcchip.h 6 Apr 2010 15:10:09 -0000 1.2
+++ sdmmcchip.h 23 Sep 2010 12:06:31 -0000
@@ -44,10 +44,11 @@
/* write protect */
int (*write_protect)(sdmmc_chipset_handle_t);
- /* bus power, clock frequency and width */
+ /* bus power, clock frequency, width and ROD (open-drain/push-pull) */
int (*bus_power)(sdmmc_chipset_handle_t, uint32_t);
int (*bus_clock)(sdmmc_chipset_handle_t, int);
int (*bus_width)(sdmmc_chipset_handle_t, int);
+ int (*bus_rod)(sdmmc_chipset_handle_t, int);
/* command execution */
void (*exec_command)(sdmmc_chipset_handle_t,
@@ -72,13 +73,15 @@
/* write protect */
#define sdmmc_chip_write_protect(tag, handle) \
((tag)->write_protect((handle)))
-/* bus power, clock frequency and width */
+/* bus power, clock frequency, width and rod */
#define sdmmc_chip_bus_power(tag, handle, ocr) \
((tag)->bus_power((handle), (ocr)))
#define sdmmc_chip_bus_clock(tag, handle, freq) \
((tag)->bus_clock((handle), (freq)))
#define sdmmc_chip_bus_width(tag, handle, width) \
((tag)->bus_width((handle), (width)))
+#define sdmmc_chip_bus_rod(tag, handle, width) \
+ ((tag)->bus_rod((handle), (width)))
/* command execution */
#define sdmmc_chip_exec_command(tag, handle, cmdp) \
((tag)->exec_command((handle), (cmdp)))
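The new bus_rod hook lets the sdmmc layer switch the CMD line from
open-drain (ROD) back to push-pull once card identification is finished,
as in the sdmmc_mem.c hunk above. Here is a hedged sketch of what an
implementation in a host controller driver might look like; the softc
layout, register offset and bit name are invented placeholders:

#include <sys/param.h>
#include <machine/bus.h>

#include <dev/sdmmc/sdmmcchip.h>

#define HYPO_CTRL_REG		0x00	/* placeholder register offset */
#define HYPO_CTRL_OPENDRAIN	0x01	/* placeholder bit */

struct hypothetical_softc {
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
};

static int
hypothetical_bus_rod(sdmmc_chipset_handle_t sch, int on)
{
	struct hypothetical_softc *sc = sch;
	uint32_t reg;

	reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, HYPO_CTRL_REG);
	if (on)
		reg |= HYPO_CTRL_OPENDRAIN;	/* ROD: open-drain command line */
	else
		reg &= ~HYPO_CTRL_OPENDRAIN;	/* back to push-pull */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, HYPO_CTRL_REG, reg);

	return 0;
}

The function pointer then goes into the driver's struct
sdmmc_chip_functions next to bus_width; a controller whose command line
is always push-pull can simply return 0 here.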
Index: sdmmcvar.h
===================================================================
RCS file: /cvsroot/src/sys/dev/sdmmc/sdmmcvar.h,v
retrieving revision 1.6
diff -u -r1.6 sdmmcvar.h
--- sdmmcvar.h 23 Sep 2010 12:03:27 -0000 1.6
+++ sdmmcvar.h 23 Sep 2010 12:06:31 -0000
@@ -85,6 +85,8 @@
uint32_t c_arg; /* SD/MMC command argument */
sdmmc_response c_resp; /* response buffer */
bus_dmamap_t c_dmamap;
+ int c_dmaseg; /* DMA segment number */
+ int c_dmaoff; /* offset in DMA segment */
void *c_data; /* buffer to send or read into */
int c_datalen; /* length of data buffer */
int c_blklen; /* block length */
@@ -176,6 +178,8 @@
sdmmc_response raw_cid; /* temp. storage for decoding */
uint32_t raw_scr[2];
struct sdmmc_scr scr; /* decoded CSR value */
+ void *bbuf; /* bounce buffer */
+ bus_dmamap_t bbuf_dmap; /* DMA map for bounce buffer */
};
/*
@@ -211,6 +215,7 @@
#define SMC_CAPS_POLL_CARD_DET 0x0010 /* Polling card detect */
#define SMC_CAPS_SINGLE_ONLY 0x0020 /* only single read/write */
#define SMC_CAPS_8BIT_MODE 0x0040 /* 8-bits data bus width */
+#define SMC_CAPS_MULTI_SEG_DMA 0x0080 /* multiple segment DMA transfer */
/* function */
int sc_function_count; /* number of I/O functions (SDIO) */
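The new c_dmaseg and c_dmaoff fields tell a single-segment host controller
which part of the shared DMA map the current command covers; c_datalen is
already limited to that one region by the sdmmc layer. A hedged sketch of
how an exec_command hook could consume them, with placeholder register
names and softc layout:

#include <sys/param.h>
#include <machine/bus.h>

#include <dev/sdmmc/sdmmcvar.h>
#include <dev/sdmmc/sdmmcchip.h>

#define HYPO_DMA_ADDR	0x10	/* placeholder register offsets */
#define HYPO_DMA_LEN	0x14

struct hypothetical_softc {
	bus_space_tag_t sc_iot;
	bus_space_handle_t sc_ioh;
};

static void
hypothetical_exec_command(sdmmc_chipset_handle_t sch,
    struct sdmmc_command *cmd)
{
	struct hypothetical_softc *sc = sch;

	if (cmd->c_dmamap != NULL && cmd->c_datalen > 0) {
		bus_dma_segment_t *seg = &cmd->c_dmamap->dm_segs[cmd->c_dmaseg];

		/*
		 * The sdmmc layer guarantees the transfer fits in this one
		 * segment, starting c_dmaoff bytes into it.
		 */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, HYPO_DMA_ADDR,
		    (uint32_t)(seg->ds_addr + cmd->c_dmaoff));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, HYPO_DMA_LEN,
		    cmd->c_datalen);
	}

	/* ... program the command registers and wait for completion ... */
}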