Source-Changes-HG archive
[src/netbsd-1-6]: src/sys/dev/pci Pull up revisions 1.43-1.45 (requested by m...
details: https://anonhg.NetBSD.org/src/rev/e3c54a78696c
branches: netbsd-1-6
changeset: 529625:e3c54a78696c
user: he <he%NetBSD.org@localhost>
date: Sun Dec 01 19:44:35 2002 +0000
description:
Pull up revisions 1.43-1.45 (requested by mycroft in ticket #837):
Simplify blitting code slightly, fix fenceposts.
Copy forward when moving to the right if no overlap.
Force GPSR to 0 before a blit, so text doesn't shift right.
Fixes a few problems causing occasional corruption with
TGA/TGA2 consoles.
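
[Illustrative note, not part of the commit message.] The rule the revised blit code follows can be summarized in plain C. The sketch below is only an illustration of the direction-selection idea; the names safe_blit, fb, stride and bpp are invented for the example and are not the driver's API. Rows are walked away from the destination so a vertically overlapping copy never reads a row it has already written, and within a row the copy runs forward unless the destination starts inside the source span, in which case it runs backward.

/*
 * Illustrative sketch only -- not the tga.c code.  Copy a w x h pixel
 * rectangle within one framebuffer, picking copy directions that stay
 * safe when the source and destination rectangles overlap.
 */
#include <stdint.h>

void
safe_blit(uint8_t *fb, int stride, int bpp,
    int sx, int sy, int dx, int dy, int w, int h)
{
	int wb = w * bpp;			/* row width in bytes */
	uint8_t *src = fb + sy * stride + sx * bpp;
	uint8_t *dst = fb + dy * stride + dx * bpp;
	int yinc = stride;
	int x, y;

	if (sy < dy) {			/* moving down: walk rows bottom-up */
		src += (h - 1) * stride;
		dst += (h - 1) * stride;
		yinc = -stride;
	}

	for (y = 0; y < h; y++, src += yinc, dst += yinc) {
		if (sx >= dx || sx + w <= dx) {
			/* no overlap to the right: forward copy is safe */
			for (x = 0; x < wb; x++)
				dst[x] = src[x];
		} else {
			/* destination starts inside the source span: copy backward */
			for (x = wb - 1; x >= 0; x--)
				dst[x] = src[x];
		}
	}
}
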
diffstat:
sys/dev/pci/tga.c | 131 +++++++++++++++++++++++++----------------------------
1 files changed, 61 insertions(+), 70 deletions(-)
diffs (205 lines):
diff -r 7cf483719aeb -r e3c54a78696c sys/dev/pci/tga.c
--- a/sys/dev/pci/tga.c Sun Dec 01 19:36:17 2002 +0000
+++ b/sys/dev/pci/tga.c Sun Dec 01 19:44:35 2002 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: tga.c,v 1.41 2002/03/17 19:41:00 atatat Exp $ */
+/* $NetBSD: tga.c,v 1.41.6.1 2002/12/01 19:44:35 he Exp $ */
/*
* Copyright (c) 1995, 1996 Carnegie-Mellon University.
@@ -28,7 +28,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: tga.c,v 1.41 2002/03/17 19:41:00 atatat Exp $");
+__KERNEL_RCSID(0, "$NetBSD: tga.c,v 1.41.6.1 2002/12/01 19:44:35 he Exp $");
#include <sys/param.h>
#include <sys/systm.h>
@@ -1072,32 +1072,6 @@
return -1;
}
- wb = w * (dst->ri_depth / 8);
- if (sy >= dy) {
- ystart = 0;
- yend = h;
- ydir = 1;
- } else {
- ystart = h;
- yend = 0;
- ydir = -1;
- }
- if (sx >= dx) { /* moving to the left */
- xstart = 0;
- xend = w * (dst->ri_depth / 8) - 4;
- xdir = 1;
- } else { /* moving to the right */
- xstart = wb - ( wb >= 4*64 ? 4*64 : wb >= 64 ? 64 : 4 );
- xend = 0;
- xdir = -1;
- }
-#define XINC4 4
-#define XINC64 64
-#define XINC256 (64*4)
- yinc = ydir * dst->ri_stride;
- ystart *= dst->ri_stride;
- yend *= dst->ri_stride;
-
srcb = sy * src->ri_stride + sx * (src->ri_depth/8);
dstb = dy * dst->ri_stride + dx * (dst->ri_depth/8);
tga_srcb = offset + (sy + src->ri_yorigin) * src->ri_stride +
@@ -1105,8 +1079,31 @@
tga_dstb = offset + (dy + dst->ri_yorigin) * dst->ri_stride +
(dx + dst->ri_xorigin) * (dst->ri_depth/8);
- TGAWALREG(dc, TGA_REG_GMOR, 3, 0x0007); /* Copy mode */
+ if (sy >= dy) {
+ ystart = 0;
+ yend = (h - 1) * dst->ri_stride;
+ ydir = 1;
+ } else {
+ ystart = (h - 1) * dst->ri_stride;
+ yend = 0;
+ ydir = -1;
+ }
+ yinc = ydir * dst->ri_stride;
+
+ wb = w * (dst->ri_depth / 8);
+ if (sx >= dx || (sx + w) <= dx) { /* copy forwards */
+ xstart = 0;
+ xend = wb;
+ xdir = 1;
+ } else { /* copy backwards */
+ xstart = wb;
+ xend = 0;
+ xdir = -1;
+ }
+
+ TGAWALREG(dc, TGA_REG_GMOR, 3, 0x0007); /* Copy mode */
TGAWALREG(dc, TGA_REG_GOPR, 3, map_rop[rop]); /* Set up the op */
+ TGAWALREG(dc, TGA_REG_GPSR, 3, 0); /* No shift */
/*
* we have 3 sizes of pixels to move in X direction:
@@ -1118,11 +1115,9 @@
if (xdir == 1) { /* move to the left */
for (y = ystart; (ydir * y) <= (ydir * yend); y += yinc) {
-
/* 4*64 byte chunks */
- for (xleft = wb, x = xstart;
- x <= xend && xleft >= 4*64;
- x += XINC256, xleft -= XINC256) {
+ for (xleft = wb, x = xstart; xleft >= 4*64;
+ x += 4*64, xleft -= 4*64) {
/* XXX XXX Eight writes to different addresses should fill
* XXX XXX up the write buffers on 21064 and 21164 chips,
@@ -1141,35 +1136,33 @@
}
/* 64 byte chunks */
- for ( ; x <= xend && xleft >= 64;
- x += XINC64, xleft -= XINC64) {
+ for (; xleft >= 64; x += 64, xleft -= 64) {
TGAWALREG(dc, TGA_REG_GCSR, 0, tga_srcb + y + x + 0 * 64);
TGAWALREG(dc, TGA_REG_GCDR, 0, tga_dstb + y + x + 0 * 64);
}
+
lastx = x; lastleft = xleft; /* remember for CPU loop */
-
}
TGAWALREG(dc, TGA_REG_GOPR, 0, 0x0003); /* op -> dst = src */
TGAWALREG(dc, TGA_REG_GMOR, 0, 0x0000); /* Simple mode */
- for (y = ystart; (ydir * y) <= (ydir * yend); y += yinc) {
- /* 4 byte granularity */
- for (x = lastx, xleft = lastleft;
- x <= xend && xleft >= 4;
- x += XINC4, xleft -= XINC4) {
- *(uint32_t *)(dst->ri_bits + dstb + y + x) =
- *(uint32_t *)(dst->ri_bits + srcb + y + x);
+ if (lastleft) {
+ for (y = ystart; (ydir * y) <= (ydir * yend); y += yinc) {
+ /* 4 byte granularity */
+ for (x = lastx, xleft = lastleft; xleft >= 4;
+ x += 4, xleft -= 4) {
+ *(uint32_t *)(dst->ri_bits + dstb + y + x + 0 * 4) =
+ *(uint32_t *)(dst->ri_bits + srcb + y + x + 0 * 4);
+ }
}
}
}
else { /* above move to the left, below move to the right */
for (y = ystart; (ydir * y) <= (ydir * yend); y += yinc) {
-
/* 4*64 byte chunks */
- for (xleft = wb, x = xstart;
- x >= xend && xleft >= 4*64;
- x -= XINC256, xleft -= XINC256) {
+ for (xleft = wb, x = xstart; xleft >= 4*64;
+ x -= 4*64, xleft -= 4*64) {
/* XXX XXX Eight writes to different addresses should fill
* XXX XXX up the write buffers on 21064 and 21164 chips,
@@ -1177,37 +1170,35 @@
* XXX XXX require further unrolling of this loop, or the
* XXX XXX insertion of memory barriers.
*/
- TGAWALREG(dc, TGA_REG_GCSR, 0, tga_srcb + y + x + 3 * 64);
- TGAWALREG(dc, TGA_REG_GCDR, 0, tga_dstb + y + x + 3 * 64);
- TGAWALREG(dc, TGA_REG_GCSR, 1, tga_srcb + y + x + 2 * 64);
- TGAWALREG(dc, TGA_REG_GCDR, 1, tga_dstb + y + x + 2 * 64);
- TGAWALREG(dc, TGA_REG_GCSR, 2, tga_srcb + y + x + 1 * 64);
- TGAWALREG(dc, TGA_REG_GCDR, 2, tga_dstb + y + x + 1 * 64);
- TGAWALREG(dc, TGA_REG_GCSR, 3, tga_srcb + y + x + 0 * 64);
- TGAWALREG(dc, TGA_REG_GCDR, 3, tga_dstb + y + x + 0 * 64);
+ TGAWALREG(dc, TGA_REG_GCSR, 0, tga_srcb + y + x - 1 * 64);
+ TGAWALREG(dc, TGA_REG_GCDR, 0, tga_dstb + y + x - 1 * 64);
+ TGAWALREG(dc, TGA_REG_GCSR, 1, tga_srcb + y + x - 2 * 64);
+ TGAWALREG(dc, TGA_REG_GCDR, 1, tga_dstb + y + x - 2 * 64);
+ TGAWALREG(dc, TGA_REG_GCSR, 2, tga_srcb + y + x - 3 * 64);
+ TGAWALREG(dc, TGA_REG_GCDR, 2, tga_dstb + y + x - 3 * 64);
+ TGAWALREG(dc, TGA_REG_GCSR, 3, tga_srcb + y + x - 4 * 64);
+ TGAWALREG(dc, TGA_REG_GCDR, 3, tga_dstb + y + x - 4 * 64);
}
- if (xleft) x += XINC256 - XINC64;
+ /* 64 byte chunks */
+ for (; xleft >= 64; x -= 64, xleft -= 64) {
+ TGAWALREG(dc, TGA_REG_GCSR, 0, tga_srcb + y + x - 1 * 64);
+ TGAWALREG(dc, TGA_REG_GCDR, 0, tga_dstb + y + x - 1 * 64);
+ }
- /* 64 byte chunks */
- for ( ; x >= xend && xleft >= 64;
- x -= XINC64, xleft -= XINC64) {
- TGAWALREG(dc, TGA_REG_GCSR, 0, tga_srcb + y + x + 0 * 64);
- TGAWALREG(dc, TGA_REG_GCDR, 0, tga_dstb + y + x + 0 * 64);
- }
- if (xleft) x += XINC64 - XINC4;
lastx = x; lastleft = xleft; /* remember for CPU loop */
}
TGAWALREG(dc, TGA_REG_GOPR, 0, 0x0003); /* op -> dst = src */
TGAWALREG(dc, TGA_REG_GMOR, 0, 0x0000); /* Simple mode */
- for (y = ystart; (ydir * y) <= (ydir * yend); y += yinc) {
- /* 4 byte granularity */
- for (x = lastx, xleft = lastleft;
- x >= xend && xleft >= 4;
- x -= XINC4, xleft -= XINC4) {
- *(uint32_t *)(dst->ri_bits + dstb + y + x) =
- *(uint32_t *)(dst->ri_bits + srcb + y + x);
+ if (lastleft) {
+ for (y = ystart; (ydir * y) <= (ydir * yend); y += yinc) {
+ /* 4 byte granularity */
+ for (x = lastx, xleft = lastleft; xleft >= 4;
+ x -= 4, xleft -= 4) {
+ *(uint32_t *)(dst->ri_bits + dstb + y + x - 1 * 4) =
+ *(uint32_t *)(dst->ri_bits + srcb + y + x - 1 * 4);
+ }
}
}
}
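
[Illustrative note, not part of the diff.] The simplified loops above are driven purely by the remaining byte count (xleft), so the old x <= xend / x >= xend comparisons that produced the fencepost errors disappear. A standalone, hypothetical sketch of that chunking pattern follows; copy_chunk and copy_row are invented names, and the real code issues TGAWALREG() writes to the copy engine instead of calling a helper.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the hardware copy engine: just report what would move. */
static void
copy_chunk(size_t off, size_t len)
{
	printf("copy %zu bytes at offset %zu\n", len, off);
}

/* One row split into 4*64-, 64- and 4-byte pieces, driven by xleft alone. */
static void
copy_row(size_t wb)
{
	size_t x, xleft;

	for (x = 0, xleft = wb; xleft >= 4 * 64; x += 4 * 64, xleft -= 4 * 64)
		copy_chunk(x, 4 * 64);		/* four back-to-back 64-byte copies */
	for (; xleft >= 64; x += 64, xleft -= 64)
		copy_chunk(x, 64);		/* single 64-byte copy */
	for (; xleft >= 4; x += 4, xleft -= 4)
		copy_chunk(x, 4);		/* 4-byte tail, done by the CPU in tga.c */
}

int
main(void)
{
	copy_row(1220);		/* 4 x 256-byte, 3 x 64-byte and 1 x 4-byte chunks */
	return 0;
}
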