pkgsrc-Changes-HG archive


[pkgsrc/trunk]: pkgsrc/lang/STk Fix multi-line strings, to make this build when using gcc3.

details:   https://anonhg.NetBSD.org/pkgsrc/rev/608a5e2c117b
branches:  trunk
changeset: 477866:608a5e2c117b
user:      kristerw <kristerw%pkgsrc.org@localhost>
date:      Sat Jul 10 17:52:10 2004 +0000

description:
Fix multi-line strings, to make this build when using gcc3.
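
For context: gcc 2.x accepted a raw newline inside a string literal, but gcc 3 rejects it as an unterminated string constant, which is what broke the inline-asm macros in Mp/gmp-1.3.2/longlong.h. The patch therefore ends each such source line with "\n\": the "\n" keeps the newline the assembler expects between instructions, and the trailing backslash continues the string literal onto the next line. Below is a minimal sketch of the repaired i386 add_ssaaaa macro, condensed from the hunks further down; the operand constraints are illustrative of the file's style rather than an authoritative copy of it.

/*
 * Before the patch the asm template contained a literal newline, e.g.
 *
 *     __asm__ ("addl %5,%1
 *          adcl %3,%0" ...);
 *
 * which gcc 3 rejects.  The patched form keeps the newline via "\n" and
 * splices the next source line onto the same string with a backslash.
 */
#define add_ssaaaa(sh, sl, ah, al, bh, bl)                              \
  __asm__ ("addl %5,%1\n\
        adcl %3,%0"                                                     \
           : "=r" ((unsigned long int)(sh)),    /* high word of sum */  \
             "=&r" ((unsigned long int)(sl))    /* low word of sum  */  \
           : "%0" ((unsigned long int)(ah)),                            \
             "g" ((unsigned long int)(bh)),                             \
             "%1" ((unsigned long int)(al)),                            \
             "g" ((unsigned long int)(bl)))

A call such as add_ssaaaa(hi, lo, a1, a0, b1, b0) then expands to the same two-instruction add-with-carry sequence on i386 as before; only the way the string literal is continued across lines changes.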

diffstat:

 lang/STk/distinfo         |    3 +-
 lang/STk/patches/patch-ai |  539 ++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 541 insertions(+), 1 deletions(-)

diffs (truncated from 557 to 300 lines):

diff -r 76d8d6c57876 -r 608a5e2c117b lang/STk/distinfo
--- a/lang/STk/distinfo Sat Jul 10 17:39:50 2004 +0000
+++ b/lang/STk/distinfo Sat Jul 10 17:52:10 2004 +0000
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.3 2003/05/10 00:26:33 jtb Exp $
+$NetBSD: distinfo,v 1.4 2004/07/10 17:52:10 kristerw Exp $
 
 SHA1 (STk-4.0.1.tar.gz) = 50dc98c2b64db1c9cca54411cb406feb4cb57600
 Size (STk-4.0.1.tar.gz) = 3673684 bytes
@@ -10,3 +10,4 @@
 SHA1 (patch-af) = 2625837a9d784071b2bfed71e2798891c6c47b71
 SHA1 (patch-ag) = dde1a30f1b0f0d781d8d2389af6bc2393dd40729
 SHA1 (patch-ah) = 353ac24f2a097e360067a292828ef2f469e137c8
+SHA1 (patch-ai) = f83af050a51c95f2de6d0cd109388e822727a724
diff -r 76d8d6c57876 -r 608a5e2c117b lang/STk/patches/patch-ai
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/lang/STk/patches/patch-ai Sat Jul 10 17:52:10 2004 +0000
@@ -0,0 +1,539 @@
+$NetBSD: patch-ai,v 1.1 2004/07/10 17:52:10 kristerw Exp $
+
+--- Mp/gmp-1.3.2/longlong.h.orig       2004-07-10 19:29:15.000000000 +0200
++++ Mp/gmp-1.3.2/longlong.h    2004-07-10 19:46:54.000000000 +0200
+@@ -91,7 +91,7 @@
+ 
+ #if defined (__a29k__) || defined (___AM29K__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("add %1,%4,%5
++  __asm__ ("add %1,%4,%5\n\
+       addc %0,%2,%3"                                                  \
+          : "=r" ((unsigned long int)(sh)),                            \
+           "=&r" ((unsigned long int)(sl))                             \
+@@ -100,7 +100,7 @@
+            "%r" ((unsigned long int)(al)),                            \
+            "rI" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("sub %1,%4,%5
++  __asm__ ("sub %1,%4,%5\n\
+       subc %0,%2,%3"                                                  \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -149,7 +149,7 @@
+ 
+ #if defined (__arm__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("adds %1,%4,%5
++  __asm__ ("adds %1,%4,%5\n\
+       adc %0,%2,%3"                                                   \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -158,7 +158,7 @@
+            "%r" ((unsigned long int)(al)),                            \
+            "rI" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("subs %1,%4,%5
++  __asm__ ("subs %1,%4,%5\n\
+       sbc %0,%2,%3"                                                   \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -170,7 +170,7 @@
+ 
+ #if defined (__gmicro__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("add.w %5,%1
++  __asm__ ("add.w %5,%1\n\
+       addx %3,%0"                                                     \
+          : "=g" ((unsigned long int)(sh)),                            \
+            "=&g" ((unsigned long int)(sl))                            \
+@@ -179,7 +179,7 @@
+            "%1" ((unsigned long int)(al)),                            \
+            "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("sub.w %5,%1
++  __asm__ ("sub.w %5,%1\n\
+       subx %3,%0"                                                     \
+          : "=g" ((unsigned long int)(sh)),                            \
+            "=&g" ((unsigned long int)(sl))                            \
+@@ -209,7 +209,7 @@
+ 
+ #if defined (__hppa)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("add %4,%5,%1
++  __asm__ ("add %4,%5,%1\n\
+       addc %2,%3,%0"                                                  \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -218,7 +218,7 @@
+            "%rM" ((unsigned long int)(al)),                           \
+            "rM" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("sub %4,%5,%1
++  __asm__ ("sub %4,%5,%1\n\
+       subb %2,%3,%0"                                                  \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -249,28 +249,28 @@
+   do {                                                                        \
+     unsigned long int __tmp;                                          \
+     __asm__ (                                                         \
+-       "ldi           1,%0
+-      extru,=         %1,15,16,%%r0           ; Bits 31..16 zero?
+-      extru,tr        %1,15,16,%1             ; No.  Shift down, skip add.
+-      ldo             16(%0),%0               ; Yes.  Perform add.
+-      extru,=         %1,23,8,%%r0            ; Bits 15..8 zero?
+-      extru,tr        %1,23,8,%1              ; No.  Shift down, skip add.
+-      ldo             8(%0),%0                ; Yes.  Perform add.
+-      extru,=         %1,27,4,%%r0            ; Bits 7..4 zero?
+-      extru,tr        %1,27,4,%1              ; No.  Shift down, skip add.
+-      ldo             4(%0),%0                ; Yes.  Perform add.
+-      extru,=         %1,29,2,%%r0            ; Bits 3..2 zero?
+-      extru,tr        %1,29,2,%1              ; No.  Shift down, skip add.
+-      ldo             2(%0),%0                ; Yes.  Perform add.
+-      extru           %1,30,1,%1              ; Extract bit 1.
+-      sub             %0,%1,%0                ; Subtract it.
++       "ldi           1,%0\n\
++      extru,=         %1,15,16,%%r0           ; Bits 31..16 zero?\n\
++      extru,tr        %1,15,16,%1             ; No.  Shift down, skip add.\n\
++      ldo             16(%0),%0               ; Yes.  Perform add.\n\
++      extru,=         %1,23,8,%%r0            ; Bits 15..8 zero?\n\
++      extru,tr        %1,23,8,%1              ; No.  Shift down, skip add.\n\
++      ldo             8(%0),%0                ; Yes.  Perform add.\n\
++      extru,=         %1,27,4,%%r0            ; Bits 7..4 zero?\n\
++      extru,tr        %1,27,4,%1              ; No.  Shift down, skip add.\n\
++      ldo             4(%0),%0                ; Yes.  Perform add.\n\
++      extru,=         %1,29,2,%%r0            ; Bits 3..2 zero?\n\
++      extru,tr        %1,29,2,%1              ; No.  Shift down, skip add.\n\
++      ldo             2(%0),%0                ; Yes.  Perform add.\n\
++      extru           %1,30,1,%1              ; Extract bit 1.\n\
++      sub             %0,%1,%0                ; Subtract it.\n\
+       " : "=r" (count), "=r" (__tmp) : "1" (x));                      \
+   } while (0)
+ #endif
+ 
+ #if defined (__i386__) || defined (__i486__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("addl %5,%1
++  __asm__ ("addl %5,%1\n\
+       adcl %3,%0"                                                     \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -279,7 +279,7 @@
+            "%1" ((unsigned long int)(al)),                            \
+            "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("subl %5,%1
++  __asm__ ("subl %5,%1\n\
+       sbbl %3,%0"                                                     \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -367,7 +367,7 @@
+ 
+ #if defined (___IBMR2__) /* IBM RS6000 */
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("a%I5 %1,%4,%5
++  __asm__ ("a%I5 %1,%4,%5\n\
+       ae %0,%2,%3"                                                    \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -376,7 +376,7 @@
+            "%r" ((unsigned long int)(al)),                            \
+            "rI" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("sf%I4 %1,%5,%4
++  __asm__ ("sf%I4 %1,%5,%4\n\
+       sfe %0,%3,%2"                                                   \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -415,7 +415,7 @@
+ 
+ #if defined (__mc68000__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("add%.l %5,%1
++  __asm__ ("add%.l %5,%1\n\
+       addx%.l %3,%0"                                                  \
+          : "=d" ((unsigned long int)(sh)),                            \
+            "=&d" ((unsigned long int)(sl))                            \
+@@ -424,7 +424,7 @@
+            "%1" ((unsigned long int)(al)),                            \
+            "g" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("sub%.l %5,%1
++  __asm__ ("sub%.l %5,%1\n\
+       subx%.l %3,%0"                                                  \
+          : "=d" ((unsigned long int)(sh)),                            \
+            "=&d" ((unsigned long int)(sl))                            \
+@@ -463,31 +463,31 @@
+ /* This ought to be improved by relying on reload to move inputs and
+    outputs to their positions.  */
+ #define umul_ppmm(xh, xl, a, b) \
+-  __asm__ ("| Inlined umul_ppmm
+-      movel   %2,d0
+-      movel   %3,d1
+-      movel   d0,d2
+-      swap    d0
+-      movel   d1,d3
+-      swap    d1
+-      movew   d2,d4
+-      mulu    d3,d4
+-      mulu    d1,d2
+-      mulu    d0,d3
+-      mulu    d0,d1
+-      movel   d4,d0
+-      eorw    d0,d0
+-      swap    d0
+-      addl    d0,d2
+-      addl    d3,d2
+-      jcc     1f
+-      addl    #65536,d1
+-1:    swap    d2
+-      moveq   #0,d0
+-      movew   d2,d0
+-      movew   d4,d2
+-      movel   d2,%1
+-      addl    d1,d0
++  __asm__ ("| Inlined umul_ppmm\n\
++      movel   %2,d0\n\
++      movel   %3,d1\n\
++      movel   d0,d2\n\
++      swap    d0\n\
++      movel   d1,d3\n\
++      swap    d1\n\
++      movew   d2,d4\n\
++      mulu    d3,d4\n\
++      mulu    d1,d2\n\
++      mulu    d0,d3\n\
++      mulu    d0,d1\n\
++      movel   d4,d0\n\
++      eorw    d0,d0\n\
++      swap    d0\n\
++      addl    d0,d2\n\
++      addl    d3,d2\n\
++      jcc     1f\n\
++      addl    #65536,d1\n\
++1:    swap    d2\n\
++      moveq   #0,d0\n\
++      movew   d2,d0\n\
++      movew   d4,d2\n\
++      movel   d2,%1\n\
++      addl    d1,d0\n\
+       movel   d0,%0"                                                  \
+          : "=g" ((unsigned long int)(xh)),                            \
+            "=g" ((unsigned long int)(xl))                             \
+@@ -501,7 +501,7 @@
+ 
+ #if defined (__m88000__)
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("addu.co %1,%r4,%r5
++  __asm__ ("addu.co %1,%r4,%r5\n\
+       addu.ci %0,%r2,%r3"                                             \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -510,7 +510,7 @@
+            "%rJ" ((unsigned long int)(al)),                           \
+            "rJ" ((unsigned long int)(bl)))
+ #define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+-  __asm__ ("subu.co %1,%r4,%r5
++  __asm__ ("subu.co %1,%r4,%r5\n\
+       subu.ci %0,%r2,%r3"                                             \
+          : "=r" ((unsigned long int)(sh)),                            \
+            "=&r" ((unsigned long int)(sl))                            \
+@@ -543,11 +543,11 @@
+   } while (0)
+ 
+ #define udiv_qrnnd(q, r, n1, n0, d) \
+-  __asm__ ("or        r10,%2,0
+-      or      r11,%3,0
+-      divu.d  r10,r10,%4
+-      mulu    %1,%4,r11
+-      subu    %1,%3,%1
++  __asm__ ("or        r10,%2,0\n\
++      or      r11,%3,0\n\
++      divu.d  r10,r10,%4\n\
++      mulu    %1,%4,r11\n\
++      subu    %1,%3,%1\n\
+       or      %0,r11,0"                                               \
+          : "=r" (q),                                                  \
+            "=&r" (r)                                                  \
+@@ -569,8 +569,8 @@
+            "d" ((unsigned long int)(v)))
+ #else
+ #define umul_ppmm(w1, w0, u, v) \
+-  __asm__ ("multu %2,%3
+-      mflo %0
++  __asm__ ("multu %2,%3\n\
++      mflo %0\n\
+       mfhi %1"                                                        \
+          : "=d" ((unsigned long int)(w0)),                            \
+            "=d" ((unsigned long int)(w1))                             \
+@@ -599,10 +599,10 @@
+              "g" ((unsigned long int)(v)));                           \
+     __w; })
+ #define udiv_qrnnd(q, r, n1, n0, d) \
+-  __asm__ ("movd %2,r0
+-      movd %3,r1
+-      deid %4,r0
+-      movd r1,%0
++  __asm__ ("movd %2,r0\n\
++      movd %3,r1\n\
++      deid %4,r0\n\
++      movd r1,%0\n\
+       movd r0,%1"                                                     \


