path: root/arch/s390/lib
author    Heiko Carstens <heiko.carstens@de.ibm.com>    2012-08-14 13:20:20 +0200
committer Martin Schwidefsky <schwidefsky@de.ibm.com>   2012-09-26 15:44:50 +0200
commit    535c611ddd3eb076f96579131e30bc5dd02a3b1c
tree      f6e3fdb6c8870ab01e35e79a4f017c6f6a365fda /arch/s390/lib
parent    68d9884dbc4215b6693c108eb35a02bd78f7956e
s390/string: provide asm lib functions for memcpy and memcmp
Our memcpy and memcmp variants were implemented by calling the corresponding
gcc builtin variants. However, gcc is free to replace a call to __builtin_memcmp
with a call to memcmp which, when called, will result in an endless recursion
within memcmp.

So let's provide asm variants and also fix the variants that are used for
uncompressing the kernel image. In addition remove all other occurrences of
builtin function calls.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
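
The hazard is easiest to see from a small sketch (illustration only, not part
of the patch): when memcmp() itself is written on top of the builtin and gcc
decides not to expand the builtin inline, it falls back to emitting a library
call, so the function ends up calling itself.

#include <stddef.h>

/* Hypothetical sketch of the old, broken scheme: gcc may lower the
 * builtin below back into a plain call to memcmp(), so this function
 * recurses into itself until the stack overflows. */
int memcmp(const void *s1, const void *s2, size_t n)
{
        return __builtin_memcmp(s1, s2, n);
}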
Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile |  3
-rw-r--r--  arch/s390/lib/mem32.S  | 92
-rw-r--r--  arch/s390/lib/mem64.S  | 88
-rw-r--r--  arch/s390/lib/string.c | 56
4 files changed, 187 insertions(+), 52 deletions(-)
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 761ab8b56af..6ab0d0b5cec 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,7 @@
lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
obj-y += usercopy.o
-obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o
+obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
+obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
new file mode 100644
index 00000000000..14ca9244b61
--- /dev/null
+++ b/arch/s390/lib/mem32.S
@@ -0,0 +1,92 @@
+/*
+ * String handling functions.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * memset implementation
+ *
+ * This code corresponds to the C construct below. We do distinguish
+ * between clearing (c == 0) and setting a memory array (c != 0) simply
+ * because nearly all memset invocations in the kernel clear memory and
+ * the xc instruction is preferred in such cases.
+ *
+ * void *memset(void *s, int c, size_t n)
+ * {
+ * if (likely(c == 0))
+ * return __builtin_memset(s, 0, n);
+ * return __builtin_memset(s, c, n);
+ * }
+ */
+ENTRY(memset)
+ basr %r5,%r0
+.Lmemset_base:
+ ltr %r4,%r4
+ bzr %r14
+ ltr %r3,%r3
+ jnz .Lmemset_fill
+ ahi %r4,-1
+ lr %r3,%r4
+ srl %r3,8
+ ltr %r3,%r3
+ lr %r1,%r2
+ je .Lmemset_clear_rest
+.Lmemset_clear_loop:
+ xc 0(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brct %r3,.Lmemset_clear_loop
+.Lmemset_clear_rest:
+ ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
+ br %r14
+.Lmemset_fill:
+ stc %r3,0(%r2)
+ chi %r4,1
+ lr %r1,%r2
+ ber %r14
+ ahi %r4,-2
+ lr %r3,%r4
+ srl %r3,8
+ ltr %r3,%r3
+ je .Lmemset_fill_rest
+.Lmemset_fill_loop:
+ mvc 1(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brct %r3,.Lmemset_fill_loop
+.Lmemset_fill_rest:
+ ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
+ br %r14
+.Lmemset_xc:
+ xc 0(1,%r1),0(%r1)
+.Lmemset_mvc:
+ mvc 1(1,%r1),0(%r1)
+
+/*
+ * memcpy implementation
+ *
+ * void *memcpy(void *dest, const void *src, size_t n)
+ */
+ENTRY(memcpy)
+ basr %r5,%r0
+.Lmemcpy_base:
+ ltr %r4,%r4
+ bzr %r14
+ ahi %r4,-1
+ lr %r0,%r4
+ srl %r0,8
+ ltr %r0,%r0
+ lr %r1,%r2
+ jnz .Lmemcpy_loop
+.Lmemcpy_rest:
+ ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
+ br %r14
+.Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brct %r0,.Lmemcpy_loop
+ j .Lmemcpy_rest
+.Lmemcpy_mvc:
+ mvc 0(1,%r1),0(%r3)
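
The copy loop above (and its 64-bit twin below) follows one pattern: mvc can
move at most 256 bytes per instruction, so the length is split into full
256-byte chunks handled in the loop and a 1..256 byte remainder that ex patches
into the mvc template at .Lmemcpy_mvc. A rough C equivalent of that split
(illustration only; the function name is made up, the real work is done by
mvc/ex above):

void *memcpy_sketch(void *dest, const void *src, size_t n)
{
        char *d = dest;
        const char *s = src;
        size_t blocks, rest, i;

        if (n == 0)
                return dest;            /* ltr %r4,%r4; bzr %r14 */
        blocks = (n - 1) >> 8;          /* full 256-byte mvc chunks */
        rest = ((n - 1) & 0xff) + 1;    /* 1..256 bytes, done via ex */
        while (blocks--) {              /* .Lmemcpy_loop */
                for (i = 0; i < 256; i++)
                        *d++ = *s++;
        }
        for (i = 0; i < rest; i++)      /* ex %r4,.Lmemcpy_mvc */
                *d++ = *s++;
        return dest;
}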
diff --git a/arch/s390/lib/mem64.S b/arch/s390/lib/mem64.S
new file mode 100644
index 00000000000..c6d553e85ab
--- /dev/null
+++ b/arch/s390/lib/mem64.S
@@ -0,0 +1,88 @@
+/*
+ * String handling functions.
+ *
+ * Copyright IBM Corp. 2012
+ */
+
+#include <linux/linkage.h>
+
+/*
+ * memset implementation
+ *
+ * This code corresponds to the C construct below. We do distinguish
+ * between clearing (c == 0) and setting a memory array (c != 0) simply
+ * because nearly all memset invocations in the kernel clear memory and
+ * the xc instruction is preferred in such cases.
+ *
+ * void *memset(void *s, int c, size_t n)
+ * {
+ * if (likely(c == 0))
+ * return __builtin_memset(s, 0, n);
+ * return __builtin_memset(s, c, n);
+ * }
+ */
+ENTRY(memset)
+ ltgr %r4,%r4
+ bzr %r14
+ ltgr %r3,%r3
+ jnz .Lmemset_fill
+ aghi %r4,-1
+ srlg %r3,%r4,8
+ ltgr %r3,%r3
+ lgr %r1,%r2
+ jz .Lmemset_clear_rest
+.Lmemset_clear_loop:
+ xc 0(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r3,.Lmemset_clear_loop
+.Lmemset_clear_rest:
+ larl %r3,.Lmemset_xc
+ ex %r4,0(%r3)
+ br %r14
+.Lmemset_fill:
+ stc %r3,0(%r2)
+ cghi %r4,1
+ lgr %r1,%r2
+ ber %r14
+ aghi %r4,-2
+ srlg %r3,%r4,8
+ ltgr %r3,%r3
+ jz .Lmemset_fill_rest
+.Lmemset_fill_loop:
+ mvc 1(256,%r1),0(%r1)
+ la %r1,256(%r1)
+ brctg %r3,.Lmemset_fill_loop
+.Lmemset_fill_rest:
+ larl %r3,.Lmemset_mvc
+ ex %r4,0(%r3)
+ br %r14
+.Lmemset_xc:
+ xc 0(1,%r1),0(%r1)
+.Lmemset_mvc:
+ mvc 1(1,%r1),0(%r1)
+
+/*
+ * memcpy implementation
+ *
+ * void *memcpy(void *dest, const void *src, size_t n)
+ */
+ENTRY(memcpy)
+ ltgr %r4,%r4
+ bzr %r14
+ aghi %r4,-1
+ srlg %r5,%r4,8
+ ltgr %r5,%r5
+ lgr %r1,%r2
+ jnz .Lmemcpy_loop
+.Lmemcpy_rest:
+ larl %r5,.Lmemcpy_mvc
+ ex %r4,0(%r5)
+ br %r14
+.Lmemcpy_loop:
+ mvc 0(256,%r1),0(%r3)
+ la %r1,256(%r1)
+ la %r3,256(%r3)
+ brctg %r5,.Lmemcpy_loop
+ j .Lmemcpy_rest
+.Lmemcpy_mvc:
+ mvc 0(1,%r1),0(%r3)
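
The non-zero memset path in both files relies on a classic trick: after stc
stores the fill byte once, the overlapping mvc 1(...,%r1),0(%r1) propagates it,
because mvc copies strictly left to right one byte at a time, so every byte is
read only after the previous one has been written. In C terms, roughly
(illustration only; the function name is made up):

void *memset_fill_sketch(void *s, int c, size_t n)
{
        char *p = s;
        size_t i;

        if (n == 0)
                return s;
        p[0] = (char) c;                /* stc %r3,0(%r2) */
        /* mvc 1(len,%r1),0(%r1): destination overlaps the source shifted
         * by one byte, smearing the fill byte across the buffer */
        for (i = 1; i < n; i++)
                p[i] = p[i - 1];
        return s;
}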
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 846ec64ab2c..b647d5ff0ad 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -43,11 +43,7 @@ static inline char *__strnend(const char *s, size_t n)
*/
size_t strlen(const char *s)
{
-#if __GNUC__ < 4
return __strend(s) - s;
-#else
- return __builtin_strlen(s);
-#endif
}
EXPORT_SYMBOL(strlen);
@@ -73,7 +69,6 @@ EXPORT_SYMBOL(strnlen);
*/
char *strcpy(char *dest, const char *src)
{
-#if __GNUC__ < 4
register int r0 asm("0") = 0;
char *ret = dest;
@@ -82,9 +77,6 @@ char *strcpy(char *dest, const char *src)
: "+&a" (dest), "+&a" (src) : "d" (r0)
: "cc", "memory" );
return ret;
-#else
- return __builtin_strcpy(dest, src);
-#endif
}
EXPORT_SYMBOL(strcpy);
@@ -106,7 +98,7 @@ size_t strlcpy(char *dest, const char *src, size_t size)
if (size) {
size_t len = (ret >= size) ? size-1 : ret;
dest[len] = '\0';
- __builtin_memcpy(dest, src, len);
+ memcpy(dest, src, len);
}
return ret;
}
@@ -124,8 +116,8 @@ EXPORT_SYMBOL(strlcpy);
char *strncpy(char *dest, const char *src, size_t n)
{
size_t len = __strnend(src, n) - src;
- __builtin_memset(dest + len, 0, n - len);
- __builtin_memcpy(dest, src, len);
+ memset(dest + len, 0, n - len);
+ memcpy(dest, src, len);
return dest;
}
EXPORT_SYMBOL(strncpy);
@@ -171,7 +163,7 @@ size_t strlcat(char *dest, const char *src, size_t n)
if (len >= n)
len = n - 1;
dest[len] = '\0';
- __builtin_memcpy(dest, src, len);
+ memcpy(dest, src, len);
}
return res;
}
@@ -194,7 +186,7 @@ char *strncat(char *dest, const char *src, size_t n)
char *p = __strend(dest);
p[len] = '\0';
- __builtin_memcpy(p, src, len);
+ memcpy(p, src, len);
return dest;
}
EXPORT_SYMBOL(strncat);
@@ -348,41 +340,3 @@ void *memscan(void *s, int c, size_t n)
return (void *) ret;
}
EXPORT_SYMBOL(memscan);
-
-/**
- * memcpy - Copy one area of memory to another
- * @dest: Where to copy to
- * @src: Where to copy from
- * @n: The size of the area.
- *
- * returns a pointer to @dest
- */
-void *memcpy(void *dest, const void *src, size_t n)
-{
- return __builtin_memcpy(dest, src, n);
-}
-EXPORT_SYMBOL(memcpy);
-
-/**
- * memset - Fill a region of memory with the given value
- * @s: Pointer to the start of the area.
- * @c: The byte to fill the area with
- * @n: The size of the area.
- *
- * returns a pointer to @s
- */
-void *memset(void *s, int c, size_t n)
-{
- char *xs;
-
- if (c == 0)
- return __builtin_memset(s, 0, n);
-
- xs = (char *) s;
- if (n > 0)
- do {
- *xs++ = c;
- } while (--n > 0);
- return s;
-}
-EXPORT_SYMBOL(memset);