author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 15:20:36 -0700
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/mips/lib
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/mips/lib')
-rw-r--r--	arch/mips/lib/Makefile			10
-rw-r--r--	arch/mips/lib/csum_partial_copy.c	49
-rw-r--r--	arch/mips/lib/dec_and_lock.c		55
-rw-r--r--	arch/mips/lib/iomap.c			78
-rw-r--r--	arch/mips/lib/memcpy.S			508
-rw-r--r--	arch/mips/lib/promlib.c			24
-rw-r--r--	arch/mips/lib/strlen_user.S		39
-rw-r--r--	arch/mips/lib/strncpy_user.S		58
-rw-r--r--	arch/mips/lib/strnlen_user.S		45
9 files changed, 866 insertions, 0 deletions
diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile
new file mode 100644
index 00000000000..21b92b9dd01
--- /dev/null
+++ b/arch/mips/lib/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for MIPS-specific library files.
+#
+
+lib-y += csum_partial_copy.o dec_and_lock.o memcpy.o promlib.o \
+ strlen_user.o strncpy_user.o strnlen_user.o
+
+obj-y += iomap.o
+
+EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/lib/csum_partial_copy.c b/arch/mips/lib/csum_partial_copy.c
new file mode 100644
index 00000000000..ffed0a6a1c1
--- /dev/null
+++ b/arch/mips/lib/csum_partial_copy.c
@@ -0,0 +1,49 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995 Waldorf Electronics GmbH
+ * Copyright (C) 1998, 1999 Ralf Baechle
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/string.h>
+#include <asm/uaccess.h>
+#include <net/checksum.h>
+
+/*
+ * copy while checksumming, otherwise like csum_partial
+ */
+unsigned int csum_partial_copy_nocheck(const unsigned char *src, unsigned char *dst,
+ int len, unsigned int sum)
+{
+ /*
+ * It's 2:30 am and I don't feel like doing it real ...
+ * This is lots slower than the real thing (tm)
+ */
+ sum = csum_partial(src, len, sum);
+ memcpy(dst, src, len);
+
+ return sum;
+}
+
+/*
+ * Copy from userspace and compute checksum. If we catch an exception
+ * then zero the rest of the buffer.
+ */
+unsigned int csum_partial_copy_from_user (const unsigned char *src, unsigned char *dst,
+ int len, unsigned int sum, int *err_ptr)
+{
+ int missing;
+
+ might_sleep();
+ missing = copy_from_user(dst, src, len);
+ if (missing) {
+ memset(dst + len - missing, 0, missing);
+ *err_ptr = -EFAULT;
+ }
+
+ return csum_partial(dst, len, sum);
+}
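
(A hedged aside on the checksum contract: csum_partial() keeps a 32-bit running sum of 16-bit words and folds the carries back in, i.e. ones'-complement arithmetic. The userland model below sketches that fold together with the same "slow" memcpy used above; csum_and_copy_model() is an invented name, not kernel API.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical model of csum_partial_copy_nocheck(): sum 16-bit
 * big-endian words into a 32-bit accumulator, fold the carries back
 * into the low 16 bits, and do the copy with memcpy(). */
static uint32_t csum_and_copy_model(uint8_t *dst, const uint8_t *src,
				    size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)src[i] << 8) | src[i + 1];
	if (len & 1)			/* trailing odd byte */
		sum += (uint32_t)src[len - 1] << 8;
	while (sum >> 16)		/* fold carries (ones'-complement) */
		sum = (sum & 0xffff) + (sum >> 16);

	memcpy(dst, src, len);		/* the same "slow" copy as above */
	return sum;
}

int main(void)
{
	uint8_t in[4] = { 0x45, 0x00, 0x00, 0x1c }, out[4];

	printf("sum = 0x%04x\n",
	       (unsigned)csum_and_copy_model(out, in, sizeof(in), 0));
	return 0;
}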
diff --git a/arch/mips/lib/dec_and_lock.c b/arch/mips/lib/dec_and_lock.c
new file mode 100644
index 00000000000..e44e9579bd3
--- /dev/null
+++ b/arch/mips/lib/dec_and_lock.c
@@ -0,0 +1,55 @@
+/*
+ * MIPS version of atomic_dec_and_lock() using cmpxchg
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+
+/*
+ * This is an implementation of the notion of "decrement a
+ * reference count, and return locked if it decremented to zero".
+ *
+ * This implementation can be used on any architecture that
+ * has a cmpxchg, and where atomic->value is an int holding
+ * the value of the atomic (i.e. the high bits aren't used
+ * for a lock or anything like that).
+ *
+ * N.B. ATOMIC_DEC_AND_LOCK gets defined in include/linux/spinlock.h
+ * if spinlocks are empty and thus atomic_dec_and_lock is defined
+ * to be atomic_dec_and_test - in that case we don't need it
+ * defined here as well.
+ */
+
+#ifndef ATOMIC_DEC_AND_LOCK
+int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
+{
+ int counter;
+ int newcount;
+
+ for (;;) {
+ counter = atomic_read(atomic);
+ newcount = counter - 1;
+ if (!newcount)
+ break; /* do it the slow way */
+
+ newcount = cmpxchg(&atomic->counter, counter, newcount);
+ if (newcount == counter)
+ return 0;
+ }
+
+ spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock(lock);
+ return 0;
+}
+
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#endif /* ATOMIC_DEC_AND_LOCK */
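
(For readers new to cmpxchg loops, here is a hedged userland rendition of the same fast-path/slow-path split, using C11 atomics and a pthread mutex in place of the spinlock. dec_and_lock_model() is an invented name; the kernel interface remains _atomic_dec_and_lock() above.)

#include <stdatomic.h>
#include <pthread.h>

/* Model of _atomic_dec_and_lock(): return 1 with the lock held if the
 * count dropped to zero, 0 (lock not held) otherwise. */
int dec_and_lock_model(atomic_int *count, pthread_mutex_t *lock)
{
	int old = atomic_load(count);

	/* Fast path: decrement lock-free while the result stays nonzero. */
	while (old != 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return 0;
		/* the failed CAS reloaded 'old'; just retry */
	}

	/* Slow path: this decrement may reach zero, so take the lock first. */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(count, 1) == 1)
		return 1;		/* reached zero; caller must unlock */
	pthread_mutex_unlock(lock);
	return 0;
}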
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
new file mode 100644
index 00000000000..b5d5fa83376
--- /dev/null
+++ b/arch/mips/lib/iomap.c
@@ -0,0 +1,78 @@
+/*
+ * iomap.c, Memory Mapped I/O routines for MIPS architecture.
+ *
+ * This code is based on lib/iomap.c, by Linus Torvalds.
+ *
+ * Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@hh.iij4u.or.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ unsigned long end;
+
+ end = port + nr - 1UL;
+ if (ioport_resource.start > port ||
+ ioport_resource.end < end || port > end)
+ return NULL;
+
+ return (void __iomem *)(mips_io_port_base + port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start, len, flags;
+
+ if (dev == NULL)
+ return NULL;
+
+ start = pci_resource_start(dev, bar);
+ len = pci_resource_len(dev, bar);
+ if (!start || !len)
+ return NULL;
+
+ if (maxlen != 0 && len > maxlen)
+ len = maxlen;
+
+ flags = pci_resource_flags(dev, bar);
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ if (flags & IORESOURCE_CACHEABLE)
+ return ioremap_cacheable_cow(start, len);
+ return ioremap_nocache(start, len);
+ }
+
+ return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
+{
+ iounmap(addr);
+}
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
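
(A hedged usage sketch: a PCI driver's probe path is the usual consumer of pci_iomap(). The probe fragment and the 0x10 register offset below are invented for illustration; ioread32() is the accessor that works for both I/O-port and MMIO cookies, which is the point of the iomap API.)

#include <linux/pci.h>
#include <asm/io.h>

/* Illustrative only: map BAR 0, read one made-up register, unmap. */
static int example_probe_fragment(struct pci_dev *pdev)
{
	void __iomem *regs;
	u32 status;

	regs = pci_iomap(pdev, 0, 0);	/* maxlen 0 == map the whole BAR */
	if (!regs)
		return -ENOMEM;

	status = ioread32(regs + 0x10);	/* hypothetical status register */
	printk(KERN_INFO "status = %#x\n", status);

	pci_iounmap(pdev, regs);
	return 0;
}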
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
new file mode 100644
index 00000000000..afa8eae18ff
--- /dev/null
+++ b/arch/mips/lib/memcpy.S
@@ -0,0 +1,508 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Unified implementation of memcpy, memmove and the __copy_user backend.
+ *
+ * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
+ * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
+ * Copyright (C) 2002 Broadcom, Inc.
+ * memcpy/copy_user author: Mark Vandevoorde
+ *
+ * Mnemonic names for arguments to memcpy/__copy_user
+ */
+#include <linux/config.h>
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define dst a0
+#define src a1
+#define len a2
+
+/*
+ * Spec
+ *
+ * memcpy copies len bytes from src to dst and sets v0 to dst.
+ * It assumes that
+ * - src and dst don't overlap
+ * - src is readable
+ * - dst is writable
+ * memcpy uses the standard calling convention
+ *
+ * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
+ * the number of uncopied bytes due to an exception caused by a read or write.
+ * __copy_user assumes that src and dst don't overlap, and that the call is
+ * implementing one of the following:
+ * copy_to_user
+ * - src is readable (no exceptions when reading src)
+ * copy_from_user
+ * - dst is writable (no exceptions when writing dst)
+ * __copy_user uses a non-standard calling convention; see
+ * include/asm-mips/uaccess.h
+ *
+ * When an exception happens on a load, the handler must
+ * ensure that all of the destination buffer is overwritten to prevent
+ * leaking information to user mode programs.
+ */
+
+/*
+ * Implementation
+ */
+
+/*
+ * The exception handler for loads requires that:
+ * 1- AT contain the address of the byte just past the end of the source
+ * of the copy,
+ * 2- src_entry <= src < AT, and
+ * 3- (dst - src) == (dst_entry - src_entry),
+ * The _entry suffix denotes values when __copy_user was called.
+ *
+ * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
+ * (2) is met by incrementing src by the number of bytes copied
+ * (3) is met by not doing loads between a pair of increments of dst and src
+ *
+ * The exception handlers for stores adjust len (if necessary) and return.
+ * These handlers do not need to overwrite any data.
+ *
+ * For __rmemcpy and memmove an exception is always a kernel bug, therefore
+ * they're not protected.
+ */
+
+#define EXC(inst_reg,addr,handler) \
+9: inst_reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Only on the 64-bit kernel can we make use of 64-bit registers.
+ */
+#ifdef CONFIG_MIPS64
+#define USE_DOUBLE
+#endif
+
+#ifdef USE_DOUBLE
+
+#define LOAD ld
+#define LOADL ldl
+#define LOADR ldr
+#define STOREL sdl
+#define STORER sdr
+#define STORE sd
+#define ADD daddu
+#define SUB dsubu
+#define SRL dsrl
+#define SRA dsra
+#define SLL dsll
+#define SLLV dsllv
+#define SRLV dsrlv
+#define NBYTES 8
+#define LOG_NBYTES 3
+
+/*
+ * As we share a code base with the mips32 tree (which uses the o32 ABI
+ * register definitions), we need to redefine the register definitions from
+ * the n64 ABI register naming to the o32 ABI register naming.
+ */
+#undef t0
+#undef t1
+#undef t2
+#undef t3
+#define t0 $8
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
+#define t5 $13
+#define t6 $14
+#define t7 $15
+
+#else
+
+#define LOAD lw
+#define LOADL lwl
+#define LOADR lwr
+#define STOREL swl
+#define STORER swr
+#define STORE sw
+#define ADD addu
+#define SUB subu
+#define SRL srl
+#define SLL sll
+#define SRA sra
+#define SLLV sllv
+#define SRLV srlv
+#define NBYTES 4
+#define LOG_NBYTES 2
+
+#endif /* USE_DOUBLE */
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+#define FIRST(unit) ((unit)*NBYTES)
+#define REST(unit) (FIRST(unit)+NBYTES-1)
+#define UNIT(unit) FIRST(unit)
+
+#define ADDRMASK (NBYTES-1)
+
+ .text
+ .set noreorder
+ .set noat
+
+/*
+ * A combined memcpy/__copy_user
+ * __copy_user sets len to 0 for success; else to an upper bound of
+ * the number of uncopied bytes.
+ * memcpy sets v0 to dst.
+ */
+ .align 5
+LEAF(memcpy) /* a0=dst a1=src a2=len */
+ move v0, dst /* return value */
+__memcpy:
+FEXPORT(__copy_user)
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+#define rem t8
+
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREF( 0, 0(src) )
+ PREF( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREF( 0, 1*32(src) )
+ PREF( 1, 1*32(dst) )
+ bnez t2, copy_bytes_checklen
+ and t0, src, ADDRMASK
+ PREF( 0, 2*32(src) )
+ PREF( 1, 2*32(dst) )
+ bnez t1, dst_unaligned
+ nop
+ bnez t0, src_unaligned_dst_aligned
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+both_aligned:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, cleanup_both_aligned # len < 8*NBYTES
+ and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
+ PREF( 0, 3*32(src) )
+ PREF( 1, 3*32(dst) )
+ .align 4
+1:
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 8*NBYTES
+EXC( LOAD t4, UNIT(4)(src), l_exc_copy)
+EXC( LOAD t7, UNIT(5)(src), l_exc_copy)
+EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
+EXC( LOAD t0, UNIT(6)(src), l_exc_copy)
+EXC( LOAD t1, UNIT(7)(src), l_exc_copy)
+ ADD src, src, 8*NBYTES
+ ADD dst, dst, 8*NBYTES
+EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
+EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
+EXC( STORE t4, UNIT(-4)(dst), s_exc_p4u)
+EXC( STORE t7, UNIT(-3)(dst), s_exc_p3u)
+EXC( STORE t0, UNIT(-2)(dst), s_exc_p2u)
+EXC( STORE t1, UNIT(-1)(dst), s_exc_p1u)
+ PREF( 0, 8*32(src) )
+ PREF( 1, 8*32(dst) )
+ bne len, rem, 1b
+ nop
+
+ /*
+ * len == rem == the number of bytes left to copy < 8*NBYTES
+ */
+cleanup_both_aligned:
+ beqz len, done
+ sltu t0, len, 4*NBYTES
+ bnez t0, less_than_4units
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+EXC( LOAD t0, UNIT(0)(src), l_exc)
+EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ beqz len, done
+ ADD dst, dst, 4*NBYTES
+less_than_4units:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, copy_bytes
+ nop
+1:
+EXC( LOAD t0, 0(src), l_exc)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ bne rem, len, 1b
+ ADD dst, dst, NBYTES
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+#define bits t2
+ beqz len, done
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+EXC( LOAD t0, 0(src), l_exc)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+EXC( STREST t0, -1(t1), s_exc)
+ jr ra
+ move len, zero
+dst_unaligned:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; t1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+#define match rem
+EXC( LDFIRST t3, FIRST(0)(src), l_exc)
+ ADD t2, zero, NBYTES
+EXC( LDREST t3, REST(0)(src), l_exc_copy)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+EXC( STFIRST t3, FIRST(0)(dst), s_exc)
+ beq len, t2, done
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, both_aligned
+ ADD src, src, t2
+
+src_unaligned_dst_aligned:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREF( 0, 3*32(src) )
+ beqz t0, cleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREF( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+EXC( LDREST t1, REST(1)(src), l_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
+EXC( LDREST t2, REST(2)(src), l_exc_copy)
+EXC( LDREST t3, REST(3)(src), l_exc_copy)
+ PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ bne len, rem, 1b
+ ADD dst, dst, 4*NBYTES
+
+cleanup_src_unaligned:
+ beqz len, done
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, copy_bytes
+ nop
+1:
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDREST t0, REST(0)(src), l_exc_copy)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), s_exc_p1u)
+ bne len, rem, 1b
+ ADD dst, dst, NBYTES
+
+copy_bytes_checklen:
+ beqz len, done
+ nop
+copy_bytes:
+ /* 0 < len < NBYTES */
+#define COPY_BYTE(N) \
+EXC( lb t0, N(src), l_exc); \
+ SUB len, len, 1; \
+ beqz len, done; \
+EXC( sb t0, N(dst), s_exc_p1)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+EXC( lb t0, NBYTES-2(src), l_exc)
+ SUB len, len, 1
+ jr ra
+EXC( sb t0, NBYTES-2(dst), s_exc_p1)
+done:
+ jr ra
+ nop
+ END(memcpy)
+
+l_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOAD t0, TI_TASK($28)
+ nop
+ LOAD t0, THREAD_BUADDR(t0)
+1:
+EXC( lb t1, 0(src), l_exc)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ bne src, t0, 1b
+ ADD dst, dst, 1
+l_exc:
+ LOAD t0, TI_TASK($28)
+ nop
+ LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ nop
+ SUB len, AT, t0 # len = number of uncopied bytes
+ /*
+ * Here's where we rely on src and dst being incremented in tandem,
+ * See (3) above.
+ * dst += (fault addr - src) to put dst at first byte to clear
+ */
+ ADD dst, t0 # compute start address in a1
+ SUB dst, src
+ /*
+ * Clear len bytes starting at dst. Can't call __bzero because it
+ * might modify len. An inefficient loop for these rare times...
+ */
+ beqz len, done
+ SUB src, len, 1
+1: sb zero, 0(dst)
+ ADD dst, dst, 1
+ bnez src, 1b
+ SUB src, src, 1
+ jr ra
+ nop
+
+
+#define SEXC(n) \
+s_exc_p ## n ## u: \
+ jr ra; \
+ ADD len, len, n*NBYTES
+
+SEXC(8)
+SEXC(7)
+SEXC(6)
+SEXC(5)
+SEXC(4)
+SEXC(3)
+SEXC(2)
+SEXC(1)
+
+s_exc_p1:
+ jr ra
+ ADD len, len, 1
+s_exc:
+ jr ra
+ nop
+
+ .align 5
+LEAF(memmove)
+ ADD t0, a0, a2
+ ADD t1, a1, a2
+ sltu t0, a1, t0 # dst + len <= src -> memcpy
+ sltu t1, a0, t1 # dst >= src + len -> memcpy
+ and t0, t1
+ beqz t0, __memcpy
+ move v0, a0 /* return value */
+ beqz a2, r_out
+ END(memmove)
+
+ /* fall through to __rmemcpy */
+LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
+ sltu t0, a1, a0
+ beqz t0, r_end_bytes_up # src >= dst
+ nop
+ ADD a0, a2 # dst = dst + len
+ ADD a1, a2 # src = src + len
+
+r_end_bytes:
+ lb t0, -1(a1)
+ SUB a2, a2, 0x1
+ sb t0, -1(a0)
+ SUB a1, a1, 0x1
+ bnez a2, r_end_bytes
+ SUB a0, a0, 0x1
+
+r_out:
+ jr ra
+ move a2, zero
+
+r_end_bytes_up:
+ lb t0, (a1)
+ SUB a2, a2, 0x1
+ sb t0, (a0)
+ ADD a1, a1, 0x1
+ bnez a2, r_end_bytes_up
+ ADD a0, a0, 0x1
+
+ jr ra
+ move a2, zero
+ END(__rmemcpy)
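
(The __copy_user contract spelled out at the top of this file, namely that len returns the number of uncopied bytes and a faulting load leaves the destination tail zeroed, can be modelled in plain C. In this hedged sketch a read fault is simulated by a caller-supplied index; copy_user_model() and fault_at are invented names.)

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Model of the __copy_user contract: copy up to len bytes and return
 * how many bytes were NOT copied.  A read fault is simulated by
 * fault_at, the index of the first unreadable source byte. */
static size_t copy_user_model(unsigned char *dst, const unsigned char *src,
			      size_t len, size_t fault_at)
{
	size_t copied = (fault_at < len) ? fault_at : len;

	memcpy(dst, src, copied);
	/* on a faulting load the rest of dst must still be overwritten,
	 * so no stale kernel data leaks to user space (see l_exc above) */
	memset(dst + copied, 0, len - copied);
	return len - copied;
}

int main(void)
{
	unsigned char src[8] = "secret!", dst[8];
	size_t left = copy_user_model(dst, src, 8, 3);	/* fault after 3 */

	printf("uncopied: %zu, dst: %.3s\n", left, (char *)dst);
	return 0;
}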
diff --git a/arch/mips/lib/promlib.c b/arch/mips/lib/promlib.c
new file mode 100644
index 00000000000..dddfe98b4de
--- /dev/null
+++ b/arch/mips/lib/promlib.c
@@ -0,0 +1,24 @@
+#include <stdarg.h>
+#include <linux/kernel.h>
+
+extern void prom_putchar(char);
+
+void prom_printf(char *fmt, ...)
+{
+ va_list args;
+ char ppbuf[1024];
+ char *bptr;
+
+ va_start(args, fmt);
+ vsprintf(ppbuf, fmt, args);
+
+ bptr = ppbuf;
+
+ while (*bptr != 0) {
+ if (*bptr == '\n')
+ prom_putchar('\r');
+
+ prom_putchar(*bptr++);
+ }
+ va_end(args);
+}
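
(One fragility worth flagging: prom_printf() formats into a fixed 1024-byte stack buffer with vsprintf(), so nothing stops an oversized result from overflowing it. Below is a hedged userland sketch of the same LF-to-CRLF expansion using bounded vsnprintf(); the prom_putchar() stub here just writes to stdout.)

#include <stdarg.h>
#include <stdio.h>

static void prom_putchar(char c) { putchar(c); }  /* stub for the PROM call */

/* Same logic as prom_printf() above, but with vsnprintf() so the
 * 1024-byte buffer cannot be overrun. */
static void prom_printf_bounded(const char *fmt, ...)
{
	char buf[1024];
	va_list args;

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	for (char *p = buf; *p; p++) {
		if (*p == '\n')
			prom_putchar('\r');	/* expand LF to CRLF */
		prom_putchar(*p);
	}
}

int main(void)
{
	prom_printf_bounded("hello %s\n", "world");
	return 0;
}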
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
new file mode 100644
index 00000000000..07660e86c99
--- /dev/null
+++ b/arch/mips/lib/strlen_user.S
@@ -0,0 +1,39 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (c) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 for error
+ */
+LEAF(__strlen_user_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a0
+ bnez v0, fault
+
+FEXPORT(__strlen_user_nocheck_asm)
+ move v0, a0
+1: EX(lb, t0, (v0), fault)
+ PTR_ADDIU v0, 1
+ bnez t0, 1b
+ PTR_SUBU v0, a0
+ jr ra
+ END(__strlen_user_asm)
+
+fault: move v0, zero
+ jr ra
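
(In C terms the loop above computes strlen() + 1: it advances past every byte it loads, including the terminating NUL, then subtracts the start pointer. A hedged model with the fault/0-return path elided; strlen_user_model() is an invented name.)

#include <stdio.h>

/* C model of __strlen_user_nocheck_asm: length INCLUDING the
 * terminating NUL (the asm increments v0 once more after loading the
 * zero byte); the 0-on-fault case is not modelled. */
static unsigned long strlen_user_model(const char *s)
{
	const char *p = s;

	while (*p++ != '\0')
		;			/* keep advancing, NUL included */
	return (unsigned long)(p - s);
}

int main(void)
{
	printf("%lu\n", strlen_user_model("abc"));	/* prints 4 */
	return 0;
}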
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
new file mode 100644
index 00000000000..14bed17c164
--- /dev/null
+++ b/arch/mips/lib/strncpy_user.S
@@ -0,0 +1,58 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1996, 1999 by Ralf Baechle
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Returns: -EFAULT if exception before terminator, N if the entire
+ * buffer filled, else strlen.
+ */
+
+/*
+ * An ugly special case we have to check for: we might get passed a user
+ * space pointer which wraps into the kernel space. We don't deal with
+ * that. If it happens, at most some bytes of the exception handlers
+ * will be copied.
+ */
+
+LEAF(__strncpy_from_user_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a1
+ bnez v0, fault
+
+FEXPORT(__strncpy_from_user_nocheck_asm)
+ move v0, zero
+ move v1, a1
+ .set noreorder
+1: EX(lbu, t0, (v1), fault)
+ PTR_ADDIU v1, 1
+ beqz t0, 2f
+ sb t0, (a0)
+ PTR_ADDIU v0, 1
+ bne v0, a2, 1b
+ PTR_ADDIU a0, 1
+ .set reorder
+2: PTR_ADDU t0, a1, v0
+ xor t0, a1
+ bltz t0, fault
+ jr ra # return n
+ END(__strncpy_from_user_asm)
+
+fault: li v0, -EFAULT
+ jr ra
+
+ .section __ex_table,"a"
+ PTR 1b, fault
+ .previous
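
(The three-way return convention, -EFAULT on fault, N if the buffer fills, otherwise strlen(), condenses into a short C model. Faults are not simulated in this hedged sketch; strncpy_from_user_model() is an invented name.)

#include <stdio.h>

/* C model of __strncpy_from_user_asm's return convention:
 * strlen(src) if a NUL was copied within n bytes, n if the buffer
 * filled first; the -EFAULT path is not modelled. */
static long strncpy_from_user_model(char *dst, const char *src, long n)
{
	long copied = 0;

	while (copied < n) {
		char c = *src++;	/* the asm's EX(lbu, ...) load */
		*dst++ = c;		/* the NUL is stored too, as above */
		if (c == '\0')
			return copied;	/* length not counting the NUL */
		copied++;
	}
	return n;			/* buffer filled, not terminated */
}

int main(void)
{
	char buf[8];

	printf("%ld\n", strncpy_from_user_model(buf, "hi", 8));	/* 2 */
	printf("%ld\n", strncpy_from_user_model(buf, "toolong", 4));	/* 4 */
	return 0;
}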
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
new file mode 100644
index 00000000000..6e7a8eed4de
--- /dev/null
+++ b/arch/mips/lib/strnlen_user.S
@@ -0,0 +1,45 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle
+ * Copyright (c) 1999 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/offset.h>
+#include <asm/regdef.h>
+
+#define EX(insn,reg,addr,handler) \
+9: insn reg, addr; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
+/*
+ * Return the size of a string including the ending NUL character, up to
+ * a maximum of a1, or 0 in case of error.
+ *
+ * Note: for performance reasons we deliberately accept that a user may
+ * make strlen_user and strnlen_user access the first few KSEG0
+ * bytes. There's nothing secret there. On 64-bit accessing beyond
+ * the maximum is a tad hairier ...
+ */
+LEAF(__strnlen_user_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a0
+ bnez v0, fault
+
+FEXPORT(__strnlen_user_nocheck_asm)
+ move v0, a0
+ PTR_ADDU a1, a0 # stop pointer
+1: beq v0, a1, 1f # limit reached?
+ EX(lb, t0, (v0), fault)
+ PTR_ADDU v0, 1
+ bnez t0, 1b
+1: PTR_SUBU v0, a0
+ jr ra
+ END(__strnlen_user_asm)
+
+fault: move v0, zero
+ jr ra
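
(Finally, a hedged C model of the strnlen variant above: it counts the terminating NUL when one is found and caps the result at the a1 limit otherwise. strnlen_user_model() is an invented name; the 0-on-fault path is elided.)

#include <stdio.h>

/* C model of __strnlen_user_nocheck_asm: size of the string including
 * the terminating NUL, capped at n. */
static unsigned long strnlen_user_model(const char *s, unsigned long n)
{
	const char *p = s, *end = s + n;

	while (p != end) {
		char c = *p++;		/* the asm's EX(lb, ...) load */
		if (c == '\0')
			break;		/* NUL already counted by p++ */
	}
	return (unsigned long)(p - s);
}

int main(void)
{
	printf("%lu\n", strnlen_user_model("abc", 16));	/* 4: "abc" + NUL */
	printf("%lu\n", strnlen_user_model("abcdef", 3));	/* 3: capped */
	return 0;
}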