-rw-r--r--Documentation/ABI/testing/sysfs-bus-fcoe45
-rw-r--r--Documentation/ABI/testing/sysfs-platform-msi-laptop83
-rw-r--r--Documentation/device-mapper/cache-policies.txt77
-rw-r--r--Documentation/device-mapper/cache.txt243
-rw-r--r--Documentation/devicetree/bindings/arc/interrupts.txt24
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt70
-rw-r--r--Documentation/devicetree/bindings/mips/cpu_irq.txt47
-rw-r--r--Documentation/devicetree/bindings/mtd/elm.txt16
-rw-r--r--Documentation/devicetree/bindings/mtd/mtd-physmap.txt3
-rw-r--r--Documentation/devicetree/bindings/serial/lantiq_asc.txt16
-rw-r--r--Documentation/kernel-parameters.txt36
-rw-r--r--Documentation/scsi/ChangeLog.megaraid_sas9
-rw-r--r--MAINTAINERS6
-rw-r--r--arch/arc/Kbuild2
-rw-r--r--arch/arc/Kconfig453
-rw-r--r--arch/arc/Kconfig.debug34
-rw-r--r--arch/arc/Makefile126
-rw-r--r--arch/arc/boot/Makefile26
-rw-r--r--arch/arc/boot/dts/Makefile13
-rw-r--r--arch/arc/boot/dts/angel4.dts55
-rw-r--r--arch/arc/boot/dts/skeleton.dts10
-rw-r--r--arch/arc/boot/dts/skeleton.dtsi37
-rw-r--r--arch/arc/configs/fpga_defconfig61
-rw-r--r--arch/arc/include/asm/Kbuild49
-rw-r--r--arch/arc/include/asm/arcregs.h433
-rw-r--r--arch/arc/include/asm/asm-offsets.h9
-rw-r--r--arch/arc/include/asm/atomic.h232
-rw-r--r--arch/arc/include/asm/barrier.h42
-rw-r--r--arch/arc/include/asm/bitops.h516
-rw-r--r--arch/arc/include/asm/bug.h37
-rw-r--r--arch/arc/include/asm/cache.h75
-rw-r--r--arch/arc/include/asm/cacheflush.h67
-rw-r--r--arch/arc/include/asm/checksum.h101
-rw-r--r--arch/arc/include/asm/clk.h22
-rw-r--r--arch/arc/include/asm/cmpxchg.h143
-rw-r--r--arch/arc/include/asm/current.h32
-rw-r--r--arch/arc/include/asm/defines.h56
-rw-r--r--arch/arc/include/asm/delay.h68
-rw-r--r--arch/arc/include/asm/disasm.h116
-rw-r--r--arch/arc/include/asm/dma-mapping.h221
-rw-r--r--arch/arc/include/asm/dma.h14
-rw-r--r--arch/arc/include/asm/elf.h78
-rw-r--r--arch/arc/include/asm/entry.h724
-rw-r--r--arch/arc/include/asm/exec.h15
-rw-r--r--arch/arc/include/asm/futex.h151
-rw-r--r--arch/arc/include/asm/io.h105
-rw-r--r--arch/arc/include/asm/irq.h25
-rw-r--r--arch/arc/include/asm/irqflags.h153
-rw-r--r--arch/arc/include/asm/kdebug.h19
-rw-r--r--arch/arc/include/asm/kgdb.h61
-rw-r--r--arch/arc/include/asm/kprobes.h62
-rw-r--r--arch/arc/include/asm/linkage.h63
-rw-r--r--arch/arc/include/asm/mach_desc.h87
-rw-r--r--arch/arc/include/asm/mmu.h23
-rw-r--r--arch/arc/include/asm/mmu_context.h213
-rw-r--r--arch/arc/include/asm/module.h28
-rw-r--r--arch/arc/include/asm/mutex.h18
-rw-r--r--arch/arc/include/asm/page.h109
-rw-r--r--arch/arc/include/asm/perf_event.h13
-rw-r--r--arch/arc/include/asm/pgalloc.h134
-rw-r--r--arch/arc/include/asm/pgtable.h405
-rw-r--r--arch/arc/include/asm/processor.h151
-rw-r--r--arch/arc/include/asm/prom.h14
-rw-r--r--arch/arc/include/asm/ptrace.h130
-rw-r--r--arch/arc/include/asm/sections.h18
-rw-r--r--arch/arc/include/asm/segment.h24
-rw-r--r--arch/arc/include/asm/serial.h25
-rw-r--r--arch/arc/include/asm/setup.h37
-rw-r--r--arch/arc/include/asm/smp.h130
-rw-r--r--arch/arc/include/asm/spinlock.h144
-rw-r--r--arch/arc/include/asm/spinlock_types.h35
-rw-r--r--arch/arc/include/asm/string.h40
-rw-r--r--arch/arc/include/asm/switch_to.h41
-rw-r--r--arch/arc/include/asm/syscall.h72
-rw-r--r--arch/arc/include/asm/syscalls.h29
-rw-r--r--arch/arc/include/asm/thread_info.h121
-rw-r--r--arch/arc/include/asm/timex.h18
-rw-r--r--arch/arc/include/asm/tlb-mmu1.h104
-rw-r--r--arch/arc/include/asm/tlb.h58
-rw-r--r--arch/arc/include/asm/tlbflush.h28
-rw-r--r--arch/arc/include/asm/uaccess.h751
-rw-r--r--arch/arc/include/asm/unaligned.h29
-rw-r--r--arch/arc/include/asm/unwind.h163
-rw-r--r--arch/arc/include/uapi/asm/Kbuild12
-rw-r--r--arch/arc/include/uapi/asm/byteorder.h18
-rw-r--r--arch/arc/include/uapi/asm/cachectl.h28
-rw-r--r--arch/arc/include/uapi/asm/elf.h26
-rw-r--r--arch/arc/include/uapi/asm/page.h39
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h48
-rw-r--r--arch/arc/include/uapi/asm/setup.h6
-rw-r--r--arch/arc/include/uapi/asm/sigcontext.h22
-rw-r--r--arch/arc/include/uapi/asm/signal.h27
-rw-r--r--arch/arc/include/uapi/asm/swab.h98
-rw-r--r--arch/arc/include/uapi/asm/unistd.h34
-rw-r--r--arch/arc/kernel/Makefile33
-rw-r--r--arch/arc/kernel/arc_hostlink.c58
-rw-r--r--arch/arc/kernel/arcksyms.c56
-rw-r--r--arch/arc/kernel/asm-offsets.c64
-rw-r--r--arch/arc/kernel/clk.c21
-rw-r--r--arch/arc/kernel/ctx_sw.c109
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S58
-rw-r--r--arch/arc/kernel/devtree.c123
-rw-r--r--arch/arc/kernel/disasm.c538
-rw-r--r--arch/arc/kernel/entry.S839
-rw-r--r--arch/arc/kernel/fpu.c55
-rw-r--r--arch/arc/kernel/head.S111
-rw-r--r--arch/arc/kernel/irq.c273
-rw-r--r--arch/arc/kernel/kgdb.c205
-rw-r--r--arch/arc/kernel/kprobes.c525
-rw-r--r--arch/arc/kernel/module.c145
-rw-r--r--arch/arc/kernel/process.c235
-rw-r--r--arch/arc/kernel/ptrace.c158
-rw-r--r--arch/arc/kernel/reset.c33
-rw-r--r--arch/arc/kernel/setup.c473
-rw-r--r--arch/arc/kernel/signal.c360
-rw-r--r--arch/arc/kernel/smp.c332
-rw-r--r--arch/arc/kernel/stacktrace.c254
-rw-r--r--arch/arc/kernel/sys.c18
-rw-r--r--arch/arc/kernel/time.c265
-rw-r--r--arch/arc/kernel/traps.c170
-rw-r--r--arch/arc/kernel/troubleshoot.c322
-rw-r--r--arch/arc/kernel/unaligned.c245
-rw-r--r--arch/arc/kernel/unwind.c1329
-rw-r--r--arch/arc/kernel/vmlinux.lds.S163
-rw-r--r--arch/arc/lib/Makefile9
-rw-r--r--arch/arc/lib/memcmp.S124
-rw-r--r--arch/arc/lib/memcpy-700.S66
-rw-r--r--arch/arc/lib/memset.S59
-rw-r--r--arch/arc/lib/strchr-700.S123
-rw-r--r--arch/arc/lib/strcmp.S96
-rw-r--r--arch/arc/lib/strcpy-700.S70
-rw-r--r--arch/arc/lib/strlen.S83
-rw-r--r--arch/arc/mm/Makefile10
-rw-r--r--arch/arc/mm/cache_arc700.c768
-rw-r--r--arch/arc/mm/dma.c94
-rw-r--r--arch/arc/mm/extable.c63
-rw-r--r--arch/arc/mm/fault.c228
-rw-r--r--arch/arc/mm/init.c187
-rw-r--r--arch/arc/mm/ioremap.c91
-rw-r--r--arch/arc/mm/tlb.c645
-rw-r--r--arch/arc/mm/tlbex.S408
-rw-r--r--arch/arc/oprofile/Makefile9
-rw-r--r--arch/arc/oprofile/common.c26
-rw-r--r--arch/arc/plat-arcfpga/Kconfig84
-rw-r--r--arch/arc/plat-arcfpga/Makefile12
-rw-r--r--arch/arc/plat-arcfpga/include/plat/irq.h31
-rw-r--r--arch/arc/plat-arcfpga/include/plat/memmap.h31
-rw-r--r--arch/arc/plat-arcfpga/include/plat/smp.h118
-rw-r--r--arch/arc/plat-arcfpga/irq.c25
-rw-r--r--arch/arc/plat-arcfpga/platform.c226
-rw-r--r--arch/arc/plat-arcfpga/smp.c171
-rw-r--r--arch/arm64/include/asm/unistd32.h6
-rw-r--r--arch/arm64/kernel/sys32.S5
-rw-r--r--arch/mips/Kbuild.platforms4
-rw-r--r--arch/mips/Kconfig49
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/alchemy/Platform6
-rw-r--r--arch/mips/alchemy/board-gpr.c12
-rw-r--r--arch/mips/alchemy/board-mtx1.c8
-rw-r--r--arch/mips/alchemy/common/dbdma.c14
-rw-r--r--arch/mips/alchemy/common/gpiolib.c14
-rw-r--r--arch/mips/alchemy/common/irq.c178
-rw-r--r--arch/mips/alchemy/common/platform.c4
-rw-r--r--arch/mips/alchemy/common/setup.c2
-rw-r--r--arch/mips/alchemy/common/sleeper.S20
-rw-r--r--arch/mips/alchemy/common/time.c2
-rw-r--r--arch/mips/alchemy/common/usb.c2
-rw-r--r--arch/mips/alchemy/devboards/bcsr.c2
-rw-r--r--arch/mips/alchemy/devboards/db1000.c16
-rw-r--r--arch/mips/alchemy/devboards/db1200.c28
-rw-r--r--arch/mips/alchemy/devboards/db1300.c10
-rw-r--r--arch/mips/alchemy/devboards/db1550.c18
-rw-r--r--arch/mips/alchemy/devboards/pm.c2
-rw-r--r--arch/mips/ar7/Platform6
-rw-r--r--arch/mips/ar7/platform.c12
-rw-r--r--arch/mips/ath79/Kconfig20
-rw-r--r--arch/mips/ath79/Makefile1
-rw-r--r--arch/mips/ath79/clock.c80
-rw-r--r--arch/mips/ath79/common.c4
-rw-r--r--arch/mips/ath79/dev-common.c9
-rw-r--r--arch/mips/ath79/dev-usb.c126
-rw-r--r--arch/mips/ath79/dev-wmac.c30
-rw-r--r--arch/mips/ath79/early_printk.c2
-rw-r--r--arch/mips/ath79/gpio.c52
-rw-r--r--arch/mips/ath79/irq.c187
-rw-r--r--arch/mips/ath79/mach-ap121.c2
-rw-r--r--arch/mips/ath79/mach-ap136.c156
-rw-r--r--arch/mips/ath79/mach-ap81.c2
-rw-r--r--arch/mips/ath79/mach-db120.c2
-rw-r--r--arch/mips/ath79/mach-pb44.c6
-rw-r--r--arch/mips/ath79/machtypes.h1
-rw-r--r--arch/mips/ath79/pci.c165
-rw-r--r--arch/mips/ath79/pci.h1
-rw-r--r--arch/mips/ath79/setup.c18
-rw-r--r--arch/mips/bcm47xx/Makefile2
-rw-r--r--arch/mips/bcm47xx/nvram.c163
-rw-r--r--arch/mips/bcm47xx/setup.c6
-rw-r--r--arch/mips/bcm47xx/sprom.c26
-rw-r--r--arch/mips/bcm47xx/wgt634u.c40
-rw-r--r--arch/mips/bcm63xx/boards/board_bcm963xx.c26
-rw-r--r--arch/mips/bcm63xx/early_printk.c4
-rw-r--r--arch/mips/boot/Makefile2
-rw-r--r--arch/mips/boot/compressed/Makefile8
-rw-r--r--arch/mips/boot/compressed/calc_vmlinuz_load_addr.c4
-rw-r--r--arch/mips/boot/compressed/decompress.c4
-rw-r--r--arch/mips/boot/compressed/head.S4
-rw-r--r--arch/mips/boot/ecoff.h62
-rw-r--r--arch/mips/boot/elf2ecoff.c8
-rw-r--r--arch/mips/cavium-octeon/Kconfig9
-rw-r--r--arch/mips/cavium-octeon/Makefile3
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c10
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c28
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c22
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-spi.c8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-util.c34
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c4
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c10
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c6
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-l2c.c66
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c28
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-spi.c70
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-sysinfo.c16
-rw-r--r--arch/mips/cavium-octeon/oct_ilm.c206
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c4
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S22
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c2
-rw-r--r--arch/mips/cavium-octeon/octeon_3xxx.dts34
-rw-r--r--arch/mips/cavium-octeon/octeon_68xx.dts34
-rw-r--r--arch/mips/cavium-octeon/octeon_boot.h14
-rw-r--r--arch/mips/cavium-octeon/setup.c8
-rw-r--r--arch/mips/cavium-octeon/smp.c10
-rw-r--r--arch/mips/cobalt/led.c2
-rw-r--r--arch/mips/cobalt/mtd.c2
-rw-r--r--arch/mips/cobalt/rtc.c2
-rw-r--r--arch/mips/configs/ath79_defconfig1
-rw-r--r--arch/mips/configs/pnx8550_jbs_defconfig98
-rw-r--r--arch/mips/configs/pnx8550_stb810_defconfig92
-rw-r--r--arch/mips/configs/rt305x_defconfig167
-rw-r--r--arch/mips/dec/int-handler.S98
-rw-r--r--arch/mips/dec/kn02xa-berr.c4
-rw-r--r--arch/mips/dec/prom/call_o32.S2
-rw-r--r--arch/mips/dec/prom/dectypes.h2
-rw-r--r--arch/mips/dec/prom/init.c2
-rw-r--r--arch/mips/dec/prom/memory.c2
-rw-r--r--arch/mips/dec/setup.c4
-rw-r--r--arch/mips/dec/wbflush.c6
-rw-r--r--arch/mips/emma/markeins/irq.c2
-rw-r--r--arch/mips/emma/markeins/platform.c2
-rw-r--r--arch/mips/emma/markeins/setup.c2
-rw-r--r--arch/mips/fw/arc/file.c4
-rw-r--r--arch/mips/fw/arc/identify.c2
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/fw/arc/promlib.c2
-rw-r--r--arch/mips/fw/lib/call_o32.S2
-rw-r--r--arch/mips/fw/sni/sniprom.c16
-rw-r--r--arch/mips/include/asm/abi.h8
-rw-r--r--arch/mips/include/asm/addrspace.h6
-rw-r--r--arch/mips/include/asm/asm.h98
-rw-r--r--arch/mips/include/asm/atomic.h4
-rw-r--r--arch/mips/include/asm/barrier.h10
-rw-r--r--arch/mips/include/asm/bcache.h2
-rw-r--r--arch/mips/include/asm/bitops.h22
-rw-r--r--arch/mips/include/asm/bootinfo.h20
-rw-r--r--arch/mips/include/asm/break.h26
-rw-r--r--arch/mips/include/asm/cacheops.h14
-rw-r--r--arch/mips/include/asm/checksum.h2
-rw-r--r--arch/mips/include/asm/cmpxchg.h6
-rw-r--r--arch/mips/include/asm/compat-signal.h4
-rw-r--r--arch/mips/include/asm/compat.h6
-rw-r--r--arch/mips/include/asm/cpu-features.h38
-rw-r--r--arch/mips/include/asm/cpu-info.h18
-rw-r--r--arch/mips/include/asm/cpu.h37
-rw-r--r--arch/mips/include/asm/dec/ioasic_addrs.h22
-rw-r--r--arch/mips/include/asm/dec/kn01.h12
-rw-r--r--arch/mips/include/asm/dec/kn02ca.h2
-rw-r--r--arch/mips/include/asm/dec/prom.h2
-rw-r--r--arch/mips/include/asm/dma-mapping.h2
-rw-r--r--arch/mips/include/asm/dma.h112
-rw-r--r--arch/mips/include/asm/elf.h30
-rw-r--r--arch/mips/include/asm/emma/emma2rh.h112
-rw-r--r--arch/mips/include/asm/emma/markeins.h2
-rw-r--r--arch/mips/include/asm/fixmap.h4
-rw-r--r--arch/mips/include/asm/floppy.h4
-rw-r--r--arch/mips/include/asm/fpregdef.h10
-rw-r--r--arch/mips/include/asm/fpu.h6
-rw-r--r--arch/mips/include/asm/futex.h12
-rw-r--r--arch/mips/include/asm/fw/arc/hinv.h10
-rw-r--r--arch/mips/include/asm/fw/arc/types.h12
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_api.h6
-rw-r--r--arch/mips/include/asm/fw/cfe/cfe_error.h18
-rw-r--r--arch/mips/include/asm/gcmpregs.h100
-rw-r--r--arch/mips/include/asm/gic.h17
-rw-r--r--arch/mips/include/asm/gio_device.h14
-rw-r--r--arch/mips/include/asm/gt64120.h22
-rw-r--r--arch/mips/include/asm/hazards.h8
-rw-r--r--arch/mips/include/asm/highmem.h4
-rw-r--r--arch/mips/include/asm/inst.h348
-rw-r--r--arch/mips/include/asm/io.h30
-rw-r--r--arch/mips/include/asm/ip32/crime.h8
-rw-r--r--arch/mips/include/asm/ip32/ip32_ints.h2
-rw-r--r--arch/mips/include/asm/ip32/mace.h12
-rw-r--r--arch/mips/include/asm/irq.h6
-rw-r--r--arch/mips/include/asm/irq_cpu.h6
-rw-r--r--arch/mips/include/asm/isadep.h4
-rw-r--r--arch/mips/include/asm/jazz.h166
-rw-r--r--arch/mips/include/asm/jazzdma.h50
-rw-r--r--arch/mips/include/asm/kmap_types.h2
-rw-r--r--arch/mips/include/asm/kprobes.h2
-rw-r--r--arch/mips/include/asm/lasat/eeprom.h12
-rw-r--r--arch/mips/include/asm/lasat/lasat.h14
-rw-r--r--arch/mips/include/asm/lasat/serial.h2
-rw-r--r--arch/mips/include/asm/local.h6
-rw-r--r--arch/mips/include/asm/m48t37.h2
-rw-r--r--arch/mips/include/asm/mach-ar7/ar7.h18
-rw-r--r--arch/mips/include/asm/mach-ar7/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h132
-rw-r--r--arch/mips/include/asm/mach-ath79/ar933x_uart.h12
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h17
-rw-r--r--arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ath79/irq.h27
-rw-r--r--arch/mips/include/asm/mach-ath79/pci.h28
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000.h198
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1000_dma.h10
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1100_mmc.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h16
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_ide.h22
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_psc.h8
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1000.h20
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1300.h2
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h (renamed from arch/mips/include/asm/mach-bcm47xx/nvram.h)13
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h24
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/irq.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h4
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h36
-rw-r--r--arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h4
-rw-r--r--arch/mips/include/asm/mach-cobalt/mach-gt64120.h2
-rw-r--r--arch/mips/include/asm/mach-db1x00/bcsr.h4
-rw-r--r--arch/mips/include/asm/mach-db1x00/db1200.h2
-rw-r--r--arch/mips/include/asm/mach-db1x00/db1300.h2
-rw-r--r--arch/mips/include/asm/mach-emma2rh/irq.h2
-rw-r--r--arch/mips/include/asm/mach-generic/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-generic/floppy.h4
-rw-r--r--arch/mips/include/asm/mach-generic/ide.h4
-rw-r--r--arch/mips/include/asm/mach-generic/irq.h4
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h4
-rw-r--r--arch/mips/include/asm/mach-ip27/mmzone.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/topology.h2
-rw-r--r--arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-ip28/spaces.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/war.h2
-rw-r--r--arch/mips/include/asm/mach-jazz/floppy.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/clock.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/dma.h10
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/irq.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/platform.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/timer.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/war.h24
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/xway_dma.h4
-rw-r--r--arch/mips/include/asm/mach-lasat/mach-gt64120.h6
-rw-r--r--arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536.h422
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h2
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h122
-rw-r--r--arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/gpio.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/loongson.h50
-rw-r--r--arch/mips/include/asm/mach-loongson/machine.h4
-rw-r--r--arch/mips/include/asm/mach-loongson/mem.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/irq.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/loongson1.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/platform.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/prom.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-clk.h4
-rw-r--r--arch/mips/include/asm/mach-loongson1/regs-wdt.h4
-rw-r--r--arch/mips/include/asm/mach-malta/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-malta/irq.h2
-rw-r--r--arch/mips/include/asm/mach-malta/mach-gt64120.h2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h)0
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h)0
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h)110
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h)4
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h)4
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h)46
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h)8
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h)2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h)416
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h)96
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h)2
-rw-r--r--arch/mips/include/asm/mach-pmcs-msp71xx/war.h (renamed from arch/mips/include/asm/pmc-sierra/msp71xx/war.h)4
-rw-r--r--arch/mips/include/asm/mach-pnx833x/irq-mapping.h18
-rw-r--r--arch/mips/include/asm/mach-pnx833x/pnx833x.h12
-rw-r--r--arch/mips/include/asm/mach-pnx8550/cm.h43
-rw-r--r--arch/mips/include/asm/mach-pnx8550/glb.h86
-rw-r--r--arch/mips/include/asm/mach-pnx8550/int.h140
-rw-r--r--arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h262
-rw-r--r--arch/mips/include/asm/mach-pnx8550/nand.h121
-rw-r--r--arch/mips/include/asm/mach-pnx8550/pci.h185
-rw-r--r--arch/mips/include/asm/mach-pnx8550/uart.h30
-rw-r--r--arch/mips/include/asm/mach-pnx8550/usb.h32
-rw-r--r--arch/mips/include/asm/mach-powertv/asic.h8
-rw-r--r--arch/mips/include/asm/mach-powertv/asic_regs.h4
-rw-r--r--arch/mips/include/asm/mach-powertv/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-powertv/interrupts.h62
-rw-r--r--arch/mips/include/asm/mach-ralink/ralink_regs.h39
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h139
-rw-r--r--arch/mips/include/asm/mach-ralink/war.h (renamed from arch/mips/include/asm/mach-pnx8550/war.h)7
-rw-r--r--arch/mips/include/asm/mach-rc32434/ddr.h2
-rw-r--r--arch/mips/include/asm/mach-rc32434/dma.h12
-rw-r--r--arch/mips/include/asm/mach-rc32434/dma_v.h4
-rw-r--r--arch/mips/include/asm/mach-rc32434/eth.h6
-rw-r--r--arch/mips/include/asm/mach-rc32434/gpio.h6
-rw-r--r--arch/mips/include/asm/mach-rc32434/irq.h12
-rw-r--r--arch/mips/include/asm/mach-rc32434/pci.h60
-rw-r--r--arch/mips/include/asm/mach-rc32434/rb.h6
-rw-r--r--arch/mips/include/asm/mach-rc32434/rc32434.h2
-rw-r--r--arch/mips/include/asm/mach-rc32434/timer.h18
-rw-r--r--arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h8
-rw-r--r--arch/mips/include/asm/mach-sead3/irq.h2
-rw-r--r--arch/mips/include/asm/mach-sibyte/war.h4
-rw-r--r--arch/mips/include/asm/mach-wrppmc/mach-gt64120.h16
-rw-r--r--arch/mips/include/asm/mc146818-time.h4
-rw-r--r--arch/mips/include/asm/mips-boards/bonito64.h106
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h70
-rw-r--r--arch/mips/include/asm/mips-boards/launch.h10
-rw-r--r--arch/mips/include/asm/mips-boards/malta.h10
-rw-r--r--arch/mips/include/asm/mips-boards/maltaint.h8
-rw-r--r--arch/mips/include/asm/mips-boards/piix4.h8
-rw-r--r--arch/mips/include/asm/mips-boards/prom.h6
-rw-r--r--arch/mips/include/asm/mips-boards/sead3int.h4
-rw-r--r--arch/mips/include/asm/mips-boards/sim.h14
-rw-r--r--arch/mips/include/asm/mipsmtregs.h8
-rw-r--r--arch/mips/include/asm/mipsregs.h735
-rw-r--r--arch/mips/include/asm/mmu_context.h6
-rw-r--r--arch/mips/include/asm/msc01_ic.h174
-rw-r--r--arch/mips/include/asm/netlogic/common.h16
-rw-r--r--arch/mips/include/asm/netlogic/haldefs.h2
-rw-r--r--arch/mips/include/asm/netlogic/mips-extns.h83
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/bridge.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h2
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/iomap.h48
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pcibus.h46
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/pic.h118
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/sys.h160
-rw-r--r--arch/mips/include/asm/netlogic/xlp-hal/uart.h4
-rw-r--r--arch/mips/include/asm/netlogic/xlr/fmn.h242
-rw-r--r--arch/mips/include/asm/netlogic/xlr/iomap.h88
-rw-r--r--arch/mips/include/asm/netlogic/xlr/msidef.h18
-rw-r--r--arch/mips/include/asm/netlogic/xlr/pic.h58
-rw-r--r--arch/mips/include/asm/nile4.h38
-rw-r--r--arch/mips/include/asm/octeon/cvmx-address.h50
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootmem.h72
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h44
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h30
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fau.h162
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa.h32
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h16
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-rgmii.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-sgmii.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-util.h18
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-xaui.h4
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper.h12
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd.h14
-rw-r--r--arch/mips/include/asm/octeon/cvmx-l2c.h210
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mdio.h40
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip-defs.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pip.h62
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pko.h60
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h122
-rw-r--r--arch/mips/include/asm/octeon/cvmx-scratch.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spi.h66
-rw-r--r--arch/mips/include/asm/octeon/cvmx-spinlock.h78
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sysinfo.h16
-rw-r--r--arch/mips/include/asm/octeon/cvmx-wqe.h104
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h48
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h10
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h240
-rw-r--r--arch/mips/include/asm/octeon/octeon.h12
-rw-r--r--arch/mips/include/asm/octeon/pci-octeon.h2
-rw-r--r--arch/mips/include/asm/paccess.h2
-rw-r--r--arch/mips/include/asm/page.h14
-rw-r--r--arch/mips/include/asm/pci.h11
-rw-r--r--arch/mips/include/asm/pci/bridge.h30
-rw-r--r--arch/mips/include/asm/pgtable-32.h20
-rw-r--r--arch/mips/include/asm/pgtable-64.h4
-rw-r--r--arch/mips/include/asm/pgtable-bits.h42
-rw-r--r--arch/mips/include/asm/pgtable.h4
-rw-r--r--arch/mips/include/asm/processor.h46
-rw-r--r--arch/mips/include/asm/r4kcache.h12
-rw-r--r--arch/mips/include/asm/regdef.h66
-rw-r--r--arch/mips/include/asm/rtlx.h2
-rw-r--r--arch/mips/include/asm/seccomp.h2
-rw-r--r--arch/mips/include/asm/sgi/gio.h16
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h82
-rw-r--r--arch/mips/include/asm/sgi/ioc.h4
-rw-r--r--arch/mips/include/asm/sgi/ip22.h6
-rw-r--r--arch/mips/include/asm/sgi/mc.h26
-rw-r--r--arch/mips/include/asm/sgi/pi1.h20
-rw-r--r--arch/mips/include/asm/sgialib.h2
-rw-r--r--arch/mips/include/asm/sgiarcs.h136
-rw-r--r--arch/mips/include/asm/shmparam.h2
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_int.h424
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_l2c.h138
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_mc.h1192
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_regs.h806
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_scd.h276
-rw-r--r--arch/mips/include/asm/sibyte/bigsur.h20
-rw-r--r--arch/mips/include/asm/sibyte/carmel.h52
-rw-r--r--arch/mips/include/asm/sibyte/sb1250.h4
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_defs.h94
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_dma.h426
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_genbus.h26
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_int.h274
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_l2c.h92
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_ldt.h14
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mac.h568
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mc.h712
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_regs.h854
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_scd.h668
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_smbus.h158
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_syncser.h136
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_uart.h262
-rw-r--r--arch/mips/include/asm/sibyte/sentosa.h6
-rw-r--r--arch/mips/include/asm/sibyte/swarm.h26
-rw-r--r--arch/mips/include/asm/smp.h12
-rw-r--r--arch/mips/include/asm/smtc.h4
-rw-r--r--arch/mips/include/asm/sn/addrs.h54
-rw-r--r--arch/mips/include/asm/sn/agent.h12
-rw-r--r--arch/mips/include/asm/sn/arch.h8
-rw-r--r--arch/mips/include/asm/sn/fru.h12
-rw-r--r--arch/mips/include/asm/sn/gda.h24
-rw-r--r--arch/mips/include/asm/sn/intr.h16
-rw-r--r--arch/mips/include/asm/sn/io.h4
-rw-r--r--arch/mips/include/asm/sn/ioc3.h8
-rw-r--r--arch/mips/include/asm/sn/klconfig.h526
-rw-r--r--arch/mips/include/asm/sn/kldir.h182
-rw-r--r--arch/mips/include/asm/sn/launch.h16
-rw-r--r--arch/mips/include/asm/sn/mapped_kernel.h4
-rw-r--r--arch/mips/include/asm/sn/nmi.h8
-rw-r--r--arch/mips/include/asm/sn/sn0/addrs.h20
-rw-r--r--arch/mips/include/asm/sn/sn0/arch.h22
-rw-r--r--arch/mips/include/asm/sn/sn0/hub.h12
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h442
-rw-r--r--arch/mips/include/asm/sn/sn0/hubmd.h214
-rw-r--r--arch/mips/include/asm/sn/sn0/hubni.h78
-rw-r--r--arch/mips/include/asm/sn/sn0/hubpi.h184
-rw-r--r--arch/mips/include/asm/sn/sn0/ip27.h28
-rw-r--r--arch/mips/include/asm/sn/types.h4
-rw-r--r--arch/mips/include/asm/sni.h104
-rw-r--r--arch/mips/include/asm/sparsemem.h2
-rw-r--r--arch/mips/include/asm/spinlock.h4
-rw-r--r--arch/mips/include/asm/spinlock_types.h2
-rw-r--r--arch/mips/include/asm/stackframe.h14
-rw-r--r--arch/mips/include/asm/string.h8
-rw-r--r--arch/mips/include/asm/switch_to.h4
-rw-r--r--arch/mips/include/asm/thread_info.h2
-rw-r--r--arch/mips/include/asm/time.h6
-rw-r--r--arch/mips/include/asm/tlb.h2
-rw-r--r--arch/mips/include/asm/topology.h2
-rw-r--r--arch/mips/include/asm/traps.h2
-rw-r--r--arch/mips/include/asm/txx9/jmr3927.h14
-rw-r--r--arch/mips/include/asm/txx9/rbtx4927.h6
-rw-r--r--arch/mips/include/asm/txx9/rbtx4938.h20
-rw-r--r--arch/mips/include/asm/txx9/rbtx4939.h14
-rw-r--r--arch/mips/include/asm/txx9/smsc_fdc37m81x.h54
-rw-r--r--arch/mips/include/asm/txx9/tx3927.h16
-rw-r--r--arch/mips/include/asm/txx9/tx4927.h18
-rw-r--r--arch/mips/include/asm/txx9/tx4927pcic.h6
-rw-r--r--arch/mips/include/asm/txx9/tx4938.h32
-rw-r--r--arch/mips/include/asm/txx9/tx4939.h50
-rw-r--r--arch/mips/include/asm/txx9tmr.h4
-rw-r--r--arch/mips/include/asm/uaccess.h102
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/include/asm/user.h2
-rw-r--r--arch/mips/include/asm/vr41xx/pci.h2
-rw-r--r--arch/mips/include/asm/vr41xx/tb0287.h2
-rw-r--r--arch/mips/include/asm/war.h46
-rw-r--r--arch/mips/include/asm/xtalk/xtalk.h24
-rw-r--r--arch/mips/include/asm/xtalk/xwidget.h8
-rw-r--r--arch/mips/include/uapi/asm/Kbuild1
-rw-r--r--arch/mips/include/uapi/asm/break.h16
-rw-r--r--arch/mips/include/uapi/asm/cachectl.h10
-rw-r--r--arch/mips/include/uapi/asm/errno.h188
-rw-r--r--arch/mips/include/uapi/asm/fcntl.h4
-rw-r--r--arch/mips/include/uapi/asm/inst.h331
-rw-r--r--arch/mips/include/uapi/asm/ioctls.h22
-rw-r--r--arch/mips/include/uapi/asm/mman.h10
-rw-r--r--arch/mips/include/uapi/asm/ptrace.h8
-rw-r--r--arch/mips/include/uapi/asm/sembuf.h4
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h8
-rw-r--r--arch/mips/include/uapi/asm/signal.h24
-rw-r--r--arch/mips/include/uapi/asm/socket.h14
-rw-r--r--arch/mips/include/uapi/asm/sockios.h2
-rw-r--r--arch/mips/include/uapi/asm/stat.h2
-rw-r--r--arch/mips/include/uapi/asm/statfs.h6
-rw-r--r--arch/mips/include/uapi/asm/sysmips.h8
-rw-r--r--arch/mips/include/uapi/asm/termbits.h158
-rw-r--r--arch/mips/include/uapi/asm/termios.h12
-rw-r--r--arch/mips/include/uapi/asm/unistd.h60
-rw-r--r--arch/mips/jazz/Makefile2
-rw-r--r--arch/mips/jazz/irq.c4
-rw-r--r--arch/mips/jazz/jazzdma.c6
-rw-r--r--arch/mips/jazz/setup.c20
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c8
-rw-r--r--arch/mips/jz4740/clock-debugfs.c2
-rw-r--r--arch/mips/jz4740/clock.c4
-rw-r--r--arch/mips/jz4740/dma.c2
-rw-r--r--arch/mips/jz4740/gpio.c2
-rw-r--r--arch/mips/jz4740/irq.c2
-rw-r--r--arch/mips/jz4740/irq.h2
-rw-r--r--arch/mips/jz4740/platform.c12
-rw-r--r--arch/mips/jz4740/pm.c2
-rw-r--r--arch/mips/jz4740/prom.c2
-rw-r--r--arch/mips/jz4740/reset.c2
-rw-r--r--arch/mips/jz4740/setup.c2
-rw-r--r--arch/mips/jz4740/time.c2
-rw-r--r--arch/mips/jz4740/timer.c2
-rw-r--r--arch/mips/kernel/Makefile36
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c10
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c10
-rw-r--r--arch/mips/kernel/bmips_vec.S6
-rw-r--r--arch/mips/kernel/branch.c10
-rw-r--r--arch/mips/kernel/cevt-bcm1480.c4
-rw-r--r--arch/mips/kernel/cevt-ds1287.c4
-rw-r--r--arch/mips/kernel/cevt-gt641xx.c4
-rw-r--r--arch/mips/kernel/cevt-r4k.c6
-rw-r--r--arch/mips/kernel/cevt-sb1250.c4
-rw-r--r--arch/mips/kernel/cevt-smtc.c2
-rw-r--r--arch/mips/kernel/cevt-txx9.c6
-rw-r--r--arch/mips/kernel/cpu-bugs64.c10
-rw-r--r--arch/mips/kernel/cpu-probe.c132
-rw-r--r--arch/mips/kernel/cpufreq/loongson2_cpufreq.c19
-rw-r--r--arch/mips/kernel/crash.c2
-rw-r--r--arch/mips/kernel/csrc-bcm1480.c2
-rw-r--r--arch/mips/kernel/csrc-gic.c49
-rw-r--r--arch/mips/kernel/csrc-ioasic.c2
-rw-r--r--arch/mips/kernel/csrc-powertv.c6
-rw-r--r--arch/mips/kernel/csrc-sb1250.c2
-rw-r--r--arch/mips/kernel/early_printk.c5
-rw-r--r--arch/mips/kernel/ftrace.c12
-rw-r--r--arch/mips/kernel/genex.S14
-rw-r--r--arch/mips/kernel/head.S4
-rw-r--r--arch/mips/kernel/i8259.c2
-rw-r--r--arch/mips/kernel/irq-gt641xx.c4
-rw-r--r--arch/mips/kernel/irq-msc01.c6
-rw-r--r--arch/mips/kernel/irq-rm7000.c4
-rw-r--r--arch/mips/kernel/irq.c2
-rw-r--r--arch/mips/kernel/irq_cpu.c50
-rw-r--r--arch/mips/kernel/irq_txx9.c10
-rw-r--r--arch/mips/kernel/kgdb.c6
-rw-r--r--arch/mips/kernel/kprobes.c10
-rw-r--r--arch/mips/kernel/linux32.c10
-rw-r--r--arch/mips/kernel/mips_ksyms.c4
-rw-r--r--arch/mips/kernel/module-rela.c6
-rw-r--r--arch/mips/kernel/module.c10
-rw-r--r--arch/mips/kernel/octeon_switch.S104
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c30
-rw-r--r--arch/mips/kernel/proc.c26
-rw-r--r--arch/mips/kernel/process.c4
-rw-r--r--arch/mips/kernel/ptrace.c12
-rw-r--r--arch/mips/kernel/ptrace32.c2
-rw-r--r--arch/mips/kernel/r2300_fpu.S128
-rw-r--r--arch/mips/kernel/r2300_switch.S2
-rw-r--r--arch/mips/kernel/r4k_switch.S4
-rw-r--r--arch/mips/kernel/relocate_kernel.S12
-rw-r--r--arch/mips/kernel/rtlx.c18
-rw-r--r--arch/mips/kernel/scall32-o32.S14
-rw-r--r--arch/mips/kernel/scall64-64.S6
-rw-r--r--arch/mips/kernel/scall64-n32.S10
-rw-r--r--arch/mips/kernel/scall64-o32.S18
-rw-r--r--arch/mips/kernel/setup.c101
-rw-r--r--arch/mips/kernel/signal.c12
-rw-r--r--arch/mips/kernel/signal32.c14
-rw-r--r--arch/mips/kernel/signal_n32.c10
-rw-r--r--arch/mips/kernel/smp-cmp.c2
-rw-r--r--arch/mips/kernel/smp-mt.c4
-rw-r--r--arch/mips/kernel/smtc-asm.S2
-rw-r--r--arch/mips/kernel/smtc-proc.c6
-rw-r--r--arch/mips/kernel/smtc.c21
-rw-r--r--arch/mips/kernel/sync-r4k.c2
-rw-r--r--arch/mips/kernel/syscall.c4
-rw-r--r--arch/mips/kernel/time.c12
-rw-r--r--arch/mips/kernel/traps.c32
-rw-r--r--arch/mips/kernel/unaligned.c34
-rw-r--r--arch/mips/kernel/vmlinux.lds.S8
-rw-r--r--arch/mips/kernel/vpe.c113
-rw-r--r--arch/mips/kernel/watch.c4
-rw-r--r--arch/mips/lantiq/clk.c18
-rw-r--r--arch/mips/lantiq/clk.h7
-rw-r--r--arch/mips/lantiq/dts/danube.dtsi2
-rw-r--r--arch/mips/lantiq/dts/easy50712.dts2
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c4
-rw-r--r--arch/mips/lantiq/irq.c105
-rw-r--r--arch/mips/lantiq/prom.h2
-rw-r--r--arch/mips/lantiq/xway/clk.c43
-rw-r--r--arch/mips/lantiq/xway/reset.c9
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c15
-rw-r--r--arch/mips/lasat/Makefile2
-rw-r--r--arch/mips/lasat/ds1603.h2
-rw-r--r--arch/mips/lasat/image/Makefile2
-rw-r--r--arch/mips/lasat/image/head.S2
-rw-r--r--arch/mips/lasat/picvue.c34
-rw-r--r--arch/mips/lasat/picvue.h16
-rw-r--r--arch/mips/lasat/serial.c2
-rw-r--r--arch/mips/lasat/sysctl.c4
-rw-r--r--arch/mips/lib/bitops.c4
-rw-r--r--arch/mips/lib/csum_partial.S42
-rw-r--r--arch/mips/lib/delay.c2
-rw-r--r--arch/mips/lib/dump_tlb.c2
-rw-r--r--arch/mips/lib/memcpy.S34
-rw-r--r--arch/mips/lib/memset.S4
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c2
-rw-r--r--arch/mips/lib/strncpy_user.S2
-rw-r--r--arch/mips/lib/strnlen_user.S6
-rw-r--r--arch/mips/lib/uncached.c2
-rw-r--r--arch/mips/loongson/Makefile2
-rw-r--r--arch/mips/loongson/common/bonito-irq.c6
-rw-r--r--arch/mips/loongson/common/cmdline.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_acc.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ehci.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ide.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_isa.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_mfgpt.c6
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_ohci.c4
-rw-r--r--arch/mips/loongson/common/cs5536/cs5536_pci.c4
-rw-r--r--arch/mips/loongson/common/early_printk.c6
-rw-r--r--arch/mips/loongson/common/env.c4
-rw-r--r--arch/mips/loongson/common/gpio.c16
-rw-r--r--arch/mips/loongson/common/init.c4
-rw-r--r--arch/mips/loongson/common/irq.c6
-rw-r--r--arch/mips/loongson/common/machtype.c20
-rw-r--r--arch/mips/loongson/common/mem.c4
-rw-r--r--arch/mips/loongson/common/pci.c34
-rw-r--r--arch/mips/loongson/common/platform.c4
-rw-r--r--arch/mips/loongson/common/reset.c10
-rw-r--r--arch/mips/loongson/common/serial.c18
-rw-r--r--arch/mips/loongson/common/setup.c6
-rw-r--r--arch/mips/loongson/common/time.c6
-rw-r--r--arch/mips/loongson/common/uart_base.c4
-rw-r--r--arch/mips/loongson/fuloong-2e/irq.c12
-rw-r--r--arch/mips/loongson/fuloong-2e/reset.c4
-rw-r--r--arch/mips/loongson/lemote-2f/ec_kb3310b.h216
-rw-r--r--arch/mips/loongson/lemote-2f/irq.c20
-rw-r--r--arch/mips/loongson/lemote-2f/machtype.c14
-rw-r--r--arch/mips/loongson/lemote-2f/reset.c10
-rw-r--r--arch/mips/loongson1/Platform2
-rw-r--r--arch/mips/loongson1/common/clock.c4
-rw-r--r--arch/mips/loongson1/common/irq.c4
-rw-r--r--arch/mips/loongson1/common/platform.c6
-rw-r--r--arch/mips/loongson1/common/prom.c6
-rw-r--r--arch/mips/loongson1/common/reset.c4
-rw-r--r--arch/mips/loongson1/common/setup.c4
-rw-r--r--arch/mips/loongson1/ls1b/board.c4
-rw-r--r--arch/mips/math-emu/Makefile1
-rw-r--r--arch/mips/math-emu/cp1emu.c8
-rw-r--r--arch/mips/math-emu/dp_add.c2
-rw-r--r--arch/mips/math-emu/dp_sqrt.c6
-rw-r--r--arch/mips/math-emu/dp_sub.c2
-rw-r--r--arch/mips/math-emu/ieee754.c18
-rw-r--r--arch/mips/math-emu/ieee754dp.c2
-rw-r--r--arch/mips/math-emu/ieee754int.h2
-rw-r--r--arch/mips/math-emu/ieee754sp.c2
-rw-r--r--arch/mips/math-emu/ieee754xcpt.c2
-rw-r--r--arch/mips/math-emu/kernel_linkage.c2
-rw-r--r--arch/mips/math-emu/sp_add.c2
-rw-r--r--arch/mips/math-emu/sp_mul.c4
-rw-r--r--arch/mips/math-emu/sp_sub.c2
-rw-r--r--arch/mips/mm/Makefile6
-rw-r--r--arch/mips/mm/c-octeon.c2
-rw-r--r--arch/mips/mm/c-r3k.c8
-rw-r--r--arch/mips/mm/c-r4k.c27
-rw-r--r--arch/mips/mm/c-tx39.c12
-rw-r--r--arch/mips/mm/cerr-sb1.c30
-rw-r--r--arch/mips/mm/cex-gen.S6
-rw-r--r--arch/mips/mm/cex-oct.S36
-rw-r--r--arch/mips/mm/cex-sb1.S8
-rw-r--r--arch/mips/mm/dma-default.c2
-rw-r--r--arch/mips/mm/fault.c4
-rw-r--r--arch/mips/mm/gup.c2
-rw-r--r--arch/mips/mm/init.c4
-rw-r--r--arch/mips/mm/ioremap.c4
-rw-r--r--arch/mips/mm/page.c6
-rw-r--r--arch/mips/mm/pgtable-64.c4
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/sc-r5k.c4
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mips/mm/tlbex.c33
-rw-r--r--arch/mips/mm/uasm.c16
-rw-r--r--arch/mips/mti-malta/malta-amon.c6
-rw-r--r--arch/mips/mti-malta/malta-cmdline.c2
-rw-r--r--arch/mips/mti-malta/malta-display.c6
-rw-r--r--arch/mips/mti-malta/malta-init.c14
-rw-r--r--arch/mips/mti-malta/malta-int.c40
-rw-r--r--arch/mips/mti-malta/malta-memory.c4
-rw-r--r--arch/mips/mti-malta/malta-pci.c6
-rw-r--r--arch/mips/mti-malta/malta-platform.c2
-rw-r--r--arch/mips/mti-malta/malta-setup.c4
-rw-r--r--arch/mips/mti-malta/malta-smtc.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c83
-rw-r--r--arch/mips/mti-sead3/Makefile10
-rw-r--r--arch/mips/mti-sead3/leds-sead3.c5
-rw-r--r--arch/mips/mti-sead3/sead3-console.c4
-rw-r--r--arch/mips/mti-sead3/sead3-display.c2
-rw-r--r--arch/mips/mti-sead3/sead3-i2c-drv.c28
-rw-r--r--arch/mips/mti-sead3/sead3-init.c5
-rw-r--r--arch/mips/mti-sead3/sead3-memory.c138
-rw-r--r--arch/mips/mti-sead3/sead3-net.c4
-rw-r--r--arch/mips/mti-sead3/sead3-pic32-bus.c6
-rw-r--r--arch/mips/mti-sead3/sead3-pic32-i2c-drv.c58
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c27
-rw-r--r--arch/mips/mti-sead3/sead3-time.c8
-rw-r--r--arch/mips/mti-sead3/sead3.dts26
-rw-r--r--arch/mips/netlogic/Platform4
-rw-r--r--arch/mips/netlogic/common/irq.c43
-rw-r--r--arch/mips/netlogic/common/smp.c8
-rw-r--r--arch/mips/netlogic/common/smpboot.S20
-rw-r--r--arch/mips/netlogic/common/time.c56
-rw-r--r--arch/mips/netlogic/dts/xlp_evp.dts2
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c4
-rw-r--r--arch/mips/netlogic/xlp/usb-init.c2
-rw-r--r--arch/mips/netlogic/xlp/wakeup.c35
-rw-r--r--arch/mips/netlogic/xlr/fmn-config.c6
-rw-r--r--arch/mips/netlogic/xlr/platform-flash.c12
-rw-r--r--arch/mips/netlogic/xlr/platform.c12
-rw-r--r--arch/mips/netlogic/xlr/setup.c4
-rw-r--r--arch/mips/oprofile/common.c7
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c10
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c38
-rw-r--r--arch/mips/pci/Makefile5
-rw-r--r--arch/mips/pci/fixup-cobalt.c28
-rw-r--r--arch/mips/pci/fixup-emma2rh.c2
-rw-r--r--arch/mips/pci/fixup-fuloong2e.c8
-rw-r--r--arch/mips/pci/fixup-ip32.c14
-rw-r--r--arch/mips/pci/fixup-lemote2f.c14
-rw-r--r--arch/mips/pci/fixup-malta.c14
-rw-r--r--arch/mips/pci/fixup-pmcmsp.c224
-rw-r--r--arch/mips/pci/fixup-pnx8550.c57
-rw-r--r--arch/mips/pci/fixup-sni.c68
-rw-r--r--arch/mips/pci/fixup-tb0219.c2
-rw-r--r--arch/mips/pci/fixup-tb0287.c2
-rw-r--r--arch/mips/pci/fixup-wrppmc.c2
-rw-r--r--arch/mips/pci/ops-bcm63xx.c16
-rw-r--r--arch/mips/pci/ops-bonito64.c4
-rw-r--r--arch/mips/pci/ops-gt64xxx_pci0.c24
-rw-r--r--arch/mips/pci/ops-lantiq.c2
-rw-r--r--arch/mips/pci/ops-loongson2.c2
-rw-r--r--arch/mips/pci/ops-msc.c22
-rw-r--r--arch/mips/pci/ops-nile4.c2
-rw-r--r--arch/mips/pci/ops-pmcmsp.c450
-rw-r--r--arch/mips/pci/ops-pnx8550.c282
-rw-r--r--arch/mips/pci/ops-rc32434.c2
-rw-r--r--arch/mips/pci/ops-sni.c8
-rw-r--r--arch/mips/pci/ops-tx4927.c8
-rw-r--r--arch/mips/pci/ops-vr41xx.c6
-rw-r--r--arch/mips/pci/pci-alchemy.c6
-rw-r--r--arch/mips/pci/pci-ar71xx.c194
-rw-r--r--arch/mips/pci/pci-ar724x.c304
-rw-r--r--arch/mips/pci/pci-bcm1480.c8
-rw-r--r--arch/mips/pci/pci-bcm1480ht.c6
-rw-r--r--arch/mips/pci/pci-bcm47xx.c2
-rw-r--r--arch/mips/pci/pci-bcm63xx.c62
-rw-r--r--arch/mips/pci/pci-bcm63xx.h2
-rw-r--r--arch/mips/pci/pci-ip27.c8
-rw-r--r--arch/mips/pci/pci-ip32.c6
-rw-r--r--arch/mips/pci/pci-lantiq.c12
-rw-r--r--arch/mips/pci/pci-lasat.c26
-rw-r--r--arch/mips/pci/pci-octeon.c30
-rw-r--r--arch/mips/pci/pci-rc32434.c6
-rw-r--r--arch/mips/pci/pci-sb1250.c8
-rw-r--r--arch/mips/pci/pci-vr41xx.c22
-rw-r--r--arch/mips/pci/pci-vr41xx.h2
-rw-r--r--arch/mips/pci/pci-xlp.c156
-rw-r--r--arch/mips/pci/pci-xlr.c34
-rw-r--r--arch/mips/pci/pci.c19
-rw-r--r--arch/mips/pci/pcie-octeon.c58
-rw-r--r--arch/mips/pmc-sierra/Platform7
-rw-r--r--arch/mips/pmcs-msp71xx/Kconfig (renamed from arch/mips/pmc-sierra/Kconfig)0
-rw-r--r--arch/mips/pmcs-msp71xx/Makefile (renamed from arch/mips/pmc-sierra/msp71xx/Makefile)0
-rw-r--r--arch/mips/pmcs-msp71xx/Platform7
-rw-r--r--arch/mips/pmcs-msp71xx/gpio.c (renamed from arch/mips/pmc-sierra/msp71xx/gpio.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/gpio_extended.c (renamed from arch/mips/pmc-sierra/msp71xx/gpio_extended.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_elb.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_elb.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_eth.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_eth.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_hwbutton.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_cic.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c)6
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_per.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq_per.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_irq_slp.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_pci.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_pci.c)2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_prom.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_prom.c)4
-rw-r--r--arch/mips/pmcs-msp71xx/msp_serial.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_serial.c)32
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_setup.c)8
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smp.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_smp.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smtc.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_smtc.c)0
-rw-r--r--arch/mips/pmcs-msp71xx/msp_time.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_time.c)2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_usb.c (renamed from arch/mips/pmc-sierra/msp71xx/msp_usb.c)54
-rw-r--r--arch/mips/pnx833x/Platform2
-rw-r--r--arch/mips/pnx833x/common/interrupts.c110
-rw-r--r--arch/mips/pnx833x/common/platform.c34
-rw-r--r--arch/mips/pnx833x/common/prom.c2
-rw-r--r--arch/mips/pnx833x/common/reset.c2
-rw-r--r--arch/mips/pnx833x/common/setup.c2
-rw-r--r--arch/mips/pnx833x/stb22x/board.c2
-rw-r--r--arch/mips/pnx8550/Makefile3
-rw-r--r--arch/mips/pnx8550/Platform7
-rw-r--r--arch/mips/pnx8550/common/Makefile26
-rw-r--r--arch/mips/pnx8550/common/int.c236
-rw-r--r--arch/mips/pnx8550/common/pci.c134
-rw-r--r--arch/mips/pnx8550/common/platform.c162
-rw-r--r--arch/mips/pnx8550/common/proc.c110
-rw-r--r--arch/mips/pnx8550/common/prom.c128
-rw-r--r--arch/mips/pnx8550/common/reset.c40
-rw-r--r--arch/mips/pnx8550/common/setup.c142
-rw-r--r--arch/mips/pnx8550/common/time.c151
-rw-r--r--arch/mips/pnx8550/jbs/Makefile4
-rw-r--r--arch/mips/pnx8550/jbs/board_setup.c56
-rw-r--r--arch/mips/pnx8550/jbs/init.c53
-rw-r--r--arch/mips/pnx8550/jbs/irqmap.c35
-rw-r--r--arch/mips/pnx8550/stb810/Makefile4
-rw-r--r--arch/mips/pnx8550/stb810/board_setup.c41
-rw-r--r--arch/mips/pnx8550/stb810/irqmap.c22
-rw-r--r--arch/mips/pnx8550/stb810/prom_init.c46
-rw-r--r--arch/mips/power/cpu.c2
-rw-r--r--arch/mips/power/hibernate.S2
-rw-r--r--arch/mips/powertv/asic/asic-calliope.c10
-rw-r--r--arch/mips/powertv/asic/asic-cronus.c6
-rw-r--r--arch/mips/powertv/asic/asic-gaia.c2
-rw-r--r--arch/mips/powertv/asic/asic-zeus.c6
-rw-r--r--arch/mips/powertv/asic/asic_devices.c8
-rw-r--r--arch/mips/powertv/asic/asic_int.c4
-rw-r--r--arch/mips/powertv/asic/irq_asic.c4
-rw-r--r--arch/mips/powertv/asic/prealloc-calliope.c10
-rw-r--r--arch/mips/powertv/asic/prealloc-cronus.c12
-rw-r--r--arch/mips/powertv/asic/prealloc-cronuslite.c8
-rw-r--r--arch/mips/powertv/asic/prealloc-gaia.c382
-rw-r--r--arch/mips/powertv/asic/prealloc-zeus.c10
-rw-r--r--arch/mips/powertv/init.c2
-rw-r--r--arch/mips/powertv/ioremap.c4
-rw-r--r--arch/mips/powertv/memory.c2
-rw-r--r--arch/mips/powertv/powertv-usb.c46
-rw-r--r--arch/mips/ralink/Kconfig32
-rw-r--r--arch/mips/ralink/Makefile15
-rw-r--r--arch/mips/ralink/Platform10
-rw-r--r--arch/mips/ralink/clk.c72
-rw-r--r--arch/mips/ralink/common.h44
-rw-r--r--arch/mips/ralink/dts/Makefile1
-rw-r--r--arch/mips/ralink/dts/rt3050.dtsi106
-rw-r--r--arch/mips/ralink/dts/rt3052_eval.dts52
-rw-r--r--arch/mips/ralink/early_printk.c44
-rw-r--r--arch/mips/ralink/irq.c180
-rw-r--r--arch/mips/ralink/of.c107
-rw-r--r--arch/mips/ralink/prom.c69
-rw-r--r--arch/mips/ralink/reset.c44
-rw-r--r--arch/mips/ralink/rt305x.c242
-rw-r--r--arch/mips/rb532/devices.c12
-rw-r--r--arch/mips/rb532/gpio.c8
-rw-r--r--arch/mips/rb532/irq.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-eisa.c14
-rw-r--r--arch/mips/sgi-ip22/ip22-gio.c14
-rw-r--r--arch/mips/sgi-ip22/ip22-int.c36
-rw-r--r--arch/mips/sgi-ip22/ip22-mc.c44
-rw-r--r--arch/mips/sgi-ip22/ip22-nvram.c16
-rw-r--r--arch/mips/sgi-ip22/ip22-platform.c8
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c4
-rw-r--r--arch/mips/sgi-ip22/ip28-berr.c16
-rw-r--r--arch/mips/sgi-ip27/ip27-berr.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-console.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c14
-rw-r--r--arch/mips/sgi-ip27/ip27-init.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c12
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-reset.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-smp.c4
-rw-r--r--arch/mips/sgi-ip27/ip27-timer.c6
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c16
-rw-r--r--arch/mips/sgi-ip32/ip32-irq.c8
-rw-r--r--arch/mips/sibyte/Platform6
-rw-r--r--arch/mips/sibyte/bcm1480/irq.c8
-rw-r--r--arch/mips/sibyte/common/cfe.c10
-rw-r--r--arch/mips/sibyte/common/sb_tbprof.c18
-rw-r--r--arch/mips/sibyte/sb1250/bus_watcher.c6
-rw-r--r--arch/mips/sibyte/sb1250/irq.c8
-rw-r--r--arch/mips/sibyte/sb1250/setup.c18
-rw-r--r--arch/mips/sibyte/swarm/platform.c4
-rw-r--r--arch/mips/sibyte/swarm/rtc_xicor1241.c50
-rw-r--r--arch/mips/sni/a20r.c44
-rw-r--r--arch/mips/sni/eisa.c2
-rw-r--r--arch/mips/sni/irq.c20
-rw-r--r--arch/mips/sni/pcimt.c36
-rw-r--r--arch/mips/sni/pcit.c40
-rw-r--r--arch/mips/sni/rm200.c38
-rw-r--r--arch/mips/sni/setup.c24
-rw-r--r--arch/mips/sni/time.c30
-rw-r--r--arch/mips/txx9/Platform4
-rw-r--r--arch/mips/txx9/generic/irq_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/irq_tx4939.c4
-rw-r--r--arch/mips/txx9/generic/mem_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/pci.c4
-rw-r--r--arch/mips/txx9/generic/setup.c8
-rw-r--r--arch/mips/txx9/generic/setup_tx3927.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4927.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4938.c2
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c4
-rw-r--r--arch/mips/txx9/generic/smsc_fdc37m81x.c48
-rw-r--r--arch/mips/txx9/rbtx4927/irq.c2
-rw-r--r--arch/mips/txx9/rbtx4927/prom.c2
-rw-r--r--arch/mips/txx9/rbtx4927/setup.c2
-rw-r--r--arch/mips/txx9/rbtx4938/setup.c8
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c4
-rw-r--r--arch/mips/vr41xx/common/bcu.c4
-rw-r--r--arch/mips/vr41xx/common/cmu.c16
-rw-r--r--arch/mips/vr41xx/common/giu.c2
-rw-r--r--arch/mips/vr41xx/common/icu.c8
-rw-r--r--arch/mips/vr41xx/common/pmu.c2
-rw-r--r--arch/mips/vr41xx/common/rtc.c2
-rw-r--r--arch/mips/vr41xx/common/type.c2
-rw-r--r--arch/mips/wrppmc/Platform2
-rw-r--r--arch/mips/wrppmc/irq.c4
-rw-r--r--arch/mips/wrppmc/serial.c2
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/kernel/sys_parisc32.c10
-rw-r--r--arch/parisc/kernel/syscall_table.S6
-rw-r--r--arch/powerpc/include/asm/systbl.h2
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c18
-rw-r--r--arch/s390/kernel/compat_wrapper.S16
-rw-r--r--arch/s390/kernel/syscalls.S6
-rw-r--r--arch/sparc/kernel/signal32.c2
-rw-r--r--arch/sparc/kernel/systbls_64.S6
-rw-r--r--arch/x86/ia32/sys_ia32.c5
-rw-r--r--arch/x86/include/asm/sys_ia32.h1
-rw-r--r--arch/x86/kernel/setup.c13
-rw-r--r--arch/x86/mm/numa.c11
-rw-r--r--arch/x86/mm/srat.c125
-rw-r--r--arch/x86/syscalls/syscall_32.tbl6
-rw-r--r--drivers/acpi/numa.c23
-rw-r--r--drivers/dma/dw_dmac.c145
-rw-r--r--drivers/dma/dw_dmac_regs.h7
-rw-r--r--drivers/hsi/hsi.c2
-rw-r--r--drivers/md/Kconfig55
-rw-r--r--drivers/md/Makefile6
-rw-r--r--drivers/md/dm-bio-prison.c155
-rw-r--r--drivers/md/dm-bio-prison.h58
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-block-types.h54
-rw-r--r--drivers/md/dm-cache-metadata.c1146
-rw-r--r--drivers/md/dm-cache-metadata.h142
-rw-r--r--drivers/md/dm-cache-policy-cleaner.c464
-rw-r--r--drivers/md/dm-cache-policy-internal.h124
-rw-r--r--drivers/md/dm-cache-policy-mq.c1195
-rw-r--r--drivers/md/dm-cache-policy.c161
-rw-r--r--drivers/md/dm-cache-policy.h228
-rw-r--r--drivers/md/dm-cache-target.c2584
-rw-r--r--drivers/md/dm-crypt.c45
-rw-r--r--drivers/md/dm-delay.c12
-rw-r--r--drivers/md/dm-flakey.c11
-rw-r--r--drivers/md/dm-ioctl.c166
-rw-r--r--drivers/md/dm-kcopyd.c121
-rw-r--r--drivers/md/dm-linear.c13
-rw-r--r--drivers/md/dm-mpath.c12
-rw-r--r--drivers/md/dm-raid.c10
-rw-r--r--drivers/md/dm-raid1.c17
-rw-r--r--drivers/md/dm-snap.c33
-rw-r--r--drivers/md/dm-stripe.c27
-rw-r--r--drivers/md/dm-table.c11
-rw-r--r--drivers/md/dm-target.c2
-rw-r--r--drivers/md/dm-thin-metadata.c12
-rw-r--r--drivers/md/dm-thin.c277
-rw-r--r--drivers/md/dm-verity.c8
-rw-r--r--drivers/md/dm-zero.c2
-rw-r--r--drivers/md/dm.c452
-rw-r--r--drivers/md/persistent-data/Kconfig2
-rw-r--r--drivers/md/persistent-data/Makefile2
-rw-r--r--drivers/md/persistent-data/dm-array.c808
-rw-r--r--drivers/md/persistent-data/dm-array.h166
-rw-r--r--drivers/md/persistent-data/dm-bitset.c163
-rw-r--r--drivers/md/persistent-data/dm-bitset.h165
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c1
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h1
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c7
-rw-r--r--drivers/md/persistent-data/dm-btree.c52
-rw-r--r--drivers/md/persistent-data/dm-btree.h15
-rw-r--r--drivers/misc/kgdbts.c2
-rw-r--r--drivers/mtd/Kconfig4
-rw-r--r--drivers/mtd/ar7part.c6
-rw-r--r--drivers/mtd/bcm47xxpart.c51
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c217
-rw-r--r--drivers/mtd/cmdlinepart.c47
-rw-r--r--drivers/mtd/devices/Makefile4
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.c55
-rw-r--r--drivers/mtd/devices/bcm47xxsflash.h15
-rw-r--r--drivers/mtd/devices/elm.c404
-rw-r--r--drivers/mtd/devices/m25p80.c100
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/physmap_of.c9
-rw-r--r--drivers/mtd/maps/uclinux.c30
-rw-r--r--drivers/mtd/nand/atmel_nand.c141
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h4
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/main.c14
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c4
-rw-r--r--drivers/mtd/nand/davinci_nand.c24
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c233
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h22
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c63
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h4
-rw-r--r--drivers/mtd/nand/mxc_nand.c11
-rw-r--r--drivers/mtd/nand/nand_base.c8
-rw-r--r--drivers/mtd/nand/nand_ecc.c5
-rw-r--r--drivers/mtd/nand/nandsim.c6
-rw-r--r--drivers/mtd/nand/omap2.c583
-rw-r--r--drivers/mtd/ofpart.c7
-rw-r--r--drivers/mtd/tests/mtd_nandecctest.c10
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c8
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c25
-rw-r--r--drivers/mtd/ubi/debug.h6
-rw-r--r--drivers/net/ethernet/broadcom/b44.c4
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c6
-rw-r--r--drivers/platform/x86/Kconfig15
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/acer-wmi.c21
-rw-r--r--drivers/platform/x86/asus-laptop.c85
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c76
-rw-r--r--drivers/platform/x86/asus-wmi.c108
-rw-r--r--drivers/platform/x86/asus-wmi.h9
-rw-r--r--drivers/platform/x86/chromeos_laptop.c371
-rw-r--r--drivers/platform/x86/eeepc-wmi.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c117
-rw-r--r--drivers/platform/x86/msi-laptop.c374
-rw-r--r--drivers/platform/x86/msi-wmi.c224
-rw-r--r--drivers/platform/x86/sony-laptop.c145
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c17
-rw-r--r--drivers/scsi/aacraid/src.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c256
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c266
-rw-r--r--drivers/scsi/fcoe/fcoe.h6
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c45
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c186
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c199
-rw-r--r--drivers/scsi/fcoe/libfcoe.h20
-rw-r--r--drivers/scsi/fnic/Makefile2
-rw-r--r--drivers/scsi/fnic/fnic.h60
-rw-r--r--drivers/scsi/fnic/fnic_debugfs.c314
-rw-r--r--drivers/scsi/fnic/fnic_io.h6
-rw-r--r--drivers/scsi/fnic/fnic_main.c17
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c721
-rw-r--r--drivers/scsi/fnic/fnic_trace.c273
-rw-r--r--drivers/scsi/fnic/fnic_trace.h90
-rw-r--r--drivers/scsi/hpsa.c117
-rw-r--r--drivers/scsi/ipr.c33
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_fcp.c6
-rw-r--r--drivers/scsi/libfc/fc_libfc.h38
-rw-r--r--drivers/scsi/libfc/fc_rport.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c56
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h6
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.h1
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h6
-rw-r--r--drivers/scsi/mvsas/mv_sas.c10
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/osd/osd_initiator.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c35
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h54
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h15
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c200
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h28
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c39
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c82
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c207
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c147
-rw-r--r--drivers/scsi/qla2xxx/qla_settings.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c169
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c6
-rw-r--r--drivers/scsi/storvsc_drv.c137
-rw-r--r--drivers/scsi/ufs/Kconfig74
-rw-r--r--drivers/scsi/ufs/Makefile1
-rw-r--r--drivers/scsi/ufs/ufs.h44
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c211
-rw-r--r--drivers/scsi/ufs/ufshcd.c423
-rw-r--r--drivers/scsi/ufs/ufshcd.h202
-rw-r--r--drivers/scsi/ufs/ufshci.h44
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c11
-rw-r--r--drivers/target/sbp/sbp_target.c2
-rw-r--r--drivers/target/target_core_file.c2
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pscsi.c9
-rw-r--r--drivers/tty/serial/Kconfig10
-rw-r--r--fs/autofs4/root.c4
-rw-r--r--fs/autofs4/waitq.c6
-rw-r--r--fs/btrfs/Kconfig3
-rw-r--r--fs/btrfs/Makefile2
-rw-r--r--fs/btrfs/backref.c5
-rw-r--r--fs/btrfs/backref.h2
-rw-r--r--fs/btrfs/btrfs_inode.h20
-rw-r--r--fs/btrfs/check-integrity.c3
-rw-r--r--fs/btrfs/compression.c4
-rw-r--r--fs/btrfs/ctree.c68
-rw-r--r--fs/btrfs/ctree.h150
-rw-r--r--fs/btrfs/delayed-inode.c147
-rw-r--r--fs/btrfs/delayed-inode.h1
-rw-r--r--fs/btrfs/delayed-ref.c82
-rw-r--r--fs/btrfs/delayed-ref.h52
-rw-r--r--fs/btrfs/dev-replace.c6
-rw-r--r--fs/btrfs/disk-io.c227
-rw-r--r--fs/btrfs/disk-io.h7
-rw-r--r--fs/btrfs/extent-tree.c578
-rw-r--r--fs/btrfs/extent_io.c138
-rw-r--r--fs/btrfs/extent_io.h8
-rw-r--r--fs/btrfs/extent_map.c1
-rw-r--r--fs/btrfs/file-item.c67
-rw-r--r--fs/btrfs/file.c57
-rw-r--r--fs/btrfs/free-space-cache.c62
-rw-r--r--fs/btrfs/inode.c1064
-rw-r--r--fs/btrfs/ioctl.c211
-rw-r--r--fs/btrfs/locking.c5
-rw-r--r--fs/btrfs/ordered-data.c98
-rw-r--r--fs/btrfs/ordered-data.h14
-rw-r--r--fs/btrfs/print-tree.c1
-rw-r--r--fs/btrfs/qgroup.c55
-rw-r--r--fs/btrfs/raid56.c2099
-rw-r--r--fs/btrfs/raid56.h51
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/scrub.c10
-rw-r--r--fs/btrfs/send.c53
-rw-r--r--fs/btrfs/send.h1
-rw-r--r--fs/btrfs/super.c89
-rw-r--r--fs/btrfs/sysfs.c1
-rw-r--r--fs/btrfs/transaction.c151
-rw-r--r--fs/btrfs/transaction.h8
-rw-r--r--fs/btrfs/tree-defrag.c19
-rw-r--r--fs/btrfs/tree-log.c166
-rw-r--r--fs/btrfs/ulist.c2
-rw-r--r--fs/btrfs/volumes.c636
-rw-r--r--fs/btrfs/volumes.h11
-rw-r--r--fs/cifs/cifsfs.c5
-rw-r--r--fs/cifs/cifssmb.c5
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c12
-rw-r--r--fs/ext4/balloc.c2
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h1
-rw-r--r--fs/ext4/extents_status.c39
-rw-r--r--fs/ext4/mballoc.c8
-rw-r--r--fs/ext4/resize.c6
-rw-r--r--fs/ext4/super.c61
-rw-r--r--fs/jbd2/transaction.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/nfs4filelayout.c6
-rw-r--r--fs/nfs/nfs4filelayout.h2
-rw-r--r--fs/nfs/nfs4proc.c21
-rw-r--r--fs/nfs/pnfs.c21
-rw-r--r--fs/nfs/pnfs.h6
-rw-r--r--fs/nfs/unlink.c20
-rw-r--r--fs/open.c15
-rw-r--r--fs/read_write.c9
-rw-r--r--fs/timerfd.c10
-rw-r--r--include/asm-generic/checksum.h4
-rw-r--r--include/asm-generic/uaccess.h14
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/bcma/bcma_driver_chipcommon.h1
-rw-r--r--include/linux/btrfs.h6
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/device-mapper.h49
-rw-r--r--include/linux/dm-kcopyd.h25
-rw-r--r--include/linux/dmaengine.h16
-rw-r--r--include/linux/dw_dmac.h5
-rw-r--r--include/linux/hsi/hsi.h6
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/mm.h18
-rw-r--r--include/linux/mtd/map.h9
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/platform_data/elm.h54
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/scsi/Kbuild3
-rw-r--r--include/scsi/fc/Kbuild4
-rw-r--r--include/scsi/fcoe_sysfs.h11
-rw-r--r--include/scsi/libfcoe.h32
-rw-r--r--include/scsi/scsi_host.h4
-rw-r--r--include/trace/events/ext4.h40
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/btrfs.h (renamed from fs/btrfs/ioctl.h)18
-rw-r--r--include/uapi/linux/dm-ioctl.h11
-rw-r--r--include/uapi/scsi/Kbuild3
-rw-r--r--include/uapi/scsi/fc/Kbuild4
-rw-r--r--include/uapi/scsi/fc/fc_els.h (renamed from include/scsi/fc/fc_els.h)0
-rw-r--r--include/uapi/scsi/fc/fc_fs.h (renamed from include/scsi/fc/fc_fs.h)0
-rw-r--r--include/uapi/scsi/fc/fc_gs.h (renamed from include/scsi/fc/fc_gs.h)0
-rw-r--r--include/uapi/scsi/fc/fc_ns.h (renamed from include/scsi/fc/fc_ns.h)0
-rw-r--r--include/uapi/scsi/scsi_bsg_fc.h (renamed from include/scsi/scsi_bsg_fc.h)0
-rw-r--r--include/uapi/scsi/scsi_netlink.h (renamed from include/scsi/scsi_netlink.h)0
-rw-r--r--include/uapi/scsi/scsi_netlink_fc.h (renamed from include/scsi/scsi_netlink_fc.h)0
-rw-r--r--init/Kconfig8
-rw-r--r--kernel/debug/debug_core.h2
-rw-r--r--kernel/debug/gdbstub.c3
-rw-r--r--kernel/debug/kdb/kdb_bp.c20
-rw-r--r--kernel/debug/kdb/kdb_debugger.c25
-rw-r--r--kernel/debug/kdb/kdb_main.c135
-rw-r--r--kernel/debug/kdb/kdb_private.h4
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/sysctl.c5
-rw-r--r--lib/Kconfig.kgdb18
-rw-r--r--lib/checksum.c2
-rw-r--r--mm/memblock.c50
-rw-r--r--mm/page_alloc.c285
-rw-r--r--net/sunrpc/clnt.c15
-rw-r--r--net/sunrpc/xprt.c6
-rw-r--r--tools/perf/perf.h6
1336 files changed, 57763 insertions, 20355 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
index 50e2a80ea28..21640eaad37 100644
--- a/Documentation/ABI/testing/sysfs-bus-fcoe
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -1,14 +1,53 @@
-What: /sys/bus/fcoe/ctlr_X
+What: /sys/bus/fcoe/
+Date: August 2012
+KernelVersion: TBD
+Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
+Description: The FCoE bus. Attributes in this directory are control interfaces.
+Attributes:
+
+ ctlr_create: 'FCoE Controller' instance creation interface. Writing an
+ <ifname> to this file will allocate and populate sysfs with a
+ fcoe_ctlr_device (ctlr_X). The user can then configure any
+ per-port settings and finally write to the fcoe_ctlr_device's
+ 'start' attribute to begin the kernel's discovery and login
+ process.
+
+ ctlr_destroy: 'FCoE Controller' instance removal interface. Writing a
+ fcoe_ctlr_device's sysfs name to this file will log the
+ fcoe_ctlr_device out of the fabric or other connected
+ FCoE devices. It will also free all kernel memory allocated
+ for this fcoe_ctlr_device and any structures associated
+ with it, including the scsi_host.
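+
+ For example, with a hypothetical FCoE-capable interface named
+ 'eth2' (and ctlr_X standing for the resulting controller's
+ name), creation and removal could look like:
+
+   echo eth2 > /sys/bus/fcoe/ctlr_create
+   echo ctlr_X > /sys/bus/fcoe/ctlr_destroy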
+
+What: /sys/bus/fcoe/devices/ctlr_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
-Description: 'FCoE Controller' instances on the fcoe bus
+Description: 'FCoE Controller' instances on the fcoe bus.
+ The FCoE Controller now has a three-stage creation process:
+ 1) write an interface name to ctlr_create, 2) configure the FCoE
+ Controller (ctlr_X), 3) enable the FCoE Controller to begin
+ discovery and login. The FCoE Controller is destroyed by
+ writing its name, i.e. ctlr_X, to the ctlr_destroy file.
+
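+ As a sketch of stages 2 and 3 (the attribute paths are assumed
+ from the device location above, and the mode string is
+ illustrative; see the attributes below):
+
+   echo VN2VN > /sys/bus/fcoe/devices/ctlr_X/mode
+   echo 1 > /sys/bus/fcoe/devices/ctlr_X/enabled
+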
Attributes:
 fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
this value will change the dev_loss_tmo for all
FCFs discovered by this controller.
+ mode: Display or change the FCoE Controller's mode. Possible
+ modes are 'Fabric' and 'VN2VN'. If an FCoE Controller
+ is started in 'Fabric' mode then FIP FCF discovery is
+ initiated and ultimately a fabric login is attempted.
+ If an FCoE Controller is started in 'VN2VN' mode then
+ FIP VN2VN discovery and login are performed. An FCoE
+ Controller only supports one mode at a time.
+
+ enabled: Whether an FCoE controller is enabled or disabled.
+ 0 if disabled, 1 if enabled. Writing either 0 or 1
+ to this file will enable or disable the FCoE controller.
+
lesb/link_fail: Link Error Status Block (LESB) link failure count.
lesb/vlink_fail: Link Error Status Block (LESB) virtual link
@@ -26,7 +65,7 @@ Attributes:
Notes: ctlr_X (global increment starting at 0)
-What: /sys/bus/fcoe/fcf_X
+What: /sys/bus/fcoe/devices/fcf_X
Date: March 2012
KernelVersion: TBD
Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
diff --git a/Documentation/ABI/testing/sysfs-platform-msi-laptop b/Documentation/ABI/testing/sysfs-platform-msi-laptop
new file mode 100644
index 00000000000..307a247ba1e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-msi-laptop
@@ -0,0 +1,83 @@
+What: /sys/devices/platform/msi-laptop-pf/lcd_level
+Date: Oct 2006
+KernelVersion: 2.6.19
+Contact: "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+ Screen brightness: contains a single integer in the range 0..8.
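+
+ A brief usage sketch (assuming the attribute is writable; the
+ value is illustrative):
+
+   cat /sys/devices/platform/msi-laptop-pf/lcd_level
+   echo 4 > /sys/devices/platform/msi-laptop-pf/lcd_level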
+
+What: /sys/devices/platform/msi-laptop-pf/auto_brightness
+Date: Oct 2006
+KernelVersion: 2.6.19
+Contact: "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+ Enable automatic brightness control: contains either 0 or 1. If
+ set to 1 the hardware adjusts the screen brightness
+ automatically when the power cord is plugged/unplugged.
+
+What: /sys/devices/platform/msi-laptop-pf/wlan
+Date: Oct 2006
+KernelVersion: 2.6.19
+Contact: "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+ WLAN subsystem enabled: contains either 0 or 1.
+
+What: /sys/devices/platform/msi-laptop-pf/bluetooth
+Date: Oct 2006
+KernelVersion: 2.6.19
+Contact: "Lennart Poettering <mzxreary@0pointer.de>"
+Description:
+ Bluetooth subsystem enabled: contains either 0 or 1. Please
+ note that this file is constantly 0 if no Bluetooth hardware is
+ available.
+
+What: /sys/devices/platform/msi-laptop-pf/touchpad
+Date: Nov 2012
+KernelVersion: 3.8
+Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+ Contains either 0 or 1 and indicates if touchpad is turned on.
+ Touchpad state can only be toggled by pressing Fn+F3.
+
+What: /sys/devices/platform/msi-laptop-pf/turbo_mode
+Date: Nov 2012
+KernelVersion: 3.8
+Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+ Contains either 0 or 1 and indicates if turbo mode is turned
+ on. In turbo mode the power LED is orange and the processor is
+ overclocked. Turbo mode is available only while charging. It is
+ only possible to toggle the turbo mode state by pressing Fn+F10,
+ and there is a cooldown of a few seconds between subsequent
+ toggles. If the user presses Fn+F10 too frequently, the turbo
+ mode state is not changed.
+
+What: /sys/devices/platform/msi-laptop-pf/eco_mode
+Date: Nov 2012
+KernelVersion: 3.8
+Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+ Contains either 0 or 1 and indicates if ECO mode is turned on.
+ In ECO mode the power LED is green and userspace should take
+ some powersaving actions. ECO mode is available only on battery
+ power. ECO mode can only be toggled by pressing Fn+F10.
+
+What: /sys/devices/platform/msi-laptop-pf/turbo_cooldown
+Date: Nov 2012
+KernelVersion: 3.8
+Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+ Contains value in range 0..3:
+ * 0 -> Turbo mode is off
+ * 1 -> Turbo mode is on, cannot be turned off yet
+ * 2 -> Turbo mode is off, cannot be turned on yet
+ * 3 -> Turbo mode is on
+
+What: /sys/devices/platform/msi-laptop-pf/auto_fan
+Date: Nov 2012
+KernelVersion: 3.8
+Contact: "Maxim Mikityanskiy <maxtram95@gmail.com>"
+Description:
+ Contains either 0 or 1 and indicates if fan speed is controlled
+ automatically (1) or the fan runs at maximal speed (0). It can
+ be toggled in software.
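+
+ For instance (a sketch; 1 selects automatic control as described
+ above), the fan mode could be switched with:
+
+   echo 1 > /sys/devices/platform/msi-laptop-pf/auto_fan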
+
diff --git a/Documentation/device-mapper/cache-policies.txt b/Documentation/device-mapper/cache-policies.txt
new file mode 100644
index 00000000000..d7c440b444c
--- /dev/null
+++ b/Documentation/device-mapper/cache-policies.txt
@@ -0,0 +1,77 @@
+Guidance for writing policies
+=============================
+
+Try to keep transactionality out of it. The core is careful to
+avoid asking about anything that is migrating. This is a pain, but
+makes it easier to write the policies.
+
+Mappings are loaded into the policy at construction time.
+
+Every bio that is mapped by the target is referred to the policy.
+The policy can return a simple HIT or MISS or issue a migration.
+
+Currently there's no way for the policy to issue background work,
+e.g. to start writing back dirty blocks that are going to be evicted
+soon.
+
+Because we map bios, rather than requests, it's easy for the policy
+to get fooled by many small bios. For this reason the core target
+issues periodic ticks to the policy. It's suggested that the policy
+doesn't update states (eg, hit counts) for a block more than once
+for each tick. The core ticks by watching bios complete, and so
+tries to see when the io scheduler has let the ios run.
+
+
+Overview of supplied cache replacement policies
+===============================================
+
+multiqueue
+----------
+
+This policy is the default.
+
+The multiqueue policy has two sets of 16 queues: one set for entries
+waiting for the cache and another one for those in the cache.
+Cache entries in the queues are aged based on logical time. Entry into
+the cache is based on variable thresholds and queue selection is based
+on hit count on entry. The policy aims to take different cache miss
+costs into account and to adjust to varying load patterns automatically.
+
+Message and constructor argument pairs are:
+ 'sequential_threshold <#nr_sequential_ios>' and
+ 'random_threshold <#nr_random_ios>'.
+
+The sequential threshold indicates the number of contiguous I/Os
+required before a stream is treated as sequential. The random threshold
+is the number of intervening non-contiguous I/Os that must be seen
+before the stream is treated as random again.
+
+The sequential and random thresholds default to 512 and 4 respectively.
+
+Large, sequential ios are probably better left on the origin device
+since spindles tend to have good bandwidth. The io_tracker counts
+contiguous I/Os to try to spot when the io is in one of these sequential
+modes.
+
+cleaner
+-------
+
+The cleaner writes back all dirty blocks in a cache to decommission it.
+
+Examples
+========
+
+The syntax for a table is:
+ cache <metadata dev> <cache dev> <origin dev> <block size>
+ <#feature_args> [<feature arg>]*
+ <policy> <#policy_args> [<policy arg>]*
+
+The syntax to send a message using the dmsetup command is:
+ dmsetup message <mapped device> 0 sequential_threshold 1024
+ dmsetup message <mapped device> 0 random_threshold 8
+
+Using dmsetup:
+ dmsetup create blah --table "0 268435456 cache /dev/sdb /dev/sdc \
+ /dev/sdd 512 0 mq 4 sequential_threshold 1024 random_threshold 8"
+ creates a 128GB mapped device named 'blah' with the
+ sequential threshold set to 1024 and the random_threshold set to 8.
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
new file mode 100644
index 00000000000..f50470abe24
--- /dev/null
+++ b/Documentation/device-mapper/cache.txt
@@ -0,0 +1,243 @@
+Introduction
+============
+
+dm-cache is a device mapper target written by Joe Thornber, Heinz
+Mauelshagen, and Mike Snitzer.
+
+It aims to improve performance of a block device (eg, a spindle) by
+dynamically migrating some of its data to a faster, smaller device
+(eg, an SSD).
+
+This device-mapper solution allows us to insert this caching at
+different levels of the dm stack, for instance above the data device for
+a thin-provisioning pool. Caching solutions that are integrated more
+closely with the virtual memory system should give better performance.
+
+The target reuses the metadata library used in the thin-provisioning
+library.
+
+The decision as to what data to migrate and when is left to a plug-in
+policy module. Several of these have been written as we experiment,
+and we hope other people will contribute others for specific io
+scenarios (eg. a vm image server).
+
+Glossary
+========
+
+ Migration - Movement of the primary copy of a logical block from one
+ device to the other.
+ Promotion - Migration from slow device to fast device.
+ Demotion - Migration from fast device to slow device.
+
+The origin device always contains a copy of the logical block, which
+may be out of date or kept in sync with the copy on the cache device
+(depending on policy).
+
+Design
+======
+
+Sub-devices
+-----------
+
+The target is constructed by passing three devices to it (along with
+other parameters detailed later):
+
+1. An origin device - the big, slow one.
+
+2. A cache device - the small, fast one.
+
+3. A small metadata device - records which blocks are in the cache,
+ which are dirty, and extra hints for use by the policy object.
+ This information could be put on the cache device, but having it
+ separate allows the volume manager to configure it differently,
+ e.g. as a mirror for extra robustness.
+
+Fixed block size
+----------------
+
+The origin is divided up into blocks of a fixed size. This block size
+is configurable when you first create the cache. Typically we've been
+using block sizes of 256k - 1024k.
+
+Having a fixed block size simplifies the target a lot. But it is
+something of a compromise. For instance, a small part of a block may be
+getting hit a lot, yet the whole block will be promoted to the cache.
+So large block sizes are bad because they waste cache space. And small
+block sizes are bad because they increase the amount of metadata (both
+in core and on disk).
+
+Writeback/writethrough
+----------------------
+
+The cache has two modes, writeback and writethrough.
+
+If writeback, the default, is selected then a write to a block that is
+cached will go only to the cache and the block will be marked dirty in
+the metadata.
+
+If writethrough is selected then a write to a cached block will not
+complete until it has hit both the origin and cache devices. Clean
+blocks should remain clean.
+
+A simple cleaner policy is provided, which will clean (write back) all
+dirty blocks in a cache. Useful for decommissioning a cache.
+
+Migration throttling
+--------------------
+
+Migrating data between the origin and cache device uses bandwidth.
+The user can set a throttle to prevent more than a certain amount of
+migration occurring at any one time. Currently we're not taking any
+account of normal io traffic going to the devices. More work is
+needed here to avoid migrating during those peak io moments.
+
+For the time being, a message "migration_threshold <#sectors>"
+can be used to set the maximum number of sectors being migrated,
+the default being 204800 sectors (or 100MB).
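+
+For example, assuming a cache device named 'my_cache' (as in the
+examples at the end of this document), the throttle could be lowered
+with something like:
+
+  dmsetup message my_cache 0 migration_threshold 102400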
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
+written. If no such requests are made then commits will occur every
+second. This means the cache behaves like a physical disk that has a
+write cache (the same is true of the thin-provisioning target). If
+power is lost you may lose some recent writes. The metadata should
+always be consistent in spite of any crash.
+
+The 'dirty' state for a cache block changes far too frequently for us
+to keep updating it on the fly. So we treat it as a hint. In normal
+operation it will be written when the dm device is suspended. If the
+system crashes all cache blocks will be assumed dirty when restarted.
+
+Per-block policy hints
+----------------------
+
+Policy plug-ins can store a chunk of data per cache block. It's up to
+the policy how big this chunk is, but it should be kept small. Like the
+dirty flags this data is lost if there's a crash so a safe fallback
+value should always be possible.
+
+For instance, the 'mq' policy, which is currently the default policy,
+uses this facility to store the hit count of the cache blocks. If
+there's a crash this information will be lost, which means the cache
+may be less efficient until those hit counts are regenerated.
+
+Policy hints affect performance, not correctness.
+
+Policy messaging
+----------------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these. Device-mapper
+messages are used. Refer to cache-policies.txt.
+
+Discard bitset resolution
+-------------------------
+
+We can avoid copying data during migration if we know the block has
+been discarded. A prime example of this is when mkfs discards the
+whole block device. We store a bitset tracking the discard state of
+blocks. However, we allow this bitset to have a different block size
+from the cache blocks. This is because we need to track the discard
+state for all of the origin device (compare with the dirty bitset
+which is just for the smaller cache device).
+
+Target interface
+================
+
+Constructor
+-----------
+
+ cache <metadata dev> <cache dev> <origin dev> <block size>
+ <#feature args> [<feature arg>]*
+ <policy> <#policy args> [policy args]*
+
+ metadata dev : fast device holding the persistent metadata
+ cache dev : fast device holding cached data blocks
+ origin dev : slow device holding original data blocks
+ block size : cache unit size in sectors
+
+ #feature args : number of feature arguments passed
+ feature args : writethrough. (The default is writeback.)
+
+ policy : the replacement policy to use
+ #policy args : an even number of arguments corresponding to
+ key/value pairs passed to the policy
+ policy args : key/value pairs passed to the policy
+ E.g. 'sequential_threshold 1024'
+ See cache-policies.txt for details.
+
+Optional feature arguments are:
+ writethrough : write through caching that prohibits cache block
+ content from being different from origin block content.
+ Without this argument, the default behaviour is to write
+ back cache block contents later for performance reasons,
+ so they may differ from the corresponding origin blocks.
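+
+As a sketch, a writethrough table line (using the placeholder device
+names from above, and the same 512-sector block size as the examples
+at the end of this document) could look like:
+
+  cache <metadata dev> <cache dev> <origin dev> 512 1 writethrough default 0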
+
+A policy called 'default' is always registered. This is an alias for
+the policy we currently think gives the best all-round performance.
+
+As the default policy could vary between kernels, if you are relying on
+the characteristics of a specific policy, always request it by name.
+
+Status
+------
+
+<#used metadata blocks>/<#total metadata blocks> <#read hits> <#read misses>
+<#write hits> <#write misses> <#demotions> <#promotions> <#blocks in cache>
+<#dirty> <#features> <features>* <#core args> <core args>* <#policy args>
+<policy args>*
+
+#used metadata blocks : Number of metadata blocks used
+#total metadata blocks : Total number of metadata blocks
+#read hits : Number of times a READ bio has been mapped
+ to the cache
+#read misses : Number of times a READ bio has been mapped
+ to the origin
+#write hits : Number of times a WRITE bio has been mapped
+ to the cache
+#write misses : Number of times a WRITE bio has been
+ mapped to the origin
+#demotions : Number of times a block has been removed
+ from the cache
+#promotions : Number of times a block has been moved to
+ the cache
+#blocks in cache : Number of blocks resident in the cache
+#dirty : Number of blocks in the cache that differ
+ from the origin
+#feature args : Number of feature args to follow
+feature args : 'writethrough' (optional)
+#core args : Number of core arguments (must be even)
+core args : Key/value pairs for tuning the core
+ e.g. migration_threshold
+#policy args : Number of policy arguments to follow (must be even)
+policy args : Key/value pairs
+ e.g. 'sequential_threshold 1024'
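+
+The status line can be retrieved with dmsetup; for instance, for a
+device named 'my_cache' (as in the examples below):
+
+  dmsetup status my_cache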
+
+Messages
+--------
+
+Policies will have different tunables, specific to each one, so we
+need a generic way of getting and setting these. Device-mapper
+messages are used. (A sysfs interface would also be possible.)
+
+The message format is:
+
+ <key> <value>
+
+E.g.
+ dmsetup message my_cache 0 sequential_threshold 1024
+
+Examples
+========
+
+The test suite can be found here:
+
+https://github.com/jthornber/thinp-test-suite
+
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+ /dev/mapper/ssd /dev/mapper/origin 512 1 writeback default 0'
+dmsetup create my_cache --table '0 41943040 cache /dev/mapper/metadata \
+ /dev/mapper/ssd /dev/mapper/origin 1024 1 writeback \
+ mq 4 sequential_threshold 1024 random_threshold 8'
diff --git a/Documentation/devicetree/bindings/arc/interrupts.txt b/Documentation/devicetree/bindings/arc/interrupts.txt
new file mode 100644
index 00000000000..9a5d562435e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arc/interrupts.txt
@@ -0,0 +1,24 @@
+* ARC700 incore Interrupt Controller
+
+ The core interrupt controller provides 32 prioritised interrupts (2 levels)
+ to ARC700 core.
+
+Properties:
+
+- compatible: "snps,arc700-intc"
+- interrupt-controller: This is an interrupt controller.
+- #interrupt-cells: Must be <1>.
+
+ The single-cell "interrupts" property of a device specifies the IRQ number,
+ between 0 and 31.
+
+ The intc is accessed via the special ARC AUX register interface, hence the
+ "reg" property is not specified.
+
+Example:
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
index 5bb3dfb6f1d..d58675ea1ab 100644
--- a/Documentation/devicetree/bindings/dma/snps-dma.txt
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -3,59 +3,61 @@
Required properties:
- compatible: "snps,dma-spear1340"
- reg: Address range of the DMAC registers
-- interrupt-parent: Should be the phandle for the interrupt controller
- that services interrupts for this device
- interrupt: Should contain the DMAC interrupt number
-- nr_channels: Number of channels supported by hardware
-- is_private: The device channels should be marked as private and not for by the
- general purpose DMA channel allocator. False if not passed.
+- dma-channels: Number of channels supported by hardware
+- dma-requests: Number of DMA request lines supported, up to 16
+- dma-masters: Number of AHB masters supported by the controller
+- #dma-cells: must be <3>
- chan_allocation_order: order of allocation of channel, 0 (default): ascending,
1: descending
- chan_priority: priority of channels. 0 (default): increase from chan 0->n, 1:
increase from chan n->0
- block_size: Maximum block size supported by the controller
-- nr_masters: Number of AHB masters supported by the controller
- data_width: Maximum data width supported by hardware per AHB master
(0 - 8bits, 1 - 16bits, ..., 5 - 256bits)
-- slave_info:
- - bus_id: name of this device channel, not just a device name since
- devices may have more than one channel e.g. "foo_tx". For using the
- dw_generic_filter(), slave drivers must pass exactly this string as
- param to filter function.
- - cfg_hi: Platform-specific initializer for the CFG_HI register
- - cfg_lo: Platform-specific initializer for the CFG_LO register
- - src_master: src master for transfers on allocated channel.
- - dst_master: dest master for transfers on allocated channel.
+
+
+Optional properties:
+- interrupt-parent: Should be the phandle for the interrupt controller
+ that services interrupts for this device
+- is_private: The device channels should be marked as private and not for by the
+ general purpose DMA channel allocator. False if not passed.
Example:
- dma@fc000000 {
+ dmahost: dma@fc000000 {
compatible = "snps,dma-spear1340";
reg = <0xfc000000 0x1000>;
interrupt-parent = <&vic1>;
interrupts = <12>;
- nr_channels = <8>;
+ dma-channels = <8>;
+ dma-requests = <16>;
+ dma-masters = <2>;
+ #dma-cells = <3>;
chan_allocation_order = <1>;
chan_priority = <1>;
block_size = <0xfff>;
- nr_masters = <2>;
data_width = <3 3 0 0>;
+ };
- slave_info {
- uart0-tx {
- bus_id = "uart0-tx";
- cfg_hi = <0x4000>; /* 0x8 << 11 */
- cfg_lo = <0>;
- src_master = <0>;
- dst_master = <1>;
- };
- spi0-tx {
- bus_id = "spi0-tx";
- cfg_hi = <0x2000>; /* 0x4 << 11 */
- cfg_lo = <0>;
- src_master = <0>;
- dst_master = <0>;
- };
- };
+DMA clients connected to the Designware DMA controller must use the format
+described in the dma.txt file, using a four-cell specifier for each channel.
+The four cells in order are:
+
+1. A phandle pointing to the DMA controller
+2. The DMA request line number
+3. Source master for transfers on allocated channel
+4. Destination master for transfers on allocated channel
+
+Example:
+
+ serial@e0000000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0xe0000000 0x1000>;
+ interrupts = <0 35 0x4>;
+ status = "disabled";
+ dmas = <&dmahost 12 0 1>,
+ <&dmahost 13 0 1>;
+ dma-names = "rx", "rx";
};
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
new file mode 100644
index 00000000000..13aa4b62c62
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -0,0 +1,47 @@
+MIPS CPU interrupt controller
+
+On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU
+IRQs from a devicetree file and create an irq_domain for the IRQ controller.
+
+With the irq_domain in place we can describe how the 8 IRQs are wired to the
+platform's internal interrupt controller cascade.
+
+Below is an example of a platform describing the cascade inside the devicetree
+and the code used to load it inside arch_init_irq().
+
+Required properties:
+- compatible : Should be "mti,cpu-interrupt-controller"
+
+Example devicetree:
+ cpu-irq: cpu-irq@0 {
+ #address-cells = <0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu-irq>;
+ interrupts = <2>;
+ };
+
+
+Example platform irq.c:
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+ { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
diff --git a/Documentation/devicetree/bindings/mtd/elm.txt b/Documentation/devicetree/bindings/mtd/elm.txt
new file mode 100644
index 00000000000..8c1528c421d
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/elm.txt
@@ -0,0 +1,16 @@
+Error location module
+
+Required properties:
+- compatible: Must be "ti,am33xx-elm"
+- reg: physical base address and size of the registers map.
+- interrupts: Interrupt number for the elm.
+
+Optional properties:
+- ti,hwmods: Name of the hwmod associated with the elm
+
+Example:
+elm: elm@0 {
+ compatible = "ti,am33xx-elm";
+ reg = <0x48080000 0x2000>;
+ interrupts = <4>;
+};
diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
index dab7847fc80..61c5ec850f2 100644
--- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
+++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt
@@ -26,6 +26,9 @@ file systems on embedded devices.
- linux,mtd-name: allow to specify the mtd name for retro capability with
physmap-flash drivers as boot loader pass the mtd partition via the old
device name physmap-flash.
+ - use-advanced-sector-protection: boolean to enable support for the
+ advanced sector protection (Spansion: PPB - Persistent Protection
+ Bits) locking.
For JEDEC compatible devices, the following additional properties
are defined:
diff --git a/Documentation/devicetree/bindings/serial/lantiq_asc.txt b/Documentation/devicetree/bindings/serial/lantiq_asc.txt
new file mode 100644
index 00000000000..5b78591aaa4
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/lantiq_asc.txt
@@ -0,0 +1,16 @@
+Lantiq SoC ASC serial controller
+
+Required properties:
+- compatible : Should be "lantiq,asc"
+- reg : Address and length of the register set for the device
+- interrupts: the 3 (tx rx err) interrupt numbers. The interrupt specifier
+ depends on the interrupt-parent interrupt controller.
+
+Example:
+
+asc1: serial@E100C00 {
+ compatible = "lantiq,asc";
+ reg = <0xE100C00 0x400>;
+ interrupt-parent = <&icu0>;
+ interrupts = <112 113 114>;
+};
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index e567af39ee3..3a54fca730c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1645,42 +1645,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
that the amount of memory usable for all allocations
is not too small.
- movablemem_map=acpi
- [KNL,X86,IA-64,PPC] This parameter is similar to
- memmap except it specifies the memory map of
- ZONE_MOVABLE.
- This option inform the kernel to use Hot Pluggable bit
- in flags from SRAT from ACPI BIOS to determine which
- memory devices could be hotplugged. The corresponding
- memory ranges will be set as ZONE_MOVABLE.
- NOTE: Whatever node the kernel resides in will always
- be un-hotpluggable.
-
- movablemem_map=nn[KMG]@ss[KMG]
- [KNL,X86,IA-64,PPC] This parameter is similar to
- memmap except it specifies the memory map of
- ZONE_MOVABLE.
- If user specifies memory ranges, the info in SRAT will
- be ingored. And it works like the following:
- - If more ranges are all within one node, then from
- lowest ss to the end of the node will be ZONE_MOVABLE.
- - If a range is within a node, then from ss to the end
- of the node will be ZONE_MOVABLE.
- - If a range covers two or more nodes, then from ss to
- the end of the 1st node will be ZONE_MOVABLE, and all
- the rest nodes will only have ZONE_MOVABLE.
- If memmap is specified at the same time, the
- movablemem_map will be limited within the memmap
- areas. If kernelcore or movablecore is also specified,
- movablemem_map will have higher priority to be
- satisfied. So the administrator should be careful that
- the amount of movablemem_map areas are not too large.
- Otherwise kernel won't have enough memory to start.
- NOTE: We don't stop users specifying the node the
- kernel resides in as hotpluggable so that this
- option can be used as a workaround of firmware
- bugs.
-
MTD_Partition= [MTD]
Format: <name>,<region-number>,<size>,<offset>
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index da03146c182..09673c7fc8e 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,12 @@
+Release Date : Sat. Feb 9, 2013 17:00:00 PST 2013 -
+ (emaild-id:megaraidlinux@lsi.com)
+ Adam Radford
+Current Version : 06.506.00.00-rc1
+Old Version : 06.504.01.00-rc1
+ 1. Add 4k FastPath DIF support.
+ 2. Dont load DevHandle unless FastPath enabled.
+ 3. Version and Changelog update.
+-------------------------------------------------------------------------------
Release Date : Mon. Oct 1, 2012 17:00:00 PST 2012 -
(emaild-id:megaraidlinux@lsi.com)
Adam Radford
diff --git a/MAINTAINERS b/MAINTAINERS
index 6db1c6bdf01..aea0adf414d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7682,6 +7682,12 @@ F: lib/swiotlb.c
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
+SYNOPSYS ARC ARCHITECTURE
+M: Vineet Gupta <vgupta@synopsys.com>
+L: linux-snps-arc@vger.kernel.org
+S: Supported
+F: arch/arc/
+
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>
S: Maintained
diff --git a/arch/arc/Kbuild b/arch/arc/Kbuild
new file mode 100644
index 00000000000..082d329d324
--- /dev/null
+++ b/arch/arc/Kbuild
@@ -0,0 +1,2 @@
+obj-y += kernel/
+obj-y += mm/
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
new file mode 100644
index 00000000000..e6f4eca09ee
--- /dev/null
+++ b/arch/arc/Kconfig
@@ -0,0 +1,453 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+config ARC
+ def_bool y
+ select CLONE_BACKWARDS
+ # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
+ select DEVTMPFS if !INITRAMFS_SOURCE=""
+ select GENERIC_ATOMIC64
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_FIND_FIRST_BIT
+ # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
+ select GENERIC_IRQ_SHOW
+ select GENERIC_KERNEL_EXECVE
+ select GENERIC_KERNEL_THREAD
+ select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_SMP_IDLE_THREAD
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_GENERIC_HARDIRQS
+ select HAVE_IOREMAP_PROT
+ select HAVE_KPROBES
+ select HAVE_KRETPROBES
+ select HAVE_MEMBLOCK
+ select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+ select HAVE_OPROFILE
+ select HAVE_PERF_EVENTS
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA
+ select NO_BOOTMEM
+ select OF
+ select OF_EARLY_FLATTREE
+ select PERF_USE_VMALLOC
+
+config SCHED_OMIT_FRAME_POINTER
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config RWSEM_GENERIC_SPINLOCK
+ def_bool y
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+
+config MMU
+ def_bool y
+
+config NO_IOPORT
+ def_bool y
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config BINFMT_ELF
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+ select STACKTRACE
+
+config HAVE_LATENCYTOP_SUPPORT
+ def_bool y
+
+config NO_DMA
+ def_bool n
+
+source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
+menu "ARC Architecture Configuration"
+
+menu "ARC Platform/SoC/Board"
+
+source "arch/arc/plat-arcfpga/Kconfig"
+#New platform adds here
+
+endmenu
+
+menu "ARC CPU Configuration"
+
+choice
+ prompt "ARC Core"
+ default ARC_CPU_770
+
+config ARC_CPU_750D
+ bool "ARC750D"
+ help
+ Support for ARC750 core
+
+config ARC_CPU_770
+ bool "ARC770"
+ select ARC_CPU_REL_4_10
+ help
+ Support for ARC770 core introduced with Rel 4.10 (Summer 2011)
+ This core has a bunch of cool new features:
+ -MMU-v3: Variable Page Sz (4k, 8k, 16k), bigger J-TLB (128x4)
+ Shared Address Spaces (for sharing TLB entries in MMU)
+ -Caches: New Prog Model, Region Flush
+ -Insns: endian swap, load-locked/store-conditional, time-stamp-ctr
+
+endchoice
+
+config CPU_BIG_ENDIAN
+ bool "Enable Big Endian Mode"
+ default n
+ help
+ Build kernel for Big Endian Mode of ARC CPU
+
+# If a platform can't work with 0x8000_0000 based dma_addr_t
+config ARC_PLAT_NEEDS_CPU_TO_DMA
+ bool
+
+config SMP
+ bool "Symmetric Multi-Processing (Incomplete)"
+ default n
+ select USE_GENERIC_SMP_HELPERS
+ help
+ This enables support for systems with more than one CPU. If you have
+ a system with only one CPU, like most personal computers, say N. If
+ you have a system with more than one CPU, say Y.
+
+if SMP
+
+config ARC_HAS_COH_CACHES
+ def_bool n
+
+config ARC_HAS_COH_LLSC
+ def_bool n
+
+config ARC_HAS_COH_RTSC
+ def_bool n
+
+config ARC_HAS_REENTRANT_IRQ_LV2
+ def_bool n
+
+endif
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "2"
+
+menuconfig ARC_CACHE
+ bool "Enable Cache Support"
+ default y
+ # if SMP, cache enabled ONLY if ARC implementation has cache coherency
+ depends on !SMP || ARC_HAS_COH_CACHES
+
+if ARC_CACHE
+
+config ARC_CACHE_LINE_SHIFT
+ int "Cache Line Length (as power of 2)"
+ range 5 7
+ default "6"
+ help
+ Starting with ARC700 4.9, the cache line length is configurable.
+ This option specifies "N", with Line-len = 2 power N.
+ So line lengths of 32, 64, 128 are specified by 5, 6, 7, respectively.
+ Linux only supports the same line length for I and D caches.
+
+config ARC_HAS_ICACHE
+ bool "Use Instruction Cache"
+ default y
+
+config ARC_HAS_DCACHE
+ bool "Use Data Cache"
+ default y
+
+config ARC_CACHE_PAGES
+ bool "Per Page Cache Control"
+ default y
+ depends on ARC_HAS_ICACHE || ARC_HAS_DCACHE
+ help
+ This can be used to override the global I/D Cache Enable on a
+ per-page basis (but only for pages accessed via the MMU, i.e.
+ Kernel Virtual addresses or User Virtual Addresses).
+ TLB entries have a per-page Cache Enable Bit.
+ Note that Global I/D ENABLE + Per Page DISABLE works, but the
+ corollary, Global DISABLE + Per Page ENABLE, won't work.
+
+endif #ARC_CACHE
+
+config ARC_HAS_ICCM
+ bool "Use ICCM"
+ help
+ Single Cycle RAMS to store Fast Path Code
+ default n
+
+config ARC_ICCM_SZ
+ int "ICCM Size in KB"
+ default "64"
+ depends on ARC_HAS_ICCM
+
+config ARC_HAS_DCCM
+ bool "Use DCCM"
+ help
+ Single Cycle RAMS to store Fast Path Data
+ default n
+
+config ARC_DCCM_SZ
+ int "DCCM Size in KB"
+ default "64"
+ depends on ARC_HAS_DCCM
+
+config ARC_DCCM_BASE
+ hex "DCCM map address"
+ default "0xA0000000"
+ depends on ARC_HAS_DCCM
+
+config ARC_HAS_HW_MPY
+ bool "Use Hardware Multiplier (Normal or Faster XMAC)"
+ default y
+ help
+ Influences how gcc generates code for MPY operations.
+ If enabled, MPYxx insns are generated, provided by the Standard/XMAC
+ Multiplier. Otherwise the software multiply lib is used.
+
+choice
+ prompt "ARC700 MMU Version"
+ default ARC_MMU_V3 if ARC_CPU_770
+ default ARC_MMU_V2 if ARC_CPU_750D
+
+config ARC_MMU_V1
+ bool "MMU v1"
+ help
+ Orig ARC700 MMU
+
+config ARC_MMU_V2
+ bool "MMU v2"
+ help
+ Fixed the deficiency of v1 - possible thrashing in a memcpy scenario
+ when 2 D-TLB and 1 I-TLB entries index into the same 2-way set.
+
+config ARC_MMU_V3
+ bool "MMU v3"
+ depends on ARC_CPU_770
+ help
+ Introduced with ARC700 4.10: New Features
+ Variable Page size (1k-16k), var JTLB size 128 x (2 or 4)
+ Shared Address Spaces (SASID)
+
+endchoice
+
+
+choice
+ prompt "MMU Page Size"
+ default ARC_PAGE_SIZE_8K
+
+config ARC_PAGE_SIZE_8K
+ bool "8KB"
+ help
+ Choose between 8k vs 16k
+
+config ARC_PAGE_SIZE_16K
+ bool "16KB"
+ depends on ARC_MMU_V3
+
+config ARC_PAGE_SIZE_4K
+ bool "4KB"
+ depends on ARC_MMU_V3
+
+endchoice
+
+config ARC_COMPACT_IRQ_LEVELS
+ bool "ARCompact IRQ Priorities: High(2)/Low(1)"
+ default n
+ # Timer HAS to be high priority, for any other high priority config
+ select ARC_IRQ3_LV2
+ # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy
+ depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2
+
+if ARC_COMPACT_IRQ_LEVELS
+
+config ARC_IRQ3_LV2
+ bool
+
+config ARC_IRQ5_LV2
+ bool
+
+config ARC_IRQ6_LV2
+ bool
+
+endif
+
+config ARC_FPU_SAVE_RESTORE
+ bool "Enable FPU state persistence across context switch"
+ default n
+ help
+ The Double Precision Floating Point unit has dedicated regs which
+ need to be saved/restored across context-switch.
+ Note that the ARC FPU is overly simplistic, unlike say x86, which has
+ hardware pieces to allow software to conditionally save/restore,
+ based on actual usage of the FPU by a task. Thus our implementation
+ does this for all tasks in the system.
+
+menuconfig ARC_CPU_REL_4_10
+ bool "Enable support for Rel 4.10 features"
+ default n
+ help
+ -ARC770 (and dependent features) enabled
+ -ARC750 also shares some of the new features with 770
+
+config ARC_HAS_LLSC
+ bool "Insn: LLOCK/SCOND (efficient atomic ops)"
+ default y
+ depends on ARC_CPU_770
+ # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics
+ depends on !SMP || ARC_HAS_COH_LLSC
+
+config ARC_HAS_SWAPE
+ bool "Insn: SWAPE (endian-swap)"
+ default y
+ depends on ARC_CPU_REL_4_10
+
+config ARC_HAS_RTSC
+ bool "Insn: RTSC (64-bit r/o cycle counter)"
+ default y
+ depends on ARC_CPU_REL_4_10
+ # if SMP, enable RTSC only if counter is coherent across cores
+ depends on !SMP || ARC_HAS_COH_RTSC
+
+endmenu # "ARC CPU Configuration"
+
+config LINUX_LINK_BASE
+ hex "Linux Link Address"
+ default "0x80000000"
+ help
+ ARC700 divides the 32 bit phy address space into two equal halves
+ -Lower 2G (0 - 0x7FFF_FFFF ) is user virtual, translated by MMU
+ -Upper 2G (0x8000_0000 onwards) is untranslated, for kernel
+ Typically the Linux kernel is linked at the start of the untranslated
+ region, hence the default value of 0x8000_0000.
+ However some customers have peripherals mapped at this addr, so
+ Linux needs to be scooted a bit.
+ If you don't know what the above means, leave this setting alone.
+
+config ARC_CURR_IN_REG
+ bool "Dedicate Register r25 for current_task pointer"
+ default y
+ help
+ This reserves register R25 to point to the Current Task in
+ kernel mode. This saves a memory access for each such access.
+
+
+config ARC_MISALIGN_ACCESS
+ bool "Emulate unaligned memory access (userspace only)"
+ default n
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
+ select SYSCTL_ARCH_UNALIGN_ALLOW
+ help
+ This enables misaligned 16 & 32 bit memory access from user space.
+ Use ONLY-IF-ABS-NECESSARY as it will be very slow and also can hide
+ potential bugs in code
+
+config ARC_STACK_NONEXEC
+ bool "Make stack non-executable"
+ default n
+ help
+ Disable execute permissions on the stack/heap of processes;
+ these permissions are enabled by default.
+
+config HZ
+ int "Timer Frequency"
+ default 100
+
+config ARC_METAWARE_HLINK
+ bool "Support for Metaware debugger assisted Host access"
+ default n
+ help
+ This option allows Linux userland apps to directly access the
+ host file system (open/creat/read/write etc) with help from the
+ Metaware Debugger. This can come in handy for Linux-host communication
+ when there is no real usable peripheral such as EMAC.
+
+menuconfig ARC_DBG
+ bool "ARC debugging"
+ default y
+
+config ARC_DW2_UNWIND
+ bool "Enable DWARF specific kernel stack unwind"
+ depends on ARC_DBG
+ default y
+ select KALLSYMS
+ help
+ Compiles the kernel with DWARF unwind information and can be used
+ to get stack backtraces.
+
+ If you say Y here the resulting kernel image will be slightly larger
+ but not slower, and it will give very useful debugging information.
+ If you don't debug the kernel, you can say N, but we may not be able
+ to solve problems without frame unwind information
+
+config ARC_DBG_TLB_PARANOIA
+ bool "Paranoia Checks in Low Level TLB Handlers"
+ depends on ARC_DBG
+ default n
+
+config ARC_DBG_TLB_MISS_COUNT
+ bool "Profile TLB Misses"
+ default n
+ select DEBUG_FS
+ depends on ARC_DBG
+ help
+ Counts the number of I and D TLB Misses and exports them via Debugfs.
+ The counters can be cleared via Debugfs as well.
+
+config CMDLINE
+ string "Kernel command line to built-in"
+ default "print-fatal-signals=1"
+ help
+ The default command line which will be appended to the optional
+ u-boot provided command line (see below)
+
+config CMDLINE_UBOOT
+ bool "Support U-boot kernel command line passing"
+ default n
+ help
+ If you are using U-boot (www.denx.de) and wish to pass the kernel
+ command line from the U-boot environment to the Linux kernel then
+ switch this option on.
+ ARC U-boot will setup the cmdline in RAM/flash and set r2 to point
+ to it. kernel startup code will copy the string into cmdline buffer
+ and also append CONFIG_CMDLINE.
+
+config ARC_BUILTIN_DTB_NAME
+ string "Built in DTB"
+ help
+ Set the name of the DTB to embed in the vmlinux binary
+ Leaving it blank selects the minimal "skeleton" dtb
+
+source "kernel/Kconfig.preempt"
+
+endmenu # "ARC Architecture Configuration"
+
+source "mm/Kconfig"
+source "net/Kconfig"
+source "drivers/Kconfig"
+source "fs/Kconfig"
+source "arch/arc/Kconfig.debug"
+source "security/Kconfig"
+source "crypto/Kconfig"
+source "lib/Kconfig"
diff --git a/arch/arc/Kconfig.debug b/arch/arc/Kconfig.debug
new file mode 100644
index 00000000000..962c6099659
--- /dev/null
+++ b/arch/arc/Kconfig.debug
@@ -0,0 +1,34 @@
+menu "Kernel hacking"
+
+source "lib/Kconfig.debug"
+
+config EARLY_PRINTK
+ bool "Early printk" if EMBEDDED
+ default y
+ help
+ Write kernel log output directly into the VGA buffer or to a serial
+ port.
+
+ This is useful for kernel debugging when your machine crashes very
+ early before the console code is initialized. For normal operation
+ it is not recommended because it looks ugly and doesn't cooperate
+ with klogd/syslogd or the X server. You should normally say N here,
+ unless you want to debug such a crash.
+
+config DEBUG_STACKOVERFLOW
+ bool "Check for stack overflows"
+ depends on DEBUG_KERNEL
+ help
+ This option will cause messages to be printed if free stack space
+ drops below a certain limit.
+
+config 16KSTACKS
+ bool "Use 16Kb for kernel stacks instead of 8Kb"
+ help
+ If you say Y here the kernel will use a 16Kb stacksize for the
+ kernel stack attached to each process/thread. The default is 8K.
+ This increases the resident kernel footprint and will cause fewer
+ threads to run on the system and also increases the pressure
+ on the VM subsystem for higher order allocations.
+
+endmenu
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
new file mode 100644
index 00000000000..92379c7cbc1
--- /dev/null
+++ b/arch/arc/Makefile
@@ -0,0 +1,126 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+UTS_MACHINE := arc
+
+KBUILD_DEFCONFIG := fpga_defconfig
+
+cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__
+
+LINUXINCLUDE += -include ${src}/arch/arc/include/asm/defines.h
+
+ifdef CONFIG_ARC_CURR_IN_REG
+# For a global register definition, make sure it gets passed to every file
+# We had a customer reported bug where some code built in kernel was NOT using
+# any kernel headers, and missing the r25 global register
+# Can't do unconditionally (like above) because of recursive include issues
+# due to <linux/thread_info.h>
+LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
+endif
+
+atleast_gcc44 := $(call cc-ifversion, -gt, 0402, y)
+cflags-$(atleast_gcc44) += -fsection-anchors
+
+cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
+cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
+cflags-$(CONFIG_ARC_HAS_RTSC) += -mrtsc
+cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
+
+ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+# Generic build system uses -O2, we want -O3
+cflags-y += -O3
+endif
+
+# small data is default for elf32 tool-chain. If not usable, disable it
+# This also allows repurposing GP as scratch reg to gcc reg allocator
+disable_small_data := y
+cflags-$(disable_small_data) += -mno-sdata -fcall-used-gp
+
+cflags-$(CONFIG_CPU_BIG_ENDIAN) += -mbig-endian
+ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
+
+# STAR 9000518362:
+# arc-linux-uclibc-ld (buildroot) or arceb-elf32-ld (EZChip) don't accept
+# --build-id w/o "-marclinux".
+# Default arc-elf32-ld is OK
+ldflags-y += -marclinux
+
+ARC_LIBGCC := -mA7
+cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16
+
+ifndef CONFIG_ARC_HAS_HW_MPY
+ cflags-y += -mno-mpy
+
+# newlib for ARC700 assumes MPY to be always present, which is generally true
+# However, if someone really doesn't want MPY, we need to use the 600 ver
+# which coupled with -mno-mpy will use mpy emulation
+# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
+# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
+
+ ARC_LIBGCC := -marc600
+ ifneq ($(atleast_gcc44),y)
+ cflags-y += -multcost=30
+ endif
+endif
+
+LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name)
+
+# Modules with short calls might break for calls into builtin-kernel
+KBUILD_CFLAGS_MODULE += -mlong-calls
+
+# Finally dump everything into kernel build system
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_AFLAGS += $(KBUILD_CFLAGS)
+LDFLAGS += $(ldflags-y)
+
+head-y := arch/arc/kernel/head.o
+
+# See arch/arc/Kbuild for content of core part of the kernel
+core-y += arch/arc/
+
+# w/o this dtb won't embed into kernel binary
+core-y += arch/arc/boot/dts/
+
+core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/
+
+drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/
+
+libs-y += arch/arc/lib/ $(LIBGCC)
+
+# Default target for make without any arguments.
+KBUILD_IMAGE := bootpImage
+
+all: $(KBUILD_IMAGE)
+boot := arch/arc/boot
+
+bootpImage: vmlinux
+
+uImage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+%.dtb %.dtb.S %.dtb.o: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@
+
+dtbs: scripts
+ $(Q)$(MAKE) $(build)=$(boot)/dts dtbs
+
+archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+
+# Hacks to enable final link due to absence of link-time branch relaxation
+# and gcc choosing optimal(shorter) branches at -O3
+#
+# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+# However lib/decompress_inflate.o (.init.text) calls
+# zlib_inflate_workspacesize (.text) causing relocation errors.
+# Thus forcing all external calls in this file to be long calls
+export CFLAGS_decompress_inflate.o = -mmedium-calls
+export CFLAGS_initramfs.o = -mmedium-calls
+ifdef CONFIG_SMP
+export CFLAGS_core.o = -mmedium-calls
+endif
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
new file mode 100644
index 00000000000..7d514c24e09
--- /dev/null
+++ b/arch/arc/boot/Makefile
@@ -0,0 +1,26 @@
+targets := vmlinux.bin vmlinux.bin.gz uImage
+
+# uImage build relies on mkimage being available on your host for ARC target
+# You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
+# and make sure it's reachable from your PATH
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+
+OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+LINUX_START_TEXT = $$(readelf -h vmlinux | \
+ grep "Entry point address" | grep -o 0x.*)
+
+UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
+UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
+UIMAGE_COMPRESSION = gzip
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage)
+
+PHONY += FORCE
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
new file mode 100644
index 00000000000..5776835d583
--- /dev/null
+++ b/arch/arc/boot/dts/Makefile
@@ -0,0 +1,13 @@
+# Built-in dtb
+builtindtb-y := angel4
+
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
+ builtindtb-y := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+endif
+
+obj-y += $(builtindtb-y).dtb.o
+targets += $(builtindtb-y).dtb
+
+dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb)
+
+clean-files := *.dtb
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
new file mode 100644
index 00000000000..bae4f936cb0
--- /dev/null
+++ b/arch/arc/boot/dts/angel4.dts
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
+
+/ {
+ compatible = "snps,arc-angel4";
+ clock-frequency = <80000000>; /* 80 MHZ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ interrupt-parent = <&intc>;
+
+ chosen {
+ bootargs = "console=ttyARC0,115200n8";
+ };
+
+ aliases {
+ serial0 = &arcuart0;
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256M */
+ };
+
+ fpga {
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ /* child and parent address space 1:1 mapped */
+ ranges;
+
+ intc: interrupt-controller {
+ compatible = "snps,arc700-intc";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ arcuart0: serial@c0fc1000 {
+ compatible = "snps,arc-uart";
+ reg = <0xc0fc1000 0x100>;
+ interrupts = <5>;
+ clock-frequency = <80000000>;
+ current-speed = <115200>;
+ status = "okay";
+ };
+ };
+};
diff --git a/arch/arc/boot/dts/skeleton.dts b/arch/arc/boot/dts/skeleton.dts
new file mode 100644
index 00000000000..25a84fb5b3d
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dts
@@ -0,0 +1,10 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+
+/include/ "skeleton.dtsi"
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi
new file mode 100644
index 00000000000..a870bdd5e40
--- /dev/null
+++ b/arch/arc/boot/dts/skeleton.dtsi
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Skeleton device tree; the bare minimum needed to boot; just include and
+ * add a compatible value.
+ */
+
+/ {
+ compatible = "snps,arc";
+ clock-frequency = <80000000>; /* 80 MHZ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ chosen { };
+ aliases { };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "snps,arc770d";
+ reg = <0>;
+ };
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x00000000 0x10000000>; /* 256M */
+ };
+};
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
new file mode 100644
index 00000000000..b8698067ebb
--- /dev/null
+++ b/arch/arc/configs/fpga_defconfig
@@ -0,0 +1,61 @@
+CONFIG_CROSS_COMPILE="arc-elf32-"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_DEFAULT_HOSTNAME="ARCLinux"
+# CONFIG_SWAP is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE="../arc_initramfs"
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+# CONFIG_LBDAF is not set
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARC_PLAT_FPGA_LEGACY=y
+CONFIG_ARC_BOARD_ML509=y
+# CONFIG_ARC_HAS_RTSC is not set
+CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_UNIX_DIAG=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+# CONFIG_IPV6 is not set
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_ARC=y
+CONFIG_SERIAL_ARC_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
new file mode 100644
index 00000000000..48af742f8b5
--- /dev/null
+++ b/arch/arc/include/asm/Kbuild
@@ -0,0 +1,49 @@
+generic-y += auxvec.h
+generic-y += bugs.h
+generic-y += bitsperlong.h
+generic-y += clkdev.h
+generic-y += cputime.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += ioctl.h
+generic-y += ioctls.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kmap_types.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += posix_types.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += shmbuf.h
+generic-y += shmparam.h
+generic-y += siginfo.h
+generic-y += socket.h
+generic-y += sockios.h
+generic-y += stat.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += types.h
+generic-y += ucontext.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += xor.h
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
new file mode 100644
index 00000000000..1b907c46566
--- /dev/null
+++ b/arch/arc/include/asm/arcregs.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ARCREGS_H
+#define _ASM_ARC_ARCREGS_H
+
+#ifdef __KERNEL__
+
+/* Build Configuration Registers */
+#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_CRC_BCR 0x62
+#define ARC_REG_DVFB_BCR 0x64
+#define ARC_REG_EXTARITH_BCR 0x65
+#define ARC_REG_VECBASE_BCR 0x68
+#define ARC_REG_PERIBASE_BCR 0x69
+#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */
+#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */
+#define ARC_REG_MMU_BCR 0x6f
+#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_TIMERS_BCR 0x75
+#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_XY_MEM_BCR 0x79
+#define ARC_REG_MAC_BCR 0x7a
+#define ARC_REG_MUL_BCR 0x7b
+#define ARC_REG_SWAP_BCR 0x7c
+#define ARC_REG_NORM_BCR 0x7d
+#define ARC_REG_MIXMAX_BCR 0x7e
+#define ARC_REG_BARREL_BCR 0x7f
+#define ARC_REG_D_UNCACH_BCR 0x6A
+
+/* status32 Bits Positions */
+#define STATUS_H_BIT 0 /* CPU Halted */
+#define STATUS_E1_BIT 1 /* Int 1 enable */
+#define STATUS_E2_BIT 2 /* Int 2 enable */
+#define STATUS_A1_BIT 3 /* Int 1 active */
+#define STATUS_A2_BIT 4 /* Int 2 active */
+#define STATUS_AE_BIT 5 /* Exception active */
+#define STATUS_DE_BIT 6 /* PC is in delay slot */
+#define STATUS_U_BIT 7 /* User/Kernel mode */
+#define STATUS_L_BIT 12 /* Loop inhibit */
+
+/* These masks correspond to the status word(STATUS_32) bits */
+#define STATUS_H_MASK (1<<STATUS_H_BIT)
+#define STATUS_E1_MASK (1<<STATUS_E1_BIT)
+#define STATUS_E2_MASK (1<<STATUS_E2_BIT)
+#define STATUS_A1_MASK (1<<STATUS_A1_BIT)
+#define STATUS_A2_MASK (1<<STATUS_A2_BIT)
+#define STATUS_AE_MASK (1<<STATUS_AE_BIT)
+#define STATUS_DE_MASK (1<<STATUS_DE_BIT)
+#define STATUS_U_MASK (1<<STATUS_U_BIT)
+#define STATUS_L_MASK (1<<STATUS_L_BIT)
+
+/*
+ * ECR: Exception Cause Reg bits-n-pieces
+ * [23:16] = Exception Vector
+ * [15: 8] = Exception Cause Code
+ * [ 7: 0] = Exception Parameters (for certain types only)
+ */
+#define ECR_VEC_MASK 0xff0000
+#define ECR_CODE_MASK 0x00ff00
+#define ECR_PARAM_MASK 0x0000ff
+
+/* Exception Cause Vector Values */
+#define ECR_V_INSN_ERR 0x02
+#define ECR_V_MACH_CHK 0x20
+#define ECR_V_ITLB_MISS 0x21
+#define ECR_V_DTLB_MISS 0x22
+#define ECR_V_PROTV 0x23
+
+/* Protection Violation Exception Cause Code Values */
+#define ECR_C_PROTV_INST_FETCH 0x00
+#define ECR_C_PROTV_LOAD 0x01
+#define ECR_C_PROTV_STORE 0x02
+#define ECR_C_PROTV_XCHG 0x03
+#define ECR_C_PROTV_MISALIG_DATA 0x04
+
+/* DTLB Miss Exception Cause Code Values */
+#define ECR_C_BIT_DTLB_LD_MISS 8
+#define ECR_C_BIT_DTLB_ST_MISS 9
+
+
+/* Auxiliary registers */
+#define AUX_IDENTITY 4
+#define AUX_INTR_VEC_BASE 0x25
+#define AUX_IRQ_LEV 0x200 /* IRQ Priority: L1 or L2 */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
+#define AUX_IRQ_LV12 0x43 /* interrupt level register */
+
+#define AUX_IENABLE 0x40c
+#define AUX_ITRIGGER 0x40d
+#define AUX_IPULSE 0x415
+
+/* Timer related Aux registers */
+#define ARC_REG_TIMER0_LIMIT 0x23 /* timer 0 limit */
+#define ARC_REG_TIMER0_CTRL 0x22 /* timer 0 control */
+#define ARC_REG_TIMER0_CNT 0x21 /* timer 0 count */
+#define ARC_REG_TIMER1_LIMIT 0x102 /* timer 1 limit */
+#define ARC_REG_TIMER1_CTRL 0x101 /* timer 1 control */
+#define ARC_REG_TIMER1_CNT 0x100 /* timer 1 count */
+
+#define TIMER_CTRL_IE (1 << 0) /* Interrupt when count reaches limit */
+#define TIMER_CTRL_NH (1 << 1) /* Count only when CPU NOT halted */
+
+/* MMU Management regs */
+#define ARC_REG_TLBPD0 0x405
+#define ARC_REG_TLBPD1 0x406
+#define ARC_REG_TLBINDEX 0x407
+#define ARC_REG_TLBCOMMAND 0x408
+#define ARC_REG_PID 0x409
+#define ARC_REG_SCRATCH_DATA0 0x418
+
+/* Bits in MMU PID register */
+#define MMU_ENABLE (1 << 31) /* Enable MMU for process */
+
+/* Error code if probe fails */
+#define TLB_LKUP_ERR 0x80000000
+
+/* TLB Commands */
+#define TLBWrite 0x1
+#define TLBRead 0x2
+#define TLBGetIndex 0x3
+#define TLBProbe 0x4
+
+#if (CONFIG_ARC_MMU_VER >= 2)
+#define TLBWriteNI 0x5 /* write JTLB without inv uTLBs */
+#define TLBIVUTLB 0x6 /* explicitly inv uTLBs */
+#else
+#undef TLBWriteNI /* These cmds don't exist on older MMU */
+#undef TLBIVUTLB
+#endif
+
+/* Instruction cache related Auxiliary registers */
+#define ARC_REG_IC_BCR 0x77 /* Build Config reg */
+#define ARC_REG_IC_IVIC 0x10
+#define ARC_REG_IC_CTRL 0x11
+#define ARC_REG_IC_IVIL 0x19
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_IC_PTAG 0x1E
+#endif
+
+/* Bit val in IC_CTRL */
+#define IC_CTRL_CACHE_DISABLE 0x1
+
+/* Data cache related Auxiliary registers */
+#define ARC_REG_DC_BCR 0x72
+#define ARC_REG_DC_IVDC 0x47
+#define ARC_REG_DC_CTRL 0x48
+#define ARC_REG_DC_IVDL 0x4A
+#define ARC_REG_DC_FLSH 0x4B
+#define ARC_REG_DC_FLDL 0x4C
+#if (CONFIG_ARC_MMU_VER > 2)
+#define ARC_REG_DC_PTAG 0x5C
+#endif
+
+/* Bit val in DC_CTRL */
+#define DC_CTRL_INV_MODE_FLUSH 0x40
+#define DC_CTRL_FLUSH_STATUS 0x100
+
+/*
+ * Floating Pt Registers
+ * Status regs are read-only (build-time) so need not be saved/restored
+ */
+#define ARC_AUX_FP_STAT 0x300
+#define ARC_AUX_DPFP_1L 0x301
+#define ARC_AUX_DPFP_1H 0x302
+#define ARC_AUX_DPFP_2L 0x303
+#define ARC_AUX_DPFP_2H 0x304
+#define ARC_AUX_DPFP_STAT 0x305
+
+#ifndef __ASSEMBLY__
+
+/*
+ ******************************************************************
+ * Inline ASM macros to read/write AUX Regs
+ * Essentially invocation of lr/sr insns from "C"
+ */
+
+#if 1
+
+#define read_aux_reg(reg) __builtin_arc_lr(reg)
+
+/* gcc builtin sr needs reg param to be long immediate */
+#define write_aux_reg(reg_immed, val) \
+ __builtin_arc_sr((unsigned int)val, reg_immed)
+
+#else
+
+#define read_aux_reg(reg) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " lr %0, [%1]" \
+ : "=r"(__ret) \
+ : "i"(reg)); \
+ __ret; \
+})
+
+/*
+ * Aux Reg address is specified as long immediate by caller
+ * e.g.
+ * write_aux_reg(0x69, some_val);
+ * This generates the tightest code.
+ */
+#define write_aux_reg(reg_imm, val) \
+({ \
+ __asm__ __volatile__( \
+ " sr %0, [%1] \n" \
+ : \
+ : "ir"(val), "i"(reg_imm)); \
+})
+
+/*
+ * Aux Reg address is specified in a variable
+ * e.g.
+ * reg_num = 0x69
+ * write_aux_reg2(reg_num, some_val);
+ * This has to generate glue code to load the reg num from
+ * memory to a reg, hence it is not recommended.
+ */
+#define write_aux_reg2(reg_in_var, val) \
+({ \
+ unsigned int tmp; \
+ \
+ __asm__ __volatile__( \
+ " ld %0, [%2] \n\t" \
+ " sr %1, [%0] \n\t" \
+ : "=&r"(tmp) \
+ : "r"(val), "memory"(&reg_in_var)); \
+})
+
+#endif
+
+#define READ_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ tmp = read_aux_reg(reg); \
+ if (sizeof(tmp) == sizeof(into)) { \
+ into = *((typeof(into) *)&tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+#define WRITE_BCR(reg, into) \
+{ \
+ unsigned int tmp; \
+ if (sizeof(tmp) == sizeof(into)) { \
+ tmp = (*(unsigned int *)(into)); \
+ write_aux_reg(reg, tmp); \
+ } else { \
+ extern void bogus_undefined(void); \
+ bogus_undefined(); \
+ } \
+}
+
+/* Helpers */
+#define TO_KB(bytes) ((bytes) >> 10)
+#define TO_MB(bytes) (TO_KB(bytes) >> 10)
+#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
+#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+/* These DPFP regs need to be saved/restored across ctx-sw */
+struct arc_fpu {
+ struct {
+ unsigned int l, h;
+ } aux_dpfp[2];
+};
+#endif
+
+/*
+ ***************************************************************
+ * Build Configuration Registers, with encoded hardware config
+ */
+struct bcr_identity {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int chip_id:16, cpu_id:8, family:8;
+#else
+ unsigned int family:8, cpu_id:8, chip_id:16;
+#endif
+};
+
+struct bcr_mmu_1_2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, u_itlb:8, u_dtlb:8;
+#else
+ unsigned int u_dtlb:8, u_itlb:8, sets:4, ways:4, ver:8;
+#endif
+};
+
+struct bcr_mmu_3 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ver:8, ways:4, sets:4, osm:1, reserv:3, pg_sz:4,
+ u_itlb:4, u_dtlb:4;
+#else
+ unsigned int u_dtlb:4, u_itlb:4, pg_sz:4, reserv:3, osm:1, sets:4,
+ ways:4, ver:8;
+#endif
+};
+
+#define EXTN_SWAP_VALID 0x1
+#define EXTN_NORM_VALID 0x2
+#define EXTN_MINMAX_VALID 0x2
+#define EXTN_BARREL_VALID 0x2
+
+struct bcr_extn {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2,
+ norm:2, swap:1;
+#else
+ unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2,
+ crc:1, pad:20;
+#endif
+};
+
+/* DSP Options Ref Manual */
+struct bcr_extn_mac_mul {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, type:8, ver:8;
+#else
+ unsigned int ver:8, type:8, pad:16;
+#endif
+};
+
+struct bcr_extn_xymem {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
+#else
+ unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
+#endif
+};
+
+struct bcr_cache {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
+#else
+ unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
+#endif
+};
+
+struct bcr_perip {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int start:8, pad2:8, sz:8, pad:8;
+#else
+ unsigned int pad:8, sz:8, pad2:8, start:8;
+#endif
+};
+struct bcr_iccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int base:16, pad:5, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, pad:5, base:16;
+#endif
+};
+
+/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
+struct bcr_dccm_base {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int addr:24, ver:8;
+#else
+ unsigned int ver:8, addr:24;
+#endif
+};
+
+/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
+struct bcr_dccm {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int res:21, sz:3, ver:8;
+#else
+ unsigned int ver:8, sz:3, res:21;
+#endif
+};
+
+/* Both SP and DP FPU BCRs have same format */
+struct bcr_fp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int fast:1, ver:8;
+#else
+ unsigned int ver:8, fast:1;
+#endif
+};
+
+/*
+ *******************************************************************
+ * Generic structures to hold build configuration used at runtime
+ */
+
+struct cpuinfo_arc_mmu {
+ unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+};
+
+struct cpuinfo_arc_cache {
+ unsigned int has_aliasing, sz, line_len, assoc, ver;
+};
+
+struct cpuinfo_arc_ccm {
+ unsigned int base_addr, sz;
+};
+
+struct cpuinfo_arc {
+ struct cpuinfo_arc_cache icache, dcache;
+ struct cpuinfo_arc_mmu mmu;
+ struct bcr_identity core;
+ unsigned int timers;
+ unsigned int vec_base;
+ unsigned int uncached_base;
+ struct cpuinfo_arc_ccm iccm, dccm;
+ struct bcr_extn extn;
+ struct bcr_extn_xymem extn_xymem;
+ struct bcr_extn_mac_mul extn_mac_mul;
+ struct bcr_fp fp, dpfp;
+};
+
+extern struct cpuinfo_arc cpuinfo_arc700[];
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_ARCREGS_H */
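The BCR bitfield structs and the READ_BCR() helper above are meant to be used together at boot to probe the hardware configuration. Below is a minimal sketch of that usage, for illustration only; the function name is invented and the usual kernel printk headers are assumed, it is not part of the patch.

/* Illustrative sketch only: decode the MMU build-config register into the
 * bitfield struct defined above (assumes a v3 MMU and a little-endian build).
 */
static void describe_mmu_bcr(void)
{
	struct bcr_mmu_3 mmu;

	READ_BCR(ARC_REG_MMU_BCR, mmu);	/* lr from aux reg 0x6f, type-punned into the struct */
	pr_info("MMU ver %d, uITLB %d, uDTLB %d\n", mmu.ver, mmu.u_itlb, mmu.u_dtlb);
}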
diff --git a/arch/arc/include/asm/asm-offsets.h b/arch/arc/include/asm/asm-offsets.h
new file mode 100644
index 00000000000..dad18768fe4
--- /dev/null
+++ b/arch/arc/include/asm/asm-offsets.h
@@ -0,0 +1,9 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <generated/asm-offsets.h>
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
new file mode 100644
index 00000000000..83f03ca6caf
--- /dev/null
+++ b/arch/arc/include/asm/atomic.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_ATOMIC_H
+#define _ASM_ARC_ATOMIC_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+#include <asm/smp.h>
+
+#define atomic_read(v) ((v)->counter)
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp) /* Early clobber, to prevent reg reuse */
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+}
+
+/* add and also return the new value */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " add %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " sub %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(&v->counter), "ir"(i)
+ : "cc");
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned int temp;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bic %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(addr), "ir"(mask)
+ : "cc");
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#ifndef CONFIG_SMP
+
+ /* violating atomic_xxx API locking protocol in UP for optimization sake */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+#else
+
+static inline void atomic_set(atomic_t *v, int i)
+{
+ /*
+ * Independent of hardware support, all of the atomic_xxx() APIs need
+ * to follow the same locking rules to make sure that a "hardware"
+ * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
+ * sequence
+ *
+ * Thus atomic_set() despite being 1 insn (and seemingly atomic)
+ * requires the locking.
+ */
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter = i;
+ atomic_ops_unlock(flags);
+}
+#endif
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ */
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter += i;
+ atomic_ops_unlock(flags);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ v->counter -= i;
+ atomic_ops_unlock(flags);
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+ unsigned long flags;
+ unsigned long temp;
+
+ atomic_ops_lock(flags);
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ atomic_ops_unlock(flags);
+
+ return temp;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+ unsigned long flags;
+
+ atomic_ops_lock(flags);
+ *addr &= ~mask;
+ atomic_ops_unlock(flags);
+}
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+/**
+ * __atomic_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns the old value of @v
+ */
+#define __atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
+ c = old; \
+ c; \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i, v) (atomic_add_return(i, v) < 0)
+
+#define ATOMIC_INIT(i) { (i) }
+
+#include <asm-generic/atomic64.h>
+
+#endif
+
+#endif
+
+#endif
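Whether the LLSC loops or the lock-based fallbacks above get compiled in, callers see the same API. A hedged sketch of a typical caller follows (variable and function names invented, not part of the patch).

/* Illustrative sketch only: a simple reference count built on the API above. */
static atomic_t users = ATOMIC_INIT(0);

static int grab_ref(void)
{
	return atomic_add_return(1, &users);	/* LLSC retry loop or lock, per config */
}

static int drop_ref(void)
{
	return atomic_dec_and_test(&users);	/* non-zero when the count hits zero */
}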
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 00000000000..f6cb7c4ffb3
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+/* TODO-vineetg: Need to see what this does, don't we need sync anywhere */
+#define mb() __asm__ __volatile__ ("" : : : "memory")
+#define rmb() mb()
+#define wmb() mb()
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+#define read_barrier_depends() mb()
+
+/* TODO-vineetg verify the correctness of macros here */
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
+
+#define smp_read_barrier_depends() do { } while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
new file mode 100644
index 00000000000..647a83a8e75
--- /dev/null
+++ b/arch/arc/include/asm/bitops.h
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_BITOPS_H
+#define _ASM_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
+ * The Kconfig glue ensures that in SMP, this is only set if the container
+ * SoC/platform has cross-core coherent LLOCK/SCOND
+ */
+#if defined(CONFIG_ARC_HAS_LLSC)
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bset %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bclr %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " bxor %0, %0, %2 \n"
+ " scond %0, [%1] \n"
+ " bnz 1b \n"
+ : "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+}
+
+/*
+ * Semantically:
+ * Test the bit
+ * if clear
+ * set it and return 0 (old value)
+ * else
+ * return 1 (old value).
+ *
+ * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
+ * and the old value of the bit is returned
+ */
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bset %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bclr %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned int old, temp;
+
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%2] \n"
+ " bxor %1, %0, %3 \n"
+ " scond %1, [%2] \n"
+ " bnz 1b \n"
+ : "=&r"(old), "=&r"(temp)
+ : "r"(m), "ir"(nr)
+ : "cc");
+
+ return (old & (1 << nr)) != 0;
+}
+
+#else /* !CONFIG_ARC_HAS_LLSC */
+
+#include <asm/smp.h>
+
+/*
+ * Non hardware assisted Atomic-R-M-W
+ * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
+ *
+ * There's "significant" micro-optimization in writing our own variants of
+ * bitops (over generic variants)
+ *
+ * (1) The generic APIs have "signed" @nr while we have it "unsigned"
+ * This avoids extra code being generated for pointer arithmetic, since the
+ * compiler is "not sure" that the index is NOT -ve
+ * (2) Utilize the fact that ARCompact bit fiddling insns (BSET/BCLR/ASL) etc
+ * only consider bottom 5 bits of @nr, so NO need to mask them off.
+ * (GCC Quirk: however for constant @nr we still need to do the masking
+ * at compile time)
+ */
+
+static inline void set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline void change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+
+ bitops_unlock(flags);
+}
+
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old, flags;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ bitops_lock(flags);
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ bitops_unlock(flags);
+
+ return (old & (1 << nr)) != 0;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+/***************************************
+ * Non atomic variants
+ **************************************/
+
+static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp | (1UL << nr);
+}
+
+static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp & ~(1UL << nr);
+}
+
+static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long temp;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ temp = *m;
+ *m = temp ^ (1UL << nr);
+}
+
+static inline int
+__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old | (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old & ~(1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+static inline int
+__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
+{
+ unsigned long old;
+ m += nr >> 5;
+
+ if (__builtin_constant_p(nr))
+ nr &= 0x1f;
+
+ old = *m;
+ *m = old ^ (1 << nr);
+
+ return (old & (1 << nr)) != 0;
+}
+
+/*
+ * This routine doesn't need to be atomic.
+ */
+static inline int
+__constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ return ((1UL << (nr & 31)) &
+ (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
+}
+
+static inline int
+__test_bit(unsigned int nr, const volatile unsigned long *addr)
+{
+ unsigned long mask;
+
+ addr += nr >> 5;
+
+ /* ARC700 only considers 5 bits in bit-fiddling insn */
+ mask = 1 << nr;
+
+ return ((mask & *addr) != 0);
+}
+
+#define test_bit(nr, addr) (__builtin_constant_p(nr) ? \
+ __constant_test_bit((nr), (addr)) : \
+ __test_bit((nr), (addr)))
+
+/*
+ * Count the number of zeros, starting from MSB
+ * Helper for fls( ) friends
+ * This is a pure count, so (1-32) or (0-31) doesn't apply
+ * It could be 0 to 32, based on num of 0's in there
+ * clz(0x8000_0000) = 0, clz(0xFFFF_FFFF)=0, clz(0) = 32, clz(1) = 31
+ */
+static inline __attribute__ ((const)) int clz(unsigned int x)
+{
+ unsigned int res;
+
+ __asm__ __volatile__(
+ " norm.f %0, %1 \n"
+ " mov.n %0, 0 \n"
+ " add.p %0, %0, 1 \n"
+ : "=r"(res)
+ : "r"(x)
+ : "cc");
+
+ return res;
+}
+
+static inline int constant_fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+ if (__builtin_constant_p(x))
+ return constant_fls(x);
+
+ return 32 - clz(x);
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+ if (!x)
+ return 0;
+ else
+ return fls(x) - 1;
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+#define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long word)
+{
+ if (!word)
+ return word;
+
+ return ffs(word) - 1;
+}
+
+/*
+ * ffz = Find First Zero in word.
+ * @return:[0-31], 32 if all 1's
+ */
+#define ffz(x) __ffs(~(x))
+
+/* TODO does this affect uni-processor code */
+#define smp_mb__before_clear_bit() barrier()
+#define smp_mb__after_clear_bit() barrier()
+
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic-setbit.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif
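To make the semantics above concrete: fls(0x80000000) is 32, ffs(0x8) is 4, and test_and_set_bit() both claims and reports a bit in one atomic step. A small hedged sketch follows (the bitmap and function names are invented, not part of the patch).

/* Illustrative sketch only: claim a free slot in a 64-bit bitmap atomically. */
static unsigned long slot_map[2];

static int claim_slot(void)
{
	unsigned long bit;

	for (bit = 0; bit < 64; bit++)
		if (!test_and_set_bit(bit, slot_map))	/* old value 0 => we won this bit */
			return bit;
	return -1;	/* all slots busy */
}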
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
new file mode 100644
index 00000000000..2ad8f9b1c54
--- /dev/null
+++ b/arch/arc/include/asm/bug.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_BUG_H
+#define _ASM_ARC_BUG_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/ptrace.h>
+
+struct task_struct;
+
+void show_regs(struct pt_regs *regs);
+void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs);
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg);
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg);
+
+#define BUG() do { \
+ dump_stack(); \
+ pr_warn("Kernel BUG in %s: %s: %d!\n", \
+ __FILE__, __func__, __LINE__); \
+} while (0)
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
new file mode 100644
index 00000000000..6632273861f
--- /dev/null
+++ b/arch/arc/include/asm/cache.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHE_H
+#define __ARC_ASM_CACHE_H
+
+/* In case the cache line size is not configured, set up a dummy value for the rest of the kernel */
+#ifndef CONFIG_ARC_CACHE_LINE_SHIFT
+#define L1_CACHE_SHIFT 6
+#else
+#define L1_CACHE_SHIFT CONFIG_ARC_CACHE_LINE_SHIFT
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define ARC_ICACHE_WAYS 2
+#define ARC_DCACHE_WAYS 4
+
+/* Helpers */
+#define ARC_ICACHE_LINE_LEN L1_CACHE_BYTES
+#define ARC_DCACHE_LINE_LEN L1_CACHE_BYTES
+
+#define ICACHE_LINE_MASK (~(ARC_ICACHE_LINE_LEN - 1))
+#define DCACHE_LINE_MASK (~(ARC_DCACHE_LINE_LEN - 1))
+
+#if ARC_ICACHE_LINE_LEN != ARC_DCACHE_LINE_LEN
+#error "Need to fix some code as I/D cache lines not same"
+#else
+#define is_not_cache_aligned(p) ((unsigned long)p & (~DCACHE_LINE_MASK))
+#endif
+
+#ifndef __ASSEMBLY__
+
+/* Uncached access macros */
+#define arc_read_uncached_32(ptr) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__( \
+ " ld.di %0, [%1] \n" \
+ : "=r"(__ret) \
+ : "r"(ptr)); \
+ __ret; \
+})
+
+#define arc_write_uncached_32(ptr, data)\
+({ \
+ __asm__ __volatile__( \
+ " st.di %0, [%1] \n" \
+ : \
+ : "r"(data), "r"(ptr)); \
+})
+
+/* used to give SHMLBA a value to avoid Cache Aliasing */
+extern unsigned int ARC_shmlba;
+
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+
+/*
+ * ARC700 doesn't cache any access in top 256M.
+ * Ideal for wiring memory mapped peripherals as we don't need to do
+ * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
+ */
+#define ARC_UNCACHED_ADDR_SPACE 0xc0000000
+
+extern void arc_cache_init(void);
+extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
+extern void __init read_decode_cache_bcr(void);
+#endif
+
+#endif /* __ARC_ASM_CACHE_H */
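The .di helpers above force an uncached access regardless of address, which is useful when a device register happens to live in an otherwise cacheable region. A hedged sketch follows; the register address and function name are invented, not part of the patch.

/* Illustrative sketch only: poll a memory-mapped status register without
 * going through the data cache (the address is made up for the example). */
#define DEMO_DEV_STATUS	0x2f000000UL

static unsigned int demo_dev_ready(void)
{
	return arc_read_uncached_32((unsigned int *)DEMO_DEV_STATUS) & 0x1;
}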
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
new file mode 100644
index 00000000000..97ee96f2650
--- /dev/null
+++ b/arch/arc/include/asm/cacheflush.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_{range,page} (munmap, exit, COW-break)
+ *
+ * vineetg: April 2008
+ * -Added a critical CacheLine flush to copy_to_user_page( ) which
+ * was causing gdbserver to not setup breakpoints consistently
+ */
+
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+void flush_cache_all(void);
+
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+void flush_dcache_page(struct page *page);
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz);
+void dma_cache_inv(unsigned long start, unsigned long sz);
+void dma_cache_wback(unsigned long start, unsigned long sz);
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+/* TBD: optimize this */
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+
+/*
+ * VM callbacks when entire/range of user-space V-P mappings are
+ * torn-down/get-invalidated
+ *
+ * Currently we don't support D$ aliasing configs for our VIPT caches
+ * NOPS for VIPT Cache with non-aliasing D$ configurations only
+ */
+#define flush_cache_dup_mm(mm) /* called on fork */
+#define flush_cache_mm(mm) /* called on munmap/exit */
+#define flush_cache_range(mm, u_vstart, u_vend)
+#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+do { \
+ memcpy(dst, src, len); \
+ if (vma->vm_flags & VM_EXEC) \
+ flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\
+} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len);
+
+#endif
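The flush in copy_to_user_page() above exists for exactly the reason a debugger or code patcher needs it: instructions written through the data cache must be pushed out and the instruction cache invalidated before they are fetched. A hedged sketch (function name invented, not part of the patch):

/* Illustrative sketch only: patch one instruction word and resync the caches. */
static void demo_patch_insn(unsigned long addr, unsigned int insn)
{
	*(unsigned int *)addr = insn;			/* new opcode lands in D$ */
	flush_icache_range(addr, addr + sizeof(insn));	/* write back D$, invalidate I$ */
}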
diff --git a/arch/arc/include/asm/checksum.h b/arch/arc/include/asm/checksum.h
new file mode 100644
index 00000000000..10957298b7a
--- /dev/null
+++ b/arch/arc/include/asm/checksum.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Joern Rennecke <joern.rennecke@embecosm.com>: Jan 2012
+ * -Insn Scheduling improvements to csum core routines.
+ * = csum_fold( ) largely derived from ARM version.
+ * = ip_fast_csum( ) to have modulo scheduling
+ * -gcc 4.4.x broke networking. Alias analysis needed to be primed.
+ * worked around by adding memory clobber to ip_fast_csum( )
+ *
+ * vineetg: May 2010
+ * -Rewrote ip_fast_csum( ) and csum_fold( ) with fast inline asm
+ */
+
+#ifndef _ASM_ARC_CHECKSUM_H
+#define _ASM_ARC_CHECKSUM_H
+
+/*
+ * Fold a partial checksum
+ *
+ * The two 16-bit halves of the 32-bit sum are added, any carry is added
+ * back, and the final 16-bit result is inverted.
+ */
+static inline __sum16 csum_fold(__wsum s)
+{
+ unsigned r = s << 16 | s >> 16; /* ror */
+ s = ~s;
+ s -= r;
+ return s >> 16;
+}
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+static inline __sum16
+ip_fast_csum(const void *iph, unsigned int ihl)
+{
+ const void *ptr = iph;
+ unsigned int tmp, tmp2, sum;
+
+ __asm__(
+ " ld.ab %0, [%3, 4] \n"
+ " ld.ab %2, [%3, 4] \n"
+ " sub %1, %4, 2 \n"
+ " lsr.f lp_count, %1, 1 \n"
+ " bcc 0f \n"
+ " add.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ "0: lp 1f \n"
+ " ld.ab %1, [%3, 4] \n"
+ " adc.f %0, %0, %2 \n"
+ " ld.ab %2, [%3, 4] \n"
+ " adc.f %0, %0, %1 \n"
+ "1: adc.f %0, %0, %2 \n"
+ " add.cs %0,%0,1 \n"
+ : "=&r"(sum), "=r"(tmp), "=&r"(tmp2), "+&r" (ptr)
+ : "r"(ihl)
+ : "cc", "lp_count", "memory");
+
+ return csum_fold(sum);
+}
+
+/*
+ * TCP pseudo Header is 12 bytes:
+ * SA [4], DA [4], zeroes [1], Proto[1], TCP Seg(hdr+data) Len [2]
+ */
+static inline __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ __asm__ __volatile__(
+ " add.f %0, %0, %1 \n"
+ " adc.f %0, %0, %2 \n"
+ " adc.f %0, %0, %3 \n"
+ " adc.f %0, %0, %4 \n"
+ " adc %0, %0, 0 \n"
+ : "+&r"(sum)
+ : "r"(saddr), "r"(daddr),
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ "r"(len),
+#else
+ "r"(len << 8),
+#endif
+ "r"(htons(proto))
+ : "cc");
+
+ return sum;
+}
+
+#define csum_fold csum_fold
+#define ip_fast_csum ip_fast_csum
+#define csum_tcpudp_nofold csum_tcpudp_nofold
+
+#include <asm-generic/checksum.h>
+
+#endif /* _ASM_ARC_CHECKSUM_H */
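The rotate/subtract trick in csum_fold() above computes the usual ones-complement fold; a plain-C reference version may make that easier to see. It is illustration only, not part of the patch.

/* Illustrative sketch only: reference fold, same result as csum_fold() above. */
static inline unsigned short demo_csum_fold(unsigned int sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add the two 16-bit halves */
	sum += sum >> 16;			/* fold any carry back in */
	return (unsigned short)~sum;		/* invert the final 16-bit result */
}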
diff --git a/arch/arc/include/asm/clk.h b/arch/arc/include/asm/clk.h
new file mode 100644
index 00000000000..bf9d29f5bd5
--- /dev/null
+++ b/arch/arc/include/asm/clk.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_CLK_H
+#define _ASM_ARC_CLK_H
+
+/* Although we can't really hide core_freq, the accessor is still a better way */
+extern unsigned long core_freq;
+
+static inline unsigned long arc_get_core_freq(void)
+{
+ return core_freq;
+}
+
+extern int arc_set_core_freq(unsigned long);
+
+#endif
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
new file mode 100644
index 00000000000..03cd6894855
--- /dev/null
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_CMPXCHG_H
+#define __ASM_ARC_CMPXCHG_H
+
+#include <linux/types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_ARC_HAS_LLSC
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long prev;
+
+ __asm__ __volatile__(
+ "1: llock %0, [%1] \n"
+ " brne %0, %2, 2f \n"
+ " scond %3, [%1] \n"
+ " bnz 1b \n"
+ "2: \n"
+ : "=&r"(prev)
+ : "r"(ptr), "ir"(expected),
+ "r"(new) /* can't be "ir". scond can't take limm for "b" */
+ : "cc");
+
+ return prev;
+}
+
+#else
+
+static inline unsigned long
+__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
+{
+ unsigned long flags;
+ int prev;
+ volatile unsigned long *p = ptr;
+
+ atomic_ops_lock(flags);
+ prev = *p;
+ if (prev == expected)
+ *p = new;
+ atomic_ops_unlock(flags);
+ return prev;
+}
+
+#endif /* CONFIG_ARC_HAS_LLSC */
+
+#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
+ (unsigned long)(o), (unsigned long)(n)))
+
+/*
+ * Since not supported natively, ARC cmpxchg() uses atomic_ops_lock (UP/SMP)
+ * just to guarantee the semantics.
+ * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
+ * which also happen to be atomic_ops_lock.
+ *
+ * Thus, despite being semantically different, the implementation of
+ * atomic_cmpxchg() is the same as that of cmpxchg().
+ */
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+
+
+/*
+ * xchg (reg with memory) based on "Native atomic" EX insn
+ */
+static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
+ int size)
+{
+ extern unsigned long __xchg_bad_pointer(void);
+
+ switch (size) {
+ case 4:
+ __asm__ __volatile__(
+ " ex %0, [%1] \n"
+ : "+r"(val)
+ : "r"(ptr)
+ : "memory");
+
+ return val;
+ }
+ return __xchg_bad_pointer();
+}
+
+#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
+ sizeof(*(ptr))))
+
+/*
+ * On ARC700, the EX insn is inherently atomic, so by default a "vanilla" xchg()
+ * does not require any locking. However there's a quirk.
+ * ARC lacks a native CMPXCHG, which is thus emulated (see above) using external
+ * locking - incidentally it "reuses" the same atomic_ops_lock used by the atomic APIs.
+ * Now, the llist code uses cmpxchg() and xchg() on the same data, so xchg() needs to
+ * abide by the same serializing rules, and thus ends up using atomic_ops_lock as well.
+ *
+ * This however is only relevant if SMP and/or ARC lacks LLSC
+ * if (UP or LLSC)
+ * xchg doesn't need serialization
+ * else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
+ * xchg needs serialization
+ */
+
+#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
+
+#define xchg(ptr, with) \
+({ \
+ unsigned long flags; \
+ typeof(*(ptr)) old_val; \
+ \
+ atomic_ops_lock(flags); \
+ old_val = _xchg(ptr, with); \
+ atomic_ops_unlock(flags); \
+ old_val; \
+})
+
+#else
+
+#define xchg(ptr, with) _xchg(ptr, with)
+
+#endif
+
+/*
+ * "atomic" variant of xchg()
+ * REQ: It needs to follow the same serialization rules as other atomic_xxx()
+ * Since xchg() doesn't always do that, it would seem that the following definition
+ * is incorrect. But here's the rationale:
+ * SMP : Even xchg() takes the atomic_ops_lock, so OK.
+ * LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
+ * is natively "SMP safe", no serialization required).
+ * UP : other atomics disable IRQ, so there is no way an atomic_xchg() from a
+ * different context could clobber them. atomic_xchg() itself would be 1 insn, so it
+ * can't be clobbered by others. Thus no serialization required when
+ * atomic_xchg is involved.
+ */
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+#endif
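The serialization rules above matter because cmpxchg() is typically used in a read-modify-retry loop; if a plain store or an xchg() could slip in outside the same lock, the compare would be defeated. A hedged sketch of such a loop (function name invented, not part of the patch):

/* Illustrative sketch only: the classic compare-and-swap retry loop. */
static void demo_add(volatile int *p, int delta)
{
	int old;

	do {
		old = *p;				/* snapshot current value */
	} while (cmpxchg(p, old, old + delta) != old);	/* retry if someone raced us */
}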
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
new file mode 100644
index 00000000000..87b918585c4
--- /dev/null
+++ b/arch/arc/include/asm/current.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: May 16th, 2008
+ * - Current macro is now implemented as "global register" r25
+ */
+
+#ifndef _ASM_ARC_CURRENT_H
+#define _ASM_ARC_CURRENT_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+register struct task_struct *curr_arc asm("r25");
+#define current (curr_arc)
+
+#else
+#include <asm-generic/current.h>
+#endif /* ! CONFIG_ARC_CURR_IN_REG */
+
+#endif /* ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/defines.h b/arch/arc/include/asm/defines.h
new file mode 100644
index 00000000000..6097bb439cc
--- /dev/null
+++ b/arch/arc/include/asm/defines.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_DEFINES_H__
+#define __ARC_ASM_DEFINES_H__
+
+#if defined(CONFIG_ARC_MMU_V1)
+#define CONFIG_ARC_MMU_VER 1
+#elif defined(CONFIG_ARC_MMU_V2)
+#define CONFIG_ARC_MMU_VER 2
+#elif defined(CONFIG_ARC_MMU_V3)
+#define CONFIG_ARC_MMU_VER 3
+#endif
+
+#ifdef CONFIG_ARC_HAS_LLSC
+#define __CONFIG_ARC_HAS_LLSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_LLSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_SWAPE
+#define __CONFIG_ARC_HAS_SWAPE_VAL 1
+#else
+#define __CONFIG_ARC_HAS_SWAPE_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_RTSC
+#define __CONFIG_ARC_HAS_RTSC_VAL 1
+#else
+#define __CONFIG_ARC_HAS_RTSC_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_MMU_SASID
+#define __CONFIG_ARC_MMU_SASID_VAL 1
+#else
+#define __CONFIG_ARC_MMU_SASID_VAL 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+#define __CONFIG_ARC_HAS_ICACHE 1
+#else
+#define __CONFIG_ARC_HAS_ICACHE 0
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+#define __CONFIG_ARC_HAS_DCACHE 1
+#else
+#define __CONFIG_ARC_HAS_DCACHE 0
+#endif
+
+#endif /* __ARC_ASM_DEFINES_H__ */
diff --git a/arch/arc/include/asm/delay.h b/arch/arc/include/asm/delay.h
new file mode 100644
index 00000000000..442ce5d0f70
--- /dev/null
+++ b/arch/arc/include/asm/delay.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Delay routines using pre computed loops_per_jiffy value.
+ *
+ * vineetg: Feb 2012
+ * -Rewrote in "C" to avoid dealing with availability of H/w MPY
+ * -Also reduced the num of MPY operations from 3 to 2
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_UDELAY_H
+#define __ASM_ARC_UDELAY_H
+
+#include <asm/param.h> /* HZ */
+
+static inline void __delay(unsigned long loops)
+{
+ __asm__ __volatile__(
+ "1: sub.f %0, %0, 1 \n"
+ " jpnz 1b \n"
+ : "+r"(loops)
+ :
+ : "cc");
+}
+
+extern void __bad_udelay(void);
+
+/*
+ * Normal Math for computing loops in "N" usecs
+ * -we have precomputed @loops_per_jiffy
+ * -1 sec has HZ jiffies
+ * loops per "N" usecs = ((loops_per_jiffy * HZ / 1000000) * N)
+ *
+ * Approximate Division by multiplication:
+ * -Mathematically, if we multiply and divide a number by the same value the
+ * result remains unchanged: in this case, we use 2^32
+ * -> (loops_per_N_usec * 2^32 ) / 2^32
+ * -> (((loops_per_jiffy * HZ / 1000000) * N) * 2^32) / 2^32
+ * -> (loops_per_jiffy * HZ * N * 4295) / 2^32
+ *
+ * -Dividing by 2^32 is simply a right shift by 32
+ * -We only need to ensure that the multiply per the above eqn happens in
+ * 64-bit precision (if the CPU doesn't support it, gcc can emulate it)
+ */
+
+static inline void __udelay(unsigned long usecs)
+{
+ unsigned long loops;
+
+ /* (long long) cast ensures 64 bit MPY - real or emulated
+ * HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
+ */
+ loops = ((long long)(usecs * 4295 * HZ) *
+ (long long)(loops_per_jiffy)) >> 32;
+
+ __delay(loops);
+}
+
+#define udelay(n) (__builtin_constant_p(n) ? ((n) > 20000 ? __bad_udelay() \
+ : __udelay(n)) : __udelay(n))
+
+#endif /* __ASM_ARC_UDELAY_H */
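A quick numeric check of the 2^32 trick above, with invented example values:

/* Example only (numbers made up): loops_per_jiffy = 40000, HZ = 100
 *   exact:  40000 * 100 / 10^6 = 4 loops/us, so udelay(125) ~ 500 loops
 *   above:  (125 * 4295 * 100 * 40000) >> 32 = 2147500000000 >> 32 = 499
 * i.e. within one loop of the exact value, because 4295 ~= 2^32 / 10^6.
 */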
diff --git a/arch/arc/include/asm/disasm.h b/arch/arc/include/asm/disasm.h
new file mode 100644
index 00000000000..f1cce3d059a
--- /dev/null
+++ b/arch/arc/include/asm/disasm.h
@@ -0,0 +1,116 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_DISASM_H__
+#define __ARC_DISASM_H__
+
+enum {
+ op_Bcc = 0, op_BLcc = 1, op_LD = 2, op_ST = 3, op_MAJOR_4 = 4,
+ op_MAJOR_5 = 5, op_LD_ADD = 12, op_ADD_SUB_SHIFT = 13,
+ op_ADD_MOV_CMP = 14, op_S = 15, op_LD_S = 16, op_LDB_S = 17,
+ op_LDW_S = 18, op_LDWX_S = 19, op_ST_S = 20, op_STB_S = 21,
+ op_STW_S = 22, op_Su5 = 23, op_SP = 24, op_GP = 25,
+ op_Pcl = 26, op_MOV_S = 27, op_ADD_CMP = 28, op_BR_S = 29,
+ op_B_S = 30, op_BL_S = 31
+};
+
+enum flow {
+ noflow,
+ direct_jump,
+ direct_call,
+ indirect_jump,
+ indirect_call,
+ invalid_instr
+};
+
+#define IS_BIT(word, n) ((word) & (1<<n))
+#define BITS(word, s, e) (((word) >> (s)) & (~((-2) << ((e) - (s)))))
+
+#define MAJOR_OPCODE(word) (BITS((word), 27, 31))
+#define MINOR_OPCODE(word) (BITS((word), 16, 21))
+#define FIELD_A(word) (BITS((word), 0, 5))
+#define FIELD_B(word) ((BITS((word), 12, 14)<<3) | \
+ (BITS((word), 24, 26)))
+#define FIELD_C(word) (BITS((word), 6, 11))
+#define FIELD_u6(word) FIELD_C(word)
+#define FIELD_s12(word) sign_extend(((BITS((word), 0, 5) << 6) | \
+ BITS((word), 6, 11)), 12)
+
+/* note that for BL/BRcc these two macros need another AND statement to mask
+ * out bit 1 (make the result a multiple of 4) */
+#define FIELD_s9(word) sign_extend(((BITS(word, 15, 15) << 8) | \
+ BITS(word, 16, 23)), 9)
+#define FIELD_s21(word) sign_extend(((BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+#define FIELD_s25(word) sign_extend(((BITS(word, 0, 3) << 21) | \
+ (BITS(word, 6, 15) << 11) | \
+ (BITS(word, 17, 26) << 1)), 12)
+
+/* note: these operate on 16 bits! */
+#define FIELD_S_A(word) ((BITS((word), 2, 2)<<3) | BITS((word), 0, 2))
+#define FIELD_S_B(word) ((BITS((word), 10, 10)<<3) | \
+ BITS((word), 8, 10))
+#define FIELD_S_C(word) ((BITS((word), 7, 7)<<3) | BITS((word), 5, 7))
+#define FIELD_S_H(word) ((BITS((word), 0, 2)<<3) | BITS((word), 5, 8))
+#define FIELD_S_u5(word) (BITS((word), 0, 4))
+#define FIELD_S_u6(word) (BITS((word), 0, 4) << 1)
+#define FIELD_S_u7(word) (BITS((word), 0, 4) << 2)
+#define FIELD_S_u10(word) (BITS((word), 0, 7) << 2)
+#define FIELD_S_s7(word) sign_extend(BITS((word), 0, 5) << 1, 9)
+#define FIELD_S_s8(word) sign_extend(BITS((word), 0, 7) << 1, 9)
+#define FIELD_S_s9(word) sign_extend(BITS((word), 0, 8), 9)
+#define FIELD_S_s10(word) sign_extend(BITS((word), 0, 8) << 1, 10)
+#define FIELD_S_s11(word) sign_extend(BITS((word), 0, 8) << 2, 11)
+#define FIELD_S_s13(word) sign_extend(BITS((word), 0, 10) << 2, 13)
+
+#define STATUS32_L 0x00000100
+#define REG_LIMM 62
+
+struct disasm_state {
+ /* generic info */
+ unsigned long words[2];
+ int instr_len;
+ int major_opcode;
+ /* info for branch/jump */
+ int is_branch;
+ int target;
+ int delay_slot;
+ enum flow flow;
+ /* info for load/store */
+ int src1, src2, src3, dest, wb_reg;
+ int zz, aa, x, pref, di;
+ int fault, write;
+};
+
+static inline int sign_extend(int value, int bits)
+{
+ if (IS_BIT(value, (bits - 1)))
+ value |= (0xffffffff << bits);
+
+ return value;
+}
+
+static inline int is_short_instr(unsigned long addr)
+{
+ uint16_t word = *((uint16_t *)addr);
+ int opcode = (word >> 11) & 0x1F;
+ return (opcode >= 0x0B);
+}
+
+void disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs);
+int disasm_next_pc(unsigned long pc, struct pt_regs *regs, struct callee_regs
+ *cregs, unsigned long *fall_thru, unsigned long *target);
+long get_reg(int reg, struct pt_regs *regs, struct callee_regs *cregs);
+void set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs);
+
+#endif /* __ARC_DISASM_H__ */
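As a worked example of the sign_extend() helper above (the field value is invented, not part of the patch):

/* Illustrative sketch only: a 9-bit field with its sign bit (bit 8) set. */
static int demo_sign_extend(void)
{
	return sign_extend(0x1F0, 9);	/* 0x1F0 | (0xffffffff << 9) = 0xFFFFFFF0 = -16 */
}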
diff --git a/arch/arc/include/asm/dma-mapping.h b/arch/arc/include/asm/dma-mapping.h
new file mode 100644
index 00000000000..31f77aec082
--- /dev/null
+++ b/arch/arc/include/asm/dma-mapping.h
@@ -0,0 +1,221 @@
+/*
+ * DMA Mapping glue for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_MAPPING_H
+#define ASM_ARC_DMA_MAPPING_H
+
+#include <asm-generic/dma-coherent.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
+/*
+ * The dma_map_* APIs take CPU addresses, which are kernel logical addresses
+ * based in the untranslated address space (0x8000_0000). The dma address (bus addr)
+ * ideally needs to be 0x0000_0000 based, hence these glue routines.
+ * However, given that intermediate bus bridges can ignore the high bit, we can
+ * do with these routines being no-ops.
+ * If a platform/device comes up which strictly requires a 0-based bus addr
+ * (e.g. the AHB-PCI bridge on the Angel4 board), then it can provide its own versions
+ */
+#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
+#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
+
+#else
+#include <plat/dma_addr.h>
+#endif
+
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle);
+
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr,
+ size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
+/*
+ * streaming DMA Mapping API...
+ * The CPU accesses the page via its normal paddr, so it needs to be explicitly
+ * made consistent before each use
+ */
+
+static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ dma_cache_inv(paddr, size);
+ break;
+ case DMA_TO_DEVICE:
+ dma_cache_wback(paddr, size);
+ break;
+ case DMA_BIDIRECTIONAL:
+ dma_cache_wback_inv(paddr, size);
+ break;
+ default:
+ pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
+ }
+}
+
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir);
+
+#define _dma_cache_sync(addr, sz, dir) \
+do { \
+ if (__builtin_constant_p(dir)) \
+ __inline_dma_cache_sync(addr, sz, dir); \
+ else \
+ __arc_dma_cache_sync(addr, sz, dir); \
+} \
+while (0);
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ _dma_cache_sync((unsigned long)cpu_addr, size, dir);
+ return plat_kernel_addr_to_dma(dev, cpu_addr);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+{
+ unsigned long paddr = page_to_phys(page) + offset;
+ return dma_map_single(dev, (void *)paddr, size, dir);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+
+ return nents;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
+ size_t size, enum dma_data_direction dir)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
+ DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_FROM_DEVICE);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ _dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
+ size, DMA_TO_DEVICE);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < nelems; i++, sg++)
+ _dma_cache_sync((unsigned int)sg_virt(sg), sg->length, dir);
+}
+
+static inline int dma_supported(struct device *dev, u64 dma_mask)
+{
+ /* Support 32 bit DMA mask exclusively */
+ return dma_mask == DMA_BIT_MASK(32);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+static inline int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+ *dev->dma_mask = dma_mask;
+
+ return 0;
+}
+
+#endif
diff --git a/arch/arc/include/asm/dma.h b/arch/arc/include/asm/dma.h
new file mode 100644
index 00000000000..ca7c45181de
--- /dev/null
+++ b/arch/arc/include/asm/dma.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ASM_ARC_DMA_H
+#define ASM_ARC_DMA_H
+
+#define MAX_DMA_ADDRESS 0xC0000000
+
+#endif
diff --git a/arch/arc/include/asm/elf.h b/arch/arc/include/asm/elf.h
new file mode 100644
index 00000000000..f4c8d36ebec
--- /dev/null
+++ b/arch/arc/include/asm/elf.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_ELF_H
+#define __ASM_ARC_ELF_H
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/* These ELF defines belong to uapi but libc elf.h already defines them */
+#define EM_ARCOMPACT 93
+
+/* ARC Relocations (kernel Modules only) */
+#define R_ARC_32 0x4
+#define R_ARC_32_ME 0x1B
+#define R_ARC_S25H_PCREL 0x10
+#define R_ARC_S25W_PCREL 0x11
+
+/* to set parameters in the core dumps */
+#define ELF_ARCH EM_ARCOMPACT
+#define ELF_CLASS ELFCLASS32
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define ELF_DATA ELFDATA2MSB
+#else
+#define ELF_DATA ELFDATA2LSB
+#endif
+
+/*
+ * To ensure that
+ * -we don't load something for the wrong architecture.
+ * -the userspace is using the correct syscall ABI
+ */
+struct elf32_hdr;
+extern int elf_check_arch(const struct elf32_hdr *);
+#define elf_check_arch elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE PAGE_SIZE
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
+/*
+ * When the program starts, r0 contains a pointer to a function to be
+ * registered with atexit, as per the SVR4 ABI. A value of 0 means we
+ * have no such handler.
+ */
+#define ELF_PLAT_INIT(_r, load_addr) ((_r)->r0 = 0)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
+
+#endif
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
new file mode 100644
index 00000000000..23daa326fc9
--- /dev/null
+++ b/arch/arc/include/asm/entry.h
@@ -0,0 +1,724 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ * Stack switching code can no longer rely on the fact that
+ * if we are NOT in user mode, stack is switched to kernel mode.
+ * e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
+ * its prologue including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we also need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather then "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ * -Modified CALLEE_REG save/restore macros to handle the fact that
+ * r25 contains the kernel current task ptr
+ * - Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ * - Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ * address Write back load ld.ab instead of separate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_H
+#define __ASM_ARC_ENTRY_H
+
+#ifdef __ASSEMBLY__
+#include <asm/unistd.h> /* For NR_syscalls definition */
+#include <asm/asm-offsets.h>
+#include <asm/arcregs.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h> /* For VMALLOC_START */
+#include <asm/thread_info.h> /* For THREAD_SIZE */
+
+/* Note on the LD/ST addr modes with addr reg wback
+ *
+ * LD.a same as LD.aw
+ *
+ * LD.a reg1, [reg2, x] => Pre Incr
+ * Eff Addr for load = [reg2 + x]
+ *
+ * LD.ab reg1, [reg2, x] => Post Incr
+ * Eff Addr for load = [reg2]
+ */
+
+/*--------------------------------------------------------------
+ * Save caller saved registers (scratch registers) ( r0 - r12 )
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLER_SAVED
+ st.a r0, [sp, -4]
+ st.a r1, [sp, -4]
+ st.a r2, [sp, -4]
+ st.a r3, [sp, -4]
+ st.a r4, [sp, -4]
+ st.a r5, [sp, -4]
+ st.a r6, [sp, -4]
+ st.a r7, [sp, -4]
+ st.a r8, [sp, -4]
+ st.a r9, [sp, -4]
+ st.a r10, [sp, -4]
+ st.a r11, [sp, -4]
+ st.a r12, [sp, -4]
+.endm
+
+/*--------------------------------------------------------------
+ * Restore caller saved registers (scratch registers)
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLER_SAVED
+ ld.ab r12, [sp, 4]
+ ld.ab r11, [sp, 4]
+ ld.ab r10, [sp, 4]
+ ld.ab r9, [sp, 4]
+ ld.ab r8, [sp, 4]
+ ld.ab r7, [sp, 4]
+ ld.ab r6, [sp, 4]
+ ld.ab r5, [sp, 4]
+ ld.ab r4, [sp, 4]
+ ld.ab r3, [sp, 4]
+ ld.ab r2, [sp, 4]
+ ld.ab r1, [sp, 4]
+ ld.ab r0, [sp, 4]
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * on kernel stack.
+ * User mode callee regs need to be saved in case of
+ * -fork and friends for replicating from parent to child
+ * -before going into do_signal( ) for ptrace/core-dump
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching task ptr. Low level exception/ISR code saves user mode r25
+ * into task->thread.user_r25, so it needs to be retrieved from there and
+ * saved onto the kernel stack with the rest of the callee reg-file
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_USER
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Retrieve orig r25 and save it on stack
+ ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
+ st.a r12, [sp, -4]
+#else
+ st.a r25, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" callee_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save callee saved registers (non scratch registers) ( r13 - r25 )
+ * kernel mode callee regs need to be saved in case of context switch.
+ * If r25 is used for caching the task pointer then it need not be saved
+ * as it can be re-created from the current task global
+ *-------------------------------------------------------------*/
+.macro SAVE_CALLEE_SAVED_KERNEL
+ st.a r13, [sp, -4]
+ st.a r14, [sp, -4]
+ st.a r15, [sp, -4]
+ st.a r16, [sp, -4]
+ st.a r17, [sp, -4]
+ st.a r18, [sp, -4]
+ st.a r19, [sp, -4]
+ st.a r20, [sp, -4]
+ st.a r21, [sp, -4]
+ st.a r22, [sp, -4]
+ st.a r23, [sp, -4]
+ st.a r24, [sp, -4]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ sub sp, sp, 8
+#else
+ st.a r25, [sp, -4]
+ sub sp, sp, 4
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_KERNEL:
+ * Loads callee (non scratch) Reg File by popping from Kernel mode stack.
+ * This is reverse of SAVE_CALLEE_SAVED,
+ *
+ * NOTE:
+ * Ideally this shd only be called in switch_to for loading
+ * switched-IN task's CALLEE Reg File.
+ * For all other cases RESTORE_CALLEE_SAVED_FAST must be used
+ * which simply pops the stack w/o touching regs.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_KERNEL
+
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ add sp, sp, 8 /* skip callee_reg gutter and user r25 placeholder */
+#else
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+
+.endm
+
+/*--------------------------------------------------------------
+ * RESTORE_CALLEE_SAVED_USER:
+ * This is called after do_signal where tracer might have changed callee regs
+ * thus we need to restore the reg file.
+ * Special case handling is required for r25 in case it is used by kernel
+ * for caching task ptr. Ptrace would have modified the on-kernel-stack value
+ * of r25, which needs to be shoved back into task->thread.user_r25, from
+ * where the low level exception/ISR return code will retrieve it along with
+ * the rest of the callee reg-file.
+ *-------------------------------------------------------------*/
+.macro RESTORE_CALLEE_SAVED_USER
+
+ add sp, sp, 4 /* skip "callee_regs->stack_place_holder" */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld.ab r12, [sp, 4]
+ st r12, [r25, TASK_THREAD + THREAD_USER_R25]
+#else
+ ld.ab r25, [sp, 4]
+#endif
+
+ ld.ab r24, [sp, 4]
+ ld.ab r23, [sp, 4]
+ ld.ab r22, [sp, 4]
+ ld.ab r21, [sp, 4]
+ ld.ab r20, [sp, 4]
+ ld.ab r19, [sp, 4]
+ ld.ab r18, [sp, 4]
+ ld.ab r17, [sp, 4]
+ ld.ab r16, [sp, 4]
+ ld.ab r15, [sp, 4]
+ ld.ab r14, [sp, 4]
+ ld.ab r13, [sp, 4]
+.endm
+
+/*--------------------------------------------------------------
+ * Super FAST Restore callee saved regs by simply re-adjusting SP
+ *-------------------------------------------------------------*/
+.macro DISCARD_CALLEE_SAVED_USER
+ add sp, sp, 14 * 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore User mode r25 saved in task_struct->thread.user_r25
+ *-------------------------------------------------------------*/
+.macro RESTORE_USER_R25
+ ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
+.endm
+
+/*-------------------------------------------------------------
+ * given a tsk struct, get to the base of its kernel mode stack
+ * tsk->thread_info is really a PAGE, whose bottom hoists stack
+ * which grows upwards towards thread_info
+ *------------------------------------------------------------*/
+
+.macro GET_TSK_STACK_BASE tsk, out
+
+ /* Get task->thread_info (this is essentially start of a PAGE) */
+ ld \out, [\tsk, TASK_THREAD_INFO]
+
+ /* Go to end of page where stack begins (grows upwards) */
+ add2 \out, \out, (THREAD_SIZE - 4)/4 /* one word GUTTER */
+
+.endm
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit : SP is set to kernel mode stack pointer
+ * If CURR_IN_REG, r25 set to "current" task pointer
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+ /* User Mode when this happened ? Yes: Proceed to switch stack */
+ bbit1 r9, STATUS_U_BIT, 88f
+
+ /* OK we were already in kernel mode when this event happened, thus can
+ * assume SP is kernel mode SP. _NO_ need to do any stack switching
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ /* However....
+ * If Level 2 Interrupts enabled, we may end up with a corner case:
+ * 1. User Task executing
+ * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+ * 3. But before it could switch SP from USER to KERNEL stack
+ * a L2 IRQ "Interrupts" L1
+ * That way although L2 IRQ happened in Kernel mode, stack is still
+ * not switched.
+ * To handle this, we may need to switch stack even if in kernel mode
+ * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+ */
+ brlo sp, VMALLOC_START, 88f
+
+ /* TODO: vineetg:
+ * We need to be a bit more cautious here. What if a kernel bug in
+ * the L1 ISR caused SP to go whacko (some small value which looks like
+ * a USER stk) and then we take the L2 ISR.
+ * The brlo above alone would treat it as a valid L1-L2 scenario
+ * instead of shouting aloud.
+ * The only feasible way is to make sure this L2 happened in the
+ * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+ * L1 ISR before it switches stack
+ */
+
+#endif
+
+ /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
+ * safe-keeping not really needed, but it keeps the epilogue code
+ * (SP restore) simpler/uniform.
+ */
+ b.d 77f
+
+ st.a sp, [sp, -12] ; Make room for orig_r0 and orig_r8
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+ GET_CURR_TASK_ON_CPU r9
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+ /* If current task pointer cached in r25, time to
+ * -safekeep USER r25 in task->thread_struct->user_r25
+ * -load r25 with current task ptr
+ */
+ st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
+ mov r25, r9
+#endif
+
+ /* With current tsk in r9, get its kernel mode stack base */
+ GET_TSK_STACK_BASE r9, r9
+
+#ifdef PT_REGS_CANARY
+ st 0xabcdabcd, [r9, 0]
+#endif
+
+ /* Save Pre Intr/Exception User SP on kernel stack */
+ st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
+
+ /* CAUTION:
+ * SP should be set at the very end when we are done with everything
+ * In case of 2 levels of interrupt we depend on value of SP to assume
+ * that everything else is done (loading r25 etc)
+ */
+
+ /* set SP to point to kernel mode stack */
+ mov sp, r9
+
+77: /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
+
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN reg
+
+ ld \reg, [sp, PT_status32]
+ bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
+ bset \reg, \reg, STATUS_L_BIT
+ sr \reg, [erstatus]
+ mov \reg, 55f
+ sr \reg, [eret]
+
+ rtie
+55:
+.endm
+
+/*
+ * @reg [OUT] &thread_info of "current"
+ */
+.macro GET_CURR_THR_INFO_FROM_SP reg
+ and \reg, sp, ~(THREAD_SIZE - 1)
+.endm
+
+/*
+ * @reg [OUT] thread_info->flags of "current"
+ */
+.macro GET_CURR_THR_INFO_FLAGS reg
+ GET_CURR_THR_INFO_FROM_SP \reg
+ ld \reg, [\reg, THREAD_INFO_FLAGS]
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of ptregs.
+ *-------------------------------------------------------------*/
+.macro EXCPN_PROLOG_FREEUP_REG reg
+#ifdef CONFIG_SMP
+ sr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ st \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+.macro EXCPN_PROLOG_RESTORE_REG reg
+#ifdef CONFIG_SMP
+ lr \reg, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld \reg, [@ex_saved_reg1]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
+ * Requires SP to be already switched to kernel mode Stack
+ * sp points to the next free element on the stack at exit of this macro.
+ * Registers are pushed / popped in the order defined in struct ptregs
+ * in asm/ptrace.h
+ * Note that syscalls are implemented via TRAP which is also an exception
+ * from CPU's point of view
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_EXCEPTION marker
+
+ st \marker, [sp, 8]
+ st r0, [sp, 4] /* orig_r0, needed only for sys calls */
+
+ /* Restore r9 used to code the early prologue */
+ EXCPN_PROLOG_RESTORE_REG r9
+
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ lr r9, [eret]
+ st.a r9, [sp, -4]
+ lr r9, [erstatus]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [erbta]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbeef
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for exceptions
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_SYS
+ SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
+.endm
+
+/*--------------------------------------------------------------
+ * Save scratch regs for sys calls
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_TRAP
+ /*
+ * Setup pt_regs->orig_r8.
+ * Encode syscall number (r8) in upper short word of event type (r9)
+ * N.B. #1: This is already endian safe (see ptrace.h)
+ * #2: Only r9 can be used as scratch as it is already clobbered
+ * and its contents are no longer needed by the latter part
+ * of exception prologue
+ */
+ lsl r9, r8, 16
+ or r9, r9, orig_r8_IS_SCALL
+
+ SAVE_ALL_EXCEPTION r9
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro RESTORE_ALL_SYS
+
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [erbta]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [erstatus]
+ ld.ab r9, [sp, 4]
+ sr r9, [eret]
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+
+/*--------------------------------------------------------------
+ * Save all registers used by interrupt handlers.
+ *-------------------------------------------------------------*/
+.macro SAVE_ALL_INT1
+
+ /* restore original r9 , saved in int1_saved_reg
+ * It will be saved on stack in macro: SAVE_CALLER_SAVED
+ */
+#ifdef CONFIG_SMP
+ lr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ ld r9, [@int1_saved_reg]
+#endif
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink1, [sp, -4]
+ lr r9, [status32_l1]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l1]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee1
+ st r9, [sp, -4]
+#endif
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+.macro SAVE_ALL_INT2
+
+ /* TODO-vineetg: SMP we can't use global nor can we use
+ * SCRATCH0 as we do for int1 because while int1 is using
+ * it, int2 can come
+ */
+ /* restore original r9, saved in int2_saved_reg */
+ ld r9, [@int2_saved_reg]
+
+ /* now we are ready to save the remaining context :) */
+ st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
+ st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
+ SAVE_CALLER_SAVED
+ st.a r26, [sp, -4] /* gp */
+ st.a fp, [sp, -4]
+ st.a blink, [sp, -4]
+ st.a ilink2, [sp, -4]
+ lr r9, [status32_l2]
+ st.a r9, [sp, -4]
+ st.a lp_count, [sp, -4]
+ lr r9, [lp_end]
+ st.a r9, [sp, -4]
+ lr r9, [lp_start]
+ st.a r9, [sp, -4]
+ lr r9, [bta_l2]
+ st.a r9, [sp, -4]
+
+#ifdef PT_REGS_CANARY
+ mov r9, 0xdeadbee2
+ st r9, [sp, -4]
+#endif
+
+ /* move up by 1 word to "create" pt_regs->"stack_place_holder" */
+ sub sp, sp, 4
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+
+.macro RESTORE_ALL_INT1
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4] /* Actual reg file */
+ sr r9, [bta_l1]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l1]
+ ld.ab r9, [sp, 4]
+ mov ilink1, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+.endm
+
+.macro RESTORE_ALL_INT2
+ add sp, sp, 4 /* hop over unused "pt_regs->stack_place_holder" */
+
+ ld.ab r9, [sp, 4]
+ sr r9, [bta_l2]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_start]
+ ld.ab r9, [sp, 4]
+ sr r9, [lp_end]
+ ld.ab r9, [sp, 4]
+ mov lp_count, r9
+ ld.ab r9, [sp, 4]
+ sr r9, [status32_l2]
+ ld.ab r9, [sp, 4]
+ mov ilink2, r9
+ ld.ab blink, [sp, 4]
+ ld.ab fp, [sp, 4]
+ ld.ab r26, [sp, 4] /* gp */
+ RESTORE_CALLER_SAVED
+
+ ld sp, [sp] /* restore original sp */
+ /* orig_r0 and orig_r8 skipped automatically */
+
+.endm
+
+
+/* Get CPU-ID of this core */
+.macro GET_CPU_ID reg
+ lr \reg, [identity]
+ lsr \reg, \reg, 8
+ bmsk \reg, \reg, 7
+.endm
+
+#ifdef CONFIG_SMP
+
+/*-------------------------------------------------
+ * Retrieve the current running task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ */
+.macro GET_CURR_TASK_ON_CPU reg
+ GET_CPU_ID \reg
+ ld.as \reg, [@_current_task, \reg]
+.endm
+
+/*-------------------------------------------------
+ * Save a new task as the "current" task on this CPU
+ * 1. Determine curr CPU id.
+ * 2. Use it to index into _current_task[ ]
+ *
+ * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
+ * because ST r0, [r1, offset] can ONLY have s9 @offset
+ * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
+ */
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ GET_CPU_ID \tmp
+ add2 \tmp, @_current_task, \tmp
+ st \tsk, [\tmp]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+
+.endm
+
+
+#else /* Uniprocessor implementation of macros */
+
+.macro GET_CURR_TASK_ON_CPU reg
+ ld \reg, [@_current_task]
+.endm
+
+.macro SET_CURR_TASK_ON_CPU tsk, tmp
+ st \tsk, [@_current_task]
+#ifdef CONFIG_ARC_CURR_IN_REG
+ mov r25, \tsk
+#endif
+.endm
+
+#endif /* SMP / UNI */
+
+/* ------------------------------------------------------------------
+ * Get the ptr to some field of Current Task at @off in task struct
+ * -Uses r25 for Current task ptr if that is enabled
+ */
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ add \reg, r25, \off
+.endm
+
+#else
+
+.macro GET_CURR_TASK_FIELD_PTR off, reg
+ GET_CURR_TASK_ON_CPU \reg
+ add \reg, \reg, \off
+.endm
+
+#endif /* CONFIG_ARC_CURR_IN_REG */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ARC_ENTRY_H */
diff --git a/arch/arc/include/asm/exec.h b/arch/arc/include/asm/exec.h
new file mode 100644
index 00000000000..28abc6905e0
--- /dev/null
+++ b/arch/arc/include/asm/exec.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_EXEC_H
+#define __ASM_ARC_EXEC_H
+
+/* Align to 16b */
+#define arch_align_stack(p) ((unsigned long)(p) & ~0xf)
+
+#endif
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
new file mode 100644
index 00000000000..4dc64ddebec
--- /dev/null
+++ b/arch/arc/include/asm/futex.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: August 2010: From Android kernel work
+ */
+
+#ifndef _ASM_FUTEX_H
+#define _ASM_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)\
+ \
+ __asm__ __volatile__( \
+ "1: ld %1, [%2] \n" \
+ insn "\n" \
+ "2: st %0, [%2] \n" \
+ " mov %0, 0 \n" \
+ "3: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .align 4 \n" \
+ "4: mov %0, %4 \n" \
+ " b 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .align 4 \n" \
+ " .word 1b, 4b \n" \
+ " .word 2b, 4b \n" \
+ " .previous \n" \
+ \
+ : "=&r" (ret), "=&r" (oldval) \
+ : "r" (uaddr), "r" (oparg), "ir" (-EFAULT) \
+ : "cc", "memory")
+
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("or %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("bic %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("xor %0, %1, %3", ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
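+
+/*
+ * Illustrative decode (hypothetical values): an encoded_op built with
+ * FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0) unpacks above as
+ * op = FUTEX_OP_ADD, oparg = 1, cmp = FUTEX_OP_CMP_EQ, cmparg = 0,
+ * i.e. atomically add 1 to *uaddr and report whether the old value was 0.
+ */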
+
+/* Compare-xchg with preemption disabled.
+ * Notes:
+ * -Best-Effort: Exchg happens only if compare succeeds.
+ * If compare fails, returns; leaving retry/looping to upper layers
+ * -successful cmp-xchg: return orig value in @addr (same as cmp val)
+ * -Compare fails: return orig value in @addr
+ * -user access r/w fails: return -EFAULT
+ */
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ u32 newval)
+{
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable(); /* implies preempt_disable() */
+
+ /* TBD : can use llock/scond */
+ __asm__ __volatile__(
+ "1: ld %0, [%3] \n"
+ " brne %0, %1, 3f \n"
+ "2: st %2, [%3] \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ "4: mov %0, %4 \n"
+ " b 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 2b, 4b \n"
+ " .previous\n"
+ : "=&r"(val)
+ : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
+ : "cc", "memory");
+
+ pagefault_enable(); /* subsumes preempt_enable() */
+
+ *uval = val;
+ return val;
+}
+
+#endif
diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h
new file mode 100644
index 00000000000..473424d7528
--- /dev/null
+++ b/arch/arc/include/asm/io.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_IO_H
+#define _ASM_ARC_IO_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+
+#define PCI_IOBASE ((void __iomem *)0)
+
+extern void __iomem *ioremap(unsigned long physaddr, unsigned long size);
+extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+ unsigned long flags);
+extern void iounmap(const void __iomem *addr);
+
+#define ioremap_nocache(phy, sz) ioremap(phy, sz)
+#define ioremap_wc(phy, sz) ioremap(phy, sz)
+
+/* Change struct page to physical address */
+#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 b;
+
+ __asm__ __volatile__(
+ " ldb%U1 %0, %1 \n"
+ : "=r" (b)
+ : "m" (*(volatile u8 __force *)addr)
+ : "memory");
+
+ return b;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 s;
+
+ __asm__ __volatile__(
+ " ldw%U1 %0, %1 \n"
+ : "=r" (s)
+ : "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+ return s;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 w;
+
+ __asm__ __volatile__(
+ " ld%U1 %0, %1 \n"
+ : "=r" (w)
+ : "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+ return w;
+}
+
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stb%U1 %0, %1 \n"
+ :
+ : "r" (b), "m" (*(volatile u8 __force *)addr)
+ : "memory");
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 s, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " stw%U1 %0, %1 \n"
+ :
+ : "r" (s), "m" (*(volatile u16 __force *)addr)
+ : "memory");
+
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 w, volatile void __iomem *addr)
+{
+ __asm__ __volatile__(
+ " st%U1 %0, %1 \n"
+ :
+ : "r" (w), "m" (*(volatile u32 __force *)addr)
+ : "memory");
+
+}
+
+#include <asm-generic/io.h>
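+
+/*
+ * Illustrative sketch (hypothetical driver code; regs, phys_base and the
+ * 0x10 offset are made up): MMIO access built on the accessors above
+ *
+ * void __iomem *regs = ioremap(phys_base, SZ_4K);
+ * u32 val = readl(regs + 0x10);
+ * writel(val | 0x1, regs + 0x10);
+ * iounmap(regs);
+ *
+ * readl()/writel() come from asm-generic/io.h, layered over the
+ * __raw_readl()/__raw_writel() defined here.
+ */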
+
+#endif /* _ASM_ARC_IO_H */
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
new file mode 100644
index 00000000000..4c588f9820c
--- /dev/null
+++ b/arch/arc/include/asm/irq.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQ_H
+#define __ASM_ARC_IRQ_H
+
+#define NR_IRQS 32
+
+/* Platform Independent IRQs */
+#define TIMER0_IRQ 3
+#define TIMER1_IRQ 4
+
+#include <asm-generic/irq.h>
+
+extern void __init arc_init_IRQ(void);
+extern int __init get_hw_config_num_irq(void);
+
+void __cpuinit arc_local_timer_setup(unsigned int cpu);
+
+#endif
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
new file mode 100644
index 00000000000..ccd84806b62
--- /dev/null
+++ b/arch/arc/include/asm/irqflags.h
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_IRQFLAGS_H
+#define __ASM_ARC_IRQFLAGS_H
+
+/* vineetg: March 2010 : local_irq_save( ) optimisation
+ * -Remove explicit mov of current status32 into reg, that is not needed
+ * -Use BIC insn instead of INVERTED + AND
+ * -Conditionally disable interrupts (if they are not enabled, don't disable)
+*/
+
+#ifdef __KERNEL__
+
+#include <asm/arcregs.h>
+
+#ifndef __ASSEMBLY__
+
+/******************************************************************
+ * IRQ Control Macros
+ ******************************************************************/
+
+/*
+ * Save IRQ state and disable IRQs
+ */
+static inline long arch_local_irq_save(void)
+{
+ unsigned long temp, flags;
+
+ __asm__ __volatile__(
+ " lr %1, [status32] \n"
+ " bic %0, %1, %2 \n"
+ " and.f 0, %1, %2 \n"
+ " flag.nz %0 \n"
+ : "=r"(temp), "=r"(flags)
+ : "n"((STATUS_E1_MASK | STATUS_E2_MASK))
+ : "cc");
+
+ return flags;
+}
+
+/*
+ * restore saved IRQ state
+ */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+
+ __asm__ __volatile__(
+ " flag %0 \n"
+ :
+ : "r"(flags));
+}
+
+/*
+ * Unconditionally Enable IRQs
+ */
+extern void arch_local_irq_enable(void);
+
+/*
+ * Unconditionally Disable IRQs
+ */
+static inline void arch_local_irq_disable(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ " and %0, %0, %1 \n"
+ " flag %0 \n"
+ : "=&r"(temp)
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
+}
+
+/*
+ * save IRQ state
+ */
+static inline long arch_local_save_flags(void)
+{
+ unsigned long temp;
+
+ __asm__ __volatile__(
+ " lr %0, [status32] \n"
+ : "=&r"(temp));
+
+ return temp;
+}
+
+/*
+ * Query IRQ state
+ */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & (STATUS_E1_MASK
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+ | STATUS_E2_MASK
+#endif
+ ));
+}
+
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
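+
+/*
+ * Typical usage is via the generic wrappers in <linux/irqflags.h>, which
+ * map onto the arch_* helpers above (generic sketch, not ARC specific):
+ *
+ * unsigned long flags;
+ * local_irq_save(flags); maps to arch_local_irq_save()
+ * ...critical section...
+ * local_irq_restore(flags); maps to arch_local_irq_restore()
+ */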
+
+static inline void arch_mask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb &= ~(1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+static inline void arch_unmask_irq(unsigned int irq)
+{
+ unsigned int ienb;
+
+ ienb = read_aux_reg(AUX_IENABLE);
+ ienb |= (1 << irq);
+ write_aux_reg(AUX_IENABLE, ienb);
+}
+
+#else
+
+.macro IRQ_DISABLE scratch
+ lr \scratch, [status32]
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_DISABLE_SAVE scratch, save
+ lr \scratch, [status32]
+ mov \save, \scratch /* Make a copy */
+ bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+.macro IRQ_ENABLE scratch
+ lr \scratch, [status32]
+ or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
+ flag \scratch
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* KERNEL */
+
+#endif
diff --git a/arch/arc/include/asm/kdebug.h b/arch/arc/include/asm/kdebug.h
new file mode 100644
index 00000000000..3fbe6c472c0
--- /dev/null
+++ b/arch/arc/include/asm/kdebug.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_KDEBUG_H
+#define _ASM_ARC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_IERR,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
new file mode 100644
index 00000000000..f3c4934f0ca
--- /dev/null
+++ b/arch/arc/include/asm/kgdb.h
@@ -0,0 +1,61 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_KGDB_H__
+#define __ARC_KGDB_H__
+
+#ifdef CONFIG_KGDB
+
+#include <asm/user.h>
+
+/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
+ * register API yet */
+#undef DBG_MAX_REG_NUM
+
+#define GDB_MAX_REGS 39
+
+#define BREAK_INSTR_SIZE 2
+#define CACHE_FLUSH_IS_SAFE 1
+#define NUMREGBYTES (GDB_MAX_REGS * 4)
+#define BUFMAX 2048
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ("trap_s 0x4\n");
+}
+
+extern void kgdb_trap(struct pt_regs *regs, int param);
+
+enum arc700_linux_regnums {
+ _R0 = 0,
+ _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
+ _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
+ _R25, _R26,
+ _BTA = 27,
+ _LP_START = 28,
+ _LP_END = 29,
+ _LP_COUNT = 30,
+ _STATUS32 = 31,
+ _BLINK = 32,
+ _FP = 33,
+ __SP = 34,
+ _EFA = 35,
+ _RET = 36,
+ _ORIG_R8 = 37,
+ _STOP_PC = 38
+};
+
+#else
+static inline void kgdb_trap(struct pt_regs *regs, int param)
+{
+}
+#endif
+
+#endif /* __ARC_KGDB_H__ */
diff --git a/arch/arc/include/asm/kprobes.h b/arch/arc/include/asm/kprobes.h
new file mode 100644
index 00000000000..4d9c211fce7
--- /dev/null
+++ b/arch/arc/include/asm/kprobes.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARC_KPROBES_H
+#define _ARC_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+
+typedef u16 kprobe_opcode_t;
+
+#define UNIMP_S_INSTRUCTION 0x79e0
+#define TRAP_S_2_INSTRUCTION 0x785e
+
+#define MAX_INSN_SIZE 8
+#define MAX_STACK_SIZE 64
+
+struct arch_specific_insn {
+ int is_short;
+ kprobe_opcode_t *t1_addr, *t2_addr;
+ kprobe_opcode_t t1_opcode, t2_opcode;
+};
+
+#define flush_insn_slot(p) do { } while (0)
+
+#define kretprobe_blacklist_size 0
+
+struct kprobe;
+
+void arch_remove_kprobe(struct kprobe *p);
+
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data);
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned long status;
+};
+
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ struct pt_regs jprobe_saved_regs;
+ char jprobes_stack[MAX_STACK_SIZE];
+ struct prev_kprobe prev_kprobe;
+};
+
+int kprobe_fault_handler(struct pt_regs *regs, unsigned long cause);
+void kretprobe_trampoline(void);
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs);
+#else
+static inline void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+}
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
new file mode 100644
index 00000000000..0283e9e44e0
--- /dev/null
+++ b/arch/arc/include/asm/linkage.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_LINKAGE_H
+#define __ASM_LINKAGE_H
+
+#ifdef __ASSEMBLY__
+
+/* Can't use the ENTRY macro in linux/linkage.h
+ * gas considers ';' as comment vs. newline
+ */
+.macro ARC_ENTRY name
+ .global \name
+ .align 4
+ \name:
+.endm
+
+.macro ARC_EXIT name
+#define ASM_PREV_SYM_ADDR(name) .-##name
+ .size \name, ASM_PREV_SYM_ADDR(\name)
+.endm
+
+/* annotation for data we want in DCCM - if enabled in .config */
+.macro ARCFP_DATA nm
+#ifdef CONFIG_ARC_HAS_DCCM
+ .section .data.arcfp
+#else
+ .section .data
+#endif
+ .global \nm
+.endm
+
+/* annotation for code we want in ICCM - if enabled in .config */
+.macro ARCFP_CODE
+#ifdef CONFIG_ARC_HAS_ICCM
+ .section .text.arcfp, "ax",@progbits
+#else
+ .section .text, "ax",@progbits
+#endif
+.endm
+
+#else /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_ARC_HAS_ICCM
+#define __arcfp_code __attribute__((__section__(".text.arcfp")))
+#else
+#define __arcfp_code __attribute__((__section__(".text")))
+#endif
+
+#ifdef CONFIG_ARC_HAS_DCCM
+#define __arcfp_data __attribute__((__section__(".data.arcfp")))
+#else
+#define __arcfp_data __attribute__((__section__(".data")))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
new file mode 100644
index 00000000000..9998dc846eb
--- /dev/null
+++ b/arch/arc/include/asm/mach_desc.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * based on METAG mach/arch.h (which in turn was based on ARM)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MACH_DESC_H_
+#define _ASM_ARC_MACH_DESC_H_
+
+/**
+ * struct machine_desc - Board specific callbacks, called from ARC common code
+ * Provided by each ARC board using MACHINE_START()/MACHINE_END(), so that
+ * a multi-platform kernel builds with an array of such descriptors.
+ * We extend the early DT scan to also match the DT's "compatible" string
+ * against the @dt_compat of all such descriptors, and the one with the
+ * highest "DT score" is selected as the global @machine_desc.
+ *
+ * @name: Board/SoC name
+ * @dt_compat: Array of device tree 'compatible' strings
+ * (XXX: although only 1st entry is looked at)
+ * @init_early: Very early callback [called from setup_arch()]
+ * @init_irq: setup external IRQ controllers [called from init_IRQ()]
+ * @init_smp: for each CPU (e.g. setup IPI)
+ * [(M):init_IRQ(), (o):start_kernel_secondary()]
+ * @init_time: platform specific clocksource/clockevent registration
+ * [called from time_init()]
+ * @init_machine: arch initcall level callback (e.g. populate static
+ * platform devices or parse Devicetree)
+ * @init_late: Late initcall level callback
+ *
+ */
+struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+
+ void (*init_early)(void);
+ void (*init_irq)(void);
+#ifdef CONFIG_SMP
+ void (*init_smp)(unsigned int);
+#endif
+ void (*init_time)(void);
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+};
+
+/*
+ * Current machine - only accessible during boot.
+ */
+extern struct machine_desc *machine_desc;
+
+/*
+ * Machine type table - also only accessible during boot
+ */
+extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+#define for_each_machine_desc(p) \
+ for (p = __arch_info_begin; p < __arch_info_end; p++)
+
+static inline struct machine_desc *default_machine_desc(void)
+{
+ /* the default machine is the last one linked in */
+ if (__arch_info_end - 1 < __arch_info_begin)
+ return NULL;
+ return __arch_info_end - 1;
+}
+
+/*
+ * Set of macros to define architecture features.
+ * This is built into a table by the linker.
+ */
+#define MACHINE_START(_type, _name) \
+static const struct machine_desc __mach_desc_##_type \
+__used \
+__attribute__((__section__(".arch.info.init"))) = { \
+ .name = _name,
+
+#define MACHINE_END \
+};
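+
+/*
+ * Illustrative sketch (hypothetical board; the "snps,board-xyz" string and
+ * callback names are made up): a platform registers itself roughly as
+ *
+ * static const char *board_xyz_compat[] = { "snps,board-xyz", NULL };
+ *
+ * MACHINE_START(BOARD_XYZ, "board-xyz")
+ * .dt_compat = board_xyz_compat,
+ * .init_early = board_xyz_init_early,
+ * .init_irq = board_xyz_init_irq,
+ * MACHINE_END
+ */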
+
+extern struct machine_desc *setup_machine_fdt(void *dt);
+extern void __init copy_devtree(void);
+
+#endif
diff --git a/arch/arc/include/asm/mmu.h b/arch/arc/include/asm/mmu.h
new file mode 100644
index 00000000000..56b02320f1a
--- /dev/null
+++ b/arch/arc/include/asm/mmu.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMU_H
+#define _ASM_ARC_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+ unsigned long asid; /* Pvt Addr-Space ID for mm */
+#ifdef CONFIG_ARC_TLB_DBG
+ struct task_struct *tsk;
+#endif
+} mm_context_t;
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
new file mode 100644
index 00000000000..0d71fb11b57
--- /dev/null
+++ b/arch/arc/include/asm/mmu_context.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Refactored get_new_mmu_context( ) to only handle live-mm.
+ * retiring-mm handled in other hooks
+ *
+ * Vineetg: March 25th, 2008: Bug #92690
+ * -Major rewrite of Core ASID allocation routine get_new_mmu_context
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_MMU_CONTEXT_H
+#define _ASM_ARC_MMU_CONTEXT_H
+
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+
+#include <asm-generic/mm_hooks.h>
+
+/* ARC700 ASID Management
+ *
+ * ARC MMU provides 8-bit ASID (0..255) to TAG TLB entries, allowing entries
+ * with same vaddr (different tasks) to co-exist. This provides for
+ * "Fast Context Switch" i.e. no TLB flush on ctxt-switch
+ *
+ * Linux assigns each task a unique ASID. A simple round-robin allocation
+ * of H/w ASID is done using software tracker @asid_cache.
+ * When it reaches max 255, the allocation cycle starts afresh by flushing
+ * the entire TLB and wrapping ASID back to zero.
+ *
+ * For book-keeping, Linux uses a couple of data-structures:
+ * -mm_struct has an @asid field to keep a note of task's ASID (needed at the
+ * time of say switch_mm( ))
+ * -An array of mm structs @asid_mm_map[] for the asid->mm reverse mapping:
+ * given an ASID, find the mm struct associated with it.
+ *
+ * The round-robin allocation algorithm allows for ASID stealing.
+ * If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
+ * already assigned to another (switched-out) task. Obviously the prev owner
+ * is marked with an invalid ASID to make it request for a new ASID when it
+ * gets scheduled next time. However its TLB entries (with ASID "x") could
+ * exist, which must be cleared before the same ASID is used by the new owner.
+ * Flushing them would be a plausible but costly solution. Instead we force an
+ * allocation policy quirk, which ensures that a stolen ASID won't have any
+ * TLB entries associated, alleviating the need to flush.
+ * The quirk essentially is not allowing an ASID allocated in the prev cycle
+ * to be used past a roll-over in the next cycle.
+ * When this happens (i.e. task ASID > asid tracker), the task needs to
+ * refresh its ASID, aligning it to the current value of the tracker. If the
+ * task doesn't get scheduled past a roll-over, and hence its ASID is not yet
+ * realigned with the tracker, such an ASID is anyway safely reusable because
+ * it is guaranteed that TLB entries with that ASID won't exist.
+ */
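+
+/*
+ * Worked example of the above (illustrative numbers): suppose asid_cache has
+ * rolled over and is now at 2, while a long-sleeping task T still holds ASID
+ * 200 from the previous cycle. Since 200 > asid_cache, switch_mm( ) below
+ * forces T through get_new_mmu_context( ), so T never runs with the stale
+ * ASID 200. If some other task is later handed 200 in this cycle, it is thus
+ * guaranteed to find no leftover TLB entries tagged with it.
+ */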
+
+#define FIRST_ASID 0
+#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
+#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
+#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
+
+/* ASID to mm struct mapping */
+extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+extern int asid_cache;
+
+/*
+ * Assign a new ASID to task. If the task already has an ASID, it is
+ * relinquished.
+ */
+static inline void get_new_mmu_context(struct mm_struct *mm)
+{
+ struct mm_struct *prev_owner;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+ * Relinquish the currently owned ASID (if any).
+ * Doing it unconditionally saves a cmp-n-branch; for an already unused
+ * ASID slot, the value was/remains NULL
+ */
+ asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
+
+ /* move to new ASID */
+ if (++asid_cache > MAX_ASID) { /* ASID roll-over */
+ asid_cache = FIRST_ASID;
+ flush_tlb_all();
+ }
+
+ /*
+ * Is the next ASID already owned by someone else (we are stealing it)?
+ * If so, let the orig owner be aware of this, so when it runs, it
+ * asks for a brand new ASID. This would only happen for a long-lived
+ * task with ASID from prev allocation cycle (before ASID roll-over).
+ *
+ * This might look wrong - if we are re-using some other task's ASID,
+ * won't we use its stale TLB entries too. Actually switch_mm( ) takes
+ * care of such a case: it ensures that a task with an ASID from the prev
+ * alloc cycle, when scheduled, will refresh its ASID: see switch_mm( ) below.
+ * The stealing scenario described here will only happen if that task
+ * didn't get a chance to refresh its ASID - implying stale entries
+ * won't exist.
+ */
+ prev_owner = asid_mm_map[asid_cache];
+ if (prev_owner)
+ prev_owner->context.asid = NO_ASID;
+
+ /* Assign new ASID to tsk */
+ asid_mm_map[asid_cache] = mm;
+ mm->context.asid = asid_cache;
+
+#ifdef CONFIG_ARC_TLB_DBG
+ pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
+ " pid:%u, assigned asid:%lu\n",
+ (unsigned int)mm, (unsigned int)prev_owner,
+ (unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
+ (mm->context.tsk)->pid, mm->context.asid);
+#endif
+
+ write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Initialize the context related info for a new mm_struct
+ * instance.
+ */
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ mm->context.asid = NO_ASID;
+#ifdef CONFIG_ARC_TLB_DBG
+ mm->context.tsk = tsk;
+#endif
+ return 0;
+}
+
+/* Prepare the MMU for the task: setup PID reg with allocated ASID.
+ If the task doesn't have an ASID (never allocated, or stolen), get a new ASID
+*/
+static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+#ifndef CONFIG_SMP
+ /* PGD cached in MMU reg to avoid 3 mem lookups: task->mm->pgd */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /*
+ * Get a new ASID if task doesn't have a valid one. Possible when
+ * -task never had an ASID (fresh after fork)
+ * -its ASID was stolen - past an ASID roll-over.
+ * -There's a third obscure scenario (if this task is running for the
+ * first time after an ASID rollover), where despite having a valid
+ * ASID, we force a get for new ASID - see comments at top.
+ *
+ * Both the non-alloc scenario and first-use-after-rollover can be
+ * detected using the single condition below: NO_ASID = 256
+ * while asid_cache is always a valid ASID value (0-255).
+ */
+ if (next->context.asid > asid_cache) {
+ get_new_mmu_context(next);
+ } else {
+ /*
+ * XXX: This will never happen given the chks above
+ * BUG_ON(next->context.asid > MAX_ASID);
+ */
+ write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
+ }
+
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ asid_mm_map[mm->context.asid] = NULL;
+ mm->context.asid = NO_ASID;
+
+ local_irq_restore(flags);
+}
+
+/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
+ * for retiring-mm. However destroy_context( ) still needs to do that because
+ * between mm_release( ) => deactivate_mm( ) and
+ * mmput => .. => __mmdrop( ) => destroy_context( )
+ * there is a good chance that the task gets sched-out/in, making its ASID valid
+ * again (this teased me for a whole day).
+ */
+#define deactivate_mm(tsk, mm) do { } while (0)
+
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+#ifndef CONFIG_SMP
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
+#endif
+
+ /* Unconditionally get a new ASID */
+ get_new_mmu_context(next);
+
+}
+
+#define enter_lazy_tlb(mm, tsk)
+
+#endif /* __ASM_ARC_MMU_CONTEXT_H */
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
new file mode 100644
index 00000000000..518222bb3f8
--- /dev/null
+++ b/arch/arc/include/asm/module.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+
+ */
+
+#ifndef _ASM_ARC_MODULE_H
+#define _ASM_ARC_MODULE_H
+
+#include <asm-generic/module.h>
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+struct mod_arch_specific {
+ void *unw_info;
+ int unw_sec_idx;
+};
+#endif
+
+#define MODULE_PROC_FAMILY "ARC700"
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_ARC_MODULE_H */
diff --git a/arch/arc/include/asm/mutex.h b/arch/arc/include/asm/mutex.h
new file mode 100644
index 00000000000..a2f88ff9f50
--- /dev/null
+++ b/arch/arc/include/asm/mutex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * xchg() based mutex fast path maintains a state of 0 or 1, as opposed to
+ * atomic dec based which can "count" any number of lock contenders.
+ * This ideally needs to be fixed in core, but for now switching to dec ver.
+ */
+#if defined(CONFIG_SMP) && (CONFIG_NR_CPUS > 2)
+#include <asm-generic/mutex-dec.h>
+#else
+#include <asm-generic/mutex-xchg.h>
+#endif
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
new file mode 100644
index 00000000000..bdf54610455
--- /dev/null
+++ b/arch/arc/include/asm/page.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASM_ARC_PAGE_H
+#define __ASM_ARC_PAGE_H
+
+#include <uapi/asm/page.h>
+
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+/* TBD: for now don't worry about VIPT D$ aliasing */
+#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(addr, vaddr, pg) clear_page(addr)
+#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#define pte_pgprot(x) __pgprot(pte_val(x))
+
+#else /* !STRICT_MM_TYPECHECKS */
+
+typedef unsigned long pte_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+typedef unsigned long pgtable_t;
+
+#define pte_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+#define __pte(x) (x)
+#define __pgprot(x) (x)
+#define pte_pgprot(x) (x)
+
+#endif
+
+#define ARCH_PFN_OFFSET (CONFIG_LINUX_LINK_BASE >> PAGE_SHIFT)
+
+#define pfn_valid(pfn) (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+
+/*
+ * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
+ *
+ * These macros have historically been misnamed
+ * virt here means link-address/program-address as embedded in object code.
+ * So if kernel img is linked at 0x8000_0000 onwards, 0x8010_0000 will be
+ * the 128th page, and virt_to_page( ) will return the struct page corresponding to it.
+ * mem_map[ ] is an array of struct page for each page frame in the system
+ *
+ * Independent of where linux is linked at, link-addr = physical address
+ * So the old macro __pa = vaddr + PAGE_OFFSET - CONFIG_LINUX_LINK_BASE
+ * would have been wrong had the kernel been linked at a base other than 0x8000_0000
+ */
+#define __pa(vaddr) ((unsigned long)vaddr)
+#define __va(paddr) ((void *)((unsigned long)(paddr)))
+
+#define virt_to_page(kaddr) \
+ (mem_map + ((__pa(kaddr) - CONFIG_LINUX_LINK_BASE) >> PAGE_SHIFT))
+
+#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Default Permissions for page, used in mmap.c */
+#ifdef CONFIG_ARC_STACK_NONEXEC
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#else
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#endif
+
+#define WANT_PAGE_VIRTUAL 1
+
+#include <asm-generic/memory_model.h> /* page_to_pfn, pfn_to_page */
+#include <asm-generic/getorder.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif
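
A worked example of the identity __pa/__va and virt_to_page( ) above, assuming
the typical CONFIG_LINUX_LINK_BASE of 0x8000_0000 and 8K pages (values purely
illustrative):

    kaddr               = 0x8010_0000
    __pa(kaddr)         = 0x8010_0000                  (identity: link-addr == phys)
    pfn                 = __pa(kaddr) >> 13            = 0x40080
    ARCH_PFN_OFFSET     = 0x8000_0000 >> 13            = 0x40000
    virt_to_page(kaddr) = mem_map + (0x0010_0000 >> 13) = mem_map + 128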
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
new file mode 100644
index 00000000000..115ad96480e
--- /dev/null
+++ b/arch/arc/include/asm/perf_event.h
@@ -0,0 +1,13 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_PERF_EVENT_H
+#define __ASM_PERF_EVENT_H
+
+#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
new file mode 100644
index 00000000000..36a9f20c21a
--- /dev/null
+++ b/arch/arc/include/asm/pgalloc.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2011
+ * -"/proc/meminfo | grep PageTables" kept on increasing
+ * Recently added pgtable dtor was not getting called.
+ *
+ * vineetg: May 2011
+ * -Variable pg-sz means that Page Tables could be variable sized themselves
+ * So calculate it based on addr traversal split [pgd-bits:pte-bits:xxx]
+ * -Page Table size capped to max 1 page to save memory - hence verified.
+ * -Since these deal with constants, gcc compile-time optimizes them.
+ *
+ * vineetg: Nov 2010
+ * -Added pgtable ctor/dtor used for pgtable mem accounting
+ *
+ * vineetg: April 2010
+ * -Switched pgtable_t from being struct page * to unsigned long
+ * =Needed so that Page Table allocator (pte_alloc_one) is not forced
+ * to deal with struct page. That way in future we can make it allocate
+ * multiple PG Tbls in one Page Frame
+ * =sweet side effect is avoiding calls to ugly page_address( ) from the
+ * pg-tbl allocator sub-sys (pte_alloc_one, pte_free, pmd_populate)
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGALLOC_H
+#define _ASM_ARC_PGALLOC_H
+
+#include <linux/mm.h>
+#include <linux/log2.h>
+
+static inline void
+pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
+{
+ pmd_set(pmd, pte);
+}
+
+static inline void
+pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
+{
+ pmd_set(pmd, (pte_t *) ptep);
+}
+
+static inline int __get_order_pgd(void)
+{
+ return get_order(PTRS_PER_PGD * 4);
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ int num, num2;
+ pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+
+ if (ret) {
+ num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
+ memzero(ret, num * sizeof(pgd_t));
+
+ num2 = VMALLOC_SIZE / PGDIR_SIZE;
+ memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t));
+
+ memzero(ret + num + num2,
+ (PTRS_PER_PGD - num - num2) * sizeof(pgd_t));
+
+ }
+ return ret;
+}
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+ free_pages((unsigned long)pgd, __get_order_pgd());
+}
+
+
+/*
+ * With software-only page-tables, addr-split for traversal is tweakable and
+ * that directly governs how big tables would be at each level.
+ * Further, the MMU page size is configurable.
+ * Thus we need to programmatically assert the size constraint.
+ * All of this is const math, allowing gcc to do constant folding/propagation.
+ */
+
+static inline int __get_order_pte(void)
+{
+ return get_order(PTRS_PER_PTE * 4);
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+ unsigned long address)
+{
+ pte_t *pte;
+
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+ __get_order_pte());
+
+ return pte;
+}
+
+static inline pgtable_t
+pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+ pgtable_t pte_pg;
+
+ pte_pg = __get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+ if (pte_pg) {
+ memzero((void *)pte_pg, PTRS_PER_PTE * 4);
+ pgtable_page_ctor(virt_to_page(pte_pg));
+ }
+
+ return pte_pg;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+ free_pages((unsigned long)pte, __get_order_pte()); /* takes phy addr */
+}
+
+static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+{
+ pgtable_page_dtor(virt_to_page(ptep));
+ free_pages(ptep, __get_order_pte());
+}
+
+#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
+
+#define check_pgt_cache() do { } while (0)
+#define pmd_pgtable(pmd) pmd_page_vaddr(pmd)
+
+#endif /* _ASM_ARC_PGALLOC_H */
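
A worked sizing example for pgd_alloc( ) above, assuming 8K pages and the usual
0x8000_0000 link base (so TASK_SIZE = 0x6000_0000 with 256M of gutter and 256M
of vmalloc, as laid out in processor.h later in this patch):

    PGDIR_SIZE  = 2 MB, PTRS_PER_PGD = 2048  -> pgd table = 8 KB (one 8K page)
    num   = 768 (user) + 128 (gutter)        = 896 entries zeroed
    num2  = 256M / 2M                        = 128 entries copied from swapper_pg_dir
    remaining 2048 - 1024                    = 1024 entries (untranslated upper 2 GB) zeroed
    PTRS_PER_PTE = 256 -> a page table needs only 1 KB, though
                          __get_free_pages( ) still hands back a full 8 KB page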
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
new file mode 100644
index 00000000000..b7e36684c09
--- /dev/null
+++ b/arch/arc/include/asm/pgtable.h
@@ -0,0 +1,405 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
+ * They are semantically the same although in different contexts
+ * VALID marks a TLB entry exists and it will only happen if PRESENT
+ * - Utilise some unused free bits to confine PTE flags to 12 bits
+ * This is a must for 4k pg-sz
+ *
+ * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
+ * -TLB Locking never really existed, except for initial specs
+ * -SILENT_xxx not needed for our port
+ * -Per my request, MMU V3 changes the layout of some of the bits
+ * to avoid a few shifts in TLB Miss handlers.
+ *
+ * vineetg: April 2010
+ * -PGD entry no longer contains any flags. If empty it is 0, otherwise has
+ * Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set( ) become simpler
+ *
+ * vineetg: April 2010
+ * -Switched from 8:11:13 split for page table lookup to 11:8:13
+ * -this speeds up page table allocation itself as we now have to memset 1K
+ * instead of 8k per page table.
+ * -TODO: Right now page table alloc is 8K and rest 7K is unused
+ * need to optimise it
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_PGTABLE_H
+#define _ASM_ARC_PGTABLE_H
+
+#include <asm/page.h>
+#include <asm/mmu.h>
+#include <asm-generic/pgtable-nopmd.h>
+
+/**************************************************************************
+ * Page Table Flags
+ *
+ * ARC700 MMU only deals with software managed TLB entries.
+ * Page Tables are purely for Linux VM's consumption and the bits below are
+ * suited to that (uniqueness). Hence some are not implemented in the TLB and
+ * some have different value in TLB.
+ * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
+ * in separate PD0 and PD1, which combined form a translation entry)
+ * while from the PTE perspective, they are 8 and 9 respectively
+ * with MMU v3: Most bits (except SHARED) represent the exact hardware pos
+ * (saves some bit shift ops in TLB Miss hdlrs)
+ */
+
+#if (CONFIG_ARC_MMU_VER <= 2)
+
+#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
+#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<5) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
+#define _PAGE_GLOBAL (1<<9) /* Page is global (H) */
+#define _PAGE_MODIFIED (1<<10) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<10) /* page cache/ swap (S) */
+#define _PAGE_PRESENT (1<<11) /* TLB entry is valid (H) */
+
+#else
+
+/* PD1 */
+#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
+#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */
+#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */
+#define _PAGE_READ (1<<3) /* Page has user read perm (H) */
+#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
+#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
+#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
+#define _PAGE_ACCESSED (1<<7) /* Page is accessed (S) */
+
+/* PD0 */
+#define _PAGE_GLOBAL (1<<8) /* Page is global (H) */
+#define _PAGE_PRESENT (1<<9) /* TLB entry is valid (H) */
+#define _PAGE_SHARED_CODE (1<<10) /* Shared Code page with cmn vaddr
+ usable for shared TLB entries (H) */
+
+#define _PAGE_MODIFIED (1<<11) /* Page modified (dirty) (S) */
+#define _PAGE_FILE (1<<12) /* page cache/ swap (S) */
+
+#define _PAGE_SHARED_CODE_H (1<<31) /* Hardware counterpart of above */
+#endif
+
+/* Kernel allowed all permissions for all pages */
+#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifdef CONFIG_ARC_CACHE_PAGES
+#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
+#else
+#define _PAGE_DEF_CACHEABLE (0)
+#endif
+
+/* Helper for every "user" page
+ * -kernel can R/W/X
+ * -by default cached, unless config otherwise
+ * -present in memory
+ */
+#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
+
+/* More abbreviated helpers */
+#define PAGE_U_NONE __pgprot(___DEF)
+#define PAGE_U_R __pgprot(___DEF | _PAGE_READ)
+#define PAGE_U_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_U_X_R __pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
+#define PAGE_U_X_W_R __pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_EXECUTE)
+
+#define PAGE_SHARED PAGE_U_W_R
+
+/* While the kernel runs out of untranslated space, vmalloc/modules use a chunk
+ * of kernel vaddr space - visible in all addr spaces, but kernel mode only
+ * Thus Global, all-kernel-access, no-user-access, cached
+ */
+#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL)
+
+/* ioremap */
+#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \
+ _PAGE_GLOBAL)
+
+/**************************************************************************
+ * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
+ *
+ * Certain cases have 1:1 mapping
+ * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
+ * which directly corresponds to PAGE_U_X_R
+ *
+ * Other rules which cause the divergence from 1:1 mapping
+ *
+ * 1. Although ARC700 can do exclusive execute/write protection (meaning R
+ * can be tracked independent of X/W unlike some other CPUs), still to
+ * keep things consistent with other archs:
+ * -Write implies Read: W => R
+ * -Execute implies Read: X => R
+ *
+ * 2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
+ * This is to enable COW mechanism
+ */
+ /* xwr */
+#define __P000 PAGE_U_NONE
+#define __P001 PAGE_U_R
+#define __P010 PAGE_U_R /* Pvt-W => !W */
+#define __P011 PAGE_U_R /* Pvt-W => !W */
+#define __P100 PAGE_U_X_R /* X => R */
+#define __P101 PAGE_U_X_R
+#define __P110 PAGE_U_X_R /* Pvt-W => !W and X => R */
+#define __P111 PAGE_U_X_R /* Pvt-W => !W */
+
+#define __S000 PAGE_U_NONE
+#define __S001 PAGE_U_R
+#define __S010 PAGE_U_W_R /* W => R */
+#define __S011 PAGE_U_W_R
+#define __S100 PAGE_U_X_R /* X => R */
+#define __S101 PAGE_U_X_R
+#define __S110 PAGE_U_X_W_R /* X => R */
+#define __S111 PAGE_U_X_W_R
+
+/****************************************************************
+ * Page Table Lookup split
+ *
+ * We implement 2 tier paging and since this is all software, we are free
+ * to customize the span of a PGD / PTE entry to suit us
+ *
+ * 32 bit virtual address
+ * -------------------------------------------------------
+ * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
+ * -------------------------------------------------------
+ * | | |
+ * | | --> off in page frame
+ * | |
+ * | ---> index into Page Table
+ * |
+ * ----> index into Page Directory
+ */
+
+#define BITS_IN_PAGE PAGE_SHIFT
+
+/* Optimal Sizing of Pg Tbl - based on MMU page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_8K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define BITS_FOR_PTE 8
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define BITS_FOR_PTE 9
+#endif
+
+#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+
+#define PGDIR_SHIFT (BITS_FOR_PTE + BITS_IN_PAGE)
+#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PGD sz */
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+
+#ifdef __ASSEMBLY__
+#define PTRS_PER_PTE (1 << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1 << BITS_FOR_PGD)
+#else
+#define PTRS_PER_PTE (1UL << BITS_FOR_PTE)
+#define PTRS_PER_PGD (1UL << BITS_FOR_PGD)
+#endif
+/*
+ * Number of entries a userland program can use.
+ * TASK_SIZE is the maximum vaddr that can be used by a userland program.
+ */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/*
+ * No special requirements for lowest virtual address we permit any user space
+ * mapping to be mapped at.
+ */
+#define FIRST_USER_ADDRESS 0
+
+
+/****************************************************************
+ * Bucket load of VM Helpers
+ */
+
+#ifndef __ASSEMBLY__
+
+#define pte_ERROR(e) \
+ pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#define pgd_ERROR(e) \
+ pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+
+/* the zero page used for uninitialized and anonymous pages */
+extern char empty_zero_page[PAGE_SIZE];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#define pte_unmap(pte) do { } while (0)
+#define pte_unmap_nested(pte) do { } while (0)
+
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+/* find the page descriptor of the Page Tbl ref by PMD entry */
+#define pmd_page(pmd) virt_to_page(pmd_val(pmd) & PAGE_MASK)
+
+/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
+#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
+
+/* In a 2 level sys, setup the PGD entry with PTE value */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+ pmd_val(*pmdp) = (unsigned long)ptep;
+}
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))
+
+#define pmd_none(x) (!pmd_val(x))
+#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK))
+#define pmd_present(x) (pmd_val(x))
+#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)
+
+#define pte_page(x) (mem_map + \
+ (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+
+#define mk_pte(page, pgprot) \
+({ \
+ pte_t pte; \
+ pte_val(pte) = __pa(page_address(page)) + pgprot_val(pgprot); \
+ pte; \
+})
+
+/* TBD: Non linear mapping stuff */
+static inline int pte_file(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_FILE;
+}
+
+#define PTE_FILE_MAX_BITS 30
+#define pgoff_to_pte(x) __pte(x)
+#define pte_to_pgoff(x) (pte_val(x) >> 2)
+#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
+/*
+ * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
+ * and returns ptr to PTE entry corresponding to @addr
+ */
+#define pte_offset(dir, addr) ((pte_t *)(pmd_page_vaddr(*dir)) +\
+ __pte_index(addr))
+
+/* No mapping of Page Tables in high mem etc, so following same as above */
+#define pte_offset_kernel(dir, addr) pte_offset(dir, addr)
+#define pte_offset_map(dir, addr) pte_offset(dir, addr)
+
+/* Zoo of pte_xxx function */
+#define pte_read(pte) (pte_val(pte) & _PAGE_READ)
+#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE)
+#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED)
+#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
+#define pte_special(pte) (0)
+
+#define PTE_BIT_FUNC(fn, op) \
+ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+
+PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE));
+PTE_BIT_FUNC(mkwrite, |= (_PAGE_WRITE));
+PTE_BIT_FUNC(mkclean, &= ~(_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkdirty, |= (_PAGE_MODIFIED));
+PTE_BIT_FUNC(mkold, &= ~(_PAGE_ACCESSED));
+PTE_BIT_FUNC(mkyoung, |= (_PAGE_ACCESSED));
+PTE_BIT_FUNC(exprotect, &= ~(_PAGE_EXECUTE));
+PTE_BIT_FUNC(mkexec, |= (_PAGE_EXECUTE));
+
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+/* Macro to mark a page protection as uncacheable */
+#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ set_pte(ptep, pteval);
+}
+
+/*
+ * All kernel related VM pages are in init's mm.
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)
+#define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
+
+/*
+ * Macro to quickly access the PGD entry, utilising the fact that some
+ * arch may cache the pointer to Page Directory of "current" task
+ * in a MMU register
+ *
+ * Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
+ * becomes a register read
+ *
+ * ********CAUTION*******:
+ * Kernel code might be dealing with some mm_struct of NON "current"
+ * Thus use this macro only when you are certain that "current" is current
+ * e.g. when dealing with signal frame setup code etc
+ */
+#ifndef CONFIG_SMP
+#define pgd_offset_fast(mm, addr) \
+({ \
+ pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0); \
+ pgd_base + pgd_index(addr); \
+})
+#else
+#define pgd_offset_fast(mm, addr) pgd_offset(mm, addr)
+#endif
+
+extern void paging_init(void);
+extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
+
+/* Encode swap {type,off} tuple into PTE
+ * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
+ * both PAGE_FILE and PAGE_PRESENT are zero in a PTE holding swap "identifier"
+ */
+#define __swp_entry(type, off) ((swp_entry_t) { \
+ ((type) & 0x1f) | ((off) << 13) })
+
+/* Decode a PTE containing swap "identifier" into constituents */
+#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
+
+/* NOPs, to keep generic kernel happy */
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+#define kern_addr_valid(addr) (1)
+
+/*
+ * remap a physical page `pfn' of size `size' with page protection `prot'
+ * into virtual address `from'
+ */
+#define io_remap_pfn_range(vma, from, pfn, size, prot) \
+ remap_pfn_range(vma, from, pfn, size, prot)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init() do { } while (0)
+
+#endif /* __ASSEMBLY__ */
+
+#endif
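
Assuming 4-byte entries, the BITS_FOR_PGD : BITS_FOR_PTE : BITS_IN_PAGE split
above works out as follows for the three supported page sizes (illustrative
arithmetic only):

    4K  pages: 11 : 9 : 12 -> PGDIR_SIZE 2 MB, PGD 2048 entries (8 KB), PTE tbl 512 entries (2 KB)
    8K  pages: 11 : 8 : 13 -> PGDIR_SIZE 2 MB, PGD 2048 entries (8 KB), PTE tbl 256 entries (1 KB)
    16K pages: 10 : 8 : 14 -> PGDIR_SIZE 4 MB, PGD 1024 entries (4 KB), PTE tbl 256 entries (1 KB)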
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
new file mode 100644
index 00000000000..5f26b2c1cba
--- /dev/null
+++ b/arch/arc/include/asm/processor.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: March 2009
+ * -Implemented task_pt_regs( )
+ *
+ * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_PROCESSOR_H
+#define __ASM_ARC_PROCESSOR_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+#include <asm/arcregs.h> /* for STATUS_E1_MASK et all */
+
+/* Arch specific stuff which needs to be saved per task.
+ * However these items are not so important so as to earn a place in
+ * struct thread_info
+ */
+struct thread_struct {
+ unsigned long ksp; /* kernel mode stack pointer */
+ unsigned long callee_reg; /* pointer to callee regs */
+ unsigned long fault_address; /* doubles as brkpt holder as well */
+ unsigned long cause_code; /* Exception Cause Code (ECR) */
+#ifdef CONFIG_ARC_CURR_IN_REG
+ unsigned long user_r25;
+#endif
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ struct arc_fpu fpu;
+#endif
+};
+
+#define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) init_stack, \
+}
+
+/* Forward declaration, a strange C thing */
+struct task_struct;
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+unsigned long thread_saved_pc(struct task_struct *t);
+
+#define task_pt_regs(p) \
+ ((struct pt_regs *)(THREAD_SIZE - 4 + (void *)task_stack_page(p)) - 1)
+
+/* Free all resources held by a thread. */
+#define release_thread(thread) do { } while (0)
+
+/* Prepare to copy thread state - unlazy all lazy status */
+#define prepare_to_copy(tsk) do { } while (0)
+
+/*
+ * A lot of busy-wait loops in SMP are based off of non-volatile data which
+ * would otherwise get optimised away by gcc
+ */
+#ifdef CONFIG_SMP
+#define cpu_relax() __asm__ __volatile__ ("" : : : "memory")
+#else
+#define cpu_relax() do { } while (0)
+#endif
+
+#define copy_segments(tsk, mm) do { } while (0)
+#define release_segments(mm) do { } while (0)
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ret)
+
+/*
+ * Whereabouts of Task's sp, fp, blink when it was last seen in kernel mode.
+ * These can't be derived from pt_regs as that would give the corresponding
+ * user-mode values
+ */
+#define KSTK_ESP(tsk) (tsk->thread.ksp)
+#define KSTK_BLINK(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1+1)*4)))
+#define KSTK_FP(tsk) (*((unsigned int *)((KSTK_ESP(tsk)) + (13+1)*4)))
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ *
+ * E1,E2 so that Interrupts are enabled in user mode
+ * L set, so Loop inhibited to begin with
+ * lp_start and lp_end seeded with bogus non-zero values so as to easily catch
+ * the ARC700 sr to lp_start hardware bug
+ */
+#define start_thread(_regs, _pc, _usp) \
+do { \
+ set_fs(USER_DS); /* reads from user space */ \
+ (_regs)->ret = (_pc); \
+ /* Interrupts enabled in User Mode */ \
+ (_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK \
+ | STATUS_E1_MASK | STATUS_E2_MASK; \
+ (_regs)->sp = (_usp); \
+ /* bogus seed values for debugging */ \
+ (_regs)->lp_start = 0x10; \
+ (_regs)->lp_end = 0x80; \
+} while (0)
+
+extern unsigned int get_wchan(struct task_struct *p);
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ * Should the PC register be read instead? This macro does not seem to
+ * be used in many places so this won't be all that bad.
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+#endif /* !__ASSEMBLY__ */
+
+/* Kernel's Virtual memory area.
+ * Unlike other architectures (MIPS, sh, cris) ARC 700 does not have a
+ * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
+ * of the translated bottom 2GB for kernel virtual memory and protect
+ * these pages from user accesses by disabling Ru, Eu and Wu.
+ */
+#define VMALLOC_SIZE (0x10000000) /* 256M */
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+#define VMALLOC_END (PAGE_OFFSET)
+
+/* Most of the architectures seem to be keeping some kind of padding between
+ * userspace TASK_SIZE and PAGE_OFFSET, i.e. TASK_SIZE != PAGE_OFFSET.
+ */
+#define USER_KERNEL_GUTTER 0x10000000
+
+/* User address space:
+ * On ARC700, CPU allows the entire lower half of 32 bit address space to be
+ * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
+ * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
+ * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
+ * Thus total User vaddr space is (0:0x5FFF_FFFF)
+ */
+#define TASK_SIZE (PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/prom.h b/arch/arc/include/asm/prom.h
new file mode 100644
index 00000000000..692d0d0789a
--- /dev/null
+++ b/arch/arc/include/asm/prom.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_PROM_H_
+#define _ASM_ARC_PROM_H_
+
+#define HAVE_ARCH_DEVTREE_FIXUPS
+
+#endif
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
new file mode 100644
index 00000000000..8ae783d20a8
--- /dev/null
+++ b/arch/arc/include/asm/ptrace.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+#ifndef __ASM_ARC_PTRACE_H
+#define __ASM_ARC_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+
+#ifndef __ASSEMBLY__
+
+/* THE pt_regs: Defines how regs are saved during entry into kernel */
+
+struct pt_regs {
+ /*
+ * 1 word gutter after reg-file has been saved
+ * Technically not needed, Since SP always points to a "full" location
+ * (vs. "empty"). But pt_regs is shared with tools....
+ */
+ long res;
+
+ /* Real registers */
+ long bta; /* bta_l1, bta_l2, erbta */
+ long lp_start;
+ long lp_end;
+ long lp_count;
+ long status32; /* status32_l1, status32_l2, erstatus */
+ long ret; /* ilink1, ilink2 or eret */
+ long blink;
+ long fp;
+ long r26; /* gp */
+ long r12;
+ long r11;
+ long r10;
+ long r9;
+ long r8;
+ long r7;
+ long r6;
+ long r5;
+ long r4;
+ long r3;
+ long r2;
+ long r1;
+ long r0;
+ long sp; /* user/kernel sp depending on where we came from */
+ long orig_r0;
+
+ /* to distinguish between excp, syscall, irq */
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ /* so that assembly code is same for LE/BE */
+ unsigned long orig_r8:16, event:16;
+#else
+ unsigned long event:16, orig_r8:16;
+#endif
+ long orig_r8_word;
+ };
+};
+
+/* Callee saved registers - need to be saved only when you are scheduled out */
+
+struct callee_regs {
+ long res; /* Again this is not needed */
+ long r25;
+ long r24;
+ long r23;
+ long r22;
+ long r21;
+ long r20;
+ long r19;
+ long r18;
+ long r17;
+ long r16;
+ long r15;
+ long r14;
+ long r13;
+};
+
+#define instruction_pointer(regs) ((regs)->ret)
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* return 1 if user mode or 0 if kernel mode */
+#define user_mode(regs) (regs->status32 & STATUS_U_MASK)
+
+#define user_stack_pointer(regs)\
+({ unsigned int sp; \
+ if (user_mode(regs)) \
+ sp = (regs)->sp;\
+ else \
+ sp = -1; \
+ sp; \
+})
+
+/* return 1 if PC in delay slot */
+#define delay_mode(regs) ((regs->status32 & STATUS_DE_MASK) == STATUS_DE_MASK)
+
+#define in_syscall(regs) (regs->event & orig_r8_IS_SCALL)
+#define in_brkpt_trap(regs) (regs->event & orig_r8_IS_BRKPT)
+
+#define syscall_wont_restart(regs) (regs->event |= orig_r8_IS_SCALL_RESTARTED)
+#define syscall_restartable(regs) !(regs->event & orig_r8_IS_SCALL_RESTARTED)
+
+#define current_pt_regs() \
+({ \
+ /* open-coded current_thread_info() */ \
+ register unsigned long sp asm ("sp"); \
+ unsigned long pg_start = (sp & ~(THREAD_SIZE - 1)); \
+ (struct pt_regs *)(pg_start + THREAD_SIZE - 4) - 1; \
+})
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define orig_r8_IS_SCALL 0x0001
+#define orig_r8_IS_SCALL_RESTARTED 0x0002
+#define orig_r8_IS_BRKPT 0x0004
+#define orig_r8_IS_EXCPN 0x0004
+#define orig_r8_IS_IRQ1 0x0010
+#define orig_r8_IS_IRQ2 0x0020
+
+#endif /* __ASM_PTRACE_H */
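
A minimal usage sketch of the accessors above (illustrative only; the pr_debug
line is hypothetical and not part of this patch):

	if (user_mode(regs) && in_syscall(regs))
		pr_debug("syscall %u at pc %lx\n",
			 (unsigned int)regs->orig_r8,
			 instruction_pointer(regs));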
diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
new file mode 100644
index 00000000000..6fc1159dfef
--- /dev/null
+++ b/arch/arc/include/asm/sections.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SECTIONS_H
+#define _ASM_ARC_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _int_vec_base_lds[];
+extern char __arc_dccm_base[];
+extern char __dtb_start[];
+
+#endif
diff --git a/arch/arc/include/asm/segment.h b/arch/arc/include/asm/segment.h
new file mode 100644
index 00000000000..da2c4597981
--- /dev/null
+++ b/arch/arc/include/asm/segment.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASMARC_SEGMENT_H
+#define __ASMARC_SEGMENT_H
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#define KERNEL_DS MAKE_MM_SEG(0)
+#define USER_DS MAKE_MM_SEG(TASK_SIZE)
+
+#define segment_eq(a, b) ((a) == (b))
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASMARC_SEGMENT_H */
diff --git a/arch/arc/include/asm/serial.h b/arch/arc/include/asm/serial.h
new file mode 100644
index 00000000000..4dff5a1e412
--- /dev/null
+++ b/arch/arc/include/asm/serial.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SERIAL_H
+#define _ASM_ARC_SERIAL_H
+
+/*
+ * early-8250 requires BASE_BAUD to be defined and includes this header.
+ * We put in a typical value:
+ * (core clk / 16) - i.e. UART samples 16 times per bit.
+ * Although in a multi-platform-image this might not work, especially if the
+ * clk driving the UART is different.
+ * We can't use DeviceTree as this is typically for early serial.
+ */
+
+#include <asm/clk.h>
+
+#define BASE_BAUD (arc_get_core_freq() / 16)
+
+#endif /* _ASM_ARC_SERIAL_H */
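
For example (values illustrative), with an 80 MHz core clock - the same nominal
rate used for CLOCK_TICK_RATE elsewhere in this patch - this gives

    BASE_BAUD = 80,000,000 / 16 = 5,000,000

and the 8250 core would then program a divisor of about 5,000,000 / 115,200 ≈ 43
for a 115200 console.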
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
new file mode 100644
index 00000000000..229e5068149
--- /dev/null
+++ b/arch/arc/include/asm/setup.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ASMARC_SETUP_H
+#define __ASMARC_SETUP_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/setup.h>
+
+#define COMMAND_LINE_SIZE 256
+
+/*
+ * Data structure to map an ID to a string
+ * Used a lot for bootup reporting of hardware diversity
+ */
+struct id_to_str {
+ int id;
+ const char *str;
+};
+
+struct cpuinfo_data {
+ struct id_to_str info;
+ int up_range;
+};
+
+extern int root_mountflags, end_mem;
+extern int running_on_hw;
+
+void __init setup_processor(void);
+void __init setup_arch_memory(void);
+
+#endif /* __ASMARC_SETUP_H */
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
new file mode 100644
index 00000000000..c4fb211dcd2
--- /dev/null
+++ b/arch/arc/include/asm/smp.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_SMP_H
+#define __ASM_ARC_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+/* including cpumask.h leads to cyclic deps hence this Forward declaration */
+struct cpumask;
+
+/*
+ * APIs provided by arch SMP code to generic code
+ */
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+/*
+ * APIs provided by arch SMP code to rest of arch code
+ */
+extern void __init smp_init_cpus(void);
+extern void __init first_lines_of_secondary(void);
+extern const char *arc_platform_smp_cpuinfo(void);
+
+/*
+ * API expected BY platform smp code (FROM arch smp code)
+ *
+ * smp_ipi_irq_setup:
+ * Takes @cpu and @irq to which the arch-common ISR is hooked up
+ */
+extern int smp_ipi_irq_setup(int cpu, int irq);
+
+/*
+ * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP
+ *
+ * @info: SoC SMP specific info for /proc/cpuinfo etc
+ * @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
+ * @ipi_send: To send IPI to a @cpumask
+ * @ipi_clear: To clear IPI received by @cpu at @irq
+ */
+struct plat_smp_ops {
+ const char *info;
+ void (*cpu_kick)(int cpu, unsigned long pc);
+ void (*ipi_send)(void *callmap);
+ void (*ipi_clear)(int cpu, int irq);
+};
+
+/* TBD: stop exporting it for direct population by platform */
+extern struct plat_smp_ops plat_smp_ops;
+
+#endif /* CONFIG_SMP */
+
+/*
+ * ARC700 doesn't support atomic Read-Modify-Write ops.
+ * Originally Interrupts had to be disabled around code to guarantee atomicity.
+ * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops
+ * based on retry-if-irq-in-atomic (with hardware assist).
+ * However despite these, we provide the IRQ disabling variant
+ *
+ * (1) These insns were introduced only in the 4.10 release, so support is
+ * needed for older releases.
+ *
+ * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be
+ * guaranteed by the platform (not something which core handles).
+ * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ
+ * disabling for atomicity.
+ *
+ * However exported spinlock API is not usable due to cyclic hdr deps
+ * (even after system.h disintegration upstream)
+ * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
+ * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h
+ *
+ * So the workaround is to use the lowest level arch spinlock API.
+ * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP,
+ * but same is not true for ARCH backend, hence the need for 2 variants
+ */
+#ifndef CONFIG_ARC_HAS_LLSC
+
+#include <linux/irqflags.h>
+#ifdef CONFIG_SMP
+
+#include <asm/spinlock.h>
+
+extern arch_spinlock_t smp_atomic_ops_lock;
+extern arch_spinlock_t smp_bitops_lock;
+
+#define atomic_ops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_atomic_ops_lock); \
+} while (0)
+
+#define atomic_ops_unlock(flags) do { \
+ arch_spin_unlock(&smp_atomic_ops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#define bitops_lock(flags) do { \
+ local_irq_save(flags); \
+ arch_spin_lock(&smp_bitops_lock); \
+} while (0)
+
+#define bitops_unlock(flags) do { \
+ arch_spin_unlock(&smp_bitops_lock); \
+ local_irq_restore(flags); \
+} while (0)
+
+#else /* !CONFIG_SMP */
+
+#define atomic_ops_lock(flags) local_irq_save(flags)
+#define atomic_ops_unlock(flags) local_irq_restore(flags)
+
+#define bitops_lock(flags) local_irq_save(flags)
+#define bitops_unlock(flags) local_irq_restore(flags)
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !CONFIG_ARC_HAS_LLSC */
+
+#endif
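
A sketch of how the !LLSC atomics would use the locking helpers above
(illustrative only; the actual implementation presumably lives in this port's
asm/atomic.h, and the _sketch name is made up):

	static inline void atomic_add_sketch(int i, atomic_t *v)
	{
		unsigned long flags;

		atomic_ops_lock(flags);		/* IRQs off, plus spinlock on SMP */
		v->counter += i;
		atomic_ops_unlock(flags);
	}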
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
new file mode 100644
index 00000000000..f158197ac5b
--- /dev/null
+++ b/arch/arc/include/asm/spinlock.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#include <asm/spinlock_types.h>
+#include <asm/processor.h>
+#include <asm/barrier.h>
+
+#define arch_spin_is_locked(x) ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
+#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
+#define arch_spin_unlock_wait(x) \
+ do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ " breq %0, %2, 1b \n"
+ : "+&r" (tmp)
+ : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
+ : "memory");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;
+
+ __asm__ __volatile__(
+ "1: ex %0, [%1] \n"
+ : "+r" (tmp)
+ : "r"(&(lock->slock))
+ : "memory");
+
+ return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
+}
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+ smp_mb();
+}
+
+/*
+ * Read-write spinlocks, allowing multiple readers but only one writer.
+ *
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
+ *
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
+ */
+
+/* Would read_trylock() succeed? */
+#define arch_read_can_lock(x) ((x)->counter > 0)
+
+/* Would write_trylock() succeed? */
+#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
+
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * zero means writer holds the lock exclusively, deny Reader.
+ * Otherwise grant lock to first/subseq reader
+ */
+ if (rw->counter > 0) {
+ rw->counter--;
+ ret = 1;
+ }
+
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ smp_mb();
+ return ret;
+}
+
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+ int ret = 0;
+
+ arch_spin_lock(&(rw->lock_mutex));
+
+ /*
+ * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+ * deny writer. Otherwise if unlocked grant to writer
+ * Hence the claim that Linux rwlocks are unfair to writers.
+ * (can be starved for an indefinite time by readers).
+ */
+ if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+ rw->counter = 0;
+ ret = 1;
+ }
+ arch_spin_unlock(&(rw->lock_mutex));
+
+ return ret;
+}
+
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+ while (!arch_read_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+ while (!arch_write_trylock(rw))
+ cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter++;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+ arch_spin_lock(&(rw->lock_mutex));
+ rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+ arch_spin_unlock(&(rw->lock_mutex));
+}
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
+
+#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
new file mode 100644
index 00000000000..8276bfd6170
--- /dev/null
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_SPINLOCK_TYPES_H
+#define __ASM_SPINLOCK_TYPES_H
+
+typedef struct {
+ volatile unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED__ 0
+#define __ARCH_SPIN_LOCK_LOCKED__ 1
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED__ }
+#define __ARCH_SPIN_LOCK_LOCKED { __ARCH_SPIN_LOCK_LOCKED__ }
+
+/*
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF down to 0x01 (multiple readers decrement it)
+ * Write lock   : 0x0, but only possible if prior value was "unlocked" 0x0100_0000
+ */
+typedef struct {
+ volatile unsigned int counter;
+ arch_spinlock_t lock_mutex;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED { .counter = __ARCH_RW_LOCK_UNLOCKED__ }
+
+#endif
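
A worked state sequence for the counter-based rwlock above (purely
illustrative, following the trylock/unlock routines in spinlock.h):

    counter = 0x0100_0000              unlocked
    reader A read_trylock()        ->  0x00FF_FFFF
    reader B read_trylock()        ->  0x00FF_FFFE
    writer   write_trylock() fails     (counter != 0x0100_0000)
    reader B read_unlock()         ->  0x00FF_FFFF
    reader A read_unlock()         ->  0x0100_0000
    writer   write_trylock()       ->  0x0000_0000   (exclusive)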
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
new file mode 100644
index 00000000000..87676c8f141
--- /dev/null
+++ b/arch/arc/include/asm/string.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -We had half-optimised memset/memcpy, got better versions of those
+ * -Added memcmp, strchr, strcpy, strcmp, strlen
+ *
+ * Amit Bhor: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_STRING_H
+#define _ASM_ARC_STRING_H
+
+#include <linux/types.h>
+
+#ifdef __KERNEL__
+
+#define __HAVE_ARCH_MEMSET
+#define __HAVE_ARCH_MEMCPY
+#define __HAVE_ARCH_MEMCMP
+#define __HAVE_ARCH_STRCHR
+#define __HAVE_ARCH_STRCPY
+#define __HAVE_ARCH_STRCMP
+#define __HAVE_ARCH_STRLEN
+
+extern void *memset(void *ptr, int, __kernel_size_t);
+extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void memzero(void *ptr, __kernel_size_t n);
+extern int memcmp(const void *, const void *, __kernel_size_t);
+extern char *strchr(const char *s, int c);
+extern char *strcpy(char *dest, const char *src);
+extern int strcmp(const char *cs, const char *ct);
+extern __kernel_size_t strlen(const char *);
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/switch_to.h b/arch/arc/include/asm/switch_to.h
new file mode 100644
index 00000000000..1b171ab5fec
--- /dev/null
+++ b/arch/arc/include/asm/switch_to.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SWITCH_TO_H
+#define _ASM_ARC_SWITCH_TO_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+
+extern void fpu_save_restore(struct task_struct *p, struct task_struct *n);
+#define ARC_FPU_PREV(p, n) fpu_save_restore(p, n)
+#define ARC_FPU_NEXT(t)
+
+#else
+
+#define ARC_FPU_PREV(p, n)
+#define ARC_FPU_NEXT(n)
+
+#endif /* !CONFIG_ARC_FPU_SAVE_RESTORE */
+
+struct task_struct *__switch_to(struct task_struct *p, struct task_struct *n);
+
+#define switch_to(prev, next, last) \
+do { \
+ ARC_FPU_PREV(prev, next); \
+ last = __switch_to(prev, next);\
+ ARC_FPU_NEXT(next); \
+ mb(); \
+} while (0)
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
new file mode 100644
index 00000000000..33ab3048e9b
--- /dev/null
+++ b/arch/arc/include/asm/syscall.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALL_H
+#define _ASM_ARC_SYSCALL_H 1
+
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h> /* in_syscall() */
+
+static inline long
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+ if (user_mode(regs) && in_syscall(regs))
+ return regs->orig_r8;
+ else
+ return -1;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+ /* XXX: I can't fathom how pt_regs->r8 will be clobbered ? */
+ regs->r8 = regs->orig_r8;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+ /* 0 if syscall succeeded, otherwise -Errorcode */
+ return IS_ERR_VALUE(regs->r0) ? regs->r0 : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+ return regs->r0;
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val)
+{
+ regs->r0 = (long) error ?: val;
+}
+
+/*
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ */
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args)
+{
+ unsigned long *inside_ptregs = &(regs->r0);
+ inside_ptregs -= i;
+
+ BUG_ON((i + n) > 6);
+
+ while (n--) {
+ args[i++] = (*inside_ptregs);
+ inside_ptregs--;
+ }
+}
+
+#endif
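
A minimal usage sketch for syscall_get_arguments( ) above (illustrative only;
"child" is a hypothetical traced task): args[0] ends up holding r0 and args[5]
holding r5.

	unsigned long args[6];

	syscall_get_arguments(child, task_pt_regs(child), 0, 6, args);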
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
new file mode 100644
index 00000000000..e53a5340ba4
--- /dev/null
+++ b/arch/arc/include/asm/syscalls.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SYSCALLS_H
+#define _ASM_ARC_SYSCALLS_H 1
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+int sys_clone_wrapper(int, int, int, int, int);
+int sys_fork_wrapper(void);
+int sys_vfork_wrapper(void);
+int sys_cacheflush(uint32_t, uint32_t, uint32_t);
+int sys_arc_settls(void *);
+int sys_arc_gettls(void);
+
+#include <asm-generic/syscalls.h>
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
new file mode 100644
index 00000000000..2d50a4cdd7f
--- /dev/null
+++ b/arch/arc/include/asm/thread_info.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Oct 2009
+ * No need for ARC specific thread_info allocator (kmalloc/free). This is
+ * anyway a one page allocation, thus slab alloc can be short-circuited and
+ * the generic version (get_free_page) would be loads better.
+ *
+ * Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+
+#ifdef CONFIG_16KSTACKS
+#define THREAD_SIZE_ORDER 1
+#else
+#define THREAD_SIZE_ORDER 0
+#endif
+
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/thread_info.h>
+#include <asm/segment.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ * must also be changed
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0 => preemptable, <0 => BUG */
+ struct task_struct *task; /* main task structure */
+ mm_segment_t addr_limit; /* thread address space */
+ struct exec_domain *exec_domain;/* execution domain */
+ __u32 cpu; /* current CPU */
+ unsigned long thr_ptr; /* TLS ptr */
+ struct restart_block restart_block;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .task = &tsk, \
+ .exec_domain = &default_exec_domain, \
+ .flags = 0, \
+ .cpu = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+ .restart_block = { \
+ .fn = do_no_restart_syscall, \
+ }, \
+}
+
+#define init_thread_info (init_thread_union.thread_info)
+#define init_stack (init_thread_union.stack)
+
+static inline __attribute_const__ struct thread_info *current_thread_info(void)
+{
+ register unsigned long sp asm("sp");
+ return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define PREEMPT_ACTIVE 0x10000000
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in LSW
+ * - other flags in MSW
+ */
+#define TIF_RESTORE_SIGMASK 0 /* restore sig mask in do_signal() */
+#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT 4 /* syscall auditing active */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+
+/* task is terminating, being reaped by the OOM killer */
+#define TIF_MEMDIE 16
+
+#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
+#define _TIF_MEMDIE (1<<TIF_MEMDIE)
+
+/* work to do on interrupt/exception return */
+#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
+ _TIF_NOTIFY_RESUME)
+
+/*
+ * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
+ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
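
A worked example of the stack-pointer masking in current_thread_info( ) above,
assuming 8K pages and THREAD_SIZE_ORDER 0 (THREAD_SIZE = 0x2000; the address is
made up):

    sp                 = 0x8f80_4d20
    sp & ~(0x2000 - 1) = 0x8f80_4000   -> struct thread_info
    0x8f80_4000 + 0x2000 - 4 - sizeof(struct pt_regs)
                       -> where current_pt_regs( ) in ptrace.h points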
diff --git a/arch/arc/include/asm/timex.h b/arch/arc/include/asm/timex.h
new file mode 100644
index 00000000000..0a82960a75e
--- /dev/null
+++ b/arch/arc/include/asm/timex.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TIMEX_H
+#define _ASM_ARC_TIMEX_H
+
+#define CLOCK_TICK_RATE 80000000 /* slated to be removed */
+
+#include <asm-generic/timex.h>
+
+/* XXX: get_cycles() to be implemented with RTSC insn */
+
+#endif /* _ASM_ARC_TIMEX_H */
diff --git a/arch/arc/include/asm/tlb-mmu1.h b/arch/arc/include/asm/tlb-mmu1.h
new file mode 100644
index 00000000000..a5ff961b1ef
--- /dev/null
+++ b/arch/arc/include/asm/tlb-mmu1.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_TLB_MMU_V1_H__
+#define __ASM_TLB_MMU_V1_H__
+
+#if defined(__ASSEMBLY__) && (CONFIG_ARC_MMU_VER == 1)
+
+#include <asm/tlb.h>
+
+.macro TLB_WRITE_HEURISTICS
+
+#define JH_HACK1
+#undef JH_HACK2
+#undef JH_HACK3
+
+#ifdef JH_HACK3
+; Calculate set index for 2-way MMU
+; -avoiding use of GetIndex from MMU
+; and its unpleasant LFSR pseudo-random sequence
+;
+; r1 = TLBPD0 from TLB_RELOAD above
+;
+; -- jh_ex_way_set not cleared on startup
+; didn't want to change setup.c
+; hence extra instruction to clean
+;
+; -- should be in cache since in same line
+; as r0/r1 saves above
+;
+ld r0,[jh_ex_way_sel] ; victim pointer
+and r0,r0,1 ; clean
+xor.f r0,r0,1 ; flip
+st r0,[jh_ex_way_sel] ; store back
+asr r0,r1,12 ; get set # <<1, note bit 12=R=0
+or.nz r0,r0,1 ; set way bit
+and r0,r0,0xff ; clean
+sr r0,[ARC_REG_TLBINDEX]
+#endif
+
+#ifdef JH_HACK2
+; JH hack #2
+; Faster than hack #1 in non-thrash case, but hard-coded for 2-way MMU
+; Slower in thrash case (where it matters) because more code is executed
+; Inefficient due to two-register paradigm of this miss handler
+;
+/* r1 = data TLBPD0 at this point */
+lr r0,[eret] /* instruction address */
+xor r0,r0,r1 /* compare set # */
+and.f r0,r0,0x000fe000 /* 2-way MMU mask */
+bne 88f /* not in same set - no need to probe */
+
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+; lr r1,[ARC_REG_TLBPD0] /* Data VPN+ASID - already in r1 from TLB_RELOAD*/
+and r1,r1,0xff /* Data ASID */
+or r0,r0,r1 /* Instruction address + Data ASID */
+
+lr r1,[ARC_REG_TLBPD0] /* save TLBPD0 containing data TLB*/
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r1,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+xor r0,r0,1 /* flip bottom bit of data index */
+b.d 89f
+sr r0,[ARC_REG_TLBINDEX] /* and put it back */
+88:
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+89:
+#endif
+
+#ifdef JH_HACK1
+;
+; Always checks whether instruction will be kicked out by dtlb miss
+;
+mov_s r3, r1 ; save PD0 prepared by TLB_RELOAD in r3
+lr r0,[eret] /* instruction address */
+and r0,r0,PAGE_MASK /* VPN of instruction address */
+bmsk r1,r3,7 /* Data ASID, bits 7-0 */
+or_s r0,r0,r1 /* Instruction address + Data ASID */
+
+sr r0,[ARC_REG_TLBPD0] /* write instruction address to TLBPD0 */
+sr TLBProbe, [ARC_REG_TLBCOMMAND] /* Look for instruction */
+lr r0,[ARC_REG_TLBINDEX] /* r0 = index where instruction is, if at all */
+sr r3,[ARC_REG_TLBPD0] /* restore TLBPD0 */
+
+sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+lr r1,[ARC_REG_TLBINDEX] /* r1 = index where MMU wants to put data */
+cmp r0,r1 /* if no match on indices, go around */
+xor.eq r1,r1,1 /* flip bottom bit of data index */
+sr r1,[ARC_REG_TLBINDEX] /* and put it back */
+#endif
+
+.endm
+
+#endif
+
+#endif
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
new file mode 100644
index 00000000000..3eb2ce0bdfa
--- /dev/null
+++ b/arch/arc/include/asm/tlb.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_TLB_H
+#define _ASM_ARC_TLB_H
+
+#ifdef __KERNEL__
+
+#include <asm/pgtable.h>
+
+/* Masks for actual TLB "PD"s */
+#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
+#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
+ _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
+ _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+
+#ifndef __ASSEMBLY__
+
+#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm)
+
+/*
+ * This pair is called at time of munmap/exit to flush cache and TLB entries
+ * for mappings being torn down.
+ * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now)
+ * as we don't support aliasing configs in our VIPT D$.
+ * 2) tlb-flush part - implemented via tlb_end_vma( ) can be NOP as well -
+ *    albeit for different reasons - it's better handled by moving to a new ASID
+ *
+ * Note, read http://lkml.org/lkml/2004/1/15/6
+ */
+#define tlb_start_vma(tlb, vma)
+#define tlb_end_vma(tlb, vma)
+
+#define __tlb_remove_tlb_entry(tlb, ptep, address)
+
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
+#else
+#define tlb_paranoid_check(a, b)
+#endif
+
+void arc_mmu_init(void);
+extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
+void __init read_decode_mmu_bcr(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_ARC_TLB_H */
diff --git a/arch/arc/include/asm/tlbflush.h b/arch/arc/include/asm/tlbflush.h
new file mode 100644
index 00000000000..b2f9bc7f68c
--- /dev/null
+++ b/arch/arc/include/asm/tlbflush.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_TLBFLUSH__
+#define __ASM_ARC_TLBFLUSH__
+
+#include <linux/mm.h>
+
+void local_flush_tlb_all(void);
+void local_flush_tlb_mm(struct mm_struct *mm);
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+
+/* XXX: Revisit for SMP */
+#define flush_tlb_range(vma, s, e) local_flush_tlb_range(vma, s, e)
+#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
+#define flush_tlb_kernel_range(s, e) local_flush_tlb_kernel_range(s, e)
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
+
+#endif
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
new file mode 100644
index 00000000000..32420824375
--- /dev/null
+++ b/arch/arc/include/asm/uaccess.h
@@ -0,0 +1,751 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: June 2010
+ * -__clear_user( ) called multiple times during elf load was byte loop
+ * converted to do as much word clear as possible.
+ *
+ * vineetg: Dec 2009
+ * -Hand crafted constant propagation for "constant" copy sizes
+ * -stock kernel shrunk by 33K at -O3
+ *
+ * vineetg: Sept 2009
+ * -Added option to (UN)inline copy_(to|from)_user to reduce code sz
+ * -kernel shrunk by 200K even at -O3 (gcc 4.2.1)
+ * -Enabled when doing -Os
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_UACCESS_H
+#define _ASM_ARC_UACCESS_H
+
+#include <linux/sched.h>
+#include <asm/errno.h>
+#include <linux/string.h> /* for generic string functions */
+
+
+#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+
+/*
+ * Algorithmically, for __user_ok() we want to do:
+ * (start < TASK_SIZE) && (start+len < TASK_SIZE)
+ * where TASK_SIZE could either be retrieved from thread_info->addr_limit or
+ * emitted directly in code.
+ *
+ * This can however be rewritten as follows:
+ * (len <= TASK_SIZE) && (start+len < TASK_SIZE)
+ *
+ * Because it essentially checks if the buffer end is within limit and @len is
+ * non-negative, which implies that the buffer start will be within limit too.
+ *
+ * The reason for rewriting is that, for the majority of cases, @len is a
+ * compile time constant, causing the first sub-expression to be subsumed at
+ * compile time.
+ *
+ * The second part would generate weird large LIMMs e.g. (0x6000_0000 - 0x10),
+ * so we check for TASK_SIZE using get_fs() since the addr_limit load from mem
+ * would already have been done at this call site for __kernel_ok()
+ *
+ */
+#define __user_ok(addr, sz) (((sz) <= TASK_SIZE) && \
+ (((addr)+(sz)) <= get_fs()))
+#define __access_ok(addr, sz) (unlikely(__kernel_ok) || \
+ likely(__user_ok((addr), (sz))))
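A host-buildable sketch of the rewritten check (the limit value and helper name below are illustrative only, not part of the kernel sources): because the buffer end is bounded and @sz is itself bounded, the start never needs a separate test.

#include <stdio.h>

#define FAKE_TASK_SIZE	0x60000000UL	/* stand-in for the user address limit */

static int user_ok(unsigned long addr, unsigned long sz)
{
	/* (sz <= limit) && (addr + sz <= limit): if the end is in bounds and
	 * sz is small enough, the start must be in bounds too */
	return (sz <= FAKE_TASK_SIZE) && ((addr + sz) <= FAKE_TASK_SIZE);
}

int main(void)
{
	printf("%d\n", user_ok(0x10000000UL, 0x100));	/* 1: fully below limit */
	printf("%d\n", user_ok(0x5fffff00UL, 0x200));	/* 0: end crosses limit */
	printf("%d\n", user_ok(0x80000000UL, 0x100));	/* 0: start above limit */
	return 0;
}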
+
+/*********** Single byte/hword/word copies ******************/
+
+#define __get_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_get_user_one(*(k), u, "ldb", __ret); break; \
+ case 2: __arc_get_user_one(*(k), u, "ldw", __ret); break; \
+ case 4: __arc_get_user_one(*(k), u, "ld", __ret); break; \
+ case 8: __arc_get_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+/*
+ * Returns 0 on success, -EFAULT if not.
+ * @ret already contains 0 - given that errors will be less likely
+ * (hence +r asm constraint below).
+ * In case of error, fixup code will make it -EFAULT
+ */
+#define __arc_get_user_one(dst, src, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __arc_get_user_one_64(dst, src, ret) \
+ __asm__ __volatile__( \
+ "1: ld %1,[%2]\n" \
+ "4: ld %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret), "=r" (dst) \
+ : "r" (src), "ir" (-EFAULT))
+
+#define __put_user_fn(sz, u, k) \
+({ \
+ long __ret = 0; /* success by default */ \
+ switch (sz) { \
+ case 1: __arc_put_user_one(*(k), u, "stb", __ret); break; \
+ case 2: __arc_put_user_one(*(k), u, "stw", __ret); break; \
+ case 4: __arc_put_user_one(*(k), u, "st", __ret); break; \
+ case 8: __arc_put_user_one_64(*(k), u, __ret); break; \
+ } \
+ __ret; \
+})
+
+#define __arc_put_user_one(src, dst, op, ret) \
+ __asm__ __volatile__( \
+ "1: "op" %1,[%2]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+#define __arc_put_user_one_64(src, dst, ret) \
+ __asm__ __volatile__( \
+ "1: st %1,[%2]\n" \
+ "4: st %R1,[%2, 4]\n" \
+ "2: ;nop\n" \
+ " .section .fixup, \"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, %3\n" \
+ " j 2b\n" \
+ " .previous\n" \
+ " .section __ex_table, \"a\"\n" \
+ " .align 4\n" \
+ " .word 1b,3b\n" \
+ " .word 4b,3b\n" \
+ " .previous\n" \
+ \
+ : "+r" (ret) \
+ : "r" (src), "r" (dst), "ir" (-EFAULT))
+
+
+static inline unsigned long
+__arc_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__ (
+ " mov.f lp_count, %0 \n"
+ " lpnz 2f \n"
+ "1: ldb.ab %1, [%3, 1] \n"
+ " stb.ab %1, [%2, 1] \n"
+ " sub %0,%0,1 \n"
+ "2: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 2b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 3b \n"
+ " .previous \n"
+
+ : "+r" (n),
+ /*
+		 * Note the '&' earlyclobber operand ensures the temporary
+		 * register inside the loop is not the same as FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ /*
+ * Hand-crafted constant propagation to reduce code sz of the
+ * laddered copy 16x,8,4,2,1
+ */
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ "1: ld.ab %3, [%2, 4] \n"
+ "11: ld.ab %4, [%2, 4] \n"
+ "12: ld.ab %5, [%2, 4] \n"
+ "13: ld.ab %6, [%2, 4] \n"
+ " st.ab %3, [%1, 4] \n"
+ " st.ab %4, [%1, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ "14: ld.ab %3, [%2,4] \n"
+ "15: ld.ab %4, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " st.ab %4, [%1,4] \n"
+ " sub %0,%0,8 \n"
+ "31: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ "16: ld.ab %3, [%2,4] \n"
+ " st.ab %3, [%1,4] \n"
+ " sub %0,%0,4 \n"
+ "32: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ "17: ldw.ab %3, [%2,2] \n"
+ " stw.ab %3, [%1,2] \n"
+ " sub %0,%0,2 \n"
+ "33: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ "18: ldb.ab %3, [%2,2] \n"
+ " stb.ab %3, [%1,2] \n"
+ " sub %0,%0,1 \n"
+ "34: ; nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ "1: ld.ab %5, [%2, 4] \n"
+ "11: ld.ab %6, [%2, 4] \n"
+ "12: ld.ab %7, [%2, 4] \n"
+ "13: ld.ab %8, [%2, 4] \n"
+ " st.ab %5, [%1, 4] \n"
+ " st.ab %6, [%1, 4] \n"
+ " st.ab %7, [%1, 4] \n"
+ " st.ab %8, [%1, 4] \n"
+ " sub %0,%0,16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ "14: ld.ab %5, [%2,4] \n"
+ "15: ld.ab %6, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " st.ab %6, [%1,4] \n"
+ " sub.f %0,%0,8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ "16: ld.ab %5, [%2,4] \n"
+ " st.ab %5, [%1,4] \n"
+ " sub.f %0,%0,4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ "17: ldw.ab %5, [%2,2] \n"
+ " stw.ab %5, [%1,2] \n"
+ " sub.f %0,%0,2 \n"
+ "33: bbit0 %3,0,34f \n"
+ "18: ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ " stb.ab %5, [%1,1] \n"
+ " sub.f %0,%0,1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
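The 16/8/4/2/1 ladder that both paths above implement can be sketched in plain C; this is illustrative only (made-up name, no fault handling). When n is a compile-time constant the compiler drops every step that cannot fire, which is the code-size saving the "constant propagation" comment refers to.

#include <string.h>

static void laddered_copy(unsigned char *to, const unsigned char *from,
			  unsigned long n)
{
	unsigned long chunks = n / 16;	/* 16-byte chunks: the LP_COUNT loop */

	while (chunks--) {
		memcpy(to, from, 16);
		to += 16;
		from += 16;
	}
	n %= 16;			/* then the 8/4/2/1 straggler steps */
	if (n & 8) { memcpy(to, from, 8); to += 8; from += 8; }
	if (n & 4) { memcpy(to, from, 4); to += 4; from += 4; }
	if (n & 2) { memcpy(to, from, 2); to += 2; from += 2; }
	if (n & 1) { *to = *from; }
}

int main(void)
{
	unsigned char src[37] = "laddered copy test: 37 byte payload!";
	unsigned char dst[37];

	laddered_copy(dst, src, sizeof(src));
	return memcmp(dst, src, sizeof(src));	/* 0 on success */
}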
+
+extern unsigned long slowpath_copy_to_user(void __user *to, const void *from,
+ unsigned long n);
+
+static inline unsigned long
+__arc_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ long res = 0;
+ char val;
+ unsigned long tmp1, tmp2, tmp3, tmp4;
+ unsigned long orig_n = n;
+
+ if (n == 0)
+ return 0;
+
+ /* unaligned */
+ if (((unsigned long)to & 0x3) || ((unsigned long)from & 0x3)) {
+
+ unsigned char tmp;
+
+ __asm__ __volatile__(
+ " mov.f lp_count, %0 \n"
+ " lpnz 3f \n"
+ " ldb.ab %1, [%3, 1] \n"
+ "1: stb.ab %1, [%2, 1] \n"
+ " sub %0, %0, 1 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+
+ : "+r" (n),
+		/* Note the '&' earlyclobber operand ensures the temporary
+		 * register inside the loop is not the same as FROM or TO.
+ */
+ "=&r" (tmp), "+r" (to), "+r" (from)
+ :
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return n;
+ }
+
+ if (__builtin_constant_p(orig_n)) {
+ res = orig_n;
+
+ if (orig_n / 16) {
+ orig_n = orig_n % 16;
+
+ __asm__ __volatile__(
+ " lsr lp_count, %7,4 \n"
+ " lp 3f \n"
+ " ld.ab %3, [%2, 4] \n"
+ " ld.ab %4, [%2, 4] \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ "1: st.ab %3, [%1, 4] \n"
+ "11: st.ab %4, [%1, 4] \n"
+ "12: st.ab %5, [%1, 4] \n"
+ "13: st.ab %6, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ : "ir"(n)
+ : "lp_count", "memory");
+ }
+ if (orig_n / 8) {
+ orig_n = orig_n % 8;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ " ld.ab %4, [%2,4] \n"
+ "14: st.ab %3, [%1,4] \n"
+ "15: st.ab %4, [%1,4] \n"
+ " sub %0, %0, 8 \n"
+ "31:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 31b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from),
+ "=r"(tmp1), "=r"(tmp2)
+ :
+ : "memory");
+ }
+ if (orig_n / 4) {
+ orig_n = orig_n % 4;
+
+ __asm__ __volatile__(
+ " ld.ab %3, [%2,4] \n"
+ "16: st.ab %3, [%1,4] \n"
+ " sub %0, %0, 4 \n"
+ "32:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 32b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 16b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n / 2) {
+ orig_n = orig_n % 2;
+
+ __asm__ __volatile__(
+ " ldw.ab %3, [%2,2] \n"
+ "17: stw.ab %3, [%1,2] \n"
+ " sub %0, %0, 2 \n"
+ "33:;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 33b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 17b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ if (orig_n & 1) {
+ __asm__ __volatile__(
+ " ldb.ab %3, [%2,1] \n"
+ "18: stb.ab %3, [%1,1] \n"
+ " sub %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "+r" (res), "+r"(to), "+r"(from), "=r"(tmp1)
+ :
+ : "memory");
+ }
+ } else { /* n is NOT constant, so laddered copy of 16x,8,4,2,1 */
+
+ __asm__ __volatile__(
+ " mov %0,%3 \n"
+ " lsr.f lp_count, %3,4 \n" /* 16x bytes */
+ " lpnz 3f \n"
+ " ld.ab %5, [%2, 4] \n"
+ " ld.ab %6, [%2, 4] \n"
+ " ld.ab %7, [%2, 4] \n"
+ " ld.ab %8, [%2, 4] \n"
+ "1: st.ab %5, [%1, 4] \n"
+ "11: st.ab %6, [%1, 4] \n"
+ "12: st.ab %7, [%1, 4] \n"
+ "13: st.ab %8, [%1, 4] \n"
+ " sub %0, %0, 16 \n"
+ "3: and.f %3,%3,0xf \n" /* stragglers */
+ " bz 34f \n"
+ " bbit0 %3,3,31f \n" /* 8 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ " ld.ab %6, [%2,4] \n"
+ "14: st.ab %5, [%1,4] \n"
+ "15: st.ab %6, [%1,4] \n"
+ " sub.f %0, %0, 8 \n"
+ "31: bbit0 %3,2,32f \n" /* 4 bytes left */
+ " ld.ab %5, [%2,4] \n"
+ "16: st.ab %5, [%1,4] \n"
+ " sub.f %0, %0, 4 \n"
+ "32: bbit0 %3,1,33f \n" /* 2 bytes left */
+ " ldw.ab %5, [%2,2] \n"
+ "17: stw.ab %5, [%1,2] \n"
+ " sub.f %0, %0, 2 \n"
+ "33: bbit0 %3,0,34f \n"
+ " ldb.ab %5, [%2,1] \n" /* 1 byte left */
+ "18: stb.ab %5, [%1,1] \n"
+ " sub.f %0, %0, 1 \n"
+ "34: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: j 34b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .word 11b,4b \n"
+ " .word 12b,4b \n"
+ " .word 13b,4b \n"
+ " .word 14b,4b \n"
+ " .word 15b,4b \n"
+ " .word 16b,4b \n"
+ " .word 17b,4b \n"
+ " .word 18b,4b \n"
+ " .previous \n"
+ : "=r" (res), "+r"(to), "+r"(from), "+r"(n), "=r"(val),
+ "=r"(tmp1), "=r"(tmp2), "=r"(tmp3), "=r"(tmp4)
+ :
+ : "lp_count", "memory");
+ }
+
+ return res;
+}
+
+static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
+{
+ long res = n;
+ unsigned char *d_char = to;
+
+ __asm__ __volatile__(
+ " bbit0 %0, 0, 1f \n"
+ "75: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "1: bbit0 %0, 1, 2f \n"
+ "76: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "2: asr.f lp_count, %1, 2 \n"
+ " lpnz 3f \n"
+ "77: st.ab %2, [%0,4] \n"
+ " sub %1, %1, 4 \n"
+ "3: bbit0 %1, 1, 4f \n"
+ "78: stw.ab %2, [%0,2] \n"
+ " sub %1, %1, 2 \n"
+ "4: bbit0 %1, 0, 5f \n"
+ "79: stb.ab %2, [%0,1] \n"
+ " sub %1, %1, 1 \n"
+ "5: \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "3: j 5b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 75b, 3b \n"
+ " .word 76b, 3b \n"
+ " .word 77b, 3b \n"
+ " .word 78b, 3b \n"
+ " .word 79b, 3b \n"
+ " .previous \n"
+ : "+r"(d_char), "+r"(res)
+ : "i"(0)
+ : "lp_count", "lp_start", "lp_end", "memory");
+
+ return res;
+}
+
+static inline long
+__arc_strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ long res = count;
+ char val;
+ unsigned int hw_count;
+
+ if (count == 0)
+ return 0;
+
+ __asm__ __volatile__(
+ " lp 2f \n"
+ "1: ldb.ab %3, [%2, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " stb.ab %3, [%1, 1] \n"
+ "2: sub %0, %6, %4 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, %5 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
+ : "g"(-EFAULT), "ir"(count), "4"(count) /* this "4" seeds lp_count */
+ : "memory");
+
+ return res;
+}
+
+static inline long __arc_strnlen_user(const char __user *s, long n)
+{
+ long res, tmp1, cnt;
+ char val;
+
+ __asm__ __volatile__(
+ " mov %2, %1 \n"
+ "1: ldb.ab %3, [%0, 1] \n"
+ " breq.d %3, 0, 2f \n"
+ " sub.f %2, %2, 1 \n"
+ " bnz 1b \n"
+ " sub %2, %2, 1 \n"
+ "2: sub %0, %1, %2 \n"
+ "3: ;nop \n"
+ " .section .fixup, \"ax\" \n"
+ " .align 4 \n"
+ "4: mov %0, 0 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table, \"a\" \n"
+ " .align 4 \n"
+ " .word 1b, 4b \n"
+ " .previous \n"
+ : "=r"(res), "=r"(tmp1), "=r"(cnt), "=r"(val)
+ : "0"(s), "1"(n)
+ : "memory");
+
+ return res;
+}
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n)
+#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n)
+#define __clear_user(d, n) __arc_clear_user(d, n)
+#define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n)
+#define __strnlen_user(s, n) __arc_strnlen_user(s, n)
+#else
+extern long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n);
+extern long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n);
+extern unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n);
+extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count);
+extern long arc_strnlen_user_noinline(const char __user *src, long n);
+
+#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n)
+#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n)
+#define __clear_user(d, n) arc_clear_user_noinline(d, n)
+#define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n)
+#define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n)
+
+#endif
+
+#include <asm-generic/uaccess.h>
+
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
new file mode 100644
index 00000000000..5dbe63f17b6
--- /dev/null
+++ b/arch/arc/include/asm/unaligned.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNALIGNED_H
+#define _ASM_ARC_UNALIGNED_H
+
+/* ARC700 can't handle unaligned Data accesses. */
+
+#include <asm-generic/unaligned.h>
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs);
+#else
+static inline int
+misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ return 0;
+}
+#endif
+
+#endif /* _ASM_ARC_UNALIGNED_H */
diff --git a/arch/arc/include/asm/unwind.h b/arch/arc/include/asm/unwind.h
new file mode 100644
index 00000000000..7ca628b6ee2
--- /dev/null
+++ b/arch/arc/include/asm/unwind.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_UNWIND_H
+#define _ASM_ARC_UNWIND_H
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+#include <linux/sched.h>
+
+struct arc700_regs {
+ unsigned long r0;
+ unsigned long r1;
+ unsigned long r2;
+ unsigned long r3;
+ unsigned long r4;
+ unsigned long r5;
+ unsigned long r6;
+ unsigned long r7;
+ unsigned long r8;
+ unsigned long r9;
+ unsigned long r10;
+ unsigned long r11;
+ unsigned long r12;
+ unsigned long r13;
+ unsigned long r14;
+ unsigned long r15;
+ unsigned long r16;
+ unsigned long r17;
+ unsigned long r18;
+ unsigned long r19;
+ unsigned long r20;
+ unsigned long r21;
+ unsigned long r22;
+ unsigned long r23;
+ unsigned long r24;
+ unsigned long r25;
+ unsigned long r26;
+ unsigned long r27; /* fp */
+ unsigned long r28; /* sp */
+ unsigned long r29;
+ unsigned long r30;
+ unsigned long r31; /* blink */
+ unsigned long r63; /* pc */
+};
+
+struct unwind_frame_info {
+ struct arc700_regs regs;
+ struct task_struct *task;
+ unsigned call_frame:1;
+};
+
+#define UNW_PC(frame) ((frame)->regs.r63)
+#define UNW_SP(frame) ((frame)->regs.r28)
+#define UNW_BLINK(frame) ((frame)->regs.r31)
+
+/* Rajesh FIXME */
+#ifdef CONFIG_FRAME_POINTER
+#define UNW_FP(frame) ((frame)->regs.r27)
+#define FRAME_RETADDR_OFFSET 4
+#define FRAME_LINK_OFFSET 0
+#define STACK_BOTTOM_UNW(tsk) STACK_LIMIT((tsk)->thread.ksp)
+#define STACK_TOP_UNW(tsk) ((tsk)->thread.ksp)
+#else
+#define UNW_FP(frame) ((void)(frame), 0)
+#endif
+
+#define STACK_LIMIT(ptr) (((ptr) - 1) & ~(THREAD_SIZE - 1))
+
+#define UNW_REGISTER_INFO \
+ PTREGS_INFO(r0), \
+ PTREGS_INFO(r1), \
+ PTREGS_INFO(r2), \
+ PTREGS_INFO(r3), \
+ PTREGS_INFO(r4), \
+ PTREGS_INFO(r5), \
+ PTREGS_INFO(r6), \
+ PTREGS_INFO(r7), \
+ PTREGS_INFO(r8), \
+ PTREGS_INFO(r9), \
+ PTREGS_INFO(r10), \
+ PTREGS_INFO(r11), \
+ PTREGS_INFO(r12), \
+ PTREGS_INFO(r13), \
+ PTREGS_INFO(r14), \
+ PTREGS_INFO(r15), \
+ PTREGS_INFO(r16), \
+ PTREGS_INFO(r17), \
+ PTREGS_INFO(r18), \
+ PTREGS_INFO(r19), \
+ PTREGS_INFO(r20), \
+ PTREGS_INFO(r21), \
+ PTREGS_INFO(r22), \
+ PTREGS_INFO(r23), \
+ PTREGS_INFO(r24), \
+ PTREGS_INFO(r25), \
+ PTREGS_INFO(r26), \
+ PTREGS_INFO(r27), \
+ PTREGS_INFO(r28), \
+ PTREGS_INFO(r29), \
+ PTREGS_INFO(r30), \
+ PTREGS_INFO(r31), \
+ PTREGS_INFO(r63)
+
+#define UNW_DEFAULT_RA(raItem, dataAlign) \
+ ((raItem).where == Memory && !((raItem).value * (dataAlign) + 4))
+
+extern int arc_unwind(struct unwind_frame_info *frame);
+extern void arc_unwind_init(void);
+extern void arc_unwind_setup(void);
+extern void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size);
+extern void unwind_remove_table(void *handle, int init_only);
+
+static inline int
+arch_unwind_init_running(struct unwind_frame_info *info,
+ int (*callback) (struct unwind_frame_info *info,
+ void *arg),
+ void *arg)
+{
+ return 0;
+}
+
+static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
+{
+ return 0;
+}
+
+static inline void arch_unw_init_blocked(struct unwind_frame_info *info)
+{
+ return;
+}
+
+static inline void arch_unw_init_frame_info(struct unwind_frame_info *info,
+ struct pt_regs *regs)
+{
+ return;
+}
+
+#else
+
+#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
+#define UNW_FP(frame) ((void)(frame), 0)
+
+static inline void arc_unwind_init(void)
+{
+}
+
+static inline void arc_unwind_setup(void)
+{
+}
+#define unwind_add_table(a, b, c)
+#define unwind_remove_table(a, b)
+
+#endif /* CONFIG_ARC_DW2_UNWIND */
+
+#endif /* _ASM_ARC_UNWIND_H */
diff --git a/arch/arc/include/uapi/asm/Kbuild b/arch/arc/include/uapi/asm/Kbuild
new file mode 100644
index 00000000000..18fefaea73f
--- /dev/null
+++ b/arch/arc/include/uapi/asm/Kbuild
@@ -0,0 +1,12 @@
+# UAPI Header export list
+include include/uapi/asm-generic/Kbuild.asm
+header-y += elf.h
+header-y += page.h
+header-y += setup.h
+header-y += byteorder.h
+header-y += cachectl.h
+header-y += ptrace.h
+header-y += sigcontext.h
+header-y += signal.h
+header-y += swab.h
+header-y += unistd.h
diff --git a/arch/arc/include/uapi/asm/byteorder.h b/arch/arc/include/uapi/asm/byteorder.h
new file mode 100644
index 00000000000..9da71d415c3
--- /dev/null
+++ b/arch/arc/include/uapi/asm/byteorder.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARC_BYTEORDER_H
+#define __ASM_ARC_BYTEORDER_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#include <linux/byteorder/big_endian.h>
+#else
+#include <linux/byteorder/little_endian.h>
+#endif
+
+#endif /* __ASM_ARC_BYTEORDER_H */
diff --git a/arch/arc/include/uapi/asm/cachectl.h b/arch/arc/include/uapi/asm/cachectl.h
new file mode 100644
index 00000000000..51c73f0255b
--- /dev/null
+++ b/arch/arc/include/uapi/asm/cachectl.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARC_ASM_CACHECTL_H
+#define __ARC_ASM_CACHECTL_H
+
+/*
+ * ARC ABI flags defined for Android's fine-grained cacheflush requirements
+ */
+#define CF_I_INV 0x0002
+#define CF_D_FLUSH 0x0010
+#define CF_D_FLUSH_INV 0x0020
+
+#define CF_DEFAULT (CF_I_INV | CF_D_FLUSH)
+
+/*
+ * Standard flags expected by cacheflush system call users
+ */
+#define ICACHE CF_I_INV
+#define DCACHE CF_D_FLUSH
+#define BCACHE (CF_I_INV | CF_D_FLUSH)
+
+#endif
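A minimal userspace sketch of how these flags are meant to reach the ARC cacheflush syscall; the header paths and the (start, len, flags) argument order are assumptions here, not something this header guarantees.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/unistd.h>		/* __NR_cacheflush (assumed export path) */
#include <asm/cachectl.h>	/* ICACHE / DCACHE / BCACHE flags */

int main(void)
{
	static char buf[64];	/* pretend this held freshly written code */

	/* assumed signature: cacheflush(start, len, flags) */
	long ret = syscall(__NR_cacheflush, buf, sizeof(buf), BCACHE);

	printf("cacheflush returned %ld\n", ret);
	return 0;
}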
diff --git a/arch/arc/include/uapi/asm/elf.h b/arch/arc/include/uapi/asm/elf.h
new file mode 100644
index 00000000000..0f99ac8fcbb
--- /dev/null
+++ b/arch/arc/include/uapi/asm/elf.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_ELF_H
+#define _UAPI__ASM_ARC_ELF_H
+
+#include <asm/ptrace.h> /* for user_regs_struct */
+
+/* Machine specific ELF Hdr flags */
+#define EF_ARC_OSABI_MSK 0x00000f00
+#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
+#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
+
+typedef unsigned long elf_greg_t;
+typedef unsigned long elf_fpregset_t;
+
+#define ELF_NGREG (sizeof(struct user_regs_struct) / sizeof(elf_greg_t))
+
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+#endif
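For illustration, a tool inspecting an ARC ELF header could test the OSABI field in e_flags with the masks above; the e_flags value used here is made up.

#include <stdio.h>

#define EF_ARC_OSABI_MSK	0x00000f00
#define EF_ARC_OSABI_CURRENT	0x00000300

int main(void)
{
	unsigned int e_flags = 0x00000300;	/* hypothetical header value */

	if ((e_flags & EF_ARC_OSABI_MSK) == EF_ARC_OSABI_CURRENT)
		printf("v3 ABI (no legacy syscalls)\n");
	else
		printf("original/other OSABI\n");
	return 0;
}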
diff --git a/arch/arc/include/uapi/asm/page.h b/arch/arc/include/uapi/asm/page.h
new file mode 100644
index 00000000000..e5d41e08240
--- /dev/null
+++ b/arch/arc/include/uapi/asm/page.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _UAPI__ASM_ARC_PAGE_H
+#define _UAPI__ASM_ARC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#if defined(CONFIG_ARC_PAGE_SIZE_16K)
+#define PAGE_SHIFT 14
+#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
+#define PAGE_SHIFT 12
+#else
+/*
+ * Default 8k
+ * done this way (instead of under CONFIG_ARC_PAGE_SIZE_8K) because ad hoc
+ * user code (busybox appletlib.h) expects PAGE_SHIFT to be defined w/o
+ * using the correct uClibc header, and in their build our autoconf.h is
+ * not available
+ */
+#define PAGE_SHIFT 13
+#endif
+
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_OFFSET (0x80000000)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT) /* Default 8K */
+#define PAGE_OFFSET (0x80000000UL) /* Kernel starts at 2G onwards */
+#endif
+
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+
+#endif /* _UAPI__ASM_ARC_PAGE_H */
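A quick host-side check of the default (8K) values defined above; the sample address is arbitrary.

#include <stdio.h>

#define PAGE_SHIFT	13
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	printf("PAGE_SIZE = 0x%lx\n", PAGE_SIZE);		/* 0x2000 */
	printf("PAGE_MASK = 0x%x\n", (unsigned int)PAGE_MASK);	/* 0xffffe000 */
	printf("page base of 0x80003abc = 0x%lx\n",
	       0x80003abcUL & PAGE_MASK);			/* 0x80002000 */
	return 0;
}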
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
new file mode 100644
index 00000000000..6afa4f70207
--- /dev/null
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _UAPI__ASM_ARC_PTRACE_H
+#define _UAPI__ASM_ARC_PTRACE_H
+
+
+#ifndef __ASSEMBLY__
+/*
+ * Userspace ABI: Register state needed by
+ * -ptrace (gdbserver)
+ * -sigcontext (SA_SIGNINFO signal frame)
+ *
+ * This is to decouple pt_regs from user-space ABI, to be able to change it
+ * w/o affecting the ABI.
+ * The layout (initial padding) is, however, kept similar to pt_regs to allow
+ * some optimizations when copying pt_regs to/from user_regs_struct.
+ *
+ * Also, sigcontext only cares about the scratch regs, as that is what we
+ * really save/restore for signal handling.
+ */
+struct user_regs_struct {
+
+ struct scratch {
+ long pad;
+ long bta, lp_start, lp_end, lp_count;
+ long status32, ret, blink, fp, gp;
+ long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
+ long sp;
+ } scratch;
+ struct callee {
+ long pad;
+ long r25, r24, r23, r22, r21, r20;
+ long r19, r18, r17, r16, r15, r14, r13;
+ } callee;
+ long efa; /* break pt addr, for break points in delay slots */
+ long stop_pc; /* give dbg stop_pc directly after checking orig_r8 */
+};
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _UAPI__ASM_ARC_PTRACE_H */
diff --git a/arch/arc/include/uapi/asm/setup.h b/arch/arc/include/uapi/asm/setup.h
new file mode 100644
index 00000000000..a6d4e44938b
--- /dev/null
+++ b/arch/arc/include/uapi/asm/setup.h
@@ -0,0 +1,6 @@
+/*
+ * setup.h is part of userspace header ABI so UAPI scripts have to generate it
+ * even if there's nothing to export - causing an empty <uapi/asm/setup.h>.
+ * However, to prevent "patch" from discarding it, we add this placeholder
+ * comment
+ */
diff --git a/arch/arc/include/uapi/asm/sigcontext.h b/arch/arc/include/uapi/asm/sigcontext.h
new file mode 100644
index 00000000000..9678a11fc15
--- /dev/null
+++ b/arch/arc/include/uapi/asm/sigcontext.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_SIGCONTEXT_H
+#define _ASM_ARC_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure - contains all info to do with the state
+ * before the signal handler was invoked.
+ */
+struct sigcontext {
+ struct user_regs_struct regs;
+};
+
+#endif /* _ASM_ARC_SIGCONTEXT_H */
diff --git a/arch/arc/include/uapi/asm/signal.h b/arch/arc/include/uapi/asm/signal.h
new file mode 100644
index 00000000000..fad62f7f42d
--- /dev/null
+++ b/arch/arc/include/uapi/asm/signal.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef _ASM_ARC_SIGNAL_H
+#define _ASM_ARC_SIGNAL_H
+
+/*
+ * This is much needed for ARC sigreturn optimization.
+ * This allows uClibc to piggyback the addr of a sigreturn stub in sigaction,
+ * which allows sigreturn-based re-entry into the kernel after handling a signal.
+ * W/o this, the kernel needs to "synthesize" the sigreturn trampoline on the user
+ * mode stack which in turn forces the following:
+ * -TLB Flush (after making the stack page executable)
+ * -Cache line Flush (to make I/D Cache lines coherent)
+ */
+#define SA_RESTORER 0x04000000
+
+#include <asm-generic/signal.h>
+
+#endif /* _ASM_ARC_SIGNAL_H */
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h
new file mode 100644
index 00000000000..095599a7319
--- /dev/null
+++ b/arch/arc/include/uapi/asm/swab.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Support single cycle endian-swap insn in ARC700 4.10
+ *
+ * vineetg: June 2009
+ * -Better htonl implementation (5 instead of 9 ALU instructions)
+ * -Hardware assisted single cycle bswap (Use Case of ARC custom instrn)
+ */
+
+#ifndef __ASM_ARC_SWAB_H
+#define __ASM_ARC_SWAB_H
+
+#include <linux/types.h>
+
+/* Native single cycle endian swap insn */
+#ifdef CONFIG_ARC_HAS_SWAPE
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " swape %0, %1 \n" \
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#else
+
+/* Several ways of Endian-Swap Emulation for ARC
+ * 0: kernel generic
+ * 1: ARC optimised "C"
+ * 2: ARC Custom instruction
+ */
+#define ARC_BSWAP_TYPE 1
+
+#if (ARC_BSWAP_TYPE == 1) /******* Software only ********/
+
+/* The kernel default implementation of htonl is
+ * return x<<24 | x>>24 |
+ * (x & (__u32)0x0000ff00UL)<<8 | (x & (__u32)0x00ff0000UL)>>8;
+ *
+ * This generates 9 instructions on ARC (excluding the ld/st)
+ *
+ * 8051fd8c: ld r3,[r7,20] ; Mem op : Get the value to be swapped
+ * 8051fd98: asl r5,r3,24 ; get 3rd Byte
+ * 8051fd9c: lsr r2,r3,24 ; get 0th Byte
+ * 8051fda0: and r4,r3,0xff00
+ * 8051fda8: asl r4,r4,8 ; get 1st Byte
+ * 8051fdac: and r3,r3,0x00ff0000
+ * 8051fdb4: or r2,r2,r5 ; combine 0th and 3rd Bytes
+ * 8051fdb8: lsr r3,r3,8 ; 2nd Byte at correct place in Dst Reg
+ * 8051fdbc: or r2,r2,r4 ; combine 0,3 Bytes with 1st Byte
+ * 8051fdc0: or r2,r2,r3 ; combine 0,3,1 Bytes with 2nd Byte
+ * 8051fdc4: st r2,[r1,20] ; Mem op : save result back to mem
+ *
+ * Joern suggested a better "C" algorithm which is great since
+ * (1) It is portable to any architecture
+ * (2) At the same time it takes advantage of the ARC ISA (rotate instructions)
+ */
+
+#define __arch_swab32(x) \
+({ unsigned long __in = (x), __tmp; \
+ __tmp = __in << 8 | __in >> 24; /* ror tmp,in,24 */ \
+ __in = __in << 24 | __in >> 8; /* ror in,in,8 */ \
+ __tmp ^= __in; \
+ __tmp &= 0xff00ff; \
+ __tmp ^ __in; \
+})
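The same rotate-based swap, written as a stand-alone C function that can be compiled and checked on any host (illustrative only; the function name is made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t swab32_ror(uint32_t in)
{
	uint32_t tmp;

	tmp  = (in << 8) | (in >> 24);	/* ror tmp,in,24 */
	in   = (in << 24) | (in >> 8);	/* ror in,in,8   */
	tmp ^= in;
	tmp &= 0xff00ff;
	return tmp ^ in;		/* bytes 0,2 from in; bytes 1,3 patched via tmp */
}

int main(void)
{
	printf("%08x\n", swab32_ror(0x12345678));	/* prints 78563412 */
	return 0;
}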
+
+#elif (ARC_BSWAP_TYPE == 2)	/* Custom single cycle bswap instruction */
+
+#define __arch_swab32(x) \
+({ \
+ unsigned int tmp = x; \
+ __asm__( \
+ " .extInstruction bswap, 7, 0x00, SUFFIX_NONE, SYNTAX_2OP \n"\
+ " bswap %0, %1 \n"\
+ : "=r" (tmp) \
+ : "r" (tmp)); \
+ tmp; \
+})
+
+#endif /* ARC_BSWAP_TYPE=zzz */
+
+#endif /* CONFIG_ARC_HAS_SWAPE */
+
+#if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+
+#endif
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
new file mode 100644
index 00000000000..6f30484f34b
--- /dev/null
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/******** no-legacy-syscalls-ABI *******/
+
+#define __ARCH_WANT_SYS_EXECVE
+#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_FORK
+
+#define sys_mmap2 sys_mmap_pgoff
+
+#include <asm-generic/unistd.h>
+
+#define NR_syscalls __NR_syscalls
+
+/* ARC specific syscall */
+#define __NR_cacheflush (__NR_arch_specific_syscall + 0)
+#define __NR_arc_settls (__NR_arch_specific_syscall + 1)
+#define __NR_arc_gettls (__NR_arch_specific_syscall + 2)
+
+__SYSCALL(__NR_cacheflush, sys_cacheflush)
+__SYSCALL(__NR_arc_settls, sys_arc_settls)
+__SYSCALL(__NR_arc_gettls, sys_arc_gettls)
+
+
+/* Generic syscall (fs/filesystems.c) - lost in asm-generic/unistd.h */
+#define __NR_sysfs (__NR_arch_specific_syscall + 3)
+__SYSCALL(__NR_sysfs, sys_sysfs)
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
new file mode 100644
index 00000000000..c242ef07ba7
--- /dev/null
+++ b/arch/arc/kernel/Makefile
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+# Pass UTS_MACHINE for user_regset definition
+CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"'
+
+obj-y := arcksyms.o setup.o irq.o time.o reset.o ptrace.o entry.o process.o
+obj-y += signal.o traps.o sys.o troubleshoot.o stacktrace.o disasm.o clk.o
+obj-y += devtree.o
+
+obj-$(CONFIG_MODULES) += arcksyms.o module.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
+
+obj-$(CONFIG_ARC_FPU_SAVE_RESTORE) += fpu.o
+CFLAGS_fpu.o += -mdpfp
+
+ifdef CONFIG_ARC_DW2_UNWIND
+CFLAGS_ctx_sw.o += -fno-omit-frame-pointer
+obj-y += ctx_sw.o
+else
+obj-y += ctx_sw_asm.o
+endif
+
+extra-y := vmlinux.lds head.o
diff --git a/arch/arc/kernel/arc_hostlink.c b/arch/arc/kernel/arc_hostlink.c
new file mode 100644
index 00000000000..47b2a17cc52
--- /dev/null
+++ b/arch/arc/kernel/arc_hostlink.c
@@ -0,0 +1,58 @@
+/*
+ * arc_hostlink.c: Pseudo-driver for Metaware provided "hostlink" facility
+ *
+ * Allows Linux userland access to host in absence of any peripherals.
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/fs.h> /* file_operations */
+#include <linux/miscdevice.h>
+#include <linux/mm.h> /* VM_IO */
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+static unsigned char __HOSTLINK__[4 * PAGE_SIZE] __aligned(PAGE_SIZE);
+
+static int arc_hl_mmap(struct file *fp, struct vm_area_struct *vma)
+{
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot)) {
+ pr_warn("Hostlink buffer mmap ERROR\n");
+ return -EAGAIN;
+ }
+ return 0;
+}
+
+static long arc_hl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+	/* we only support returning the physical addr to mmap in user space */
+ put_user((unsigned int)__HOSTLINK__, (int __user *)arg);
+ return 0;
+}
+
+static const struct file_operations arc_hl_fops = {
+ .unlocked_ioctl = arc_hl_ioctl,
+ .mmap = arc_hl_mmap,
+};
+
+static struct miscdevice arc_hl_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "hostlink",
+ .fops = &arc_hl_fops
+};
+
+static int __init arc_hl_init(void)
+{
+ pr_info("ARC Hostlink driver mmap at 0x%p\n", __HOSTLINK__);
+ return misc_register(&arc_hl_dev);
+}
+module_init(arc_hl_init);
diff --git a/arch/arc/kernel/arcksyms.c b/arch/arc/kernel/arcksyms.c
new file mode 100644
index 00000000000..4d9e77724be
--- /dev/null
+++ b/arch/arc/kernel/arcksyms.c
@@ -0,0 +1,56 @@
+/*
+ * arcksyms.c - Exporting symbols not exportable from their own sources
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+
+/* libgcc functions, not part of kernel sources */
+extern void __ashldi3(void);
+extern void __ashrdi3(void);
+extern void __divsi3(void);
+extern void __divsf3(void);
+extern void __lshrdi3(void);
+extern void __modsi3(void);
+extern void __muldi3(void);
+extern void __ucmpdi2(void);
+extern void __udivsi3(void);
+extern void __umodsi3(void);
+extern void __cmpdi2(void);
+extern void __fixunsdfsi(void);
+extern void __muldf3(void);
+extern void __divdf3(void);
+extern void __floatunsidf(void);
+extern void __floatunsisf(void);
+
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__divsi3);
+EXPORT_SYMBOL(__divsf3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(__modsi3);
+EXPORT_SYMBOL(__muldi3);
+EXPORT_SYMBOL(__ucmpdi2);
+EXPORT_SYMBOL(__udivsi3);
+EXPORT_SYMBOL(__umodsi3);
+EXPORT_SYMBOL(__cmpdi2);
+EXPORT_SYMBOL(__fixunsdfsi);
+EXPORT_SYMBOL(__muldf3);
+EXPORT_SYMBOL(__divdf3);
+EXPORT_SYMBOL(__floatunsidf);
+EXPORT_SYMBOL(__floatunsisf);
+
+/* ARC optimised assembler routines */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strlen);
diff --git a/arch/arc/kernel/asm-offsets.c b/arch/arc/kernel/asm-offsets.c
new file mode 100644
index 00000000000..0dc148ebce7
--- /dev/null
+++ b/arch/arc/kernel/asm-offsets.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/hardirq.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
+
+ BLANK();
+
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_CALLEE_REG, offsetof(struct thread_struct, callee_reg));
+#ifdef CONFIG_ARC_CURR_IN_REG
+ DEFINE(THREAD_USER_R25, offsetof(struct thread_struct, user_r25));
+#endif
+ DEFINE(THREAD_FAULT_ADDR,
+ offsetof(struct thread_struct, fault_address));
+
+ BLANK();
+
+ DEFINE(THREAD_INFO_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(THREAD_INFO_PREEMPT_COUNT,
+ offsetof(struct thread_info, preempt_count));
+
+ BLANK();
+
+ DEFINE(TASK_ACT_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+
+ DEFINE(MM_CTXT, offsetof(struct mm_struct, context));
+ DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
+
+ DEFINE(MM_CTXT_ASID, offsetof(mm_context_t, asid));
+
+ BLANK();
+
+ DEFINE(PT_status32, offsetof(struct pt_regs, status32));
+ DEFINE(PT_orig_r8, offsetof(struct pt_regs, orig_r8_word));
+ DEFINE(PT_sp, offsetof(struct pt_regs, sp));
+ DEFINE(PT_r0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_r1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_r2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_r3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_r4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_r5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_r6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_r7, offsetof(struct pt_regs, r7));
+
+ return 0;
+}
diff --git a/arch/arc/kernel/clk.c b/arch/arc/kernel/clk.c
new file mode 100644
index 00000000000..66ce0dc917f
--- /dev/null
+++ b/arch/arc/kernel/clk.c
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/clk.h>
+
+unsigned long core_freq = 800000000;
+
+/*
+ * As of now we default to device-tree provided clock
+ * In future we can determine this in early boot
+ */
+int arc_set_core_freq(unsigned long freq)
+{
+ core_freq = freq;
+ return 0;
+}
diff --git a/arch/arc/kernel/ctx_sw.c b/arch/arc/kernel/ctx_sw.c
new file mode 100644
index 00000000000..60844dac613
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw.c
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -"C" version of lowest level context switch asm macro called by schedular
+ * gcc doesn't generate the dward CFI info for hand written asm, hence can't
+ * backtrace out of it (e.g. tasks sleeping in kernel).
+ * So we cheat a bit by writing almost similar code in inline-asm.
+ * -This is a hacky way of doing things, but there is no other simple way.
+ * I don't want/intend to extend unwinding code to understand raw asm
+ */
+
+#include <asm/asm-offsets.h>
+#include <linux/sched.h>
+
+struct task_struct *__sched
+__switch_to(struct task_struct *prev_task, struct task_struct *next_task)
+{
+ unsigned int tmp;
+ unsigned int prev = (unsigned int)prev_task;
+ unsigned int next = (unsigned int)next_task;
+ int num_words_to_skip = 1;
+#ifdef CONFIG_ARC_CURR_IN_REG
+ num_words_to_skip++;
+#endif
+
+ __asm__ __volatile__(
+	/* FP/BLINK save generated by gcc (standard function prologue) */
+ "st.a r13, [sp, -4] \n\t"
+ "st.a r14, [sp, -4] \n\t"
+ "st.a r15, [sp, -4] \n\t"
+ "st.a r16, [sp, -4] \n\t"
+ "st.a r17, [sp, -4] \n\t"
+ "st.a r18, [sp, -4] \n\t"
+ "st.a r19, [sp, -4] \n\t"
+ "st.a r20, [sp, -4] \n\t"
+ "st.a r21, [sp, -4] \n\t"
+ "st.a r22, [sp, -4] \n\t"
+ "st.a r23, [sp, -4] \n\t"
+ "st.a r24, [sp, -4] \n\t"
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "st.a r25, [sp, -4] \n\t"
+#endif
+ "sub sp, sp, %4 \n\t" /* create gutter at top */
+
+ /* set ksp of outgoing task in tsk->thread.ksp */
+ "st.as sp, [%3, %1] \n\t"
+
+ "sync \n\t"
+
+ /*
+ * setup _current_task with incoming tsk.
+ * optionally, set r25 to that as well
+ * For SMP extra work to get to &_current_task[cpu]
+ * (open coded SET_CURR_TASK_ON_CPU)
+ */
+#ifndef CONFIG_SMP
+ "st %2, [@_current_task] \n\t"
+#else
+ "lr r24, [identity] \n\t"
+ "lsr r24, r24, 8 \n\t"
+ "bmsk r24, r24, 7 \n\t"
+ "add2 r24, @_current_task, r24 \n\t"
+ "st %2, [r24] \n\t"
+#endif
+#ifdef CONFIG_ARC_CURR_IN_REG
+ "mov r25, %2 \n\t"
+#endif
+
+ /* get ksp of incoming task from tsk->thread.ksp */
+ "ld.as sp, [%2, %1] \n\t"
+
+	/* start loading its CALLEE reg file */
+
+ "add sp, sp, %4 \n\t" /* skip gutter at top */
+
+#ifndef CONFIG_ARC_CURR_IN_REG
+ "ld.ab r25, [sp, 4] \n\t"
+#endif
+ "ld.ab r24, [sp, 4] \n\t"
+ "ld.ab r23, [sp, 4] \n\t"
+ "ld.ab r22, [sp, 4] \n\t"
+ "ld.ab r21, [sp, 4] \n\t"
+ "ld.ab r20, [sp, 4] \n\t"
+ "ld.ab r19, [sp, 4] \n\t"
+ "ld.ab r18, [sp, 4] \n\t"
+ "ld.ab r17, [sp, 4] \n\t"
+ "ld.ab r16, [sp, 4] \n\t"
+ "ld.ab r15, [sp, 4] \n\t"
+ "ld.ab r14, [sp, 4] \n\t"
+ "ld.ab r13, [sp, 4] \n\t"
+
+	/* last (ret value) = prev : although for ARC it is mov r0, r0 */
+ "mov %0, %3 \n\t"
+
+	/* FP/BLINK restore generated by gcc (standard func epilogue) */
+
+ : "=r"(tmp)
+ : "n"((TASK_THREAD + THREAD_KSP) / 4), "r"(next), "r"(prev),
+ "n"(num_words_to_skip * 4)
+ : "blink"
+ );
+
+ return (struct task_struct *)tmp;
+}
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
new file mode 100644
index 00000000000..d8972345e4c
--- /dev/null
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Aug 2009
+ * -Moved core context switch macro out of entry.S into this file.
+ * -This is the more "natural" hand written assembler
+ */
+
+#include <asm/entry.h> /* For the SAVE_* macros */
+#include <asm/asm-offsets.h>
+#include <asm/linkage.h>
+
+;################### Low Level Context Switch ##########################
+
+ .section .sched.text,"ax",@progbits
+ .align 4
+ .global __switch_to
+ .type __switch_to, @function
+__switch_to:
+
+ /* Save regs on kernel mode stack of task */
+ st.a blink, [sp, -4]
+ st.a fp, [sp, -4]
+ SAVE_CALLEE_SAVED_KERNEL
+
+ /* Save the now KSP in task->thread.ksp */
+ st.as sp, [r0, (TASK_THREAD + THREAD_KSP)/4]
+
+ /*
+ * Return last task in r0 (return reg)
+ * On ARC, Return reg = First Arg reg = r0.
+ * Since we already have last task in r0,
+ * don't need to do anything special to return it
+ */
+
+ /* hardware memory barrier */
+ sync
+
+ /*
+ * switch to new task, contained in r1
+ * Temp reg r3 is required to get the ptr to store val
+ */
+ SET_CURR_TASK_ON_CPU r1, r3
+
+ /* reload SP with kernel mode stack pointer in task->thread.ksp */
+ ld.as sp, [r1, (TASK_THREAD + THREAD_KSP)/4]
+
+ /* restore the registers */
+ RESTORE_CALLEE_SAVED_KERNEL
+ ld.ab fp, [sp, 4]
+ ld.ab blink, [sp, 4]
+ j [blink]
+
+ARC_EXIT __switch_to
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
new file mode 100644
index 00000000000..bdee3a81205
--- /dev/null
+++ b/arch/arc/kernel/devtree.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * Based on reduced version of METAG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#include <linux/init.h>
+#include <linux/reboot.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <asm/prom.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+/* called from unflatten_device_tree() to bootstrap devicetree itself */
+void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
+{
+ return __va(memblock_alloc(size, align));
+}
+
+/**
+ * setup_machine_fdt - Machine setup when a dtb was passed to the kernel
+ * @dt: virtual address pointer to dt blob
+ *
+ * If a dtb was passed to the kernel, then use it to choose the correct
+ * machine_desc and to setup the system.
+ */
+struct machine_desc * __init setup_machine_fdt(void *dt)
+{
+ struct boot_param_header *devtree = dt;
+ struct machine_desc *mdesc = NULL, *mdesc_best = NULL;
+ unsigned int score, mdesc_score = ~1;
+ unsigned long dt_root;
+ const char *model, *compat;
+ void *clk;
+ char manufacturer[16];
+ unsigned long len;
+
+ /* check device tree validity */
+ if (be32_to_cpu(devtree->magic) != OF_DT_HEADER)
+ return NULL;
+
+ initial_boot_params = devtree;
+ dt_root = of_get_flat_dt_root();
+
+ /*
+ * The kernel could be multi-platform enabled, thus could have many
+ * "baked-in" machine descriptors. Search thru all for the best
+ * "compatible" string match.
+ */
+ for_each_machine_desc(mdesc) {
+ score = of_flat_dt_match(dt_root, mdesc->dt_compat);
+ if (score > 0 && score < mdesc_score) {
+ mdesc_best = mdesc;
+ mdesc_score = score;
+ }
+ }
+ if (!mdesc_best) {
+ const char *prop;
+ long size;
+
+ pr_err("\n unrecognized device tree list:\n[ ");
+
+ prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
+ if (prop) {
+ while (size > 0) {
+ printk("'%s' ", prop);
+ size -= strlen(prop) + 1;
+ prop += strlen(prop) + 1;
+ }
+ }
+ printk("]\n\n");
+
+ machine_halt();
+ }
+
+ /* compat = "<manufacturer>,<model>" */
+ compat = mdesc_best->dt_compat[0];
+
+ model = strchr(compat, ',');
+ if (model)
+ model++;
+
+ strlcpy(manufacturer, compat, model ? model - compat : strlen(compat));
+
+ pr_info("Board \"%s\" from %s (Manufacturer)\n", model, manufacturer);
+
+ /* Retrieve various information from the /chosen node */
+ of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
+
+ /* Initialize {size,address}-cells info */
+ of_scan_flat_dt(early_init_dt_scan_root, NULL);
+
+ /* Setup memory, calling early_init_dt_add_memory_arch */
+ of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+
+ clk = of_get_flat_dt_prop(dt_root, "clock-frequency", &len);
+ if (clk)
+ arc_set_core_freq(of_read_ulong(clk, len/4));
+
+ return mdesc_best;
+}
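The "<manufacturer>,<model>" split done above, condensed into a host-side sketch; the compatible string is a made-up example.

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *compat = "vendor,some-board";	/* hypothetical */
	char manufacturer[16];
	const char *model = strchr(compat, ',');

	if (model)
		model++;

	/* keep only the "<manufacturer>" prefix (strlcpy-like, via snprintf) */
	snprintf(manufacturer, sizeof(manufacturer), "%.*s",
		 model ? (int)(model - compat - 1) : (int)strlen(compat), compat);

	printf("Board \"%s\" from %s (Manufacturer)\n",
	       model ? model : compat, manufacturer);
	return 0;
}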
+
+/*
+ * Copy the flattened DT out of .init since unflattening doesn't copy strings
+ * and the normal DT APIs refs them from orig flat DT
+ */
+void __init copy_devtree(void)
+{
+ void *alloc = early_init_dt_alloc_memory_arch(
+ be32_to_cpu(initial_boot_params->totalsize), 64);
+ if (alloc) {
+ memcpy(alloc, initial_boot_params,
+ be32_to_cpu(initial_boot_params->totalsize));
+ initial_boot_params = alloc;
+ }
+}
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
new file mode 100644
index 00000000000..2f390289a79
--- /dev/null
+++ b/arch/arc/kernel/disasm.c
@@ -0,0 +1,538 @@
+/*
+ * several functions that help interpret ARC instructions
+ * used for unaligned accesses, kprobes and kgdb
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <asm/disasm.h>
+#include <asm/uaccess.h>
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \
+ defined(CONFIG_KPROBES)
+
+/* disasm_instr: Analyses instruction at addr, stores
+ * findings in *state
+ */
+void __kprobes disasm_instr(unsigned long addr, struct disasm_state *state,
+ int userspace, struct pt_regs *regs, struct callee_regs *cregs)
+{
+ int fieldA = 0;
+ int fieldC = 0, fieldCisReg = 0;
+ uint16_t word1 = 0, word0 = 0;
+ int subopcode, is_linked, op_format;
+ uint16_t *ins_ptr;
+ uint16_t ins_buf[4];
+ int bytes_not_copied = 0;
+
+ memset(state, 0, sizeof(struct disasm_state));
+
+ /* This fetches the upper part of the 32 bit instruction
+ * in both Little Endian and Big Endian configurations. */
+ if (userspace) {
+ bytes_not_copied = copy_from_user(ins_buf,
+ (const void __user *) addr, 8);
+ if (bytes_not_copied > 6)
+ goto fault;
+ ins_ptr = ins_buf;
+ } else {
+ ins_ptr = (uint16_t *) addr;
+ }
+
+ word1 = *((uint16_t *)addr);
+
+ state->major_opcode = (word1 >> 11) & 0x1F;
+
+ /* Check if this is a 32 bit or a 16 bit instruction */
+ if (state->major_opcode < 0x0B) {
+ if (bytes_not_copied > 4)
+ goto fault;
+ state->instr_len = 4;
+ word0 = *((uint16_t *)(addr+2));
+ state->words[0] = (word1 << 16) | word0;
+ } else {
+ state->instr_len = 2;
+ state->words[0] = word1;
+ }
+
+ /* Read the second word in case of limm */
+ word1 = *((uint16_t *)(addr + state->instr_len));
+ word0 = *((uint16_t *)(addr + state->instr_len + 2));
+ state->words[1] = (word1 << 16) | word0;
+
+ switch (state->major_opcode) {
+ case op_Bcc:
+ state->is_branch = 1;
+
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 16)) ?
+ FIELD_s25(state->words[0]) :
+ FIELD_s21(state->words[0]);
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->flow = direct_jump;
+ break;
+
+ case op_BLcc:
+ if (IS_BIT(state->words[0], 16)) {
+ /* Branch and Link */
+ /* unconditional branch s25, conditional branch s21 */
+ fieldA = (IS_BIT(state->words[0], 17)) ?
+ (FIELD_s25(state->words[0]) & ~0x3) :
+ FIELD_s21(state->words[0]);
+
+ state->flow = direct_call;
+ } else {
+ /* Branch On Compare */
+ fieldA = FIELD_s9(state->words[0]) & ~0x3;
+ state->flow = direct_jump;
+ }
+
+ state->delay_slot = IS_BIT(state->words[0], 5);
+ state->target = fieldA + (addr & ~0x3);
+ state->is_branch = 1;
+ break;
+
+ case op_LD: /* LD<zz> a,[b,s9] */
+ state->write = 0;
+ state->di = BITS(state->words[0], 11, 11);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 7, 8);
+ state->aa = BITS(state->words[0], 9, 10);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->aa = 0;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src2 = FIELD_s9(state->words[0]);
+ state->dest = FIELD_A(state->words[0]);
+ state->pref = (state->dest == REG_LIMM);
+ break;
+
+ case op_ST:
+ state->write = 1;
+ state->di = BITS(state->words[0], 5, 5);
+ if (state->di)
+ break;
+ state->aa = BITS(state->words[0], 3, 4);
+ state->zz = BITS(state->words[0], 1, 2);
+ state->src1 = FIELD_C(state->words[0]);
+ if (state->src1 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->src1, regs, cregs);
+ }
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->aa = 0;
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->wb_reg, regs, cregs);
+ }
+ state->src3 = FIELD_s9(state->words[0]);
+ break;
+
+ case op_MAJOR_4:
+ subopcode = MINOR_OPCODE(state->words[0]);
+ switch (subopcode) {
+ case 32: /* Jcc */
+ case 33: /* Jcc.D */
+ case 34: /* JLcc */
+ case 35: /* JLcc.D */
+ is_linked = 0;
+
+ if (subopcode == 33 || subopcode == 35)
+ state->delay_slot = 1;
+
+ if (subopcode == 34 || subopcode == 35)
+ is_linked = 1;
+
+ fieldCisReg = 0;
+ op_format = BITS(state->words[0], 22, 23);
+ if (op_format == 0 || ((op_format == 3) &&
+ (!IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+
+ if (fieldC == REG_LIMM) {
+ fieldC = state->words[1];
+ state->instr_len += 4;
+ } else {
+ fieldCisReg = 1;
+ }
+ } else if (op_format == 1 || ((op_format == 3)
+ && (IS_BIT(state->words[0], 5)))) {
+ fieldC = FIELD_C(state->words[0]);
+ } else {
+ /* op_format == 2 */
+ fieldC = FIELD_s12(state->words[0]);
+ }
+
+ if (!fieldCisReg) {
+ state->target = fieldC;
+ state->flow = is_linked ?
+ direct_call : direct_jump;
+ } else {
+ state->target = get_reg(fieldC, regs, cregs);
+ state->flow = is_linked ?
+ indirect_call : indirect_jump;
+ }
+ state->is_branch = 1;
+ break;
+
+ case 40: /* LPcc */
+ if (BITS(state->words[0], 22, 23) == 3) {
+ /* Conditional LPcc u7 */
+ fieldC = FIELD_C(state->words[0]);
+
+ fieldC = fieldC << 1;
+ fieldC += (addr & ~0x03);
+ state->is_branch = 1;
+ state->flow = direct_jump;
+ state->target = fieldC;
+ }
+ /* For Unconditional lp, next pc is the fall through
+ * which is updated */
+ break;
+
+ case 48 ... 55: /* LD a,[b,c] */
+ state->di = BITS(state->words[0], 15, 15);
+ if (state->di)
+ break;
+ state->x = BITS(state->words[0], 16, 16);
+ state->zz = BITS(state->words[0], 17, 18);
+ state->aa = BITS(state->words[0], 22, 23);
+ state->wb_reg = FIELD_B(state->words[0]);
+ if (state->wb_reg == REG_LIMM) {
+ state->instr_len += 4;
+ state->src1 = state->words[1];
+ } else {
+ state->src1 = get_reg(state->wb_reg, regs,
+ cregs);
+ }
+ state->src2 = FIELD_C(state->words[0]);
+ if (state->src2 == REG_LIMM) {
+ state->instr_len += 4;
+ state->src2 = state->words[1];
+ } else {
+ state->src2 = get_reg(state->src2, regs,
+ cregs);
+ }
+ state->dest = FIELD_A(state->words[0]);
+ if (state->dest == REG_LIMM)
+ state->pref = 1;
+ break;
+
+ case 10: /* MOV */
+ /* still need to check for limm to extract instr len */
+ /* MOV is a special case because it only takes 2 args */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if (FIELD_C(state->words[0]) == REG_LIMM)
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+
+
+ default:
+ /* Not a Load, Jump or Loop instruction */
+ /* still need to check for limm to extract instr len */
+ switch (BITS(state->words[0], 22, 23)) {
+ case 0: /* OP a,b,c */
+ if ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+ case 1: /* OP a,b,u6 */
+ break;
+ case 2: /* OP b,b,s12 */
+ break;
+ case 3: /* OP.cc b,b,c/u6 */
+ if ((!IS_BIT(state->words[0], 5)) &&
+ ((FIELD_B(state->words[0]) == REG_LIMM) ||
+ (FIELD_C(state->words[0]) == REG_LIMM)))
+ state->instr_len += 4;
+ break;
+ }
+ break;
+ }
+ break;
+
+ /* 16 Bit Instructions */
+ case op_LD_ADD: /* LD_S|LDB_S|LDW_S a,[b,c] */
+ state->zz = BITS(state->words[0], 3, 4);
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->dest = FIELD_S_A(state->words[0]);
+ break;
+
+ case op_ADD_MOV_CMP:
+ /* check for limm, ignore mov_s h,b (== mov_s 0,b) */
+ if ((BITS(state->words[0], 3, 4) < 3) &&
+ (FIELD_S_H(state->words[0]) == REG_LIMM))
+ state->instr_len += 4;
+ break;
+
+ case op_S:
+ subopcode = BITS(state->words[0], 5, 7);
+ switch (subopcode) {
+ case 0: /* j_s */
+ case 1: /* j_s.d */
+ case 2: /* jl_s */
+ case 3: /* jl_s.d */
+ state->target = get_reg(FIELD_S_B(state->words[0]),
+ regs, cregs);
+ state->delay_slot = subopcode & 1;
+ state->flow = (subopcode >= 2) ?
+ direct_call : indirect_jump;
+ break;
+ case 7:
+ switch (BITS(state->words[0], 8, 10)) {
+ case 4: /* jeq_s [blink] */
+ case 5: /* jne_s [blink] */
+ case 6: /* j_s [blink] */
+ case 7: /* j_s.d [blink] */
+ state->delay_slot = (subopcode == 7);
+ state->flow = indirect_jump;
+ state->target = get_reg(31, regs, cregs);
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+ break;
+
+ case op_LD_S: /* LD_S c, [b, u7] */
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_LDB_S:
+ case op_STB_S:
+ /* no further handling required as byte accesses should not
+ * cause an unaligned access exception */
+ state->zz = 1;
+ break;
+
+ case op_LDWX_S: /* LDWX_S c, [b, u6] */
+ state->x = 1;
+ /* intentional fall-through */
+
+ case op_LDW_S: /* LDW_S c, [b, u6] */
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src2 = FIELD_S_u6(state->words[0]);
+ state->dest = FIELD_S_C(state->words[0]);
+ break;
+
+ case op_ST_S: /* ST_S c, [b, u7] */
+ state->write = 1;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ break;
+
+ case op_STW_S: /* STW_S c,[b,u6] */
+ state->write = 1;
+ state->zz = 2;
+ state->src1 = get_reg(FIELD_S_C(state->words[0]), regs, cregs);
+ state->src2 = get_reg(FIELD_S_B(state->words[0]), regs, cregs);
+ state->src3 = FIELD_S_u6(state->words[0]);
+ break;
+
+ case op_SP: /* LD_S|LDB_S b,[sp,u7], ST_S|STB_S b,[sp,u7] */
+ /* note: we are ignoring possibility of:
+ * ADD_S, SUB_S, PUSH_S, POP_S as these should not
+ * cause unaligned exception anyway */
+ state->write = BITS(state->words[0], 6, 6);
+ state->zz = BITS(state->words[0], 5, 5);
+ if (state->zz)
+ break; /* byte accesses should not come here */
+ if (!state->write) {
+ state->src1 = get_reg(28, regs, cregs);
+ state->src2 = FIELD_S_u7(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ } else {
+ state->src1 = get_reg(FIELD_S_B(state->words[0]), regs,
+ cregs);
+ state->src2 = get_reg(28, regs, cregs);
+ state->src3 = FIELD_S_u7(state->words[0]);
+ }
+ break;
+
+ case op_GP: /* LD_S|LDB_S|LDW_S r0,[gp,s11/s9/s10] */
+ /* note: ADD_S r0, gp, s11 is ignored */
+ state->zz = BITS(state->words[0], 9, 10);
+ state->src1 = get_reg(26, regs, cregs);
+ state->src2 = state->zz ? FIELD_S_s10(state->words[0]) :
+ FIELD_S_s11(state->words[0]);
+ state->dest = 0;
+ break;
+
+ case op_Pcl: /* LD_S b,[pcl,u10] */
+ state->src1 = regs->ret & ~3;
+ state->src2 = FIELD_S_u10(state->words[0]);
+ state->dest = FIELD_S_B(state->words[0]);
+ break;
+
+ case op_BR_S:
+ state->target = FIELD_S_s8(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_B_S:
+ fieldA = (BITS(state->words[0], 9, 10) == 3) ?
+ FIELD_S_s7(state->words[0]) :
+ FIELD_S_s10(state->words[0]);
+ state->target = fieldA + (addr & ~0x03);
+ state->flow = direct_jump;
+ state->is_branch = 1;
+ break;
+
+ case op_BL_S:
+ state->target = FIELD_S_s13(state->words[0]) + (addr & ~0x03);
+ state->flow = direct_call;
+ state->is_branch = 1;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bytes_not_copied <= (8 - state->instr_len))
+ return;
+
+fault: state->fault = 1;
+}
+
+long __kprobes get_reg(int reg, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ if (reg <= 12) {
+ p = &regs->r0;
+ return p[-reg];
+ }
+
+ if (cregs && (reg <= 25)) {
+ p = &cregs->r13;
+ return p[13-reg];
+ }
+
+ if (reg == 26)
+ return regs->r26;
+ if (reg == 27)
+ return regs->fp;
+ if (reg == 28)
+ return regs->sp;
+ if (reg == 31)
+ return regs->blink;
+
+ return 0;
+}
+
+void __kprobes set_reg(int reg, long val, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ long *p;
+
+ switch (reg) {
+ case 0 ... 12:
+ p = &regs->r0;
+ p[-reg] = val;
+ break;
+ case 13 ... 25:
+ if (cregs) {
+ p = &cregs->r13;
+ p[13-reg] = val;
+ }
+ break;
+ case 26:
+ regs->r26 = val;
+ break;
+ case 27:
+ regs->fp = val;
+ break;
+ case 28:
+ regs->sp = val;
+ break;
+ case 31:
+ regs->blink = val;
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Disassembles the insn at @pc and sets @next_pc to the next PC (which could
+ * be @pc + 2/4/6, as the ARCompact ISA allows free intermixing of 16/32 bit
+ * insns).
+ *
+ * If @pc is a branch:
+ *  -@tgt_if_br is set to the branch target.
+ *  -If the branch has a delay slot, @next_pc is updated with the actual next PC.
+ */
+int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
+ struct callee_regs *cregs,
+ unsigned long *next_pc, unsigned long *tgt_if_br)
+{
+ struct disasm_state instr;
+
+ memset(&instr, 0, sizeof(struct disasm_state));
+ disasm_instr(pc, &instr, 0, regs, cregs);
+
+ *next_pc = pc + instr.instr_len;
+
+ /* Instructions with two possible targets: branch, jump and loop */
+ if (instr.is_branch)
+ *tgt_if_br = instr.target;
+
+ /* For instructions with delay slots, the fall through is the
+ * instruction following the one in the delay slot.
+ */
+ if (instr.delay_slot) {
+ struct disasm_state instr_d;
+
+ disasm_instr(*next_pc, &instr_d, 0, regs, cregs);
+
+ *next_pc += instr_d.instr_len;
+ }
+
+ /* Zero Overhead Loop - end of the loop */
+ if (!(regs->status32 & STATUS32_L) && (*next_pc == regs->lp_end)
+ && (regs->lp_count > 1)) {
+ *next_pc = regs->lp_start;
+ }
+
+ return instr.is_branch;
+}
+
+#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */
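A rough sketch of how a kernel-mode single-step client can use disasm_next_pc(): ask for both possible next PCs and plant a software breakpoint at each (plant_breakpoint() is a hypothetical helper for illustration; the kgdb code later in this patch does the equivalent):

	static void example_single_step(struct pt_regs *regs)
	{
		unsigned long next_pc, tgt_if_br = 0;
		int is_branch;

		is_branch = disasm_next_pc(instruction_pointer(regs), regs,
					   (struct callee_regs *)current->thread.callee_reg,
					   &next_pc, &tgt_if_br);

		plant_breakpoint(next_pc);		/* hypothetical helper */
		if (is_branch)
			plant_breakpoint(tgt_if_br);	/* second target for branches */
	}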
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
new file mode 100644
index 00000000000..ef6800ba2f0
--- /dev/null
+++ b/arch/arc/kernel/entry.S
@@ -0,0 +1,839 @@
+/*
+ * Low Level Interrupts/Traps/Exceptions(non-TLB) Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -Userspace unaligned access emulation
+ *
+ * vineetg: Feb 2011 (ptrace low level code fixes)
+ * -traced syscall return code (r0) was not saved into pt_regs for restoring
+ * into user reg-file when traced task rets to user space.
+ * -syscalls needing arch-wrappers (mainly for passing sp as pt_regs)
+ * were not invoking post-syscall trace hook (jumping directly into
+ * ret_from_system_call)
+ *
+ * vineetg: Nov 2010:
+ * -Vector table jumps (@8 bytes) converted into branches (@4 bytes)
+ * -To maintain the slot size of 8 bytes/vector, added nop, which is
+ * not executed at runtime.
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal()invoked upon TIF_RESTORE_SIGMASK as well
+ * -Wrappers for sys_{,rt_}sigsuspend() no longer needed as they don't
+ * need ptregs anymore
+ *
+ * Vineetg: Oct 2009
+ * -In a rare scenario, Process gets a Priv-V exception and gets scheduled
+ * out. Since we don't do FAKE RTIE for Priv-V, CPU exception state remains
+ * active (AE bit enabled). This causes a double fault for a subseq valid
+ * exception. Thus FAKE RTIE needed in low level Priv-Violation handler.
+ * Instr Error could also cause similar scenario, so same there as well.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ * -Zero Overhead Loop Context shd be cleared when entering IRQ/EXcp/Trap
+ * Normally CPU does this automatically, however when doing FAKE rtie,
+ * we need to explicitly do this. The problem in macros
+ * FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ * was being "CLEARED" rather than "SET". Since it is Loop INHIBIT Bit,
+ * setting it and not clearing it clears ZOL context
+ *
+ * Vineetg: May 16th, 2008
+ * - r25 now contains the Current Task when in kernel
+ *
+ * Vineetg: Dec 22, 2007
+ * Minor Surgery of Low Level ISR to make it SMP safe
+ * - MMU_SCRATCH0 Reg used for freeing up r9 in Level 1 ISR
+ * - _current_task is made an array of NR_CPUS
+ * - Access of _current_task wrapped inside a macro so that if hardware
+ * team agrees for a dedicated reg, no other code is touched
+ *
+ * Amit Bhor, Rahul Trivedi, Kanika Nema, Sameer Dhavale : Codito Tech 2004
+ */
+
+/*------------------------------------------------------------------
+ * Function ABI
+ *------------------------------------------------------------------
+ *
+ * Arguments r0 - r7
+ * Caller Saved Registers r0 - r12
+ * Callee Saved Registers r13- r25
+ * Global Pointer (gp) r26
+ * Frame Pointer (fp) r27
+ * Stack Pointer (sp) r28
+ * Interrupt link register (ilink1) r29
+ * Interrupt link register (ilink2) r30
+ * Branch link register (blink) r31
+ *------------------------------------------------------------------
+ */
+
+ .cpu A7
+
+;############################ Vector Table #################################
+
+.macro VECTOR lbl
+#if 1 /* Just in case, build breaks */
+ j \lbl
+#else
+ b \lbl
+ nop
+#endif
+.endm
+
+ .section .vector, "ax",@progbits
+ .align 4
+
+/* Each entry in the vector table must occupy 2 words. Since it is a jump
+ * across sections (.vector to .text) we are guaranteed that 'j somewhere'
+ * will use the 'j limm' form of the instruction as long as somewhere is in
+ * a section other than .vector.
+ */
+
+; ********* Critical System Events **********************
+VECTOR res_service ; 0x0, Restart Vector (0x0)
+VECTOR mem_service ; 0x8, Mem exception (0x1)
+VECTOR instr_service ; 0x10, Instrn Error (0x2)
+
+; ******************** Device ISRs **********************
+#ifdef CONFIG_ARC_IRQ3_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+VECTOR handle_interrupt_level1
+
+#ifdef CONFIG_ARC_IRQ5_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+#ifdef CONFIG_ARC_IRQ6_LV2
+VECTOR handle_interrupt_level2
+#else
+VECTOR handle_interrupt_level1
+#endif
+
+.rept 25
+VECTOR handle_interrupt_level1 ; Other devices
+.endr
+
+/* FOR ARC600: timer = 0x3, uart = 0x8, emac = 0x10 */
+
+; ******************** Exceptions **********************
+VECTOR EV_MachineCheck ; 0x100, Fatal Machine check (0x20)
+VECTOR EV_TLBMissI ; 0x108, Instruction TLB miss (0x21)
+VECTOR EV_TLBMissD ; 0x110, Data TLB miss (0x22)
+VECTOR EV_TLBProtV ; 0x118, Protection Violation (0x23)
+ ; or Misaligned Access
+VECTOR EV_PrivilegeV ; 0x120, Privilege Violation (0x24)
+VECTOR EV_Trap ; 0x128, Trap exception (0x25)
+VECTOR EV_Extension ; 0x130, Extn Instruction Excp (0x26)
+
+.rept 24
+VECTOR reserved ; Reserved Exceptions
+.endr
+
+#include <linux/linkage.h> /* ARC_{ENTRY,EXIT} */
+#include <asm/entry.h> /* SAVE_ALL_{INT1,INT2,TRAP...} */
+#include <asm/errno.h>
+#include <asm/arcregs.h>
+#include <asm/irqflags.h>
+
+;##################### Scratch Mem for IRQ stack switching #############
+
+ARCFP_DATA int1_saved_reg
+ .align 32
+ .type int1_saved_reg, @object
+ .size int1_saved_reg, 4
+int1_saved_reg:
+ .zero 4
+
+/* Each Interrupt level needs its own scratch */
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ARCFP_DATA int2_saved_reg
+ .type int2_saved_reg, @object
+ .size int2_saved_reg, 4
+int2_saved_reg:
+ .zero 4
+
+#endif
+
+; ---------------------------------------------
+ .section .text, "ax",@progbits
+
+res_service: ; processor restart
+ flag 0x1 ; not implemented
+ nop
+ nop
+
+reserved: ; processor restart
+ rtie ; jump to processor initializations
+
+;##################### Interrupt Handling ##############################
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+; ---------------------------------------------
+; Level 2 ISR: Can interrupt a Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level2
+
+ ; TODO-vineetg for SMP this won't work
+ ; free up r9 as scratchpad
+ st r9, [@int2_saved_reg]
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l2]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT2
+
+ ;------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, disable preemption
+ ;------------------------------------------------------
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 1f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; bump thread_info->preempt_count (Disable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+ add r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+1:
+ ;------------------------------------------------------
+ ; setup params for Linux common ISR and invoke it
+ ;------------------------------------------------------
+ lr r0, [icause2]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x2
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+
+ARC_EXIT handle_interrupt_level2
+
+#endif
+
+; ---------------------------------------------
+; Level 1 ISR
+; ---------------------------------------------
+ARC_ENTRY handle_interrupt_level1
+
+ /* free up r9 as scratchpad */
+#ifdef CONFIG_SMP
+ sr r9, [ARC_REG_SCRATCH_DATA0]
+#else
+ st r9, [@int1_saved_reg]
+#endif
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [status32_l1]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_INT1
+
+ lr r0, [icause1]
+ and r0, r0, 0x1f
+
+ bl.d @arch_do_IRQ
+ mov r1, sp
+
+ mov r8,0x1
+ sr r8, [AUX_IRQ_LV12] ; clear bit in Sticky Status Reg
+
+ b ret_from_exception
+ARC_EXIT handle_interrupt_level1
+
+;################### Non TLB Exception Handling #############################
+
+; ---------------------------------------------
+; Instruction Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY instr_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_insterror_or_kprobe
+ b ret_from_exception
+ARC_EXIT instr_service
+
+; ---------------------------------------------
+; Memory Error Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY mem_service
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_memory_error
+ b ret_from_exception
+ARC_EXIT mem_service
+
+; ---------------------------------------------
+; Machine Check Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_MachineCheck
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ brne r0, 0x200100, 1f
+ bl do_tlb_overlap_fault
+ b ret_from_exception
+
+1:
+ ; DEAD END: can't do much, display Regs and HALT
+ SAVE_CALLEE_SAVED_USER
+
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ j do_machine_check_fault
+
+ARC_EXIT EV_MachineCheck
+
+; ---------------------------------------------
+; Protection Violation Exception Handler
+; ---------------------------------------------
+
+ARC_ENTRY EV_TLBProtV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ ;Which mode (user/kernel) was the system in when Exception occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ ;---------(3) Save some more regs-----------------
+ ; vineetg: Mar 6th: Random Seg Fault issue #1
+ ; ecr and efa were not saved in case an Intr sneaks in
+ ; after fake rtie
+ ;
+ lr r3, [ecr]
+ lr r4, [efa]
+
+ ; --------(4) Return from CPU Exception Mode ---------
+ ; Fake a rtie, but rtie to next label
+ ; That way, subsequently, do_page_fault ( ) executes in pure kernel
+ ; mode with further Exceptions enabled
+
+ FAKE_RET_FROM_EXCPN r9
+
+ ;------ (5) Type of Protection Violation? ----------
+ ;
+ ; ProtV Hardware Exception is triggered for Access Faults of 2 types
+ ; -Access Violation (WRITE to READ ONLY Page) - for linux COW
+ ; -Unaligned Access (READ/WRITE on odd boundary)
+ ;
+ cmp r3, 0x230400 ; Misaligned data access ?
+ beq 4f
+
+ ;========= (6a) Access Violation Processing ========
+ cmp r3, 0x230100
+ mov r1, 0x0 ; if LD exception ? write = 0
+ mov.ne r1, 0x1 ; else write = 1
+
+ mov r2, r4 ; faulting address
+ mov r0, sp ; pt_regs
+ bl do_page_fault
+ b ret_from_exception
+
+ ;========== (6b) Non aligned access ============
+4:
+ mov r0, r3 ; cause code
+ mov r1, r4 ; faulting address
+ mov r2, sp ; pt_regs
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ SAVE_CALLEE_SAVED_USER
+ mov r3, sp ; callee_regs
+#endif
+
+ bl do_misaligned_access
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+ DISCARD_CALLEE_SAVED_USER
+#endif
+
+ b ret_from_exception
+
+ARC_EXIT EV_TLBProtV
+
+; ---------------------------------------------
+; Privilege Violation Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_PrivilegeV
+
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_privilege_fault
+ b ret_from_exception
+ARC_EXIT EV_PrivilegeV
+
+; ---------------------------------------------
+; Extension Instruction Exception Handler
+; ---------------------------------------------
+ARC_ENTRY EV_Extension
+
+ EXCPN_PROLOG_FREEUP_REG r9
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ lr r0, [ecr]
+ lr r1, [efa]
+ mov r2, sp
+ bl do_extension_fault
+ b ret_from_exception
+ARC_EXIT EV_Extension
+
+;######################### System Call Tracing #########################
+
+tracesys:
+ ; save EFA in case tracer wants the PC of traced task
+ ; using ERET won't work since next-PC has already committed
+ lr r12, [efa]
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
+ st r12, [r11, THREAD_FAULT_ADDR]
+
+ ; PRE Sys Call Ptrace hook
+ mov r0, sp ; pt_regs needed
+ bl @syscall_trace_entry
+
+ ; Tracing code now returns the syscall num (orig or modif)
+ mov r8, r0
+
+ ; Do the Sys Call as we normally would.
+ ; Validate the Sys Call number
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi tracesys_exit
+
+ ; Restore the sys-call args. Mere invocation of the hook abv could have
+ ; clobbered them (since they are in scratch regs). The tracer could also
+ ; have deliberately changed the syscall args: r0-r7
+ ld r0, [sp, PT_r0]
+ ld r1, [sp, PT_r1]
+ ld r2, [sp, PT_r2]
+ ld r3, [sp, PT_r3]
+ ld r4, [sp, PT_r4]
+ ld r5, [sp, PT_r5]
+ ld r6, [sp, PT_r6]
+ ld r7, [sp, PT_r7]
+ ld.as r9, [sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+tracesys_exit:
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ;POST Sys Call Ptrace Hook
+ bl @syscall_trace_exit
+ b ret_from_exception ; NOT ret_from_system_call as it saves r0 which
+ ; we'd already done before calling the post hook above
+
+;################### Break Point TRAP ##########################
+
+ ; ======= (5b) Trap is due to Break-Point =========
+
+trap_with_param:
+
+ ; gdb's stop_pc reporting needs this info
+ stw orig_r8_IS_BRKPT, [sp, PT_orig_r8]
+
+ mov r0, r12
+ lr r1, [efa]
+ mov r2, sp
+
+ ; Now that we have read EFA, it's safe to do "fake" rtie
+ ; and get out of CPU exception mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; Save callee regs in case gdb wants to have a look
+ ; SP will grow up by size of CALLEE Reg-File
+ ; NOTE: clobbers r12
+ SAVE_CALLEE_SAVED_USER
+
+ ; save location of saved Callee Regs @ thread_struct->pc
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ ; Call the trap handler
+ bl do_non_swi_trap
+
+ ; unwind stack to discard Callee saved Regs
+ DISCARD_CALLEE_SAVED_USER
+
+ b ret_from_exception
+
+;##################### Trap Handling ##############################
+;
+; EV_Trap caused by TRAP_S and TRAP0 instructions.
+;------------------------------------------------------------------
+; (1) System Calls
+; :parameters in r0-r7.
+; :r8 has the system call number
+; (2) Break Points
+;------------------------------------------------------------------
+
+ARC_ENTRY EV_Trap
+
+ ; Need at least 1 reg to code the early exception prolog
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ ;Which mode (user/kernel) was the system in when intr occurred
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_TRAP
+
+ ;------- (4) What caused the Trap --------------
+ lr r12, [ecr]
+ and.f 0, r12, ECR_PARAM_MASK
+ bnz trap_with_param
+
+ ; ======= (5a) Trap is due to System Call ========
+
+ ; Before doing anything, return from CPU Exception Mode
+ FAKE_RET_FROM_EXCPN r11
+
+ ; If syscall tracing is ongoing, invoke the pre/post hooks
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys ; this never comes back
+
+ ;============ This is normal System Call case ==========
+ ; Sys-call num shd not exceed the total system calls avail
+ cmp r8, NR_syscalls
+ mov.hi r0, -ENOSYS
+ bhi ret_from_system_call
+
+ ; Offset into the syscall_table and call handler
+ ld.as r9,[sys_call_table, r8]
+ jl [r9] ; Entry into Sys Call Handler
+
+ ; fall through to ret_from_system_call
+ARC_EXIT EV_Trap
+
+ARC_ENTRY ret_from_system_call
+
+ st r0, [sp, PT_r0] ; sys call return value in pt_regs
+
+ ; fall through yet again to ret_from_exception
+
+;############# Return from Intr/Excp/Trap (Linux Specifics) ##############
+;
+; If ret to user mode do we need to handle signals, schedule() et al.
+
+ARC_ENTRY ret_from_exception
+
+ ; Pre-{IRQ,Trap,Exception} K/U mode from pt_regs->status32
+ ld r8, [sp, PT_status32] ; returning to User/Kernel Mode
+
+#ifdef CONFIG_PREEMPT
+ bbit0 r8, STATUS_U_BIT, resume_kernel_mode
+#else
+ bbit0 r8, STATUS_U_BIT, restore_regs
+#endif
+
+ ; Before returning to User mode check-for-and-complete any pending work
+ ; such as rescheduling/signal-delivery etc.
+resume_user_mode_begin:
+
+ ; Disable IRQs to ensure that chk for pending work itself is atomic
+ ; (and we don't end up missing a NEED_RESCHED/SIGPENDING due to an
+ ; interim IRQ).
+ IRQ_DISABLE r10
+
+ ; Fast Path return to user mode if no pending work
+ GET_CURR_THR_INFO_FLAGS r9
+ and.f 0, r9, _TIF_WORK_MASK
+ bz restore_regs
+
+ ; --- (Slow Path #1) task preemption ---
+ bbit0 r9, TIF_NEED_RESCHED, .Lchk_pend_signals
+ mov blink, resume_user_mode_begin ; tail-call to U mode ret chks
+ b @schedule ; BTST+Bnz causes relo error in link
+
+.Lchk_pend_signals:
+ IRQ_ENABLE r10
+
+ ; --- (Slow Path #2) pending signal ---
+ mov r0, sp ; pt_regs for arg to do_signal()/do_notify_resume()
+
+ bbit0 r9, TIF_SIGPENDING, .Lchk_notify_resume
+
+ ; Normal Trap/IRQ entry only saves Scratch (caller-saved) regs
+ ; in pt_reg since the "C" ABI (kernel code) will automatically
+ ; save/restore callee-saved regs.
+ ;
+ ; However, here we need to explicitly save callee regs because
+ ; (i) If this signal causes coredump - full regfile needed
+ ; (ii) If signal is SIGTRAP/SIGSTOP, task is being traced thus
+ ; tracer might call PEEKUSR(CALLEE reg)
+ ;
+ ; NOTE: SP will grow up by size of CALLEE Reg-File
+ SAVE_CALLEE_SAVED_USER ; clobbers r12
+
+ ; save location of saved Callee Regs @ thread_struct->callee
+ GET_CURR_TASK_FIELD_PTR TASK_THREAD, r10
+ st sp, [r10, THREAD_CALLEE_REG]
+
+ bl @do_signal
+
+ ; Ideally we want to discard the Callee reg above, however if this was
+ ; a tracing signal, tracer could have done a POKEUSR(CALLEE reg)
+ RESTORE_CALLEE_SAVED_USER
+
+ b resume_user_mode_begin ; loop back to start of U mode ret
+
+ ; --- (Slow Path #3) notify_resume ---
+.Lchk_notify_resume:
+ btst r9, TIF_NOTIFY_RESUME
+ blnz @do_notify_resume
+ b resume_user_mode_begin ; unconditionally back to U mode ret chks
+ ; for single exit point from this block
+
+#ifdef CONFIG_PREEMPT
+
+resume_kernel_mode:
+
+ ; Can't preempt if preemption disabled
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r8, [r10, THREAD_INFO_PREEMPT_COUNT]
+ brne r8, 0, restore_regs
+
+ ; check if this task's NEED_RESCHED flag set
+ ld r9, [r10, THREAD_INFO_FLAGS]
+ bbit0 r9, TIF_NEED_RESCHED, restore_regs
+
+ IRQ_DISABLE r9
+
+ ; Invoke PREEMPTION
+ bl preempt_schedule_irq
+
+ ; preempt_schedule_irq() always returns with IRQ disabled
+#endif
+
+ ; fall through
+
+;############# Return from Intr/Excp/Trap (ARC Specifics) ##############
+;
+; Restore the saved sys context (common exit-path for EXCPN/IRQ/Trap)
+; IRQ shd definitely not happen between now and rtie
+
+restore_regs :
+
+ ; Disable Interrupts while restoring reg-file back
+ ; XXX can this be optimised out
+ IRQ_DISABLE_SAVE r9, r10 ;@r10 has pristine (pre-disable) copy
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ; Restore User R25
+ ; Earlier this used to be only for returning to user mode
+ ; However with 2 levels of IRQ this can also happen even if
+ ; in kernel mode
+ ld r9, [sp, PT_sp]
+ brhs r9, VMALLOC_START, 8f
+ RESTORE_USER_R25
+8:
+#endif
+
+ ; Restore REG File. In case multiple Events outstanding,
+ ; use the same priority as rtie: EXCPN, L2 IRQ, L1 IRQ, None
+ ; Note that we use realtime STATUS32 (not pt_regs->status32) to
+ ; decide that.
+
+ ; if Returning from Exception
+ bbit0 r10, STATUS_AE_BIT, not_exception
+ RESTORE_ALL_SYS
+ rtie
+
+ ; Not Exception so maybe Interrupts (Level 1 or 2)
+
+not_exception:
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+
+ bbit0 r10, STATUS_A2_BIT, not_level2_interrupt
+
+ ;------------------------------------------------------------------
+ ; if L2 IRQ interrupted a L1 ISR, we'd disabled preemption earlier
+ ; so that sched doesn't move to a new task, causing L1 to be delayed
+ ; non-deterministically. Now that we've achieved that, let's reset
+ ; things to what they were, before returning from L2 context
+ ;----------------------------------------------------------------
+
+ ldw r9, [sp, PT_orig_r8] ; get orig_r8 to make sure it is
+ brne r9, orig_r8_IS_IRQ2, 149f ; in fact a L2 ISR ret path
+
+ ld r9, [sp, PT_status32] ; get status32_l2 (saved in pt_regs)
+ bbit0 r9, STATUS_A1_BIT, 149f ; L1 not active when L2 IRQ, so normal
+
+ ; A1 is set in status32_l2
+ ; decrement thread_info->preempt_count (re-enable preemption)
+ GET_CURR_THR_INFO_FROM_SP r10
+ ld r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+ ; paranoid check, given A1 was active when A2 happened, preempt count
+ ; must not be 0 because we would have incremented it.
+ ; If this does happen we simply HALT as it means a BUG !!!
+ cmp r9, 0
+ bnz 2f
+ flag 1
+
+2:
+ sub r9, r9, 1
+ st r9, [r10, THREAD_INFO_PREEMPT_COUNT]
+
+149:
+ ;return from level 2
+ RESTORE_ALL_INT2
+debug_marker_l2:
+ rtie
+
+not_level2_interrupt:
+
+#endif
+
+ bbit0 r10, STATUS_A1_BIT, not_level1_interrupt
+
+ ;return from level 1
+
+ RESTORE_ALL_INT1
+debug_marker_l1:
+ rtie
+
+not_level1_interrupt:
+
+ ;this case is for syscalls or Exceptions (with fake rtie)
+
+ RESTORE_ALL_SYS
+debug_marker_syscall:
+ rtie
+
+ARC_EXIT ret_from_exception
+
+ARC_ENTRY ret_from_fork
+ ; when the forked child comes here from the __switch_to function
+ ; r0 has the last task pointer.
+ ; put last task in scheduler queue
+ bl @schedule_tail
+
+ ; If kernel thread, jump to its entry-point
+ ld r9, [sp, PT_status32]
+ brne r9, 0, 1f
+
+ jl.d [r14]
+ mov r0, r13 ; arg to payload
+
+1:
+ ; special case of kernel_thread entry point returning back due to
+ ; kernel_execve() - pretend return from syscall to ret to userland
+ b ret_from_exception
+ARC_EXIT ret_from_fork
+
+;################### Special Sys Call Wrappers ##########################
+
+; TBD: call do_fork directly from here
+ARC_ENTRY sys_fork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_fork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_fork_wrapper
+
+ARC_ENTRY sys_vfork_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_vfork
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_vfork_wrapper
+
+ARC_ENTRY sys_clone_wrapper
+ SAVE_CALLEE_SAVED_USER
+ bl @sys_clone
+ DISCARD_CALLEE_SAVED_USER
+
+ GET_CURR_THR_INFO_FLAGS r10
+ btst r10, TIF_SYSCALL_TRACE
+ bnz tracesys_exit
+
+ b ret_from_system_call
+ARC_EXIT sys_clone_wrapper
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+; Workaround for bug 94179 (STAR ):
+; Despite -fasynchronous-unwind-tables, linker is not making dwarf2 unwinder
+; section (.debug_frame) as loadable. So we force it here.
+; This also fixes STAR 9000487933 where the prev-workaround (objcopy --setflag)
+; would not work after a clean build due to kernel build system dependencies.
+.section .debug_frame, "wa",@progbits
+#endif
diff --git a/arch/arc/kernel/fpu.c b/arch/arc/kernel/fpu.c
new file mode 100644
index 00000000000..f352e512cbd
--- /dev/null
+++ b/arch/arc/kernel/fpu.c
@@ -0,0 +1,55 @@
+/*
+ * fpu.c - save/restore of Floating Point Unit Registers on task switch
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/sched.h>
+#include <asm/switch_to.h>
+
+/*
+ * To save/restore FPU regs, the simplest scheme would use LR/SR insns.
+ * However since SR serializes the pipeline, an alternate "hack" can be used
+ * which uses the FPU Exchange insn (DEXCL) to r/w FPU regs.
+ *
+ * Store to 64bit dpfp1 reg from a pair of core regs:
+ * dexcl1 0, r1, r0 ; where r1:r0 is the 64 bit val
+ *
+ * Read from dpfp1 into pair of core regs (w/o clobbering dpfp1)
+ * mov_s r3, 0
+ * daddh11 r1, r3, r3 ; get "hi" into r1 (dpfp1 unchanged)
+ * dexcl1 r0, r1, r3 ; get "low" into r0 (dpfp1 low clobbered)
+ * dexcl1 0, r1, r0 ; restore dpfp1 to orig value
+ *
+ * However we can tweak the read, so that read-out of outgoing task's FPU regs
+ * and write of incoming task's regs happen in one shot. So all the work is
+ * done before context switch
+ */
+
+void fpu_save_restore(struct task_struct *prev, struct task_struct *next)
+{
+ unsigned int *saveto = &prev->thread.fpu.aux_dpfp[0].l;
+ unsigned int *readfrom = &next->thread.fpu.aux_dpfp[0].l;
+
+ const unsigned int zero = 0;
+
+ __asm__ __volatile__(
+ "daddh11 %0, %2, %2\n"
+ "dexcl1 %1, %3, %4\n"
+ : "=&r" (*(saveto + 1)), /* early clobber must here */
+ "=&r" (*(saveto))
+ : "r" (zero), "r" (*(readfrom + 1)), "r" (*(readfrom))
+ );
+
+ __asm__ __volatile__(
+ "daddh22 %0, %2, %2\n"
+ "dexcl2 %1, %3, %4\n"
+ : "=&r"(*(saveto + 3)), /* early clobber must here */
+ "=&r"(*(saveto + 2))
+ : "r" (zero), "r" (*(readfrom + 3)), "r" (*(readfrom + 2))
+ );
+}
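A schematic sketch of how the context-switch glue might hook fpu_save_restore() just before the register-file switch, assuming a CONFIG_ARC_FPU_SAVE_RESTORE style option; the ARC_FPU_SWITCH macro name is a placeholder, not the actual macro from this patch:

	/* Illustration only: invoked from the switch_to() wrapper */
	#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
	#define ARC_FPU_SWITCH(prev, next)	fpu_save_restore(prev, next)
	#else
	#define ARC_FPU_SWITCH(prev, next)	do { } while (0)
	#endif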
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
new file mode 100644
index 00000000000..006dec3fc35
--- /dev/null
+++ b/arch/arc/kernel/head.S
@@ -0,0 +1,111 @@
+/*
+ * ARC CPU startup Code
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: Dec 2007
+ * -Check if we are running on Simulator or on real hardware
+ * to skip certain things during boot on simulator
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+#include <linux/linkage.h>
+#include <asm/arcregs.h>
+
+ .cpu A7
+
+ .section .init.text, "ax",@progbits
+ .type stext, @function
+ .globl stext
+stext:
+ ;-------------------------------------------------------------------
+ ; Don't clobber r0-r4 yet. They might hold bootloader provided info
+ ;-------------------------------------------------------------------
+
+#ifdef CONFIG_SMP
+ ; Only Boot (Master) proceeds. Others wait in platform dependent way
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (cpu-id) ^^^ => Zero for UP ARC700
+ ; => #Core-ID if SMP (Master 0)
+ GET_CPU_ID r5
+ cmp r5, 0
+ jnz arc_platform_smp_wait_to_boot
+#endif
+ ; Clear BSS before updating any globals
+ ; XXX: use ZOL here
+ mov r5, __bss_start
+ mov r6, __bss_stop
+1:
+ st.ab 0, [r5,4]
+ brlt r5, r6, 1b
+
+#ifdef CONFIG_CMDLINE_UBOOT
+ ; support for bootloader provided cmdline
+ ; If cmdline passed by u-boot, then
+ ; r0 = 1 (because ATAGS parsing, now retired, used to use 0)
+ ; r1 = magic number (board identity)
+ ; r2 = addr of cmdline string (somewhere in memory/flash)
+
+ brne r0, 1, .Lother_bootup_chores ; u-boot didn't pass cmdline
+ breq r2, 0, .Lother_bootup_chores ; or cmdline is NULL
+
+ mov r5, @command_line
+1:
+ ldb.ab r6, [r2, 1]
+ breq r6, 0, .Lother_bootup_chores
+ b.d 1b
+ stb.ab r6, [r5, 1]
+#endif
+
+.Lother_bootup_chores:
+
+ ; Identify if running on ISS vs Silicon
+ ; IDENTITY Reg [ 3 2 1 0 ]
+ ; (chip-id) ^^^^^ ==> 0xffff for ISS
+ lr r0, [identity]
+ lsr r3, r0, 16
+ cmp r3, 0xffff
+ mov.z r4, 0
+ mov.nz r4, 1
+ st r4, [@running_on_hw]
+
+ ; setup "current" tsk and optionally cache it in dedicated r25
+ mov r9, @init_task
+ SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; tsk->thread_info is really a PAGE, whose bottom hosts the stack
+ GET_TSK_STACK_BASE r9, sp ; r9 = tsk, sp = stack base(output)
+
+ j start_kernel ; "C" entry point
+
+#ifdef CONFIG_SMP
+;----------------------------------------------------------------
+; First lines of code run by secondary before jumping to 'C'
+;----------------------------------------------------------------
+ .section .init.text, "ax",@progbits
+ .type first_lines_of_secondary, @function
+ .globl first_lines_of_secondary
+
+first_lines_of_secondary:
+
+ ; setup per-cpu idle task as "current" on this CPU
+ ld r0, [@secondary_idle_tsk]
+ SET_CURR_TASK_ON_CPU r0, r1
+
+ ; setup stack (fp, sp)
+ mov fp, 0
+
+ ; set its stack base to tsk->thread_info bottom
+ GET_TSK_STACK_BASE r0, sp
+
+ j start_kernel_secondary
+
+#endif
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
new file mode 100644
index 00000000000..551c10dff48
--- /dev/null
+++ b/arch/arc/kernel/irq.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+#include <asm/sections.h>
+#include <asm/irq.h>
+#include <asm/mach_desc.h>
+
+/*
+ * Early Hardware specific Interrupt setup
+ * -Called very early (start_kernel -> setup_arch -> setup_processor)
+ * -Platform Independent (must for any ARC700)
+ * -Needed for each CPU (hence not foldable into init_IRQ)
+ *
+ * What it does:
+ * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
+ * -Disable all IRQs (on CPU side)
+ * -Optionally, setup the High priority Interrupts as Level 2 IRQs
+ */
+void __init arc_init_IRQ(void)
+{
+ int level_mask = 0;
+
+ write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);
+
+ /* Disable all IRQs: enable them as devices request */
+ write_aux_reg(AUX_IENABLE, 0);
+
+ /* setup any high priority Interrupts (Level2 in ARCompact jargon) */
+#ifdef CONFIG_ARC_IRQ3_LV2
+ level_mask |= (1 << 3);
+#endif
+#ifdef CONFIG_ARC_IRQ5_LV2
+ level_mask |= (1 << 5);
+#endif
+#ifdef CONFIG_ARC_IRQ6_LV2
+ level_mask |= (1 << 6);
+#endif
+
+ if (level_mask) {
+ pr_info("Level-2 interrupts bitset %x\n", level_mask);
+ write_aux_reg(AUX_IRQ_LEV, level_mask);
+ }
+}
+
+/*
+ * ARC700 core includes a simple on-chip intc supporting
+ * -per IRQ enable/disable
+ * -2 levels of interrupts (high/low)
+ * -all interrupts being level triggered
+ *
+ * To reduce platform code, we assume all IRQs directly hooked-up into intc.
+ * Platforms with external intc, hence cascaded IRQs, are free to over-ride
+ * below, per IRQ.
+ */
+
+static void arc_mask_irq(struct irq_data *data)
+{
+ arch_mask_irq(data->irq);
+}
+
+static void arc_unmask_irq(struct irq_data *data)
+{
+ arch_unmask_irq(data->irq);
+}
+
+static struct irq_chip onchip_intc = {
+ .name = "ARC In-core Intc",
+ .irq_mask = arc_mask_irq,
+ .irq_unmask = arc_unmask_irq,
+};
+
+static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ if (irq == TIMER0_IRQ)
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
+ else
+ irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops arc_intc_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = arc_intc_domain_map,
+};
+
+static struct irq_domain *root_domain;
+
+void __init init_onchip_IRQ(void)
+{
+ struct device_node *intc = NULL;
+
+ intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
+ if (!intc)
+ panic("DeviceTree Missing incore intc\n");
+
+ root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
+ &arc_intc_domain_ops, NULL);
+
+ if (!root_domain)
+ panic("root irq domain not avail\n");
+
+ /* with this we don't need to export root_domain */
+ irq_set_default_host(root_domain);
+}
+
+/*
+ * Late Interrupt system init called from start_kernel for Boot CPU only
+ *
+ * Since slab must already be initialized, platforms can start doing any
+ * needed request_irq( )s
+ */
+void __init init_IRQ(void)
+{
+ init_onchip_IRQ();
+
+ /* Any external intc can be setup here */
+ if (machine_desc->init_irq)
+ machine_desc->init_irq();
+
+#ifdef CONFIG_SMP
+ /* Master CPU can initialize its side of IPI */
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+#endif
+}
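Once init_IRQ() has run, a platform driver can hook its interrupt line with the usual request_irq() API, as the comment above notes. A minimal sketch (the IRQ number, names and flags below are examples only; <linux/interrupt.h> assumed included):

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		/* acknowledge and handle the device here */
		return IRQ_HANDLED;
	}

	static int __init example_dev_init(void)
	{
		/* '5' is an arbitrary line routed via the on-chip intc */
		return request_irq(5, example_isr, 0, "example-dev", NULL);
	}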
+
+/*
+ * "C" Entry point for any ARC ISR, called from low level vector handler
+ * @irq is the vector number read from ICAUSE reg of on-chip intc
+ */
+void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+
+ irq_enter();
+ generic_handle_irq(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+int __init get_hw_config_num_irq(void)
+{
+ uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);
+
+ switch (val & 0x03) {
+ case 0:
+ return 16;
+ case 1:
+ return 32;
+ case 2:
+ return 8;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/*
+ * arch_local_irq_enable - Enable interrupts.
+ *
+ * 1. Explicitly called to re-enable interrupts
+ * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc
+ * which may be in hard ISR itself
+ *
+ * Semantics of this function change depending on where it is called from:
+ *
+ * -If called from hard-ISR, it must not invert interrupt priorities
+ * e.g. suppose TIMER is high priority (Level 2) IRQ
+ * In the timer hard-ISR, timer_interrupt( ) calls spin_unlock_irq several times.
+ * Here local_irq_enable( ) shd not re-enable lower priority interrupts
+ * -If called from soft-ISR, it must re-enable all interrupts
+ * soft ISRs are low priority jobs which can be very slow, thus all IRQs
+ * must be enabled while they run.
+ * Now hardware context wise we may still be in L2 ISR (not done rtie)
+ * still we must re-enable both L1 and L2 IRQs
+ * Another twist is prev scenario with flow being
+ * L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
+ * here we must not re-enable L1 as the prev L1 Interrupt's h/w context will get
+ * over-written (this is deficiency in ARC700 Interrupt mechanism)
+ */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS /* Complex version for 2 IRQ levels */
+
+void arch_local_irq_enable(void)
+{
+
+ unsigned long flags;
+ flags = arch_local_save_flags();
+
+ /* Allow both L1 and L2 at the onset */
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+
+ /* Called from hard ISR (between irq_enter and irq_exit) */
+ if (in_irq()) {
+
+ /* If in L2 ISR, don't re-enable any further IRQs as this can
+ * cause IRQ priorities to get upside down. e.g. it could allow
+ * L1 to be taken while in L2 hard ISR, which is wrong not only in
+ * theory, it can also cause the dreaded L1-L2-L1 scenario
+ */
+ if (flags & STATUS_A2_MASK)
+ flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);
+
+ /* Even if in L1 ISR, allow Higher prio L2 IRQs */
+ else if (flags & STATUS_A1_MASK)
+ flags &= ~(STATUS_E1_MASK);
+ }
+
+ /* called from soft IRQ, ideally we want to re-enable all levels */
+
+ else if (in_softirq()) {
+
+ /* However if this is case of L1 interrupted by L2,
+ * re-enabling both may cause a whacko L1-L2-L1 scenario
+ * because ARC700 allows level 1 to interrupt an active L2 ISR
+ * Thus we disable both
+ * However some code, executing in soft ISR wants some IRQs
+ * to be enabled so we re-enable L2 only
+ *
+ * How do we determine if L1 was interrupted by L2?
+ * -A2 is set (means in L2 ISR)
+ * -E1 is set in this ISR's pt_regs->status32 which is
+ * saved copy of status32_l2 when l2 ISR happened
+ */
+ struct pt_regs *pt = get_irq_regs();
+ if ((flags & STATUS_A2_MASK) && pt &&
+ (pt->status32 & STATUS_A1_MASK)) {
+ /*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
+ flags &= ~(STATUS_E1_MASK);
+ }
+ }
+
+ arch_local_irq_restore(flags);
+}
+
+#else /* ! CONFIG_ARC_COMPACT_IRQ_LEVELS */
+
+/*
+ * Simpler version for only 1 level of interrupt
+ * Here we only Worry about Level 1 Bits
+ */
+void arch_local_irq_enable(void)
+{
+ unsigned long flags;
+
+ /*
+ * The ARC IDE driver tries to re-enable interrupts from hard-isr
+ * context which is simply wrong
+ */
+ if (in_irq()) {
+ WARN_ONCE(1, "IRQ enabled from hard-isr");
+ return;
+ }
+
+ flags = arch_local_save_flags();
+ flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
+ arch_local_irq_restore(flags);
+}
+#endif
+EXPORT_SYMBOL(arch_local_irq_enable);
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
new file mode 100644
index 00000000000..2888ba5be47
--- /dev/null
+++ b/arch/arc/kernel/kgdb.c
@@ -0,0 +1,205 @@
+/*
+ * kgdb support for ARC
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kgdb.h>
+#include <asm/disasm.h>
+#include <asm/cacheflush.h>
+
+static void to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ gdb_regs[_R0 + regno] = get_reg(regno, kernel_regs, cregs);
+
+ for (regno = 27; regno < GDB_MAX_REGS; regno++)
+ gdb_regs[regno] = 0;
+
+ gdb_regs[_FP] = kernel_regs->fp;
+ gdb_regs[__SP] = kernel_regs->sp;
+ gdb_regs[_BLINK] = kernel_regs->blink;
+ gdb_regs[_RET] = kernel_regs->ret;
+ gdb_regs[_STATUS32] = kernel_regs->status32;
+ gdb_regs[_LP_COUNT] = kernel_regs->lp_count;
+ gdb_regs[_LP_END] = kernel_regs->lp_end;
+ gdb_regs[_LP_START] = kernel_regs->lp_start;
+ gdb_regs[_BTA] = kernel_regs->bta;
+ gdb_regs[_STOP_PC] = kernel_regs->ret;
+}
+
+static void from_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs,
+ struct callee_regs *cregs)
+{
+ int regno;
+
+ for (regno = 0; regno <= 26; regno++)
+ set_reg(regno, gdb_regs[regno + _R0], kernel_regs, cregs);
+
+ kernel_regs->fp = gdb_regs[_FP];
+ kernel_regs->sp = gdb_regs[__SP];
+ kernel_regs->blink = gdb_regs[_BLINK];
+ kernel_regs->ret = gdb_regs[_RET];
+ kernel_regs->status32 = gdb_regs[_STATUS32];
+ kernel_regs->lp_count = gdb_regs[_LP_COUNT];
+ kernel_regs->lp_end = gdb_regs[_LP_END];
+ kernel_regs->lp_start = gdb_regs[_LP_START];
+ kernel_regs->bta = gdb_regs[_BTA];
+}
+
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ to_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *kernel_regs)
+{
+ from_gdb_regs(gdb_regs, kernel_regs, (struct callee_regs *)
+ current->thread.callee_reg);
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs,
+ struct task_struct *task)
+{
+ if (task)
+ to_gdb_regs(gdb_regs, task_pt_regs(task),
+ (struct callee_regs *) task->thread.callee_reg);
+}
+
+struct single_step_data_t {
+ uint16_t opcode[2];
+ unsigned long address[2];
+ int is_branch;
+ int armed;
+} single_step_data;
+
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (single_step_data.armed) {
+ int i;
+
+ for (i = 0; i < (single_step_data.is_branch ? 2 : 1); i++) {
+ memcpy((void *) single_step_data.address[i],
+ &single_step_data.opcode[i],
+ BREAK_INSTR_SIZE);
+
+ flush_icache_range(single_step_data.address[i],
+ single_step_data.address[i] +
+ BREAK_INSTR_SIZE);
+ }
+ single_step_data.armed = 0;
+ }
+}
+
+static void place_trap(unsigned long address, void *save)
+{
+ memcpy(save, (void *) address, BREAK_INSTR_SIZE);
+ memcpy((void *) address, &arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ flush_icache_range(address, address + BREAK_INSTR_SIZE);
+}
+
+static void do_single_step(struct pt_regs *regs)
+{
+ single_step_data.is_branch = disasm_next_pc((unsigned long)
+ regs->ret, regs, (struct callee_regs *)
+ current->thread.callee_reg,
+ &single_step_data.address[0],
+ &single_step_data.address[1]);
+
+ place_trap(single_step_data.address[0], &single_step_data.opcode[0]);
+
+ if (single_step_data.is_branch) {
+ place_trap(single_step_data.address[1],
+ &single_step_data.opcode[1]);
+ }
+
+ single_step_data.armed++;
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+ char *remcomInBuffer, char *remcomOutBuffer,
+ struct pt_regs *regs)
+{
+ unsigned long addr;
+ char *ptr;
+
+ undo_single_step(regs);
+
+ switch (remcomInBuffer[0]) {
+ case 's':
+ case 'c':
+ ptr = &remcomInBuffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->ret = addr;
+
+ case 'D':
+ case 'k':
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+
+ if (remcomInBuffer[0] == 's') {
+ do_single_step(regs);
+ atomic_set(&kgdb_cpu_doing_single_step,
+ smp_processor_id());
+ }
+
+ return 0;
+ }
+ return -1;
+}
+
+unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+int kgdb_arch_init(void)
+{
+ single_step_data.armed = 0;
+ return 0;
+}
+
+void kgdb_trap(struct pt_regs *regs, int param)
+{
+ /* trap_s 3 is used for breakpoints that overwrite existing
+ * instructions, while trap_s 4 is used for compiled breakpoints.
+ *
+ * with trap_s 3 breakpoints the original instruction needs to be
+ * restored and continuation needs to start at the location of the
+ * breakpoint.
+ *
+ * with trap_s 4 (compiled) breakpoints, continuation needs to
+ * start after the breakpoint.
+ */
+ if (param == 3)
+ instruction_pointer(regs) -= BREAK_INSTR_SIZE;
+
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ instruction_pointer(regs) = ip;
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+ /* breakpoint instruction: TRAP_S 0x3 */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = {0x78, 0x7e},
+#else
+ .gdb_bpt_instr = {0x7e, 0x78},
+#endif
+};
diff --git a/arch/arc/kernel/kprobes.c b/arch/arc/kernel/kprobes.c
new file mode 100644
index 00000000000..3bfeacb674d
--- /dev/null
+++ b/arch/arc/kernel/kprobes.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kprobes.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/current.h>
+#include <asm/disasm.h>
+
+#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_SIZE - (addr))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* Attempt to probe at unaligned address */
+ if ((unsigned long)p->addr & 0x01)
+ return -EINVAL;
+
+ /* Address should not be in exception handling code */
+
+ p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
+ p->opcode = *p->addr;
+
+ return 0;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = UNIMP_S_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ arch_disarm_kprobe(p);
+
+ /* Can we remove the kprobe in the middle of kprobe handling? */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static inline void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __get_cpu_var(current_kprobe) = p;
+}
+
+static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
+ struct pt_regs *regs)
+{
+ /* Remove the trap instructions inserted for single step and
+ * restore the original instructions
+ */
+ if (p->ainsn.t1_addr) {
+ *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t1_addr = NULL;
+ }
+
+ if (p->ainsn.t2_addr) {
+ *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+
+ p->ainsn.t2_addr = NULL;
+ }
+
+ return;
+}
+
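+/*
+ * Single-stepping the probed instruction is emulated with traps rather
+ * than a hardware single-step mode: the original instruction is restored
+ * at the probe address and TRAP_S_2_INSTRUCTION is planted at the next PC
+ * (and, for branches, at the possible branch target as well).  Hitting
+ * either trap lands in arc_post_kprobe_handler(), which removes the traps
+ * and re-arms the probe.
+ */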
+static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ unsigned long next_pc;
+ unsigned long tgt_if_br = 0;
+ int is_branch;
+ unsigned long bta;
+
+ /* Copy the opcode back to the kprobe location and execute the
+ * instruction. Because of this we will not be able to get into the
+ * same kprobe until this kprobe is done
+ */
+ *(p->addr) = p->opcode;
+
+ flush_icache_range((unsigned long)p->addr,
+ (unsigned long)p->addr + sizeof(kprobe_opcode_t));
+
+ /* Now we insert the trap at the next location after this instruction, to
+ * single step it. If it is a branch, we insert the trap at the possible
+ * branch targets.
+ */
+
+ bta = regs->bta;
+
+ if (regs->status32 & 0x40) {
+ /* We are in a delay slot with the branch taken */
+
+ next_pc = bta & ~0x01;
+
+ if (!p->ainsn.is_short) {
+ if (bta & 0x01)
+ regs->blink += 2;
+ else {
+ /* Branch not taken */
+ next_pc += 2;
+
+ /* next pc is taken from bta after executing the
+ * delay slot instruction
+ */
+ regs->bta += 2;
+ }
+ }
+
+ is_branch = 0;
+ } else
+ is_branch =
+ disasm_next_pc((unsigned long)p->addr, regs,
+ (struct callee_regs *) current->thread.callee_reg,
+ &next_pc, &tgt_if_br);
+
+ p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
+ p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
+ *(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t1_addr,
+ (unsigned long)p->ainsn.t1_addr +
+ sizeof(kprobe_opcode_t));
+
+ if (is_branch) {
+ p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
+ p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
+ *(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
+
+ flush_icache_range((unsigned long)p->ainsn.t2_addr,
+ (unsigned long)p->ainsn.t2_addr +
+ sizeof(kprobe_opcode_t));
+ }
+}
+
+int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
+{
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ preempt_disable();
+
+ kcb = get_kprobe_ctlblk();
+ p = get_kprobe((unsigned long *)addr);
+
+ if (p) {
+ /*
+ * We have re-entered the kprobe handler: another kprobe was hit
+ * while we were still within a handler. Save the original kprobe
+ * and single-step the instruction of the new probe without
+ * calling any user handlers, to avoid recursing into kprobes.
+ */
+ if (kprobe_running()) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ }
+
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /* If we have no pre-handler or it returned 0, we continue with
+ * normal processing. If we have a pre-handler and it returned
+ * non-zero (which is expected from setjmp_pre_handler for a
+ * jprobe), we return without single-stepping and leave that to
+ * the break-handler, which is invoked by a kprobe from
+ * jprobe_return().
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ return 1;
+ } else if (kprobe_running()) {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ setup_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+ }
+ }
+
+ /* no_kprobe: */
+ preempt_enable_no_resched();
+ return 0;
+}
+
+static int __kprobes arc_post_kprobe_handler(unsigned long addr,
+ struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ resume_execution(cur, addr, regs);
+
+ /* Rearm the kprobe */
+ arch_arm_kprobe(cur);
+
+ /*
+ * When we return from the trap instruction we would go to the next
+ * instruction. We restored the actual instruction in resume_execution()
+ * and we want to return to the same address and execute it.
+ */
+ regs->ret = addr;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+
+ reset_current_kprobe();
+
+out:
+ preempt_enable_no_resched();
+ return 1;
+}
+
+/*
+ * Fault can be for the instruction being single stepped or for the
+ * pre/post handlers in the module.
+ * This is applicable for applications like user probes, where we have the
+ * probe in user space and the handlers in the kernel
+ */
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single stepped
+ * caused the fault. We reset the current kprobe and let the
+ * exception handler run as if it were a regular exception. In our
+ * case it doesn't matter because the system will be halted.
+ */
+ resume_execution(cur, (unsigned long)cur->addr, regs);
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+ preempt_enable_no_resched();
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+ * We are here because the instructions in the pre/post handler
+ * caused the fault.
+ */
+
+ /* We increment the nmissed count for accounting;
+ * we could also use the npre/npostfault counts for accounting
+ * these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+ * We come here because instructions in the pre/post
+ * handler caused the page_fault, this could happen
+ * if handler tries to access user space by
+ * copy_from_user(), get_user() etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned zero,
+ * try to fix up.
+ */
+ if (fixup_exception(regs))
+ return 1;
+
+ /*
+ * fixup_exception() could not handle it,
+ * Let do_page_fault() fix it.
+ */
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ unsigned long addr = args->err;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_IERR:
+ if (arc_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ case DIE_TRAP:
+ if (arc_post_kprobe_handler(addr, args->regs))
+ return NOTIFY_STOP;
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr = regs->sp;
+
+ kcb->jprobe_saved_regs = *regs;
+ memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
+ regs->ret = (unsigned long)(jp->entry);
+
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ __asm__ __volatile__("unimp_s");
+ return;
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long sp_addr;
+
+ *regs = kcb->jprobe_saved_regs;
+ sp_addr = regs->sp;
+ memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+static void __used kretprobe_trampoline_holder(void)
+{
+ __asm__ __volatile__(".global kretprobe_trampoline\n"
+ "kretprobe_trampoline:\n" "nop\n");
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+
+ ri->ret_addr = (kprobe_opcode_t *) regs->blink;
+
+ /* Replace the return addr with trampoline addr */
+ regs->blink = (unsigned long)&kretprobe_trampoline;
+}
+
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task, either because multiple functions in the call path
+ * have a return probe installed on them, and/or more than one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address) {
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+ regs->ret = orig_ret_address;
+
+ reset_current_kprobe();
+ kretprobe_hash_unlock(current, &flags);
+ preempt_enable_no_resched();
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ /* By returning a non zero value, we are telling the kprobe handler
+ * that we don't want the post_handler to run
+ */
+ return 1;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ /* Registering the trampoline code for the kret probe */
+ return register_kprobe(&trampoline_p);
+}
+
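+/*
+ * Illustrative use only (not part of this port): a module would attach a
+ * probe through the generic kprobes API, e.g.
+ *
+ *	static struct kprobe kp = {
+ *		.symbol_name	= "do_fork",	// hypothetical target
+ *		.pre_handler	= my_pre_handler,
+ *	};
+ *	register_kprobe(&kp);
+ *
+ * which ends up in the arch_prepare_kprobe()/arch_arm_kprobe() hooks above.
+ */
+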
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+
+void trap_is_kprobe(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
+}
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
new file mode 100644
index 00000000000..cdd359352c0
--- /dev/null
+++ b/arch/arc/kernel/module.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <asm/unwind.h>
+
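+/*
+ * ARC stores 32-bit values in instruction words with the high half-word
+ * first (presumably "middle-endian", hence the _me / R_ARC_32_ME naming):
+ * e.g. patching in 0x12345678 writes the half-word 0x1234 followed by
+ * 0x5678, each in native byte order.
+ */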
+static inline void arc_write_me(unsigned short *addr, unsigned long value)
+{
+ *addr = (value & 0xffff0000) >> 16;
+ *(addr + 1) = (value & 0xffff);
+}
+
+/* ARC specific section quirks - before relocation loop in generic loader
+ *
+ * For dwarf unwinding out of modules, this needs to
+ * 1. Ensure the .debug_frame is allocatable (ARC Linker bug: despite
+ * -fasynchronous-unwind-tables it doesn't).
+ * 2. Since we are iterating through the section header table anyway, make
+ *    a note of the exact section index, for later use.
+ */
+int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
+ char *secstr, struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int i;
+
+ mod->arch.unw_sec_idx = 0;
+ mod->arch.unw_info = NULL;
+
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (strcmp(secstr+sechdrs[i].sh_name, ".debug_frame") == 0) {
+ sechdrs[i].sh_flags |= SHF_ALLOC;
+ mod->arch.unw_sec_idx = i;
+ break;
+ }
+ }
+#endif
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ if (mod->arch.unw_info)
+ unwind_remove_table(mod->arch.unw_info, 0);
+#endif
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex, /* sec index for sym tbl */
+ unsigned int relsec, /* sec index for relo sec */
+ struct module *module)
+{
+ int i, n;
+ Elf32_Rela *rel_entry = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym_entry, *sym_sec;
+ Elf32_Addr relocation;
+ Elf32_Addr location;
+ Elf32_Addr sec_to_patch;
+ int relo_type;
+
+ sec_to_patch = sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ sym_sec = (Elf32_Sym *) sechdrs[symindex].sh_addr;
+ n = sechdrs[relsec].sh_size / sizeof(*rel_entry);
+
+ pr_debug("\n========== Module Sym reloc ===========================\n");
+ pr_debug("Section to fixup %x\n", sec_to_patch);
+ pr_debug("=========================================================\n");
+ pr_debug("rela->r_off | rela->addend | sym->st_value | ADDR | VALUE\n");
+ pr_debug("=========================================================\n");
+
+ /* Loop thru entries in relocation section */
+ for (i = 0; i < n; i++) {
+
+ /* This is where to make the change */
+ location = sec_to_patch + rel_entry[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym_entry = sym_sec + ELF32_R_SYM(rel_entry[i].r_info);
+
+ relocation = sym_entry->st_value + rel_entry[i].r_addend;
+
+ pr_debug("\t%x\t\t%x\t\t%x %x %x [%s]\n",
+ rel_entry[i].r_offset, rel_entry[i].r_addend,
+ sym_entry->st_value, location, relocation,
+ strtab + sym_entry->st_name);
+
+ /* This assumes modules are built with -mlong-calls,
+ * so any branches/jumps are absolute 32-bit jumps and
+ * global data accesses are likewise absolute 32-bit.
+ * Both of these are handled by the same relocation type.
+ */
+ relo_type = ELF32_R_TYPE(rel_entry[i].r_info);
+
+ if (likely(R_ARC_32_ME == relo_type))
+ arc_write_me((unsigned short *)location, relocation);
+ else if (R_ARC_32 == relo_type)
+ *((Elf32_Addr *) location) = relocation;
+ else
+ goto relo_err;
+
+ }
+ return 0;
+
+relo_err:
+ pr_err("%s: unknown relocation: %u\n",
+ module->name, ELF32_R_TYPE(rel_entry[i].r_info));
+ return -ENOEXEC;
+
+}
+
+/* Just before lift-off: after sections have been relocated, we add the
+ * dwarf section to the unwinder table pool.
+ * This couldn't be done in module_frob_arch_sections() because
+ * relocations had not been applied by then.
+ */
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ void *unw;
+ int unwsec = mod->arch.unw_sec_idx;
+
+ if (unwsec) {
+ unw = unwind_add_table(mod, (void *)sechdrs[unwsec].sh_addr,
+ sechdrs[unwsec].sh_size);
+ mod->arch.unw_info = unw;
+ }
+#endif
+ return 0;
+}
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
new file mode 100644
index 00000000000..0a7531d9929
--- /dev/null
+++ b/arch/arc/kernel/process.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Amit Bhor, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/elf.h>
+#include <linux/tick.h>
+
+SYSCALL_DEFINE1(arc_settls, void *, user_tls_data_ptr)
+{
+ task_thread_info(current)->thr_ptr = (unsigned int)user_tls_data_ptr;
+ return 0;
+}
+
+/*
+ * We return the user space TLS data ptr as the sys-call return code.
+ * Ideally it should be a copy_to_user().
+ * However we can cheat, exploiting the fact that some sys-calls do
+ * legitimately return absurdly high values:
+ * since the TLS data ptr is never going to be in the 0xFFFF_xxxx range
+ * it won't be considered a sys-call error,
+ * and it is loads better than copy_to_user(), which is a definite
+ * D-TLB miss.
+ */
+SYSCALL_DEFINE0(arc_gettls)
+{
+ return task_thread_info(current)->thr_ptr;
+}
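+
+/*
+ * Hypothetical userland usage, for illustration only (the __NR_ name is
+ * assumed, not defined here):
+ *
+ *	void *tls = (void *)syscall(__NR_arc_gettls);
+ *
+ * i.e. the TLS pointer itself is the "return code", relying on the fact
+ * that it can never fall in the small negative range treated as an error.
+ */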
+
+static inline void arch_idle(void)
+{
+ /* sleep, but enable all interrupts before committing */
+ __asm__("sleep 0x3");
+}
+
+void cpu_idle(void)
+{
+ /* Since we SLEEP in idle loop, TIF_POLLING_NRFLAG can't be set */
+
+ /* endless idle loop with no priority at all */
+ while (1) {
+ tick_nohz_idle_enter();
+ rcu_idle_enter();
+
+doze:
+ local_irq_disable();
+ if (!need_resched()) {
+ arch_idle();
+ goto doze;
+ } else {
+ local_irq_enable();
+ }
+
+ rcu_idle_exit();
+ tick_nohz_idle_exit();
+
+ schedule_preempt_disabled();
+ }
+}
+
+asmlinkage void ret_from_fork(void);
+
+/* Layout of Child kernel mode stack as setup at the end of this function is
+ *
+ * | ... |
+ * | ... |
+ * | unused |
+ * | |
+ * ------------------ <==== top of Stack (thread.ksp)
+ * | UNUSED 1 word|
+ * ------------------
+ * | r25 |
+ * ~ ~
+ * | --to-- | (CALLEE Regs of user mode)
+ * | r13 |
+ * ------------------
+ * | fp |
+ * | blink | @ret_from_fork
+ * ------------------
+ * | |
+ * ~ ~
+ * ~ ~
+ * | |
+ * ------------------
+ * | r12 |
+ * ~ ~
+ * | --to-- | (scratch Regs of user mode)
+ * | r0 |
+ * ------------------
+ * | UNUSED 1 word|
+ * ------------------ <===== END of PAGE
+ */
+int copy_thread(unsigned long clone_flags,
+ unsigned long usp, unsigned long arg,
+ struct task_struct *p)
+{
+ struct pt_regs *c_regs; /* child's pt_regs */
+ unsigned long *childksp; /* to unwind out of __switch_to() */
+ struct callee_regs *c_callee; /* child's callee regs */
+ struct callee_regs *parent_callee; /* parent's callee regs */
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Mark the specific anchors to begin with (see pic above) */
+ c_regs = task_pt_regs(p);
+ childksp = (unsigned long *)c_regs - 2; /* 2 words for FP/BLINK */
+ c_callee = ((struct callee_regs *)childksp) - 1;
+
+ /*
+ * __switch_to() uses thread.ksp to start unwinding stack
+ * For kernel threads we don't need to create callee regs, the
+ * stack layout nevertheless needs to remain the same.
+ * Also, since __switch_to anyways unwinds callee regs, we use
+ * this to populate kernel thread entry-pt/args into callee regs,
+ * so that ret_from_kernel_thread() becomes simpler.
+ */
+ p->thread.ksp = (unsigned long)c_callee; /* THREAD_KSP */
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top */
+ childksp[0] = 0; /* fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* blink */
+
+ if (unlikely(p->flags & PF_KTHREAD)) {
+ memset(c_regs, 0, sizeof(struct pt_regs));
+
+ c_callee->r13 = arg; /* argument to kernel thread */
+ c_callee->r14 = usp; /* function */
+
+ return 0;
+ }
+
+ /*--------- User Task Only --------------*/
+
+ /* __switch_to expects FP(0), BLINK(return addr) at top of stack */
+ childksp[0] = 0; /* for POP fp */
+ childksp[1] = (unsigned long)ret_from_fork; /* for POP blink */
+
+ /* Copy parents pt regs on child's kernel mode stack */
+ *c_regs = *regs;
+
+ if (usp)
+ c_regs->sp = usp;
+
+ c_regs->r0 = 0; /* fork returns 0 in child */
+
+ parent_callee = ((struct callee_regs *)regs) - 1;
+ *c_callee = *parent_callee;
+
+ if (unlikely(clone_flags & CLONE_SETTLS)) {
+ /*
+ * set the task's userland TLS data ptr from the 4th arg;
+ * the clone C-lib call differs from the clone sys-call here
+ */
+ task_thread_info(p)->thr_ptr = regs->r3;
+ } else {
+ /* Normal fork case: set parent's TLS ptr in child */
+ task_thread_info(p)->thr_ptr =
+ task_thread_info(current)->thr_ptr;
+ }
+
+ return 0;
+}
+
+/*
+ * Some archs flush debug and FPU info here
+ */
+void flush_thread(void)
+{
+}
+
+/*
+ * Free any architecture-specific thread data structures, etc.
+ */
+void exit_thread(void)
+{
+}
+
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
+{
+ return 0;
+}
+
+/*
+ * API expected by scheduler code: if a thread is sleeping, where is it
+ * sleeping? What is this good for? It will always be the scheduler or
+ * ret_from_fork, so we hard-code that anyway.
+ */
+unsigned long thread_saved_pc(struct task_struct *t)
+{
+ struct pt_regs *regs = task_pt_regs(t);
+ unsigned long blink = 0;
+
+ /*
+ * If the thread being queried is not itself calling this, then it
+ * implies it is not executing, which in turn implies it is sleeping,
+ * which in turn implies it got switched OUT by the scheduler.
+ * In that case, its kernel mode blink can be reliably retrieved as per
+ * the picture above (right above pt_regs).
+ */
+ if (t != current && t->state != TASK_RUNNING)
+ blink = *((unsigned int *)regs - 1);
+
+ return blink;
+}
+
+int elf_check_arch(const struct elf32_hdr *x)
+{
+ unsigned int eflags;
+
+ if (x->e_machine != EM_ARCOMPACT)
+ return 0;
+
+ eflags = x->e_flags;
+ if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
+ pr_err("ABI mismatch - you need newer toolchain\n");
+ force_sigsegv(SIGSEGV, current);
+ return 0;
+ }
+
+ return 1;
+}
+EXPORT_SYMBOL(elf_check_arch);
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
new file mode 100644
index 00000000000..c6a81c58d0f
--- /dev/null
+++ b/arch/arc/kernel/ptrace.c
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/ptrace.h>
+#include <linux/tracehook.h>
+#include <linux/regset.h>
+#include <linux/unistd.h>
+#include <linux/elf.h>
+
+static struct callee_regs *task_callee_regs(struct task_struct *tsk)
+{
+ struct callee_regs *tmp = (struct callee_regs *)tsk->thread.callee_reg;
+ return tmp;
+}
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ void *kbuf, void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+ unsigned int stop_pc_val;
+
+#define REG_O_CHUNK(START, END, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, START), \
+ offsetof(struct user_regs_struct, END));
+
+#define REG_O_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, PTR, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ REG_O_CHUNK(scratch, callee, ptregs);
+ REG_O_CHUNK(callee, efa, cregs);
+ REG_O_CHUNK(efa, stop_pc, &target->thread.fault_address);
+
+ if (!ret) {
+ if (in_brkpt_trap(ptregs)) {
+ stop_pc_val = target->thread.fault_address;
+ pr_debug("\t\tstop_pc (brk-pt)\n");
+ } else {
+ stop_pc_val = ptregs->ret;
+ pr_debug("\t\tstop_pc (others)\n");
+ }
+
+ REG_O_ONE(stop_pc, &stop_pc_val);
+ }
+
+ return ret;
+}
+
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const struct pt_regs *ptregs = task_pt_regs(target);
+ const struct callee_regs *cregs = task_callee_regs(target);
+ int ret = 0;
+
+#define REG_IN_CHUNK(FIRST, NEXT, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, FIRST), \
+ offsetof(struct user_regs_struct, NEXT));
+
+#define REG_IN_ONE(LOC, PTR) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+#define REG_IGNORE_ONE(LOC) \
+ if (!ret) \
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ offsetof(struct user_regs_struct, LOC), \
+ offsetof(struct user_regs_struct, LOC) + 4);
+
+ /* TBD: disallow updates to STATUS32, orig_r8 etc*/
+ REG_IN_CHUNK(scratch, callee, ptregs); /* pt_regs[bta..orig_r8] */
+ REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
+ REG_IGNORE_ONE(efa); /* efa update invalid */
+ REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
+
+ return ret;
+}
+
+enum arc_getset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset arc_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .get = genregs_get,
+ .set = genregs_set,
+ }
+};
+
+static const struct user_regset_view user_arc_view = {
+ .name = UTS_MACHINE,
+ .e_machine = EM_ARCOMPACT,
+ .regsets = arc_regsets,
+ .n = ARRAY_SIZE(arc_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_arc_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret = -EIO;
+
+ pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+asmlinkage int syscall_trace_entry(struct pt_regs *regs)
+{
+ if (tracehook_report_syscall_entry(regs))
+ return ULONG_MAX;
+
+ return regs->r8;
+}
+
+asmlinkage void syscall_trace_exit(struct pt_regs *regs)
+{
+ tracehook_report_syscall_exit(regs, 0);
+}
diff --git a/arch/arc/kernel/reset.c b/arch/arc/kernel/reset.c
new file mode 100644
index 00000000000..e227a2b1c94
--- /dev/null
+++ b/arch/arc/kernel/reset.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+void machine_halt(void)
+{
+ /* Halt the processor */
+ __asm__ __volatile__("flag 1\n");
+}
+
+void machine_restart(char *__unused)
+{
+ /* Soft reset : jump to reset vector */
+ pr_info("Put your restart handler here\n");
+ machine_halt();
+}
+
+void machine_power_off(void)
+{
+ /* FIXME :: power off ??? */
+ machine_halt();
+}
+
+void (*pm_power_off) (void) = NULL;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
new file mode 100644
index 00000000000..dc0f968dae0
--- /dev/null
+++ b/arch/arc/kernel/setup.c
@@ -0,0 +1,473 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/seq_file.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/cpu.h>
+#include <linux/of_fdt.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+#include <asm/tlb.h>
+#include <asm/cache.h>
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <asm/unwind.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
+
+int running_on_hw = 1; /* vs. on ISS */
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;
+
+struct task_struct *_current_task[NR_CPUS]; /* For stack switching */
+
+struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+
+
+void __init read_arc_build_cfg_regs(void)
+{
+ struct bcr_perip uncached_space;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+ FIX_PTR(cpu);
+
+ READ_BCR(AUX_IDENTITY, cpu->core);
+
+ cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
+
+ cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
+ if (cpu->vec_base == 0)
+ cpu->vec_base = (unsigned int)_int_vec_base_lds;
+
+ READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
+ cpu->uncached_base = uncached_space.start << 24;
+
+ cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR);
+ cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
+ cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
+ cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
+ cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
+ READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
+
+ cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR);
+ cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR);
+
+ /* Note that we read the CCM BCRs independent of kernel config;
+ * this is to catch the cases where the user doesn't know that
+ * CCMs are present in the hardware build.
+ */
+ {
+ struct bcr_iccm iccm;
+ struct bcr_dccm dccm;
+ struct bcr_dccm_base dccm_base;
+ unsigned int bcr_32bit_val;
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
+ if (bcr_32bit_val) {
+ iccm = *((struct bcr_iccm *)&bcr_32bit_val);
+ cpu->iccm.base_addr = iccm.base << 16;
+ cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
+ }
+
+ bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
+ if (bcr_32bit_val) {
+ dccm = *((struct bcr_dccm *)&bcr_32bit_val);
+ cpu->dccm.sz = 0x800 << (dccm.sz);
+
+ READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
+ cpu->dccm.base_addr = dccm_base.addr << 8;
+ }
+ }
+
+ READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+
+ read_decode_mmu_bcr();
+ read_decode_cache_bcr();
+
+ READ_BCR(ARC_REG_FP_BCR, cpu->fp);
+ READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp);
+}
+
+static const struct cpuinfo_data arc_cpu_tbl[] = {
+ { {0x10, "ARCTangent A5"}, 0x1F},
+ { {0x20, "ARC 600" }, 0x2F},
+ { {0x30, "ARC 700" }, 0x33},
+ { {0x34, "ARC 700 R4.10"}, 0x34},
+ { {0x00, NULL } }
+};
+
+char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+ struct bcr_identity *core = &cpu->core;
+ const struct cpuinfo_data *tbl;
+ int be = 0;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ be = 1;
+#endif
+ FIX_PTR(cpu);
+
+ n += scnprintf(buf + n, len - n,
+ "\nARC IDENTITY\t: Family [%#02x]"
+ " Cpu-id [%#02x] Chip-id [%#4x]\n",
+ core->family, core->cpu_id,
+ core->chip_id);
+
+ for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
+ if ((core->family >= tbl->info.id) &&
+ (core->family <= tbl->up_range)) {
+ n += scnprintf(buf + n, len - n,
+ "processor\t: %s %s\n",
+ tbl->info.str,
+ be ? "[Big Endian]" : "");
+ break;
+ }
+ }
+
+ if (tbl->info.id == 0)
+ n += scnprintf(buf + n, len - n, "UNKNOWN ARC Processor\n");
+
+ n += scnprintf(buf + n, len - n, "CPU speed\t: %u.%02u Mhz\n",
+ (unsigned int)(arc_get_core_freq() / 1000000),
+ (unsigned int)(arc_get_core_freq() / 10000) % 100);
+
+ n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n",
+ (cpu->timers & 0x200) ? "TIMER1" : "",
+ (cpu->timers & 0x100) ? "TIMER0" : "");
+
+ n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n",
+ cpu->vec_base);
+
+ n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n",
+ cpu->uncached_base);
+
+ return buf;
+}
+
+static const struct id_to_str mul_type_nm[] = {
+ { 0x0, "N/A"},
+ { 0x1, "32x32 (spl Result Reg)" },
+ { 0x2, "32x32 (ANY Result Reg)" }
+};
+
+static const struct id_to_str mac_mul_nm[] = {
+ {0x0, "N/A"},
+ {0x1, "N/A"},
+ {0x2, "Dual 16 x 16"},
+ {0x3, "N/A"},
+ {0x4, "32x16"},
+ {0x5, "N/A"},
+ {0x6, "Dual 16x16 and 32x16"}
+};
+
+char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
+
+ FIX_PTR(cpu);
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
+#define IS_USED(var) ((var) ? "(in-use)" : "(not used)")
+
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-Base]\t: %s %s %s %s %s %s\n",
+ IS_AVAIL2(cpu->extn.norm, "norm,"),
+ IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"),
+ IS_AVAIL1(cpu->extn.swap, "swap,"),
+ IS_AVAIL2(cpu->extn.minmax, "minmax,"),
+ IS_AVAIL1(cpu->extn.crc, "crc,"),
+ IS_AVAIL2(cpu->extn.ext_arith, "ext-arith"));
+
+ n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s",
+ mul_type_nm[cpu->extn.mul].str);
+
+ n += scnprintf(buf + n, len - n, " MAC MPY: %s\n",
+ mac_mul_nm[cpu->extn_mac_mul.type].str);
+
+ if (cpu->core.family == 0x34) {
+ n += scnprintf(buf + n, len - n,
+ "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n",
+ IS_USED(__CONFIG_ARC_HAS_LLSC_VAL),
+ IS_USED(__CONFIG_ARC_HAS_SWAPE_VAL),
+ IS_USED(__CONFIG_ARC_HAS_RTSC_VAL));
+ }
+
+ n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
+ !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
+
+ if (cpu->dccm.sz)
+ n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
+ cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
+
+ if (cpu->iccm.sz)
+ n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
+ cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+
+ n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
+ !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
+
+ if (cpu->fp.ver)
+ n += scnprintf(buf + n, len - n, "SP [v%d] %s",
+ cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
+
+ if (cpu->dpfp.ver)
+ n += scnprintf(buf + n, len - n, "DP [v%d] %s",
+ cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
+
+ n += scnprintf(buf + n, len - n, "\n");
+
+#ifdef _ASM_GENERIC_UNISTD_H
+ n += scnprintf(buf + n, len - n,
+ "OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
+#endif
+
+ return buf;
+}
+
+void __init arc_chk_ccms(void)
+{
+#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ /*
+ * DCCM can be placed arbitrarily in hardware.
+ * Make sure its placement/size matches what Linux is built with.
+ */
+ if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
+ panic("Linux built with incorrect DCCM Base address\n");
+
+ if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
+ panic("Linux built with incorrect DCCM Size\n");
+#endif
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
+ panic("Linux built with incorrect ICCM Size\n");
+#endif
+#endif
+}
+
+/*
+ * Ensure that FP hardware and kernel config match
+ * -If hardware contains DPFP, kernel needs to save/restore FPU state
+ * across context switches
+ * -If hardware lacks DPFP, but the kernel is configured to save FPU state,
+ * then the kernel trying to access the non-existent DPFP regs will crash
+ *
+ * We only check for Dbl precision Floating Point, because only DPFP
+ * hardware has dedicated regs which need to be saved/restored on ctx-sw
+ * (Single Precision uses core regs), thus kernel is kind of oblivious to it
+ */
+void __init arc_chk_fpu(void)
+{
+ struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
+
+ if (cpu->dpfp.ver) {
+#ifndef CONFIG_ARC_FPU_SAVE_RESTORE
+ pr_warn("DPFP support broken in this kernel...\n");
+#endif
+ } else {
+#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
+ panic("H/w lacks DPFP support, apps won't work\n");
+#endif
+ }
+}
+
+/*
+ * Initialize and set up the processor core.
+ * This is called by all the CPUs, thus it should not do anything special
+ * cased for the boot CPU only, etc.
+ */
+
+void __init setup_processor(void)
+{
+ char str[512];
+ int cpu_id = smp_processor_id();
+
+ read_arc_build_cfg_regs();
+ arc_init_IRQ();
+
+ printk(arc_cpu_mumbojumbo(cpu_id, str, sizeof(str)));
+
+ arc_mmu_init();
+ arc_cache_init();
+ arc_chk_ccms();
+
+ printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
+
+#ifdef CONFIG_SMP
+ printk(arc_platform_smp_cpuinfo());
+#endif
+
+ arc_chk_fpu();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+#ifdef CONFIG_CMDLINE_UBOOT
+ /* Make sure a whitespace separates the u-boot bootargs from the appended cmdline */
+ strlcat(command_line, " ", sizeof(command_line));
+#endif
+ /*
+ * Append .config cmdline to base command line, which might already
+ * contain u-boot "bootargs" (handled by head.S, if so configured)
+ */
+ strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line));
+
+ /* Save unparsed command line copy for /proc/cmdline */
+ strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+
+ /* To force early parsing of things like mem=xxx */
+ parse_early_param();
+
+ /* Platform/board specific: e.g. early console registration */
+ if (machine_desc->init_early)
+ machine_desc->init_early();
+
+ setup_processor();
+
+#ifdef CONFIG_SMP
+ smp_init_cpus();
+#endif
+
+ setup_arch_memory();
+
+ /* copy flat DT out of .init and then unflatten it */
+ copy_devtree();
+ unflatten_device_tree();
+
+ /* Could be an issue if someone passes the cmd line arg "ro",
+ * but that is unlikely, so keeping it as it is.
+ */
+ root_mountflags &= ~MS_RDONLY;
+
+ console_verbose();
+
+#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+
+ arc_unwind_init();
+ arc_unwind_setup();
+}
+
+static int __init customize_machine(void)
+{
+ /* Add platform devices */
+ if (machine_desc->init_machine)
+ machine_desc->init_machine();
+
+ return 0;
+}
+arch_initcall(customize_machine);
+
+static int __init init_late_machine(void)
+{
+ if (machine_desc->init_late)
+ machine_desc->init_late();
+
+ return 0;
+}
+late_initcall(init_late_machine);
+/*
+ * Get CPU information for use by the procfs.
+ */
+
+#define cpu_to_ptr(c) ((void *)(0xFFFF0000 | (unsigned int)(c)))
+#define ptr_to_cpu(p) (~0xFFFF0000UL & (unsigned int)(p))
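+/*
+ * e.g. cpu 2 is handed to the ->show() callback as (void *)0xFFFF0002 and
+ * decoded back to 2, keeping NULL free to mean "stop iterating" even
+ * though 0 is a valid cpu-id.
+ */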
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *str;
+ int cpu_id = ptr_to_cpu(v);
+
+ str = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!str)
+ goto done;
+
+ seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n",
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+ seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
+
+#ifdef CONFIG_SMP
+ seq_printf(m, arc_platform_smp_cpuinfo());
+#endif
+
+ free_page((unsigned long)str);
+done:
+ seq_printf(m, "\n\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ /*
+ * Callback returns cpu-id to iterator for show routine, NULL to stop.
+ * However since NULL is also a valid cpu-id (0), we use a roundabout
+ * way to pass it w/o having to kmalloc/free a 2 byte string.
+ * Encode cpu-id as 0xFFFFcccc, which is decoded by the show routine.
+ */
+ return *pos < num_possible_cpus() ? cpu_to_ptr(*pos) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo
+};
+
+static DEFINE_PER_CPU(struct cpu, cpu_topology);
+
+static int __init topology_init(void)
+{
+ int cpu;
+
+ for_each_present_cpu(cpu)
+ register_cpu(&per_cpu(cpu_topology, cpu), cpu);
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
new file mode 100644
index 00000000000..ee6ef2f60a2
--- /dev/null
+++ b/arch/arc/kernel/signal.c
@@ -0,0 +1,360 @@
+/*
+ * Signal Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2010 (Restarting of timer related syscalls)
+ *
+ * vineetg: Nov 2009 (Everything needed for TIF_RESTORE_SIGMASK)
+ * -do_signal() supports TIF_RESTORE_SIGMASK
+ * -do_signal() no longer needs oldset, required by OLD sys_sigsuspend
+ * -sys_rt_sigsuspend() now comes from generic code, so discard the arch
+ *  implementation
+ * -sys_sigsuspend() no longer needs to fudge ptregs, hence that arg removed
+ * -sys_sigsuspend() no longer loops for do_signal(), sets TIF_xxx and leaves
+ * the job to do_signal()
+ *
+ * vineetg: July 2009
+ * -Modified Code to support the uClibc provided userland sigreturn stub
+ * to avoid the kernel synthesizing it on the user stack at runtime, costing TLB
+ * probes and Cache line flushes.
+ *
+ * vineetg: July 2009
+ * -In stash_usr_regs( ) and restore_usr_regs( ), save/restore of user regs
+ * is done as a block copy rather than one word at a time.
+ * This saves around 2K of code and improves LMBench lat_sig <catch>
+ *
+ * rajeshwarr: Feb 2009
+ * - Support for Realtime Signals
+ *
+ * vineetg: Aug 11th 2008: Bug #94183
+ * -ViXS were still seeing crashes when using insmod to load drivers.
+ * It turned out that the code to change Execute permissions for TLB entries
+ * of user was not guarded for interrupts (mod_tlb_permission).
+ * This was causing TLB entries to be overwritten on unrelated indexes.
+ *
+ * Vineetg: July 15th 2008: Bug #94183
+ * -Exception happens in Delay slot of a JMP, and before user space resumes,
+ * Signal is delivered (Ctrl + C) => SIGINT.
+ * setup_frame( ) sets up PC,SP,BLINK to enable user space signal handler
+ * to run, but doesn't clear the Delay slot bit from status32. As a result,
+ * on resuming user mode, signal handler branches off to BTA of orig JMP
+ * -FIX: clear the DE bit from status32 in setup_frame( )
+ *
+ * Rahul Trivedi, Kanika Nema: Codito Technologies 2004
+ */
+
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <asm/ucontext.h>
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+#define MAGIC_SIGALTSTK 0x07302004
+ unsigned int sigret_magic;
+};
+
+static int
+stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
+ sigset_t *set)
+{
+ int err;
+ err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+ err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));
+
+ return err;
+}
+
+static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int err;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (!err)
+ set_current_blocked(&set);
+
+ err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+ sizeof(sf->uc.uc_mcontext.regs.scratch));
+
+ return err;
+}
+
+static inline int is_do_ss_needed(unsigned int magic)
+{
+ if (MAGIC_SIGALTSTK == magic)
+ return 1;
+ else
+ return 0;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic;
+ int err;
+ struct pt_regs *regs = current_pt_regs();
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* Since we stacked the signal on a word boundary,
+ * 'sp' should be word aligned here. If it's
+ * not, then the user is trying to mess with us.
+ */
+ if (regs->sp & 3)
+ goto badframe;
+
+ sf = (struct rt_sigframe __force __user *)(regs->sp);
+
+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ goto badframe;
+
+ err = restore_usr_regs(regs, sf);
+ err |= __get_user(magic, &sf->sigret_magic);
+ if (err)
+ goto badframe;
+
+ if (unlikely(is_do_ss_needed(magic)))
+ if (restore_altstack(&sf->uc.uc_stack))
+ goto badframe;
+
+ /* Don't restart from sigreturn */
+ syscall_wont_restart(regs);
+
+ return regs->r0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Determine which stack to use..
+ */
+static inline void __user *get_sigframe(struct k_sigaction *ka,
+ struct pt_regs *regs,
+ unsigned long framesize)
+{
+ unsigned long sp = regs->sp;
+ void __user *frame;
+
+ /* This is the X/Open sanctioned signal stack switching */
+ if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp))
+ sp = current->sas_ss_sp + current->sas_ss_size;
+
+ /* No matter what happens, 'sp' must be word
+ * aligned otherwise nasty things could happen
+ */
+
+ /* ATPCS B01 mandates 8-byte alignment */
+ frame = (void __user *)((sp - framesize) & ~7);
+
+ /* Check that we can actually write to the signal frame */
+ if (!access_ok(VERIFY_WRITE, frame, framesize))
+ frame = NULL;
+
+ return frame;
+}
+
+/*
+ * translate the signal
+ */
+static inline int map_sig(int sig)
+{
+ struct thread_info *thread = current_thread_info();
+ if (thread->exec_domain && thread->exec_domain->signal_invmap
+ && sig < 32)
+ sig = thread->exec_domain->signal_invmap[sig];
+ return sig;
+}
+
+static int
+setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *sf;
+ unsigned int magic = 0;
+ int err = 0;
+
+ sf = get_sigframe(ka, regs, sizeof(struct rt_sigframe));
+ if (!sf)
+ return 1;
+
+ /*
+ * SA_SIGINFO requires 3 args to signal handler:
+ * #1: sig-no (common to any handler)
+ * #2: struct siginfo
+ * #3: struct ucontext (completely populated)
+ */
+ if (unlikely(ka->sa.sa_flags & SA_SIGINFO)) {
+ err |= copy_siginfo_to_user(&sf->info, info);
+ err |= __put_user(0, &sf->uc.uc_flags);
+ err |= __put_user(NULL, &sf->uc.uc_link);
+ err |= __save_altstack(&sf->uc.uc_stack, regs->sp);
+
+ /* setup args 2 and 3 for user mode handler */
+ regs->r1 = (unsigned long)&sf->info;
+ regs->r2 = (unsigned long)&sf->uc;
+
+ /*
+ * small optimization to avoid unconditionally calling do_sigaltstack
+ * in the sigreturn path, now that we only have rt_sigreturn
+ */
+ magic = MAGIC_SIGALTSTK;
+ }
+
+ /*
+ * w/o SA_SIGINFO, struct ucontext is partially populated (only
+ * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+ * during signal handler execution. This works for SA_SIGINFO as well
+ * although the semantics are now overloaded (the same reg state can be
+ * inspected by userland - but is it allowed to fiddle with it?).
+ */
+ err |= stash_usr_regs(sf, regs, set);
+ err |= __put_user(magic, &sf->sigret_magic);
+ if (err)
+ return err;
+
+ /* #1 arg to the user Signal handler */
+ regs->r0 = map_sig(signo);
+
+ /* setup PC of user space signal handler */
+ regs->ret = (unsigned long)ka->sa.sa_handler;
+
+ /*
+ * handler returns using the sigreturn stub already provided by userspace
+ */
+ BUG_ON(!(ka->sa.sa_flags & SA_RESTORER));
+ regs->blink = (unsigned long)ka->sa.sa_restorer;
+
+ /* User Stack for signal handler will be above the frame just carved */
+ regs->sp = (unsigned long)sf;
+
+ /*
+ * Bug 94183, Clear the DE bit, so that when signal handler
+ * starts to run, it doesn't use BTA
+ */
+ regs->status32 &= ~STATUS_DE_MASK;
+ regs->status32 |= STATUS_L_MASK;
+
+ return err;
+}
+
+static void arc_restart_syscall(struct k_sigaction *ka, struct pt_regs *regs)
+{
+ switch (regs->r0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ /*
+ * ERESTARTNOHAND means that the syscall should
+ * only be restarted if there was no handler for
+ * the signal, and since we only get here if there
+ * is a handler, we don't restart
+ */
+ regs->r0 = -EINTR; /* ERESTART_xxx is internal */
+ break;
+
+ case -ERESTARTSYS:
+ /*
+ * ERESTARTSYS means to restart the syscall if
+ * there is no handler or the handler was
+ * registered with SA_RESTART
+ */
+ if (!(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+
+ case -ERESTARTNOINTR:
+ /*
+ * ERESTARTNOINTR means that the syscall should
+ * be called again after the signal handler returns.
+ * Setup reg state just as it was before doing the trap
+ * r0 has been clobbered with sys call ret code thus it
+ * needs to be reloaded with orig first arg to syscall
+ * in orig_r0. Rest of relevant reg-file:
+ * r8 (syscall num) and (r1 - r7) will be reset to
+ * their orig user space value when we ret from kernel
+ */
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
+ struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(sig, ka, info, oldset, regs);
+
+ if (ret)
+ force_sigsegv(sig, current);
+ else
+ signal_delivered(sig, info, ka, regs, 0);
+}
+
+void do_signal(struct pt_regs *regs)
+{
+ struct k_sigaction ka;
+ siginfo_t info;
+ int signr;
+ int restart_scall;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+
+ restart_scall = in_syscall(regs) && syscall_restartable(regs);
+
+ if (signr > 0) {
+ if (restart_scall) {
+ arc_restart_syscall(&ka, regs);
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+ handle_signal(signr, &ka, &info, regs);
+ return;
+ }
+
+ if (restart_scall) {
+ /* No handler for syscall: restart it */
+ if (regs->r0 == -ERESTARTNOHAND ||
+ regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) {
+ regs->r0 = regs->orig_r0;
+ regs->ret -= 4;
+ } else if (regs->r0 == -ERESTART_RESTARTBLOCK) {
+ regs->r8 = __NR_restart_syscall;
+ regs->ret -= 4;
+ }
+ syscall_wont_restart(regs); /* No more restarts */
+ }
+
+ /* If there's no signal to deliver, restore the saved sigmask back */
+ restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * ASM glue guarantees that this is only called when returning to
+ * user mode
+ */
+ if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
new file mode 100644
index 00000000000..3af3e06dcf0
--- /dev/null
+++ b/arch/arc/kernel/smp.c
@@ -0,0 +1,332 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RajeshwarR: Dec 11, 2007
+ * -- Added support for Inter Processor Interrupts
+ *
+ * Vineetg: Nov 1st, 2007
+ * -- Initial Write (Borrowed heavily from ARM)
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock_types.h>
+#include <linux/reboot.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/mach_desc.h>
+
+arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+
+struct plat_smp_ops plat_smp_ops;
+
+/* XXX: per cpu ? Only needed once in early secondary boot */
+struct task_struct *secondary_idle_tsk;
+
+/* Called from start_kernel */
+void __init smp_prepare_boot_cpu(void)
+{
+}
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+void __init smp_init_cpus(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpu_possible(i, true);
+}
+
+/* called from init ( ) => process 1 */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+
+}
+
+/*
+ * After power-up, a non Master CPU needs to wait for Master to kick start it
+ *
+ * The default implementation halts
+ *
+ * This relies on platform specific support allowing Master to directly set
+ * this CPU's PC (to @first_lines_of_secondary()) and kick-start it.
+ *
+ * Lacking such h/w assist, a platform can override this function
+ * - make this function busy-spin on a token, eventually set by Master
+ * (from arc_platform_smp_wakeup_cpu())
+ * - Once token is available, jump to @first_lines_of_secondary
+ * (using inline asm).
+ *
+ * Alert: can NOT use stack here as it has not been determined/setup for CPU.
+ * If it turns out to be elaborate, it's better to code it in assembly
+ *
+ */
+void __attribute__((weak)) arc_platform_smp_wait_to_boot(int cpu)
+{
+ /*
+ * As a hack for debugging - since debugger will single-step over the
+ * FLAG insn - wrap the halt itself in a self-loop
+ */
+ __asm__ __volatile__(
+ "1: \n"
+ " flag 1 \n"
+ " b 1b \n");
+}
+
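+/*
+ * Sketch of a possible platform override, as suggested in the comment
+ * above (illustration only; plat_boot_token is a made-up name): busy-wait
+ * on a token set by the master from its cpu_kick() hook, then jump to the
+ * common secondary entry point without touching the stack.
+ *
+ *	void arc_platform_smp_wait_to_boot(int cpu)
+ *	{
+ *		while (!plat_boot_token[cpu])
+ *			cpu_relax();
+ *
+ *		__asm__ __volatile__("j @first_lines_of_secondary");
+ *	}
+ */
+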
+const char *arc_platform_smp_cpuinfo(void)
+{
+ return plat_smp_ops.info;
+}
+
+/*
+ * The very first "C" code executed by secondary
+ * Called from asm stub in head.S
+ * "current"/R25 already setup by low level boot code
+ */
+void __cpuinit start_kernel_secondary(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ /* MMU, Caches, Vector Table, Interrupts etc */
+ setup_processor();
+
+ atomic_inc(&mm->mm_users);
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+
+ notify_cpu_starting(cpu);
+ set_cpu_online(cpu, true);
+
+ pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
+
+ if (machine_desc->init_smp)
+ machine_desc->init_smp(smp_processor_id());
+
+ arc_local_timer_setup(cpu);
+
+ local_irq_enable();
+ preempt_disable();
+ cpu_idle();
+}
+
+/*
+ * Called from kernel_init( ) -> smp_init( ) - for each CPU
+ *
+ * At this point, the secondary processor is "HALT"ed:
+ * -either it booted, but was halted in head.S
+ * -or it was configured to halt-on-reset
+ * so we need to wake it up.
+ *
+ * The essential requirements are where to run from (PC) and the stack (SP).
+*/
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long wait_till;
+
+ secondary_idle_tsk = idle;
+
+ pr_info("Idle Task [%d] %p", cpu, idle);
+ pr_info("Trying to bring up CPU%u ...\n", cpu);
+
+ if (plat_smp_ops.cpu_kick)
+ plat_smp_ops.cpu_kick(cpu,
+ (unsigned long)first_lines_of_secondary);
+
+ /* wait for 1 sec after kicking the secondary */
+ wait_till = jiffies + HZ;
+ while (time_before(jiffies, wait_till)) {
+ if (cpu_online(cpu))
+ break;
+ }
+
+ if (!cpu_online(cpu)) {
+ pr_info("Timeout: CPU%u FAILED to comeup !!!\n", cpu);
+ return -1;
+ }
+
+ secondary_idle_tsk = NULL;
+
+ return 0;
+}
+
+/*
+ * not supported here
+ */
+int __init setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+/*****************************************************************************/
+/* Inter Processor Interrupt Handling */
+/*****************************************************************************/
+
+/*
+ * Structures for inter-processor calls:
+ * a collection of single-bit IPI messages
+ *
+ */
+
+/*
+ * TODO_rajesh investigate tlb message types.
+ * IPI Timer not needed because each ARC has an individual Interrupting Timer
+ */
+enum ipi_msg_type {
+ IPI_NOP = 0,
+ IPI_RESCHEDULE = 1,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP
+};
+
+struct ipi_data {
+ unsigned long bits;
+};
+
+static DEFINE_PER_CPU(struct ipi_data, ipi_data);
+
+static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
+{
+ unsigned long flags;
+ unsigned int cpu;
+
+ local_irq_save(flags);
+
+ for_each_cpu(cpu, callmap) {
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ set_bit(msg, &ipi->bits);
+ }
+
+ /* Call the platform specific cross-CPU call function */
+ if (plat_smp_ops.ipi_send)
+ plat_smp_ops.ipi_send((void *)callmap);
+
+ local_irq_restore(flags);
+}
+
+void smp_send_reschedule(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_RESCHEDULE);
+}
+
+void smp_send_stop(void)
+{
+ struct cpumask targets;
+ cpumask_copy(&targets, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &targets);
+ ipi_send_msg(&targets, IPI_CPU_STOP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ ipi_send_msg(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+ ipi_send_msg(mask, IPI_CALL_FUNC);
+}
+
+/*
+ * ipi_cpu_stop - handle IPI from smp_send_stop()
+ */
+static void ipi_cpu_stop(unsigned int cpu)
+{
+ machine_halt();
+}
+
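+/*
+ * Dispatch each message bit set in the snapshot @ops.  The scan starts at
+ * bit 1, so IPI_NOP (bit 0) is never acted upon.
+ */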
+static inline void __do_IPI(unsigned long *ops, struct ipi_data *ipi, int cpu)
+{
+ unsigned long msg = 0;
+
+ do {
+ msg = find_next_bit(ops, BITS_PER_LONG, msg+1);
+
+ switch (msg) {
+ case IPI_RESCHEDULE:
+ scheduler_ipi();
+ break;
+
+ case IPI_CALL_FUNC:
+ generic_smp_call_function_interrupt();
+ break;
+
+ case IPI_CALL_FUNC_SINGLE:
+ generic_smp_call_function_single_interrupt();
+ break;
+
+ case IPI_CPU_STOP:
+ ipi_cpu_stop(cpu);
+ break;
+ }
+ } while (msg < BITS_PER_LONG);
+
+}
+
+/*
+ * arch-common ISR to handle for inter-processor interrupts
+ * Has hooks for platform specific IPI
+ */
+irqreturn_t do_IPI(int irq, void *dev_id)
+{
+ int cpu = smp_processor_id();
+ struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
+ unsigned long ops;
+
+ if (plat_smp_ops.ipi_clear)
+ plat_smp_ops.ipi_clear(cpu, irq);
+
+ /*
+ * XXX: is this loop really needed?
+ * And does the ipi_clear call need to move inside it?
+ */
+ while ((ops = xchg(&ipi->bits, 0)) != 0)
+ __do_IPI(&ops, ipi, cpu);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * API called by platform code to hookup arch-common ISR to their IPI IRQ
+ */
+static DEFINE_PER_CPU(int, ipi_dev);
+int smp_ipi_irq_setup(int cpu, int irq)
+{
+ int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
+ return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+}
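+
+/*
+ * Rough usage sketch (the foo_* names and FOO_IPI_IRQ are made up): a
+ * platform plugs its IPI hardware into this arch-common code by filling
+ * plat_smp_ops and calling smp_ipi_irq_setup() from its SMP init path:
+ *
+ *	static void foo_ipi_send(void *callmap)     { ... trigger IPI h/w ... }
+ *	static void foo_ipi_clear(int cpu, int irq) { ... ack IPI h/w ... }
+ *
+ *	void __init foo_init_smp(unsigned int cpu)
+ *	{
+ *		plat_smp_ops.ipi_send  = foo_ipi_send;
+ *		plat_smp_ops.ipi_clear = foo_ipi_clear;
+ *		smp_ipi_irq_setup(cpu, FOO_IPI_IRQ);
+ *	}
+ */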
diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c
new file mode 100644
index 00000000000..a63ff842564
--- /dev/null
+++ b/arch/arc/kernel/stacktrace.c
@@ -0,0 +1,254 @@
+/*
+ * stacktrace.c : stacktracing APIs needed by rest of kernel
+ * (wrappers over ARC dwarf based unwinder)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: aug 2009
+ * -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
+ * for displaying task's kernel mode call stack in /proc/<pid>/stack
+ * -Iterator based approach to have single copy of unwinding core and APIs
+ * needing unwinding, implement the logic in iterator regarding:
+ * = which frame onwards to start capture
+ * = which frame to stop capturing (wchan)
+ * = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
+ *
+ * vineetg: March 2009
+ * -Implemented correct versions of thread_saved_pc() and get_wchan()
+ *
+ * rajeshwarr: 2008
+ * -Initial implementation
+ */
+
+#include <linux/ptrace.h>
+#include <linux/export.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <asm/arcregs.h>
+#include <asm/unwind.h>
+#include <asm/switch_to.h>
+
+/*-------------------------------------------------------------------------
+ * Unwinder Iterator
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+
+static void seed_unwind_frame_info(struct task_struct *tsk,
+ struct pt_regs *regs,
+ struct unwind_frame_info *frame_info)
+{
+ if (tsk == NULL && regs == NULL) {
+ unsigned long fp, sp, blink, ret;
+ frame_info->task = current;
+
+ __asm__ __volatile__(
+ "mov %0,r27\n\t"
+ "mov %1,r28\n\t"
+ "mov %2,r31\n\t"
+ "mov %3,r63\n\t"
+ : "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
+ );
+
+ frame_info->regs.r27 = fp;
+ frame_info->regs.r28 = sp;
+ frame_info->regs.r31 = blink;
+ frame_info->regs.r63 = ret;
+ frame_info->call_frame = 0;
+ } else if (regs == NULL) {
+
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = KSTK_FP(tsk);
+ frame_info->regs.r28 = KSTK_ESP(tsk);
+ frame_info->regs.r31 = KSTK_BLINK(tsk);
+ frame_info->regs.r63 = (unsigned int)__switch_to;
+
+ /* In the prologue of __switch_to, FP is first saved on the stack
+ * and then SP is copied to FP. Dwarf assumes the CFA is FP based,
+ * but we didn't save FP. The value retrieved above is FP's
+ * state in the previous frame.
+ * As a workaround, we unwind from the start of __switch_to
+ * and adjust SP accordingly. The other limitation is that
+ * dwarf rules are not generated for the __switch_to inline
+ * assembly code.
+ */
+ frame_info->regs.r27 = 0;
+ frame_info->regs.r28 += 64;
+ frame_info->call_frame = 0;
+
+ } else {
+ frame_info->task = tsk;
+
+ frame_info->regs.r27 = regs->fp;
+ frame_info->regs.r28 = regs->sp;
+ frame_info->regs.r31 = regs->blink;
+ frame_info->regs.r63 = regs->ret;
+ frame_info->call_frame = 0;
+ }
+}
+
+#endif
+
+static noinline unsigned int
+arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
+ int (*consumer_fn) (unsigned int, void *), void *arg)
+{
+#ifdef CONFIG_ARC_DW2_UNWIND
+ int ret = 0;
+ unsigned int address;
+ struct unwind_frame_info frame_info;
+
+ seed_unwind_frame_info(tsk, regs, &frame_info);
+
+ while (1) {
+ address = UNW_PC(&frame_info);
+
+ if (address && __kernel_text_address(address)) {
+ if (consumer_fn(address, arg) == -1)
+ break;
+ }
+
+ ret = arc_unwind(&frame_info);
+
+ if (ret == 0) {
+ frame_info.regs.r63 = frame_info.regs.r31;
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ return address; /* return the last address it saw */
+#else
+ /* On ARC, only the dwarf based unwinder works. fp based backtracing is
+ * not possible (even with -fno-omit-frame-pointer) because of the way the
+ * function prologue is set up (callee regs saved and then fp set, not the
+ * other way around).
+ */
+ pr_warn("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
+ return 0;
+
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * callbacks called by unwinder iterator to implement kernel APIs
+ *
+ * The callback can return -1 to force the iterator to stop, which by default
+ * keeps going till the bottom-most frame.
+ *-------------------------------------------------------------------------
+ */
+
+/* Call-back which plugs into unwinding core to dump the stack in
+ * case of panic/OOPs/BUG etc
+ */
+static int __print_sym(unsigned int address, void *unused)
+{
+ __print_symbol(" %s\n", address);
+ return 0;
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/* Call-back which plugs into unwinding core to capture the
+ * traces needed by kernel on /proc/<pid>/stack
+ */
+static int __collect_all(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+static int __collect_all_but_sched(unsigned int address, void *arg)
+{
+ struct stack_trace *trace = arg;
+
+ if (in_sched_functions(address))
+ return 0;
+
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = address;
+
+ if (trace->nr_entries >= trace->max_entries)
+ return -1;
+
+ return 0;
+}
+
+#endif
+
+static int __get_first_nonsched(unsigned int address, void *unused)
+{
+ if (in_sched_functions(address))
+ return 0;
+
+ return -1;
+}
+
+/*-------------------------------------------------------------------------
+ * APIs expected by various kernel sub-systems
+ *-------------------------------------------------------------------------
+ */
+
+noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
+{
+ pr_info("\nStack Trace:\n");
+ arc_unwind_core(tsk, regs, __print_sym, NULL);
+}
+EXPORT_SYMBOL(show_stacktrace);
+
+/* Expected by sched Code */
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+ show_stacktrace(tsk, NULL);
+}
+
+/* Expected by Rest of kernel code */
+void dump_stack(void)
+{
+ show_stacktrace(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
+
+/* Another API expected by the scheduler; shows up in "ps" as Wait Channel.
+ * Of course just returning schedule( ) would be pointless, so unwind until
+ * the function is not in scheduler code.
+ */
+unsigned int get_wchan(struct task_struct *tsk)
+{
+ return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+/*
+ * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
+ * A typical use is when /proc/<pid>/stack is queried by userland
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
+}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ arc_unwind_core(current, NULL, __collect_all, trace);
+}
+#endif
diff --git a/arch/arc/kernel/sys.c b/arch/arc/kernel/sys.c
new file mode 100644
index 00000000000..f6bdd07583f
--- /dev/null
+++ b/arch/arc/kernel/sys.c
@@ -0,0 +1,18 @@
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#define sys_clone sys_clone_wrapper
+#define sys_fork sys_fork_wrapper
+#define sys_vfork sys_vfork_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[NR_syscalls] = {
+ [0 ... NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
new file mode 100644
index 00000000000..f13f72807aa
--- /dev/null
+++ b/arch/arc/kernel/time.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Jan 2011
+ * -sched_clock( ) no longer jiffies based. Uses the same clocksource
+ * as gtod
+ *
+ * Rajeshwarr/Vineetg: Mar 2008
+ * -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
+ * for arch independent gettimeofday()
+ * -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
+ *
+ * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
+ * Each can be programmed to go from @count to @limit and optionally
+ * interrupt when that happens.
+ * A write to Control Register clears the Interrupt
+ *
+ * We've designated TIMER0 for events (clockevents)
+ * while TIMER1 for free running (clocksource)
+ *
+ * Newer ARC700 cores have 64bit clk fetching RTSC insn, preferred over TIMER1
+ */
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <asm/irq.h>
+#include <asm/arcregs.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+
+#define ARC_TIMER_MAX 0xFFFFFFFF
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_HAS_RTSC
+
+int __cpuinit arc_counter_setup(void)
+{
+ /* RTSC insn taps into cpu clk, needs no setup */
+
+ /* For SMP, only allowed if cross-core-sync, hence usable as cs */
+ return 1;
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ unsigned long flags;
+ union {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ struct { u32 high, low; };
+#else
+ struct { u32 low, high; };
+#endif
+ cycle_t full;
+ } stamp;
+
+ flags = arch_local_irq_save();
+
+ __asm__ __volatile(
+ " .extCoreRegister tsch, 58, r, cannot_shortcut \n"
+ " rtsc %0, 0 \n"
+ " mov %1, 0 \n"
+ : "=r" (stamp.low), "=r" (stamp.high));
+
+ arch_local_irq_restore(flags);
+
+ return stamp.full;
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC RTSC",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#else /* !CONFIG_ARC_HAS_RTSC */
+
+static bool is_usable_as_clocksource(void)
+{
+#ifdef CONFIG_SMP
+ return 0;
+#else
+ return 1;
+#endif
+}
+
+/*
+ * set 32bit TIMER1 to keep counting monotonically and wraparound
+ */
+int __cpuinit arc_counter_setup(void)
+{
+ write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
+ write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+ return is_usable_as_clocksource();
+}
+
+static cycle_t arc_counter_read(struct clocksource *cs)
+{
+ return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter = {
+ .name = "ARC Timer1",
+ .rating = 300,
+ .read = arc_counter_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+#endif
+
+/********** Clock Event Device *********/
+
+/*
+ * Arm the timer to interrupt after @limit cycles
+ * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
+ */
+static void arc_timer_event_setup(unsigned int limit)
+{
+ write_aux_reg(ARC_REG_TIMER0_LIMIT, limit);
+ write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
+
+ write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+/*
+ * Acknowledge the interrupt (oneshot) and optionally re-arm it (periodic)
+ * -Any write to CTRL Reg will ack the intr (NH bit: Count when not halted)
+ * -Rearming is done by setting the IE bit
+ *
+ * Small optimisation: Normal code would have been
+ * if (irq_reenable)
+ * CTRL_REG = (IE | NH);
+ * else
+ * CTRL_REG = NH;
+ * However since IE is BIT0 we can fold the branch
+ */
+static void arc_timer_event_ack(unsigned int irq_reenable)
+{
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+}
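+
+/*
+ * Worked example of the fold above: timer_irq_handler() passes irq_reenable
+ * as 0 or 1, and with IE being bit 0 that yields
+ *	0 | TIMER_CTRL_NH  ==  NH		(oneshot: leave intr disabled)
+ *	1 | TIMER_CTRL_NH  ==  IE | NH		(periodic: re-arm)
+ * i.e. exactly the if/else spelled out above, minus the branch.
+ */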
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ arc_timer_event_setup(delta);
+ return 0;
+}
+
+static void arc_clkevent_set_mode(enum clock_event_mode mode,
+ struct clock_event_device *dev)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_PERIODIC:
+ arc_timer_event_setup(arc_get_core_freq() / HZ);
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ break;
+ default:
+ break;
+ }
+
+ return;
+}
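+
+/*
+ * In the periodic case above TIMER0 is re-armed every core_freq/HZ cycles;
+ * e.g. with an (assumed) 80 MHz core clock and HZ=100 that is 800000 cycles,
+ * i.e. one interrupt every 10 ms.
+ */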
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+ .name = "ARC Timer0",
+ .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+ .mode = CLOCK_EVT_MODE_UNUSED,
+ .rating = 300,
+ .irq = TIMER0_IRQ, /* hardwired, no need for resources */
+ .set_next_event = arc_clkevent_set_next_event,
+ .set_mode = arc_clkevent_set_mode,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+ struct clock_event_device *clk = &__get_cpu_var(arc_clockevent_device);
+
+ arc_timer_event_ack(clk->mode == CLOCK_EVT_MODE_PERIODIC);
+ clk->event_handler(clk);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction arc_timer_irq = {
+ .name = "Timer0 (clock-evt-dev)",
+ .flags = IRQF_TIMER | IRQF_PERCPU,
+ .handler = timer_irq_handler,
+};
+
+/*
+ * Setup the local event timer for @cpu
+ * N.B. weak so that some exotic ARC SoCs can completely override it
+ */
+void __attribute__((weak)) __cpuinit arc_local_timer_setup(unsigned int cpu)
+{
+ struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
+
+ clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
+
+ clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
+ clk->cpumask = cpumask_of(cpu);
+
+ clockevents_register_device(clk);
+
+ /*
+ * setup the per-cpu timer IRQ handler - for all cpus
+ * For non boot CPU explicitly unmask at intc
+ * setup_irq() -> .. -> irq_startup() already does this on boot-cpu
+ */
+ if (!cpu)
+ setup_irq(TIMER0_IRQ, &arc_timer_irq);
+ else
+ arch_unmask_irq(TIMER0_IRQ);
+}
+
+/*
+ * Called from start_kernel() - boot CPU only
+ *
+ * -Sets up h/w timers as applicable on boot cpu
+ * -Also sets up any global state needed for timer subsystem:
+ * - for "counting" timer, registers a clocksource, usable across CPUs
+ * (provided that underlying counter h/w is synchronized across cores)
+ * - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
+ */
+void __init time_init(void)
+{
+ /*
+ * sets up the free-flowing counter used for timekeeping; the setup
+ * routine also returns whether the counter is usable as a clocksource
+ */
+ if (arc_counter_setup())
+ /*
+ * CLK up to 4.29 GHz can be safely represented in 32 bits
+ * because the max 32 bit number is 4,294,967,295
+ */
+ clocksource_register_hz(&arc_counter, arc_get_core_freq());
+
+ /* sets up the periodic event timer */
+ arc_local_timer_setup(smp_processor_id());
+
+ if (machine_desc->init_time)
+ machine_desc->init_time();
+}
diff --git a/arch/arc/kernel/traps.c b/arch/arc/kernel/traps.c
new file mode 100644
index 00000000000..7496995371e
--- /dev/null
+++ b/arch/arc/kernel/traps.c
@@ -0,0 +1,170 @@
+/*
+ * Traps/Non-MMU Exception handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011
+ * -user-space unaligned access emulation
+ *
+ * Rahul Trivedi: Codito Technologies 2004
+ */
+
+#include <linux/sched.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/kprobes.h>
+#include <asm/unaligned.h>
+#include <asm/kgdb.h>
+
+void __init trap_init(void)
+{
+ return;
+}
+
+void die(const char *str, struct pt_regs *regs, unsigned long address,
+ unsigned long cause_reg)
+{
+ show_kernel_fault_diag(str, regs, address, cause_reg);
+
+ /* DEAD END */
+ __asm__("flag 1");
+}
+
+/*
+ * Helper called for bulk of exceptions NOT needing specific handling
+ * -for user faults enqueues requested signal
+ * -for kernel, chk if due to copy_(to|from)_user, otherwise die()
+ */
+static noinline int handle_exception(unsigned long cause, char *str,
+ struct pt_regs *regs, siginfo_t *info)
+{
+ if (user_mode(regs)) {
+ struct task_struct *tsk = current;
+
+ tsk->thread.fault_address = (__force unsigned int)info->si_addr;
+ tsk->thread.cause_code = cause;
+
+ force_sig_info(info->si_signo, info, tsk);
+
+ } else {
+ /* If not due to copy_(to|from)_user, we are doomed */
+ if (fixup_exception(regs))
+ return 0;
+
+ die(str, regs, (unsigned long)info->si_addr, cause);
+ }
+
+ return 1;
+}
+
+#define DO_ERROR_INFO(signr, str, name, sicode) \
+int name(unsigned long cause, unsigned long address, struct pt_regs *regs) \
+{ \
+ siginfo_t info = { \
+ .si_signo = signr, \
+ .si_errno = 0, \
+ .si_code = sicode, \
+ .si_addr = (void __user *)address, \
+ }; \
+ return handle_exception(cause, str, regs, &info);\
+}
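+
+/*
+ * E.g. DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
+ * below expands (roughly) to:
+ *
+ *	int do_memory_error(unsigned long cause, unsigned long address,
+ *			    struct pt_regs *regs)
+ *	{
+ *		siginfo_t info = {
+ *			.si_signo = SIGBUS,
+ *			.si_code  = BUS_ADRERR,
+ *			.si_addr  = (void __user *)address,
+ *		};
+ *		return handle_exception(cause, "Invalid Mem Access", regs, &info);
+ *	}
+ */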
+
+/*
+ * Entry points for exceptions NOT needing specific handling
+ */
+DO_ERROR_INFO(SIGILL, "Priv Op/Disabled Extn", do_privilege_fault, ILL_PRVOPC)
+DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC)
+DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
+DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR)
+DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
+
+#ifdef CONFIG_ARC_MISALIGN_ACCESS
+/*
+ * Entry Point for Misaligned Data access Exception, for emulating in software
+ */
+int do_misaligned_access(unsigned long cause, unsigned long address,
+ struct pt_regs *regs, struct callee_regs *cregs)
+{
+ if (misaligned_fixup(address, regs, cause, cregs) != 0) {
+ siginfo_t info;
+
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void __user *)address;
+ return handle_exception(cause, "Misaligned Access", regs,
+ &info);
+ }
+ return 0;
+}
+
+#else
+DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR)
+#endif
+
+/*
+ * Entry point for misc errors such as Nested Exceptions
+ * -Duplicate TLB entry is handled separately though
+ */
+void do_machine_check_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ die("Machine Check Exception", regs, address, cause);
+}
+
+
+/*
+ * Entry point for traps induced by ARCompact TRAP_S <n> insn
+ * This is the same family as the TRAP0/SWI insn (they use the same vector).
+ * The only difference is that SWI takes no operand, while TRAP_S does,
+ * which is reflected in the ECR Reg as an 8 bit param.
+ * Thus TRAP_S <n> can be used for specific purposes:
+ * -1 used for software breakpointing (gdb)
+ * -2 used by kprobes
+ */
+void do_non_swi_trap(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ unsigned int param = cause & 0xff;
+
+ switch (param) {
+ case 1:
+ trap_is_brkpt(cause, address, regs);
+ break;
+
+ case 2:
+ trap_is_kprobe(param, address, regs);
+ break;
+
+ case 3:
+ case 4:
+ kgdb_trap(regs, param);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/*
+ * Entry point for Instruction Error Exception
+ * -For a corner case, ARC kprobes implementation resorts to using
+ * this exception, hence the check
+ */
+void do_insterror_or_kprobe(unsigned long cause,
+ unsigned long address,
+ struct pt_regs *regs)
+{
+ /* Check if this exception is caused by kprobes */
+ if (notify_die(DIE_IERR, "kprobe_ierr", regs, address,
+ cause, SIGILL) == NOTIFY_STOP)
+ return;
+
+ insterror_is_error(cause, address, regs);
+}
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
new file mode 100644
index 00000000000..7c10873c311
--- /dev/null
+++ b/arch/arc/kernel/troubleshoot.c
@@ -0,0 +1,322 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ */
+
+#include <linux/ptrace.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kdev_t.h>
+#include <linux/fs_struct.h>
+#include <linux/proc_fs.h>
+#include <linux/file.h>
+#include <asm/arcregs.h>
+
+/*
+ * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
+ * -Prints 3 regs per line and a newline.
+ * -So that callee regs continue right after scratch regs, the newline
+ *  needs special handling.
+ */
+static noinline void print_reg_file(long *reg_rev, int start_num)
+{
+ unsigned int i;
+ char buf[512];
+ int n = 0, len = sizeof(buf);
+
+ /* weird loop because pt_regs regs rev r12..r0, r25..r13 */
+ for (i = start_num; i < start_num + 13; i++) {
+ n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t",
+ i, (unsigned long)*reg_rev);
+
+ if (((i + 1) % 3) == 0)
+ n += scnprintf(buf + n, len - n, "\n");
+
+ reg_rev--;
+ }
+
+ if (start_num != 0)
+ n += scnprintf(buf + n, len - n, "\n\n");
+
+ pr_info("%s", buf);
+}
+
+static void show_callee_regs(struct callee_regs *cregs)
+{
+ print_reg_file(&(cregs->r13), 13);
+}
+
+void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+{
+ struct path path;
+ char *path_nm = NULL;
+ struct mm_struct *mm;
+ struct file *exe_file;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ goto done;
+
+ exe_file = get_mm_exe_file(mm);
+ mmput(mm);
+
+ if (exe_file) {
+ path = exe_file->f_path;
+ path_get(&exe_file->f_path);
+ fput(exe_file);
+ path_nm = d_path(&path, buf, 255);
+ path_put(&path);
+ }
+
+done:
+ pr_info("%s, TGID %u\n", path_nm, tsk->tgid);
+}
+EXPORT_SYMBOL(print_task_path_n_nm);
+
+static void show_faulting_vma(unsigned long address, char *buf)
+{
+ struct vm_area_struct *vma;
+ struct inode *inode;
+ unsigned long ino = 0;
+ dev_t dev = 0;
+ char *nm = buf;
+
+ vma = find_vma(current->active_mm, address);
+
+ /* check against the find_vma( ) behaviour which returns the next VMA
+ * if the containing VMA is not found
+ */
+ if (vma && (vma->vm_start <= address)) {
+ struct file *file = vma->vm_file;
+ if (file) {
+ struct path *path = &file->f_path;
+ nm = d_path(path, buf, PAGE_SIZE - 1);
+ inode = vma->vm_file->f_path.dentry->d_inode;
+ dev = inode->i_sb->s_dev;
+ ino = inode->i_ino;
+ }
+ pr_info(" @off 0x%lx in [%s]\n"
+ " VMA: 0x%08lx to 0x%08lx\n\n",
+ address - vma->vm_start, nm, vma->vm_start, vma->vm_end);
+ } else
+ pr_info(" @No matching VMA found\n");
+}
+
+static void show_ecr_verbose(struct pt_regs *regs)
+{
+ unsigned int vec, cause_code, cause_reg;
+ unsigned long address;
+
+ cause_reg = current->thread.cause_code;
+ pr_info("\n[ECR]: 0x%08x => ", cause_reg);
+
+ /* For Data fault, this is data address not instruction addr */
+ address = current->thread.fault_address;
+
+ vec = cause_reg >> 16;
+ cause_code = (cause_reg >> 8) & 0xFF;
+
+ /* For DTLB Miss or ProtV, display the memory involved too */
+ if (vec == ECR_V_DTLB_MISS) {
+ pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n",
+ (cause_code == 0x01) ? "Read From" :
+ ((cause_code == 0x02) ? "Write to" : "EX"),
+ address, regs->ret);
+ } else if (vec == ECR_V_ITLB_MISS) {
+ pr_cont("Insn could not be fetched\n");
+ } else if (vec == ECR_V_MACH_CHK) {
+ pr_cont("%s\n", (cause_code == 0x0) ?
+ "Double Fault" : "Other Fatal Err");
+
+ } else if (vec == ECR_V_PROTV) {
+ if (cause_code == ECR_C_PROTV_INST_FETCH)
+ pr_cont("Execute from Non-exec Page\n");
+ else if (cause_code == ECR_C_PROTV_LOAD)
+ pr_cont("Read from Non-readable Page\n");
+ else if (cause_code == ECR_C_PROTV_STORE)
+ pr_cont("Write to Non-writable Page\n");
+ else if (cause_code == ECR_C_PROTV_XCHG)
+ pr_cont("Data exchange protection violation\n");
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
+ } else if (vec == ECR_V_INSN_ERR) {
+ pr_cont("Illegal Insn\n");
+ } else {
+ pr_cont("Check Programmer's Manual\n");
+ }
+}
+
+/************************************************************************
+ * API called by rest of kernel
+ ***********************************************************************/
+
+void show_regs(struct pt_regs *regs)
+{
+ struct task_struct *tsk = current;
+ struct callee_regs *cregs;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_TEMPORARY);
+ if (!buf)
+ return;
+
+ print_task_path_n_nm(tsk, buf);
+
+ if (current->thread.cause_code)
+ show_ecr_verbose(regs);
+
+ pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address);
+ pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret);
+
+ show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+
+ /* can't use print_vma_addr() yet as it doesn't check for
+ * non-inclusive vma
+ */
+
+ /* print special regs */
+ pr_info("status32: 0x%08lx\n", regs->status32);
+ pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp);
+ pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n",
+ regs->bta, regs->blink);
+ pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n",
+ regs->lp_start, regs->lp_end, regs->lp_count);
+
+ /* print regs->r0 thru regs->r12
+ * Sequential printing was generating horrible code
+ */
+ print_reg_file(&(regs->r0), 0);
+
+ /* If Callee regs were saved, display them too */
+ cregs = (struct callee_regs *)current->thread.callee_reg;
+ if (cregs)
+ show_callee_regs(cregs);
+
+ free_page((unsigned long)buf);
+}
+
+void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
+ unsigned long address, unsigned long cause_reg)
+{
+ current->thread.fault_address = address;
+ current->thread.cause_code = cause_reg;
+
+ /* Caller and Callee regs */
+ show_regs(regs);
+
+ /* Show stack trace if this Fatality happened in kernel mode */
+ if (!user_mode(regs))
+ show_stacktrace(current, regs);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mount.h>
+#include <linux/pagemap.h>
+#include <linux/init.h>
+#include <linux/namei.h>
+#include <linux/debugfs.h>
+
+static struct dentry *test_dentry;
+static struct dentry *test_dir;
+static struct dentry *test_u32_dentry;
+
+static u32 clr_on_read = 1;
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+u32 numitlb, numdtlb, num_pte_not_present;
+
+static int fill_display_data(char *kbuf)
+{
+ size_t num = 0;
+ num += sprintf(kbuf + num, "I-TLB Miss %x\n", numitlb);
+ num += sprintf(kbuf + num, "D-TLB Miss %x\n", numdtlb);
+ num += sprintf(kbuf + num, "PTE not present %x\n", num_pte_not_present);
+
+ if (clr_on_read)
+ numitlb = numdtlb = num_pte_not_present = 0;
+
+ return num;
+}
+
+static int tlb_stats_open(struct inode *inode, struct file *file)
+{
+ file->private_data = (void *)__get_free_page(GFP_KERNEL);
+ return 0;
+}
+
+/* called on user read(): display the counters */
+static ssize_t tlb_stats_output(struct file *file, /* file descriptor */
+ char __user *user_buf, /* user buffer */
+ size_t len, /* length of buffer */
+ loff_t *offset) /* offset in the file */
+{
+ size_t num;
+ char *kbuf = (char *)file->private_data;
+
+ /* All of the data can be shoved in one iteration */
+ if (*offset != 0)
+ return 0;
+
+ num = fill_display_data(kbuf);
+
+ /* simple_read_from_buffer() is a helper for copying to user space.
+ * It copies up to @2 (num) bytes from the kernel buffer @4 (kbuf) at offset
+ * @3 (offset) into the user space address starting at @1 (user_buf).
+ * @5 (len) is the max size of the user buffer.
+ */
+ return simple_read_from_buffer(user_buf, num, offset, kbuf, len);
+}
+
+/* called on user write : clears the counters */
+static ssize_t tlb_stats_clear(struct file *file, const char __user *user_buf,
+ size_t length, loff_t *offset)
+{
+ numitlb = numdtlb = num_pte_not_present = 0;
+ return length;
+}
+
+static int tlb_stats_close(struct inode *inode, struct file *file)
+{
+ free_page((unsigned long)(file->private_data));
+ return 0;
+}
+
+static const struct file_operations tlb_stats_file_ops = {
+ .read = tlb_stats_output,
+ .write = tlb_stats_clear,
+ .open = tlb_stats_open,
+ .release = tlb_stats_close
+};
+#endif
+
+static int __init arc_debugfs_init(void)
+{
+ test_dir = debugfs_create_dir("arc", NULL);
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ test_dentry = debugfs_create_file("tlb_stats", 0444, test_dir, NULL,
+ &tlb_stats_file_ops);
+#endif
+
+ test_u32_dentry =
+ debugfs_create_u32("clr_on_read", 0444, test_dir, &clr_on_read);
+
+ return 0;
+}
+
+module_init(arc_debugfs_init);
+
+static void __exit arc_debugfs_exit(void)
+{
+ debugfs_remove(test_u32_dentry);
+ debugfs_remove(test_dentry);
+ debugfs_remove(test_dir);
+}
+module_exit(arc_debugfs_exit);
+
+#endif
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
new file mode 100644
index 00000000000..4cd81633feb
--- /dev/null
+++ b/arch/arc/kernel/unaligned.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg : May 2011
+ * -Adapted (from .26 to .35)
+ * -original contribution by Tim.yao@amlogic.com
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <asm/disasm.h>
+
+#define __get8_unaligned_check(val, addr, err) \
+ __asm__( \
+ "1: ldb.ab %1, [%2, 1]\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "3: mov %0, 1\n" \
+ " b 2b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 3b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (val), "=r" (addr) \
+ : "0" (err), "2" (addr))
+
+#define get16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v ; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define get32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v, a = addr; \
+ __get8_unaligned_check(v, a, err); \
+ val = v << 0; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 8; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 16; \
+ __get8_unaligned_check(v, a, err); \
+ val |= v << 24; \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put16_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ \
+ __asm__( \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb %1, [%2]\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "4: mov %0, 1\n" \
+ " b 3b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 4b\n" \
+ " .long 2b, 4b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+#define put32_unaligned_check(val, addr) \
+ do { \
+ unsigned int err = 0, v = val, a = addr;\
+ __asm__( \
+ \
+ "1: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "2: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "3: stb.ab %1, [%2, 1]\n" \
+ " lsr %1, %1, 8\n" \
+ "4: stb %1, [%2]\n" \
+ "5:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .align 4\n" \
+ "6: mov %0, 1\n" \
+ " b 5b\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .align 4\n" \
+ " .long 1b, 6b\n" \
+ " .long 2b, 6b\n" \
+ " .long 3b, 6b\n" \
+ " .long 4b, 6b\n" \
+ " .previous\n" \
+ : "=r" (err), "=&r" (v), "=&r" (a) \
+ : "0" (err), "1" (v), "2" (a)); \
+ \
+ if (err) \
+ goto fault; \
+ } while (0)
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
+
+static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ int val;
+
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);
+
+ if (state->aa == 2)
+ state->src2 = 0;
+ }
+
+ if (state->zz == 0) {
+ get32_unaligned_check(val, state->src1 + state->src2);
+ } else {
+ get16_unaligned_check(val, state->src1 + state->src2);
+
+ if (state->x)
+ val = (val << 16) >> 16;
+ }
+
+ if (state->pref == 0)
+ set_reg(state->dest, val, regs, cregs);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
+ struct callee_regs *cregs)
+{
+ /* register write back */
+ if ((state->aa == 1) || (state->aa == 2)) {
+ set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
+
+ if (state->aa == 3)
+ state->src3 = 0;
+ } else if (state->aa == 3) {
+ if (state->zz == 2) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
+ regs, cregs);
+ } else if (!state->zz) {
+ set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
+ regs, cregs);
+ } else {
+ goto fault;
+ }
+ }
+
+ /* write fix-up */
+ if (!state->zz)
+ put32_unaligned_check(state->src1, state->src2 + state->src3);
+ else
+ put16_unaligned_check(state->src1, state->src2 + state->src3);
+
+ return;
+
+fault: state->fault = 1;
+}
+
+/*
+ * Handle an unaligned access
+ * Returns 0 if successfully handled, 1 if some error happened
+ */
+int misaligned_fixup(unsigned long address, struct pt_regs *regs,
+ unsigned long cause, struct callee_regs *cregs)
+{
+ struct disasm_state state;
+ char buf[TASK_COMM_LEN];
+
+ /* handle user mode only and only if enabled by sysadmin */
+ if (!user_mode(regs) || !unaligned_enabled)
+ return 1;
+
+ if (no_unaligned_warning) {
+ pr_warn_once("%s(%d) made unaligned access which was emulated"
+ " by kernel assist\n. This can degrade application"
+ " performance significantly\n. To enable further"
+ " logging of such instances, please \n"
+ " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
+ get_task_comm(buf, current), task_pid_nr(current));
+ } else {
+ /* Add rate limiting if it gets down to it */
+ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
+ get_task_comm(buf, current), task_pid_nr(current),
+ address, regs->ret);
+
+ }
+
+ disasm_instr(regs->ret, &state, 1, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ /* ldb/stb should not have unaligned exception */
+ if ((state.zz == 1) || (state.di))
+ goto fault;
+
+ if (!state.write)
+ fixup_load(&state, regs, cregs);
+ else
+ fixup_store(&state, regs, cregs);
+
+ if (state.fault)
+ goto fault;
+
+ if (delay_mode(regs)) {
+ regs->ret = regs->bta;
+ regs->status32 &= ~STATUS_DE_MASK;
+ } else {
+ regs->ret += state.instr_len;
+ }
+
+ return 0;
+
+fault:
+ pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
+ state.words[0], address);
+
+ return 1;
+}
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
new file mode 100644
index 00000000000..a8d02223da4
--- /dev/null
+++ b/arch/arc/kernel/unwind.c
@@ -0,0 +1,1329 @@
+/*
+ * Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2002-2006 Novell, Inc.
+ * Jan Beulich <jbeulich@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * A simple API for unwinding kernel stacks. This is used for
+ * debugging and error reporting purposes. The kernel doesn't need
+ * full-blown stack unwinding with all the bells and whistles, so there
+ * is not much point in implementing the full Dwarf2 unwind API.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <asm/sections.h>
+#include <asm/unaligned.h>
+#include <asm/unwind.h>
+
+extern char __start_unwind[], __end_unwind[];
+/* extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];*/
+
+/* #define UNWIND_DEBUG */
+
+#ifdef UNWIND_DEBUG
+int dbg_unw;
+#define unw_debug(fmt, ...) \
+do { \
+ if (dbg_unw) \
+ pr_info(fmt, ##__VA_ARGS__); \
+} while (0);
+#else
+#define unw_debug(fmt, ...)
+#endif
+
+#define MAX_STACK_DEPTH 8
+
+#define EXTRA_INFO(f) { \
+ BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \
+ % FIELD_SIZEOF(struct unwind_frame_info, f)) \
+ + offsetof(struct unwind_frame_info, f) \
+ / FIELD_SIZEOF(struct unwind_frame_info, f), \
+ FIELD_SIZEOF(struct unwind_frame_info, f) \
+ }
+#define PTREGS_INFO(f) EXTRA_INFO(regs.f)
+
+static const struct {
+ unsigned offs:BITS_PER_LONG / 2;
+ unsigned width:BITS_PER_LONG / 2;
+} reg_info[] = {
+UNW_REGISTER_INFO};
+
+#undef PTREGS_INFO
+#undef EXTRA_INFO
+
+#ifndef REG_INVALID
+#define REG_INVALID(r) (reg_info[r].width == 0)
+#endif
+
+#define DW_CFA_nop 0x00
+#define DW_CFA_set_loc 0x01
+#define DW_CFA_advance_loc1 0x02
+#define DW_CFA_advance_loc2 0x03
+#define DW_CFA_advance_loc4 0x04
+#define DW_CFA_offset_extended 0x05
+#define DW_CFA_restore_extended 0x06
+#define DW_CFA_undefined 0x07
+#define DW_CFA_same_value 0x08
+#define DW_CFA_register 0x09
+#define DW_CFA_remember_state 0x0a
+#define DW_CFA_restore_state 0x0b
+#define DW_CFA_def_cfa 0x0c
+#define DW_CFA_def_cfa_register 0x0d
+#define DW_CFA_def_cfa_offset 0x0e
+#define DW_CFA_def_cfa_expression 0x0f
+#define DW_CFA_expression 0x10
+#define DW_CFA_offset_extended_sf 0x11
+#define DW_CFA_def_cfa_sf 0x12
+#define DW_CFA_def_cfa_offset_sf 0x13
+#define DW_CFA_val_offset 0x14
+#define DW_CFA_val_offset_sf 0x15
+#define DW_CFA_val_expression 0x16
+#define DW_CFA_lo_user 0x1c
+#define DW_CFA_GNU_window_save 0x2d
+#define DW_CFA_GNU_args_size 0x2e
+#define DW_CFA_GNU_negative_offset_extended 0x2f
+#define DW_CFA_hi_user 0x3f
+
+#define DW_EH_PE_FORM 0x07
+#define DW_EH_PE_native 0x00
+#define DW_EH_PE_leb128 0x01
+#define DW_EH_PE_data2 0x02
+#define DW_EH_PE_data4 0x03
+#define DW_EH_PE_data8 0x04
+#define DW_EH_PE_signed 0x08
+#define DW_EH_PE_ADJUST 0x70
+#define DW_EH_PE_abs 0x00
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80
+#define DW_EH_PE_omit 0xff
+
+typedef unsigned long uleb128_t;
+typedef signed long sleb128_t;
+
+static struct unwind_table {
+ struct {
+ unsigned long pc;
+ unsigned long range;
+ } core, init;
+ const void *address;
+ unsigned long size;
+ const unsigned char *header;
+ unsigned long hdrsz;
+ struct unwind_table *link;
+ const char *name;
+} root_table;
+
+struct unwind_item {
+ enum item_location {
+ Nowhere,
+ Memory,
+ Register,
+ Value
+ } where;
+ uleb128_t value;
+};
+
+struct unwind_state {
+ uleb128_t loc, org;
+ const u8 *cieStart, *cieEnd;
+ uleb128_t codeAlign;
+ sleb128_t dataAlign;
+ struct cfa {
+ uleb128_t reg, offs;
+ } cfa;
+ struct unwind_item regs[ARRAY_SIZE(reg_info)];
+ unsigned stackDepth:8;
+ unsigned version:8;
+ const u8 *label;
+ const u8 *stack[MAX_STACK_DEPTH];
+};
+
+static const struct cfa badCFA = { ARRAY_SIZE(reg_info), 1 };
+
+static struct unwind_table *find_table(unsigned long pc)
+{
+ struct unwind_table *table;
+
+ for (table = &root_table; table; table = table->link)
+ if ((pc >= table->core.pc
+ && pc < table->core.pc + table->core.range)
+ || (pc >= table->init.pc
+ && pc < table->init.pc + table->init.range))
+ break;
+
+ return table;
+}
+
+static unsigned long read_pointer(const u8 **pLoc,
+ const void *end, signed ptrType);
+
+static void init_unwind_table(struct unwind_table *table, const char *name,
+ const void *core_start, unsigned long core_size,
+ const void *init_start, unsigned long init_size,
+ const void *table_start, unsigned long table_size,
+ const u8 *header_start, unsigned long header_size)
+{
+ const u8 *ptr = header_start + 4;
+ const u8 *end = header_start + header_size;
+
+ table->core.pc = (unsigned long)core_start;
+ table->core.range = core_size;
+ table->init.pc = (unsigned long)init_start;
+ table->init.range = init_size;
+ table->address = table_start;
+ table->size = table_size;
+
+ /* See if the linker provided table looks valid. */
+ if (header_size <= 4
+ || header_start[0] != 1
+ || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
+ || header_start[2] == DW_EH_PE_omit
+ || read_pointer(&ptr, end, header_start[2]) <= 0
+ || header_start[3] == DW_EH_PE_omit)
+ header_start = NULL;
+
+ table->hdrsz = header_size;
+ smp_wmb();
+ table->header = header_start;
+ table->link = NULL;
+ table->name = name;
+}
+
+void __init arc_unwind_init(void)
+{
+ init_unwind_table(&root_table, "kernel", _text, _end - _text, NULL, 0,
+ __start_unwind, __end_unwind - __start_unwind,
+ NULL, 0);
+ /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+}
+
+static const u32 bad_cie, not_fde;
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
+static signed fde_pointer_type(const u32 *cie);
+
+struct eh_frame_hdr_table_entry {
+ unsigned long start, fde;
+};
+
+static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
+{
+ const struct eh_frame_hdr_table_entry *e1 = p1;
+ const struct eh_frame_hdr_table_entry *e2 = p2;
+
+ return (e1->start > e2->start) - (e1->start < e2->start);
+}
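+
+/*
+ * The (a > b) - (a < b) form yields -1, 0 or +1 without the overflow risk
+ * of a plain subtraction, which is all sort() needs from a comparator.
+ */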
+
+static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
+{
+ struct eh_frame_hdr_table_entry *e1 = p1;
+ struct eh_frame_hdr_table_entry *e2 = p2;
+ unsigned long v;
+
+ v = e1->start;
+ e1->start = e2->start;
+ e2->start = v;
+ v = e1->fde;
+ e1->fde = e2->fde;
+ e2->fde = v;
+}
+
+static void __init setup_unwind_table(struct unwind_table *table,
+ void *(*alloc) (unsigned long))
+{
+ const u8 *ptr;
+ unsigned long tableSize = table->size, hdrSize;
+ unsigned n;
+ const u32 *fde;
+ struct {
+ u8 version;
+ u8 eh_frame_ptr_enc;
+ u8 fde_count_enc;
+ u8 table_enc;
+ unsigned long eh_frame_ptr;
+ unsigned int fde_count;
+ struct eh_frame_hdr_table_entry table[];
+ } __attribute__ ((__packed__)) *header;
+
+ if (table->header)
+ return;
+
+ if (table->hdrsz)
+ pr_warn(".eh_frame_hdr for '%s' present but unusable\n",
+ table->name);
+
+ if (tableSize & (sizeof(*fde) - 1))
+ return;
+
+ for (fde = table->address, n = 0;
+ tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ const u32 *cie = cie_for_fde(fde, table);
+ signed ptrType;
+
+ if (cie == &not_fde)
+ continue;
+ if (cie == NULL || cie == &bad_cie)
+ return;
+ ptrType = fde_pointer_type(cie);
+ if (ptrType < 0)
+ return;
+
+ ptr = (const u8 *)(fde + 2);
+ if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
+ ptrType)) {
+ /* FIXME_Rajesh We have 4 instances of null addresses
+ * instead of the initial loc addr
+ * return;
+ */
+ }
+ ++n;
+ }
+
+ if (tableSize || !n)
+ return;
+
+ hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
+ + 2 * n * sizeof(unsigned long);
+ header = alloc(hdrSize);
+ if (!header)
+ return;
+ header->version = 1;
+ header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
+ header->table_enc = DW_EH_PE_abs | DW_EH_PE_native;
+ put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
+ BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
+ % __alignof(typeof(header->fde_count)));
+ header->fde_count = n;
+
+ BUILD_BUG_ON(offsetof(typeof(*header), table)
+ % __alignof(typeof(*header->table)));
+ for (fde = table->address, tableSize = table->size, n = 0;
+ tableSize;
+ tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
+ /* const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); */
+ const u32 *cie = (const u32 *)(fde[1]);
+
+ if (fde[1] == 0xffffffff)
+ continue; /* this is a CIE */
+ ptr = (const u8 *)(fde + 2);
+ header->table[n].start = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde,
+ fde_pointer_type(cie));
+ header->table[n].fde = (unsigned long)fde;
+ ++n;
+ }
+ WARN_ON(n != header->fde_count);
+
+ sort(header->table,
+ n,
+ sizeof(*header->table),
+ cmp_eh_frame_hdr_table_entries, swap_eh_frame_hdr_table_entries);
+
+ table->hdrsz = hdrSize;
+ smp_wmb();
+ table->header = (const void *)header;
+}
+
+static void *__init balloc(unsigned long sz)
+{
+ return __alloc_bootmem_nopanic(sz,
+ sizeof(unsigned int),
+ __pa(MAX_DMA_ADDRESS));
+}
+
+void __init arc_unwind_setup(void)
+{
+ setup_unwind_table(&root_table, balloc);
+}
+
+#ifdef CONFIG_MODULES
+
+static struct unwind_table *last_table;
+
+/* Must be called with module_mutex held. */
+void *unwind_add_table(struct module *module, const void *table_start,
+ unsigned long table_size)
+{
+ struct unwind_table *table;
+
+ if (table_size <= 0)
+ return NULL;
+
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return NULL;
+
+ init_unwind_table(table, module->name,
+ module->module_core, module->core_size,
+ module->module_init, module->init_size,
+ table_start, table_size,
+ NULL, 0);
+
+#ifdef UNWIND_DEBUG
+ unw_debug("Table added for [%s] %lx %lx\n",
+ module->name, table->core.pc, table->core.range);
+#endif
+ if (last_table)
+ last_table->link = table;
+ else
+ root_table.link = table;
+ last_table = table;
+
+ return table;
+}
+
+struct unlink_table_info {
+ struct unwind_table *table;
+ int init_only;
+};
+
+static int unlink_table(void *arg)
+{
+ struct unlink_table_info *info = arg;
+ struct unwind_table *table = info->table, *prev;
+
+ for (prev = &root_table; prev->link && prev->link != table;
+ prev = prev->link)
+ ;
+
+ if (prev->link) {
+ if (info->init_only) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ info->table = NULL;
+ } else {
+ prev->link = table->link;
+ if (!prev->link)
+ last_table = prev;
+ }
+ } else
+ info->table = NULL;
+
+ return 0;
+}
+
+/* Must be called with module_mutex held. */
+void unwind_remove_table(void *handle, int init_only)
+{
+ struct unwind_table *table = handle;
+ struct unlink_table_info info;
+
+ if (!table || table == &root_table)
+ return;
+
+ if (init_only && table == last_table) {
+ table->init.pc = 0;
+ table->init.range = 0;
+ return;
+ }
+
+ info.table = table;
+ info.init_only = init_only;
+
+ unlink_table(&info); /* XXX: SMP */
+ kfree(table);
+}
+
+#endif /* CONFIG_MODULES */
+
+static uleb128_t get_uleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ uleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (uleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur++ & 0x80))
+ break;
+ }
+ *pcur = cur;
+
+ return value;
+}
+
+static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
+{
+ const u8 *cur = *pcur;
+ sleb128_t value;
+ unsigned shift;
+
+ for (shift = 0, value = 0; cur < end; shift += 7) {
+ if (shift + 7 > 8 * sizeof(value)
+ && (*cur & 0x7fU) >= (1U << (8 * sizeof(value) - shift))) {
+ cur = end + 1;
+ break;
+ }
+ value |= (sleb128_t) (*cur & 0x7f) << shift;
+ if (!(*cur & 0x80)) {
+ value |= -(*cur++ & 0x40) << shift;
+ break;
+ }
+ }
+ *pcur = cur;
+
+ return value;
+}
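+
+/*
+ * Worked example: the byte sequence 0xE5 0x8E 0x26 decodes as unsigned
+ * LEB128 to 0x65 + (0x0E << 7) + (0x26 << 14) = 624485, while 0x9B 0xF1
+ * 0x59 decodes as signed LEB128 to -624485 once the sign bit (0x40) of
+ * the final byte is extended.
+ */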
+
+static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
+{
+ const u32 *cie;
+
+ if (!*fde || (*fde & (sizeof(*fde) - 1)))
+ return &bad_cie;
+
+ if (fde[1] == 0xffffffff)
+ return &not_fde; /* this is a CIE */
+
+ if ((fde[1] & (sizeof(*fde) - 1)))
+/* || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) */
+ return NULL; /* this is not a valid FDE */
+
+ /* cie = fde + 1 - fde[1] / sizeof(*fde); */
+ cie = (u32 *) fde[1];
+
+ if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
+ || (*cie & (sizeof(*cie) - 1))
+ || (cie[1] != 0xffffffff))
+ return NULL; /* this is not a (valid) CIE */
+ return cie;
+}
+
+static unsigned long read_pointer(const u8 **pLoc, const void *end,
+ signed ptrType)
+{
+ unsigned long value = 0;
+ union {
+ const u8 *p8;
+ const u16 *p16u;
+ const s16 *p16s;
+ const u32 *p32u;
+ const s32 *p32s;
+ const unsigned long *pul;
+ } ptr;
+
+ if (ptrType < 0 || ptrType == DW_EH_PE_omit)
+ return 0;
+ ptr.p8 = *pLoc;
+ switch (ptrType & DW_EH_PE_FORM) {
+ case DW_EH_PE_data2:
+ if (end < (const void *)(ptr.p16u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned((u16 *) ptr.p16s++);
+ else
+ value = get_unaligned((u16 *) ptr.p16u++);
+ break;
+ case DW_EH_PE_data4:
+#ifdef CONFIG_64BIT
+ if (end < (const void *)(ptr.p32u + 1))
+ return 0;
+ if (ptrType & DW_EH_PE_signed)
+ value = get_unaligned(ptr.p32s++);
+ else
+ value = get_unaligned(ptr.p32u++);
+ break;
+ case DW_EH_PE_data8:
+ BUILD_BUG_ON(sizeof(u64) != sizeof(value));
+#else
+ BUILD_BUG_ON(sizeof(u32) != sizeof(value));
+#endif
+ case DW_EH_PE_native:
+ if (end < (const void *)(ptr.pul + 1))
+ return 0;
+ value = get_unaligned((unsigned long *)ptr.pul++);
+ break;
+ case DW_EH_PE_leb128:
+ BUILD_BUG_ON(sizeof(uleb128_t) > sizeof(value));
+ value = ptrType & DW_EH_PE_signed ? get_sleb128(&ptr.p8, end)
+ : get_uleb128(&ptr.p8, end);
+ if ((const void *)ptr.p8 > end)
+ return 0;
+ break;
+ default:
+ return 0;
+ }
+ switch (ptrType & DW_EH_PE_ADJUST) {
+ case DW_EH_PE_abs:
+ break;
+ case DW_EH_PE_pcrel:
+ value += (unsigned long)*pLoc;
+ break;
+ default:
+ return 0;
+ }
+ if ((ptrType & DW_EH_PE_indirect)
+ && __get_user(value, (unsigned long __user *)value))
+ return 0;
+ *pLoc = ptr.p8;
+
+ return value;
+}
+
+static signed fde_pointer_type(const u32 *cie)
+{
+ const u8 *ptr = (const u8 *)(cie + 2);
+ unsigned version = *ptr;
+
+ if (version != 1)
+ return -1; /* unsupported */
+
+ if (*++ptr) {
+ const char *aug;
+ const u8 *end = (const u8 *)(cie + 1) + *cie;
+ uleb128_t len;
+
+ /* check if augmentation size is first (and thus present) */
+ if (*ptr != 'z')
+ return -1;
+
+ /* check if augmentation string is nul-terminated */
+ aug = (const void *)ptr;
+ ptr = memchr(aug, 0, end - ptr);
+ if (ptr == NULL)
+ return -1;
+
+ ++ptr; /* skip terminator */
+ get_uleb128(&ptr, end); /* skip code alignment */
+ get_sleb128(&ptr, end); /* skip data alignment */
+ /* skip return address column */
+ version <= 1 ? (void) ++ptr : (void)get_uleb128(&ptr, end);
+ len = get_uleb128(&ptr, end); /* augmentation length */
+
+ if (ptr + len < ptr || ptr + len > end)
+ return -1;
+
+ end = ptr + len;
+ while (*++aug) {
+ if (ptr >= end)
+ return -1;
+ switch (*aug) {
+ case 'L':
+ ++ptr;
+ break;
+ case 'P':{
+ signed ptrType = *ptr++;
+
+ if (!read_pointer(&ptr, end, ptrType)
+ || ptr > end)
+ return -1;
+ }
+ break;
+ case 'R':
+ return *ptr;
+ default:
+ return -1;
+ }
+ }
+ }
+ return DW_EH_PE_native | DW_EH_PE_abs;
+}
+
+static int advance_loc(unsigned long delta, struct unwind_state *state)
+{
+ state->loc += delta * state->codeAlign;
+
+ /* FIXME_Rajesh: Probably we are defining for the initial range as well;
+ return delta > 0;
+ */
+ unw_debug("delta %3lu => loc 0x%lx: ", delta, state->loc);
+ return 1;
+}
+
+static void set_rule(uleb128_t reg, enum item_location where, uleb128_t value,
+ struct unwind_state *state)
+{
+ if (reg < ARRAY_SIZE(state->regs)) {
+ state->regs[reg].where = where;
+ state->regs[reg].value = value;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("r%lu: ", reg);
+ switch (where) {
+ case Nowhere:
+ unw_debug("s ");
+ break;
+ case Memory:
+ unw_debug("c(%lu) ", value);
+ break;
+ case Register:
+ unw_debug("r(%lu) ", value);
+ break;
+ case Value:
+ unw_debug("v(%lu) ", value);
+ break;
+ default:
+ break;
+ }
+#endif
+ }
+}
+
+static int processCFI(const u8 *start, const u8 *end, unsigned long targetLoc,
+ signed ptrType, struct unwind_state *state)
+{
+ union {
+ const u8 *p8;
+ const u16 *p16;
+ const u32 *p32;
+ } ptr;
+ int result = 1;
+ u8 opcode;
+
+ if (start != state->cieStart) {
+ state->loc = state->org;
+ result =
+ processCFI(state->cieStart, state->cieEnd, 0, ptrType,
+ state);
+ if (targetLoc == 0 && state->label == NULL)
+ return result;
+ }
+ for (ptr.p8 = start; result && ptr.p8 < end;) {
+ switch (*ptr.p8 >> 6) {
+ uleb128_t value;
+
+ case 0:
+ opcode = *ptr.p8++;
+
+ switch (opcode) {
+ case DW_CFA_nop:
+ unw_debug("cfa nop ");
+ break;
+ case DW_CFA_set_loc:
+ state->loc = read_pointer(&ptr.p8, end,
+ ptrType);
+ if (state->loc == 0)
+ result = 0;
+ unw_debug("cfa_set_loc: 0x%lx ", state->loc);
+ break;
+ case DW_CFA_advance_loc1:
+ unw_debug("\ncfa advance loc1:");
+ result = ptr.p8 < end
+ && advance_loc(*ptr.p8++, state);
+ break;
+ case DW_CFA_advance_loc2:
+ value = *ptr.p8++;
+ value += *ptr.p8++ << 8;
+ unw_debug("\ncfa advance loc2:");
+ result = ptr.p8 <= end + 2
+ /* && advance_loc(*ptr.p16++, state); */
+ && advance_loc(value, state);
+ break;
+ case DW_CFA_advance_loc4:
+ unw_debug("\ncfa advance loc4:");
+ result = ptr.p8 <= end + 4
+ && advance_loc(*ptr.p32++, state);
+ break;
+ case DW_CFA_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_offset_extended: ");
+ set_rule(value, Memory,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_offset_extended_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Memory,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_val_offset_sf:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value, Value,
+ get_sleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_restore_extended:
+ unw_debug("cfa_restore_extended: ");
+ case DW_CFA_undefined:
+ unw_debug("cfa_undefined: ");
+ case DW_CFA_same_value:
+ unw_debug("cfa_same_value: ");
+ set_rule(get_uleb128(&ptr.p8, end), Nowhere, 0,
+ state);
+ break;
+ case DW_CFA_register:
+ unw_debug("cfa_register: ");
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Register,
+ get_uleb128(&ptr.p8, end), state);
+ break;
+ case DW_CFA_remember_state:
+ unw_debug("cfa_remember_state: ");
+ if (ptr.p8 == state->label) {
+ state->label = NULL;
+ return 1;
+ }
+ if (state->stackDepth >= MAX_STACK_DEPTH)
+ return 0;
+ state->stack[state->stackDepth++] = ptr.p8;
+ break;
+ case DW_CFA_restore_state:
+ unw_debug("cfa_restore_state: ");
+ if (state->stackDepth) {
+ const uleb128_t loc = state->loc;
+ const u8 *label = state->label;
+
+ state->label =
+ state->stack[state->stackDepth - 1];
+ memcpy(&state->cfa, &badCFA,
+ sizeof(state->cfa));
+ memset(state->regs, 0,
+ sizeof(state->regs));
+ state->stackDepth = 0;
+ result =
+ processCFI(start, end, 0, ptrType,
+ state);
+ state->loc = loc;
+ state->label = label;
+ } else
+ return 0;
+ break;
+ case DW_CFA_def_cfa:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa: r%lu ", state->cfa.reg);
+ /*nobreak*/
+ case DW_CFA_def_cfa_offset:
+ state->cfa.offs = get_uleb128(&ptr.p8, end);
+ unw_debug("cfa_def_cfa_offset: 0x%lx ",
+ state->cfa.offs);
+ break;
+ case DW_CFA_def_cfa_sf:
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ /*nobreak */
+ case DW_CFA_def_cfa_offset_sf:
+ state->cfa.offs = get_sleb128(&ptr.p8, end)
+ * state->dataAlign;
+ break;
+ case DW_CFA_def_cfa_register:
+ unw_debug("cfa_def_cfa_regsiter: ");
+ state->cfa.reg = get_uleb128(&ptr.p8, end);
+ break;
+ /*todo case DW_CFA_def_cfa_expression: */
+ /*todo case DW_CFA_expression: */
+ /*todo case DW_CFA_val_expression: */
+ case DW_CFA_GNU_args_size:
+ get_uleb128(&ptr.p8, end);
+ break;
+ case DW_CFA_GNU_negative_offset_extended:
+ value = get_uleb128(&ptr.p8, end);
+ set_rule(value,
+ Memory,
+ (uleb128_t) 0 - get_uleb128(&ptr.p8,
+ end),
+ state);
+ break;
+ case DW_CFA_GNU_window_save:
+ default:
+ unw_debug("UNKNOW OPCODE 0x%x\n", opcode);
+ result = 0;
+ break;
+ }
+ break;
+ case 1:
+ unw_debug("\ncfa_adv_loc: ");
+ result = advance_loc(*ptr.p8++ & 0x3f, state);
+ break;
+ case 2:
+ unw_debug("cfa_offset: ");
+ value = *ptr.p8++ & 0x3f;
+ set_rule(value, Memory, get_uleb128(&ptr.p8, end),
+ state);
+ break;
+ case 3:
+ unw_debug("cfa_restore: ");
+ set_rule(*ptr.p8++ & 0x3f, Nowhere, 0, state);
+ break;
+ }
+
+ if (ptr.p8 > end)
+ result = 0;
+ if (result && targetLoc != 0 && targetLoc < state->loc)
+ return 1;
+ }
+
+ return result && ptr.p8 == end && (targetLoc == 0 || (
+ /*todo While in theory this should apply, gcc in practice omits
+ everything past the function prolog, and hence the location
+ never reaches the end of the function.
+ targetLoc < state->loc && */ state->label == NULL));
+}
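+
+/*
+ * Worked example (illustrative only, not taken from a real FDE; register
+ * numbers assume the ARCompact convention of r28 = SP, r31 = blink):
+ * a typical prologue could encode
+ *
+ *	DW_CFA_def_cfa      r28, 8	-> state->cfa = { .reg = 28, .offs = 8 }
+ *	DW_CFA_offset       r31, 1	-> set_rule(31, Memory, 1, state)
+ *	DW_CFA_advance_loc  4		-> state->loc advances by 4 code-align units
+ *
+ * i.e. "CFA is SP + 8, and blink is saved at CFA + 1 * data_align".
+ */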
+
+/* Unwind to the previous frame. Returns 0 if successful, negative
+ * number in case of an error. */
+int arc_unwind(struct unwind_frame_info *frame)
+{
+#define FRAME_REG(r, t) (((t *)frame)[reg_info[r].offs])
+ const u32 *fde = NULL, *cie = NULL;
+ const u8 *ptr = NULL, *end = NULL;
+ unsigned long pc = UNW_PC(frame) - frame->call_frame;
+ unsigned long startLoc = 0, endLoc = 0, cfa;
+ unsigned i;
+ signed ptrType = -1;
+ uleb128_t retAddrReg = 0;
+ const struct unwind_table *table;
+ struct unwind_state state;
+ unsigned long *fptr;
+ unsigned long addr;
+
+ unw_debug("\n\nUNWIND FRAME:\n");
+ unw_debug("PC: 0x%lx BLINK: 0x%lx, SP: 0x%lx, FP: 0x%x\n",
+ UNW_PC(frame), UNW_BLINK(frame), UNW_SP(frame),
+ UNW_FP(frame));
+
+ if (UNW_PC(frame) == 0)
+ return -EINVAL;
+
+#ifdef UNWIND_DEBUG
+ {
+ unsigned long *sptr = (unsigned long *)UNW_SP(frame);
+ unw_debug("\nStack Dump:\n");
+ for (i = 0; i < 20; i++, sptr++)
+ unw_debug("0x%p: 0x%lx\n", sptr, *sptr);
+ unw_debug("\n");
+ }
+#endif
+
+ table = find_table(pc);
+ if (table != NULL
+ && !(table->size & (sizeof(*fde) - 1))) {
+ const u8 *hdr = table->header;
+ unsigned long tableSize;
+
+ smp_rmb();
+ if (hdr && hdr[0] == 1) {
+ switch (hdr[3] & DW_EH_PE_FORM) {
+ case DW_EH_PE_native:
+ tableSize = sizeof(unsigned long);
+ break;
+ case DW_EH_PE_data2:
+ tableSize = 2;
+ break;
+ case DW_EH_PE_data4:
+ tableSize = 4;
+ break;
+ case DW_EH_PE_data8:
+ tableSize = 8;
+ break;
+ default:
+ tableSize = 0;
+ break;
+ }
+ ptr = hdr + 4;
+ end = hdr + table->hdrsz;
+ if (tableSize && read_pointer(&ptr, end, hdr[1])
+ == (unsigned long)table->address
+ && (i = read_pointer(&ptr, end, hdr[2])) > 0
+ && i == (end - ptr) / (2 * tableSize)
+ && !((end - ptr) % (2 * tableSize))) {
+ do {
+ const u8 *cur =
+ ptr + (i / 2) * (2 * tableSize);
+
+ startLoc = read_pointer(&cur,
+ cur + tableSize,
+ hdr[3]);
+ if (pc < startLoc)
+ i /= 2;
+ else {
+ ptr = cur - tableSize;
+ i = (i + 1) / 2;
+ }
+ } while (startLoc && i > 1);
+ if (i == 1
+ && (startLoc = read_pointer(&ptr,
+ ptr + tableSize,
+ hdr[3])) != 0
+ && pc >= startLoc)
+ fde = (void *)read_pointer(&ptr,
+ ptr +
+ tableSize,
+ hdr[3]);
+ }
+ }
+
+ if (fde != NULL) {
+ cie = cie_for_fde(fde, table);
+ ptr = (const u8 *)(fde + 2);
+ if (cie != NULL
+ && cie != &bad_cie
+ && cie != &not_fde
+ && (ptrType = fde_pointer_type(cie)) >= 0
+ && read_pointer(&ptr,
+ (const u8 *)(fde + 1) + *fde,
+ ptrType) == startLoc) {
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= endLoc)
+ fde = NULL;
+ } else
+ fde = NULL;
+ }
+ if (fde == NULL) {
+ for (fde = table->address, tableSize = table->size;
+ cie = NULL, tableSize > sizeof(*fde)
+ && tableSize - sizeof(*fde) >= *fde;
+ tableSize -= sizeof(*fde) + *fde,
+ fde += 1 + *fde / sizeof(*fde)) {
+ cie = cie_for_fde(fde, table);
+ if (cie == &bad_cie) {
+ cie = NULL;
+ break;
+ }
+ if (cie == NULL
+ || cie == &not_fde
+ || (ptrType = fde_pointer_type(cie)) < 0)
+ continue;
+ ptr = (const u8 *)(fde + 2);
+ startLoc = read_pointer(&ptr,
+ (const u8 *)(fde + 1) +
+ *fde, ptrType);
+ if (!startLoc)
+ continue;
+ if (!(ptrType & DW_EH_PE_indirect))
+ ptrType &=
+ DW_EH_PE_FORM | DW_EH_PE_signed;
+ endLoc =
+ startLoc + read_pointer(&ptr,
+ (const u8 *)(fde +
+ 1) +
+ *fde, ptrType);
+ if (pc >= startLoc && pc < endLoc)
+ break;
+ }
+ }
+ }
+ if (cie != NULL) {
+ memset(&state, 0, sizeof(state));
+ state.cieEnd = ptr; /* keep here temporarily */
+ ptr = (const u8 *)(cie + 2);
+ end = (const u8 *)(cie + 1) + *cie;
+ frame->call_frame = 1;
+ if ((state.version = *ptr) != 1)
+ cie = NULL; /* unsupported version */
+ else if (*++ptr) {
+ /* check if augmentation size is first (thus present) */
+ if (*ptr == 'z') {
+ while (++ptr < end && *ptr) {
+ switch (*ptr) {
+ /* chk for ignorable or already handled
+ * nul-terminated augmentation string */
+ case 'L':
+ case 'P':
+ case 'R':
+ continue;
+ case 'S':
+ frame->call_frame = 0;
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ if (ptr >= end || *ptr)
+ cie = NULL;
+ }
+ ++ptr;
+ }
+ if (cie != NULL) {
+		/* get code alignment factor */
+ state.codeAlign = get_uleb128(&ptr, end);
+		/* get data alignment factor */
+ state.dataAlign = get_sleb128(&ptr, end);
+ if (state.codeAlign == 0 || state.dataAlign == 0 || ptr >= end)
+ cie = NULL;
+ else {
+ retAddrReg =
+ state.version <= 1 ? *ptr++ : get_uleb128(&ptr,
+ end);
+ unw_debug("CIE Frame Info:\n");
+ unw_debug("return Address register 0x%lx\n",
+ retAddrReg);
+ unw_debug("data Align: %ld\n", state.dataAlign);
+ unw_debug("code Align: %lu\n", state.codeAlign);
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ ptr += augSize;
+ }
+ if (ptr > end || retAddrReg >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(retAddrReg)
+ || reg_info[retAddrReg].width !=
+ sizeof(unsigned long))
+ cie = NULL;
+ }
+ }
+ if (cie != NULL) {
+ state.cieStart = ptr;
+ ptr = state.cieEnd;
+ state.cieEnd = end;
+ end = (const u8 *)(fde + 1) + *fde;
+ /* skip augmentation */
+ if (((const char *)(cie + 2))[1] == 'z') {
+ uleb128_t augSize = get_uleb128(&ptr, end);
+
+ if ((ptr += augSize) > end)
+ fde = NULL;
+ }
+ }
+ if (cie == NULL || fde == NULL) {
+#ifdef CONFIG_FRAME_POINTER
+ unsigned long top, bottom;
+
+ top = STACK_TOP_UNW(frame->task);
+ bottom = STACK_BOTTOM_UNW(frame->task);
+#if FRAME_RETADDR_OFFSET < 0
+ if (UNW_SP(frame) < top && UNW_FP(frame) <= UNW_SP(frame)
+ && bottom < UNW_FP(frame)
+#else
+ if (UNW_SP(frame) > top && UNW_FP(frame) >= UNW_SP(frame)
+ && bottom > UNW_FP(frame)
+#endif
+ && !((UNW_SP(frame) | UNW_FP(frame))
+ & (sizeof(unsigned long) - 1))) {
+ unsigned long link;
+
+ if (!__get_user(link, (unsigned long *)
+ (UNW_FP(frame) + FRAME_LINK_OFFSET))
+#if FRAME_RETADDR_OFFSET < 0
+ && link > bottom && link < UNW_FP(frame)
+#else
+ && link > UNW_FP(frame) && link < bottom
+#endif
+ && !(link & (sizeof(link) - 1))
+ && !__get_user(UNW_PC(frame),
+ (unsigned long *)(UNW_FP(frame)
+ + FRAME_RETADDR_OFFSET)))
+ {
+ UNW_SP(frame) =
+ UNW_FP(frame) + FRAME_RETADDR_OFFSET
+#if FRAME_RETADDR_OFFSET < 0
+ -
+#else
+ +
+#endif
+ sizeof(UNW_PC(frame));
+ UNW_FP(frame) = link;
+ return 0;
+ }
+ }
+#endif
+ return -ENXIO;
+ }
+ state.org = startLoc;
+ memcpy(&state.cfa, &badCFA, sizeof(state.cfa));
+
+ unw_debug("\nProcess instructions\n");
+
+	/* process instructions
+	 * For ARC, we optimize by keeping blink (retAddrReg) with the
+	 * same_value rule in leaf functions, so we should not check
+	 * state.regs[retAddrReg].where == Nowhere
+	 */
+ if (!processCFI(ptr, end, pc, ptrType, &state)
+ || state.loc > endLoc
+/* || state.regs[retAddrReg].where == Nowhere */
+ || state.cfa.reg >= ARRAY_SIZE(reg_info)
+ || reg_info[state.cfa.reg].width != sizeof(unsigned long)
+ || state.cfa.offs % sizeof(unsigned long))
+ return -EIO;
+
+#ifdef UNWIND_DEBUG
+ unw_debug("\n");
+
+ unw_debug("\nRegister State Based on the rules parsed from FDE:\n");
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+
+ if (REG_INVALID(i))
+ continue;
+
+ switch (state.regs[i].where) {
+ case Nowhere:
+ break;
+ case Memory:
+ unw_debug(" r%d: c(%lu),", i, state.regs[i].value);
+ break;
+ case Register:
+ unw_debug(" r%d: r(%lu),", i, state.regs[i].value);
+ break;
+ case Value:
+ unw_debug(" r%d: v(%lu),", i, state.regs[i].value);
+ break;
+ }
+ }
+
+ unw_debug("\n");
+#endif
+
+ /* update frame */
+#ifndef CONFIG_AS_CFI_SIGNAL_FRAME
+ if (frame->call_frame
+ && !UNW_DEFAULT_RA(state.regs[retAddrReg], state.dataAlign))
+ frame->call_frame = 0;
+#endif
+ cfa = FRAME_REG(state.cfa.reg, unsigned long) + state.cfa.offs;
+ startLoc = min_t(unsigned long, UNW_SP(frame), cfa);
+ endLoc = max_t(unsigned long, UNW_SP(frame), cfa);
+ if (STACK_LIMIT(startLoc) != STACK_LIMIT(endLoc)) {
+ startLoc = min(STACK_LIMIT(cfa), cfa);
+ endLoc = max(STACK_LIMIT(cfa), cfa);
+ }
+
+ unw_debug("\nCFA reg: 0x%lx, offset: 0x%lx => 0x%lx\n",
+ state.cfa.reg, state.cfa.offs, cfa);
+
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i) {
+ if (REG_INVALID(i)) {
+ if (state.regs[i].where == Nowhere)
+ continue;
+ return -EIO;
+ }
+ switch (state.regs[i].where) {
+ default:
+ break;
+ case Register:
+ if (state.regs[i].value >= ARRAY_SIZE(reg_info)
+ || REG_INVALID(state.regs[i].value)
+ || reg_info[i].width >
+ reg_info[state.regs[i].value].width)
+ return -EIO;
+ switch (reg_info[state.regs[i].value].width) {
+ case sizeof(u8):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u8);
+ break;
+ case sizeof(u16):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u16);
+ break;
+ case sizeof(u32):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u32);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ state.regs[i].value =
+ FRAME_REG(state.regs[i].value, const u64);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ }
+ }
+
+ unw_debug("\nRegister state after evaluation with realtime Stack:\n");
+ fptr = (unsigned long *)(&frame->regs);
+ for (i = 0; i < ARRAY_SIZE(state.regs); ++i, fptr++) {
+
+ if (REG_INVALID(i))
+ continue;
+ switch (state.regs[i].where) {
+ case Nowhere:
+ if (reg_info[i].width != sizeof(UNW_SP(frame))
+ || &FRAME_REG(i, __typeof__(UNW_SP(frame)))
+ != &UNW_SP(frame))
+ continue;
+ UNW_SP(frame) = cfa;
+ break;
+ case Register:
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ FRAME_REG(i, u8) = state.regs[i].value;
+ break;
+ case sizeof(u16):
+ FRAME_REG(i, u16) = state.regs[i].value;
+ break;
+ case sizeof(u32):
+ FRAME_REG(i, u32) = state.regs[i].value;
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ FRAME_REG(i, u64) = state.regs[i].value;
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+ break;
+ case Value:
+ if (reg_info[i].width != sizeof(unsigned long))
+ return -EIO;
+ FRAME_REG(i, unsigned long) = cfa + state.regs[i].value
+ * state.dataAlign;
+ break;
+ case Memory:
+ addr = cfa + state.regs[i].value * state.dataAlign;
+
+ if ((state.regs[i].value * state.dataAlign)
+ % sizeof(unsigned long)
+ || addr < startLoc
+ || addr + sizeof(unsigned long) < addr
+ || addr + sizeof(unsigned long) > endLoc)
+ return -EIO;
+
+ switch (reg_info[i].width) {
+ case sizeof(u8):
+ __get_user(FRAME_REG(i, u8),
+ (u8 __user *)addr);
+ break;
+ case sizeof(u16):
+ __get_user(FRAME_REG(i, u16),
+ (u16 __user *)addr);
+ break;
+ case sizeof(u32):
+ __get_user(FRAME_REG(i, u32),
+ (u32 __user *)addr);
+ break;
+#ifdef CONFIG_64BIT
+ case sizeof(u64):
+ __get_user(FRAME_REG(i, u64),
+ (u64 __user *)addr);
+ break;
+#endif
+ default:
+ return -EIO;
+ }
+
+ break;
+ }
+ unw_debug("r%d: 0x%lx ", i, *fptr);
+ }
+
+ return 0;
+#undef FRAME_REG
+}
+EXPORT_SYMBOL(arc_unwind);
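+
+/*
+ * Illustrative sketch (hypothetical helper, not part of this patch): how a
+ * backtrace printer might drive arc_unwind().  It assumes the caller has
+ * already seeded @frame from pt_regs via the UNW_PC/UNW_SP accessors.
+ */
+static void __maybe_unused arc_show_trace_sketch(struct unwind_frame_info *frame)
+{
+	/* walk outwards until the unwinder errors out or PC becomes 0 */
+	while (UNW_PC(frame)) {
+		pr_cont(" [<%lx>]", UNW_PC(frame));
+		if (arc_unwind(frame) < 0)
+			break;
+	}
+	pr_cont("\n");
+}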
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..d3c92f52d44
--- /dev/null
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_ARCH(arc)
+ENTRY(_stext)
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+jiffies = jiffies_64 + 4;
+#else
+jiffies = jiffies_64;
+#endif
+
+SECTIONS
+{
+ /*
+ * ICCM starts at 0x8000_0000. So if kernel is relocated to some other
+ * address, make sure peripheral at 0x8z doesn't clash with ICCM
+ * Essentially vector is also in ICCM.
+ */
+
+ . = CONFIG_LINUX_LINK_BASE;
+
+ _int_vec_base_lds = .;
+ .vector : {
+ *(.vector)
+ . = ALIGN(PAGE_SIZE);
+ }
+
+#ifdef CONFIG_ARC_HAS_ICCM
+ .text.arcfp : {
+ *(.text.arcfp)
+ . = ALIGN(CONFIG_ARC_ICCM_SZ * 1024);
+ }
+#endif
+
+ /*
+	 * The reason for having a separate subsection .init.ramfs is to
+	 * prevent objdump from including it in kernel dumps
+ *
+ * Reason for having .init.ramfs above .init is to make sure that the
+ * binary blob is tucked away to one side, reducing the displacement
+ * between .init.text and .text, avoiding any possible relocation
+ * errors because of calls from .init.text to .text
+ * Yes such calls do exist. e.g.
+ * decompress_inflate.c:gunzip( ) -> zlib_inflate_workspace( )
+ */
+
+ __init_begin = .;
+
+ .init.ramfs : { INIT_RAM_FS }
+
+ . = ALIGN(PAGE_SIZE);
+ _stext = .;
+
+ HEAD_TEXT_SECTION
+ INIT_TEXT_SECTION(L1_CACHE_BYTES)
+
+ /* INIT_DATA_SECTION open-coded: special INIT_RAM_FS handling */
+ .init.data : {
+ INIT_DATA
+ INIT_SETUP(L1_CACHE_BYTES)
+ INIT_CALLS
+ CON_INITCALL
+ SECURITY_INITCALL
+ }
+
+ .init.arch.info : {
+ __arch_info_begin = .;
+ *(.arch.info.init)
+ __arch_info_end = .;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ /*
+	 * .exit.text is discarded at runtime, not link time, to deal with
+	 * references from .debug_frame
+	 * It will be init freed, being inside [__init_begin : __init_end]
+ */
+ .exit.text : { EXIT_TEXT }
+ .exit.data : { EXIT_DATA }
+
+ . = ALIGN(PAGE_SIZE);
+ __init_end = .;
+
+ .text : {
+ _text = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ }
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+ _etext = .;
+
+ _sdata = .;
+ RO_DATA_SECTION(PAGE_SIZE)
+
+ /*
+ * 1. this is .data essentially
+ * 2. THREAD_SIZE for init.task, must be kernel-stk sz aligned
+ */
+ RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+
+#ifdef CONFIG_ARC_DW2_UNWIND
+ . = ALIGN(PAGE_SIZE);
+ .debug_frame : {
+ __start_unwind = .;
+ *(.debug_frame)
+ __end_unwind = .;
+ }
+#else
+ /DISCARD/ : { *(.debug_frame) }
+#endif
+
+ NOTES
+
+ . = ALIGN(PAGE_SIZE);
+ _end = . ;
+
+ STABS_DEBUG
+ DISCARDS
+
+ .arcextmap 0 : {
+ *(.gnu.linkonce.arcextmap.*)
+ *(.arcextmap.*)
+ }
+
+	/* open-coded because we need .debug_frame separately for unwinding */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+
+#ifdef CONFIG_ARC_HAS_DCCM
+ . = CONFIG_ARC_DCCM_BASE;
+ __arc_dccm_base = .;
+ .data.arcfp : {
+ *(.data.arcfp)
+ }
+ . = ALIGN(CONFIG_ARC_DCCM_SZ * 1024);
+#endif
+}
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
new file mode 100644
index 00000000000..db46e200bab
--- /dev/null
+++ b/arch/arc/lib/Makefile
@@ -0,0 +1,9 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+
+lib-y := strchr-700.o strcmp.o strcpy-700.o strlen.o
+lib-y += memcmp.o memcpy-700.o memset.o
diff --git a/arch/arc/lib/memcmp.S b/arch/arc/lib/memcmp.S
new file mode 100644
index 00000000000..bc813d55b6c
--- /dev/null
+++ b/arch/arc/lib/memcmp.S
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#ifdef __LITTLE_ENDIAN__
+#define WORD2 r2
+#define SHIFT r3
+#else /* BIG ENDIAN */
+#define WORD2 r3
+#define SHIFT r2
+#endif
+
+ARC_ENTRY memcmp
+ or r12,r0,r1
+ asl_s r12,r12,30
+ sub r3,r2,1
+ brls r2,r12,.Lbytewise
+ ld r4,[r0,0]
+ ld r5,[r1,0]
+ lsr.f lp_count,r3,3
+ lpne .Loop_end
+ ld_s WORD2,[r0,4]
+ ld_s r12,[r1,4]
+ brne r4,r5,.Leven
+ ld.a r4,[r0,8]
+ ld.a r5,[r1,8]
+ brne WORD2,r12,.Lodd
+.Loop_end:
+ asl_s SHIFT,SHIFT,3
+ bhs_s .Last_cmp
+ brne r4,r5,.Leven
+ ld r4,[r0,4]
+ ld r5,[r1,4]
+#ifdef __LITTLE_ENDIAN__
+ nop_s
+ ; one more load latency cycle
+.Last_cmp:
+ xor r0,r4,r5
+ bset r0,r0,SHIFT
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ b.d .Leven_cmp
+ and r1,r1,24
+.Leven:
+ xor r0,r4,r5
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+.Leven_cmp:
+ asl r2,r4,r1
+ asl r12,r5,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+ .balign 4
+.Lodd:
+ xor r0,WORD2,r12
+ sub_s r1,r0,1
+ bic_s r1,r1,r0
+ norm r1,r1
+ ; slow track insn
+ and r1,r1,24
+ asl_s r2,r2,r1
+ asl_s r12,r12,r1
+ lsr_s r2,r2,1
+ lsr_s r12,r12,1
+ j_s.d [blink]
+ sub r0,r2,r12
+#else /* BIG ENDIAN */
+.Last_cmp:
+ neg_s SHIFT,SHIFT
+ lsr r4,r4,SHIFT
+ lsr r5,r5,SHIFT
+ ; slow track insn
+.Leven:
+ sub.f r0,r4,r5
+ mov.ne r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+.Lodd:
+ cmp_s WORD2,r12
+
+ mov_s r0,1
+ j_s.d [blink]
+ bset.cs r0,r0,31
+#endif /* ENDIAN */
+ .balign 4
+.Lbytewise:
+ breq r2,0,.Lnil
+ ldb r4,[r0,0]
+ ldb r5,[r1,0]
+ lsr.f lp_count,r3
+ lpne .Lbyte_end
+ ldb_s r3,[r0,1]
+ ldb r12,[r1,1]
+ brne r4,r5,.Lbyte_even
+ ldb.a r4,[r0,2]
+ ldb.a r5,[r1,2]
+ brne r3,r12,.Lbyte_odd
+.Lbyte_end:
+ bcc .Lbyte_even
+ brne r4,r5,.Lbyte_even
+ ldb_s r3,[r0,1]
+ ldb_s r12,[r1,1]
+.Lbyte_odd:
+ j_s.d [blink]
+ sub r0,r3,r12
+.Lbyte_even:
+ j_s.d [blink]
+ sub r0,r4,r5
+.Lnil:
+ j_s.d [blink]
+ mov r0,0
+ARC_EXIT memcmp
diff --git a/arch/arc/lib/memcpy-700.S b/arch/arc/lib/memcpy-700.S
new file mode 100644
index 00000000000..b64cc10ac91
--- /dev/null
+++ b/arch/arc/lib/memcpy-700.S
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY memcpy
+ or r3,r0,r1
+ asl_s r3,r3,30
+ mov_s r5,r0
+ brls.d r2,r3,.Lcopy_bytewise
+ sub.f r3,r2,1
+ ld_s r12,[r1,0]
+ asr.f lp_count,r3,3
+ bbit0.d r3,2,.Lnox4
+ bmsk_s r2,r2,1
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,4]
+.Lnox4:
+ lppnz .Lendloop
+ ld_s r3,[r1,4]
+ st.ab r12,[r5,4]
+ ld.a r12,[r1,8]
+ st.ab r3,[r5,4]
+.Lendloop:
+ breq r2,0,.Last_store
+ ld r3,[r5,0]
+#ifdef __LITTLE_ENDIAN__
+ add3 r2,-1,r2
+ ; uses long immediate
+ xor_s r12,r12,r3
+ bmsk r12,r12,r2
+ xor_s r12,r12,r3
+#else /* BIG ENDIAN */
+ sub3 r2,31,r2
+ ; uses long immediate
+ xor_s r3,r3,r12
+ bmsk r3,r3,r2
+ xor_s r12,r12,r3
+#endif /* ENDIAN */
+.Last_store:
+ j_s.d [blink]
+ st r12,[r5,0]
+
+ .balign 4
+.Lcopy_bytewise:
+ jcs [blink]
+ ldb_s r12,[r1,0]
+ lsr.f lp_count,r3
+ bhs_s .Lnox1
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,1]
+.Lnox1:
+ lppnz .Lendbloop
+ ldb_s r3,[r1,1]
+ stb.ab r12,[r5,1]
+ ldb.a r12,[r1,2]
+ stb.ab r3,[r5,1]
+.Lendbloop:
+ j_s.d [blink]
+ stb r12,[r5,0]
+ARC_EXIT memcpy
diff --git a/arch/arc/lib/memset.S b/arch/arc/lib/memset.S
new file mode 100644
index 00000000000..9b2d88d2e14
--- /dev/null
+++ b/arch/arc/lib/memset.S
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+#define SMALL 7 /* Must be at least 6 to deal with alignment/loop issues. */
+
+ARC_ENTRY memset
+ mov_s r4,r0
+ or r12,r0,r2
+ bmsk.f r12,r12,1
+ extb_s r1,r1
+ asl r3,r1,8
+ beq.d .Laligned
+ or_s r1,r1,r3
+ brls r2,SMALL,.Ltiny
+ add r3,r2,r0
+ stb r1,[r3,-1]
+ bclr_s r3,r3,0
+ stw r1,[r3,-2]
+ bmsk.f r12,r0,1
+ add_s r2,r2,r12
+ sub.ne r2,r2,4
+ stb.ab r1,[r4,1]
+ and r4,r4,-2
+ stw.ab r1,[r4,2]
+ and r4,r4,-4
+.Laligned: ; This code address should be aligned for speed.
+ asl r3,r1,16
+ lsr.f lp_count,r2,2
+ or_s r1,r1,r3
+ lpne .Loop_end
+ st.ab r1,[r4,4]
+.Loop_end:
+ j_s [blink]
+
+ .balign 4
+.Ltiny:
+ mov.f lp_count,r2
+ lpne .Ltiny_end
+ stb.ab r1,[r4,1]
+.Ltiny_end:
+ j_s [blink]
+ARC_EXIT memset
+
+; memzero: @r0 = mem, @r1 = size_t
+; memset: @r0 = mem, @r1 = char, @r2 = size_t
+
+ARC_ENTRY memzero
+ ; adjust bzero args to memset args
+ mov r2, r1
+ mov r1, 0
+	b	memset    ;tail call so no need to tinker with blink
+ARC_EXIT memzero
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
new file mode 100644
index 00000000000..99c10475d47
--- /dev/null
+++ b/arch/arc/lib/strchr-700.S
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has a relatively long pipeline and branch prediction, so we want
+ to avoid branches that are hard to predict. On the other hand, the
+ presence of the norm instruction makes it easier to operate on whole
+ words branch-free. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strchr
+ extb_s r1,r1
+ asl r5,r1,8
+ bmsk r2,r0,1
+ or r5,r5,r1
+ mov_s r3,0x01010101
+ breq.d r2,r0,.Laligned
+ asl r4,r5,16
+ sub_s r0,r0,r2
+ asl r7,r2,3
+ ld_s r2,[r0]
+#ifdef __LITTLE_ENDIAN__
+ asl r7,r3,r7
+#else
+ lsr r7,r3,r7
+#endif
+ or r5,r5,r4
+ ror r4,r3
+ sub r12,r2,r7
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0_ua
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r7
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
+ b .Lfound_char ; Likewise this one.
+; /* We require this code address to be unaligned for speed... */
+.Laligned:
+ ld_s r2,[r0]
+ or r5,r5,r4
+ ror r4,r3
+; /* ... so that this code address is aligned, for itself and ... */
+.Loop:
+ sub r12,r2,r3
+ bic_s r12,r12,r2
+ and r12,r12,r4
+ brne.d r12,0,.Lfound0
+ xor r6,r2,r5
+ ld.a r2,[r0,4]
+ sub r12,r6,r3
+ bic r12,r12,r6
+ and r7,r12,r4
+ breq r7,0,.Loop /* ... so that this branch is unaligned. */
+ ; Found searched-for character. r0 has already advanced to next word.
+#ifdef __LITTLE_ENDIAN__
+/* We only need the information about the first matching byte
+ (i.e. the least significant matching byte) to be exact,
+ hence there is no problem with carry effects. */
+.Lfound_char:
+ sub r3,r7,1
+ bic r3,r3,r7
+ norm r2,r3
+ sub_s r0,r0,1
+ asr_s r2,r2,3
+ j.d [blink]
+ sub_s r0,r0,r2
+
+ .balign 4
+.Lfound0_ua:
+ mov r3,r7
+.Lfound0:
+ sub r3,r6,r3
+ bic r3,r3,r6
+ and r2,r3,r4
+ or_s r12,r12,r2
+ sub_s r3,r12,1
+ bic_s r3,r3,r12
+ norm r3,r3
+ add_s r0,r0,3
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ sub_s r0,r0,r12
+ j_s.d [blink]
+ mov.pl r0,0
+#else /* BIG ENDIAN */
+.Lfound_char:
+ lsr r7,r7,7
+
+ bic r2,r7,r6
+ norm r2,r2
+ sub_s r0,r0,4
+ asr_s r2,r2,3
+ j.d [blink]
+ add_s r0,r0,r2
+
+.Lfound0_ua:
+ mov_s r3,r7
+.Lfound0:
+ asl_s r2,r2,7
+ or r7,r6,r4
+ bic_s r12,r12,r2
+ sub r2,r7,r3
+ or r2,r2,r6
+ bic r12,r2,r12
+ bic.f r3,r4,r12
+ norm r3,r3
+
+ add.pl r3,r3,1
+ asr_s r12,r3,3
+ asl.f 0,r2,r3
+ add_s r0,r0,r12
+ j_s.d [blink]
+ mov.mi r0,0
+#endif /* ENDIAN */
+ARC_EXIT strchr
diff --git a/arch/arc/lib/strcmp.S b/arch/arc/lib/strcmp.S
new file mode 100644
index 00000000000..5dc802b45cf
--- /dev/null
+++ b/arch/arc/lib/strcmp.S
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* This is optimized primarily for the ARC700.
+ It would be possible to speed up the loops by one cycle / word
+ respective one cycle / byte by forcing double source 1 alignment, unrolling
+ by a factor of two, and speculatively loading the second word / byte of
+ source 1; however, that would increase the overhead for loop setup / finish,
+ and strcmp might often terminate early. */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcmp
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne r2,0,.Lcharloop
+ mov_s r12,0x01010101
+ ror r5,r12
+.Lwordloop:
+ ld.ab r2,[r0,4]
+ ld.ab r3,[r1,4]
+ nop_s
+ sub r4,r2,r12
+ bic r4,r4,r2
+ and r4,r4,r5
+ brne r4,0,.Lfound0
+ breq r2,r3,.Lwordloop
+#ifdef __LITTLE_ENDIAN__
+ xor r0,r2,r3 ; mask for difference
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+#endif /* LITTLE ENDIAN */
+ cmp_s r2,r3
+ mov_s r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+
+ .balign 4
+#ifdef __LITTLE_ENDIAN__
+.Lfound0:
+ xor r0,r2,r3 ; mask for difference
+ or r0,r0,r4 ; or in zero indicator
+ sub_s r1,r0,1
+ bic_s r0,r0,r1 ; mask for least significant difference bit
+ sub r1,r5,r0
+ xor r0,r5,r1 ; mask for least significant difference byte
+ and_s r2,r2,r0
+ and_s r3,r3,r0
+ sub.f r0,r2,r3
+ mov.hi r0,1
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#else /* BIG ENDIAN */
+ /* The zero-detection above can mis-detect 0x01 bytes as zeroes
+	   because of carry-propagation from a lower significant zero byte.
+ We can compensate for this by checking that bit0 is zero.
+ This compensation is not necessary in the step where we
+ get a low estimate for r2, because in any affected bytes
+ we already have 0x00 or 0x01, which will remain unchanged
+ when bit 7 is cleared. */
+ .balign 4
+.Lfound0:
+ lsr r0,r4,8
+ lsr_s r1,r2
+ bic_s r2,r2,r0 ; get low estimate for r2 and get ...
+ bic_s r0,r0,r1 ; <this is the adjusted mask for zeros>
+ or_s r3,r3,r0 ; ... high estimate r3 so that r2 > r3 will ...
+ cmp_s r3,r2 ; ... be independent of trailing garbage
+ or_s r2,r2,r0 ; likewise for r3 > r2
+ bic_s r3,r3,r0
+ rlc r0,0 ; r0 := r2 > r3 ? 1 : 0
+ cmp_s r2,r3
+ j_s.d [blink]
+ bset.lo r0,r0,31
+#endif /* ENDIAN */
+
+ .balign 4
+.Lcharloop:
+ ldb.ab r2,[r0,1]
+ ldb.ab r3,[r1,1]
+ nop_s
+ breq r2,0,.Lcmpend
+ breq r2,r3,.Lcharloop
+.Lcmpend:
+ j_s.d [blink]
+ sub r0,r2,r3
+ARC_EXIT strcmp
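+
+; Illustrative note (not from the original source): the word-wise NUL
+; detection used above is the classic bit-twiddling idiom
+;	found0 = (word - 0x01010101) & ~word & 0x80808080
+; r12 holds 0x01010101 and r5 = ror(r12) = 0x80808080, so the
+; sub/bic/and triple in .Lwordloop computes exactly this expression.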
diff --git a/arch/arc/lib/strcpy-700.S b/arch/arc/lib/strcpy-700.S
new file mode 100644
index 00000000000..b7ca4ae81d8
--- /dev/null
+++ b/arch/arc/lib/strcpy-700.S
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* If dst and src are 4 byte aligned, copy 8 bytes at a time.
+ If the src is 4, but not 8 byte aligned, we first read 4 bytes to get
+ it 8 byte aligned. Thus, we can do a little read-ahead, without
+ dereferencing a cache line that we should not touch.
+ Note that short and long instructions have been scheduled to avoid
+ branch stalls.
+ The beq_s to r3z could be made unaligned & long to avoid a stall
+   there, but it is not likely to be taken often, and it
+   would also be likely to cost an unaligned mispredict at the next call.  */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strcpy
+ or r2,r0,r1
+ bmsk_s r2,r2,1
+ brne.d r2,0,charloop
+ mov_s r10,r0
+ ld_s r3,[r1,0]
+ mov r8,0x01010101
+ bbit0.d r1,2,loop_start
+ ror r12,r8
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne r3z
+ mov_s r4,r3
+ .balign 4
+loop:
+ ld.a r3,[r1,4]
+ st.ab r4,[r10,4]
+loop_start:
+ ld.a r4,[r1,4]
+ sub r2,r3,r8
+ bic_s r2,r2,r3
+ tst_s r2,r12
+ bne_s r3z
+ st.ab r3,[r10,4]
+ sub r2,r4,r8
+ bic r2,r2,r4
+ tst r2,r12
+ beq loop
+ mov_s r3,r4
+#ifdef __LITTLE_ENDIAN__
+r3z: bmsk.f r1,r3,7
+ lsr_s r3,r3,8
+#else
+r3z: lsr.f r1,r3,24
+ asl_s r3,r3,8
+#endif
+ bne.d r3z
+ stb.ab r1,[r10,1]
+ j_s [blink]
+
+ .balign 4
+charloop:
+ ldb.ab r3,[r1,1]
+
+
+ brne.d r3,0,charloop
+ stb.ab r3,[r10,1]
+ j [blink]
+ARC_EXIT strcpy
diff --git a/arch/arc/lib/strlen.S b/arch/arc/lib/strlen.S
new file mode 100644
index 00000000000..39759e09969
--- /dev/null
+++ b/arch/arc/lib/strlen.S
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/linkage.h>
+
+ARC_ENTRY strlen
+ or r3,r0,7
+ ld r2,[r3,-7]
+ ld.a r6,[r3,-3]
+ mov r4,0x01010101
+ ; uses long immediate
+#ifdef __LITTLE_ENDIAN__
+ asl_s r1,r0,3
+ btst_s r0,2
+ asl r7,r4,r1
+ ror r5,r4
+ sub r1,r2,r7
+ bic_s r1,r1,r2
+ mov.eq r7,r4
+ sub r12,r6,r7
+ bic r12,r12,r6
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#else /* BIG ENDIAN */
+ ror r5,r4
+ btst_s r0,2
+ mov_s r1,31
+ sub3 r7,r1,r0
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ bmsk r1,r1,r7
+ sub r12,r6,r4
+ bic r12,r12,r6
+ bmsk.ne r12,r12,r7
+ or.eq r12,r12,r1
+ and r12,r12,r5
+ brne r12,0,.Learly_end
+#endif /* ENDIAN */
+
+.Loop:
+ ld_s r2,[r3,4]
+ ld.a r6,[r3,8]
+ ; stall for load result
+ sub r1,r2,r4
+ bic_s r1,r1,r2
+ sub r12,r6,r4
+ bic r12,r12,r6
+ or r12,r12,r1
+ and r12,r12,r5
+ breq r12,0,.Loop
+.Lend:
+ and.f r1,r1,r5
+ sub.ne r3,r3,4
+ mov.eq r1,r12
+#ifdef __LITTLE_ENDIAN__
+ sub_s r2,r1,1
+ bic_s r2,r2,r1
+ norm r1,r2
+ sub_s r0,r0,3
+ lsr_s r1,r1,3
+ sub r0,r3,r0
+ j_s.d [blink]
+ sub r0,r0,r1
+#else /* BIG ENDIAN */
+ lsr_s r1,r1,7
+ mov.eq r2,r6
+ bic_s r1,r1,r2
+ norm r1,r1
+ sub r0,r3,r0
+ lsr_s r1,r1,3
+ j_s.d [blink]
+ add r0,r0,r1
+#endif /* ENDIAN */
+.Learly_end:
+ b.d .Lend
+ sub_s.ne r1,r1,r1
+ARC_EXIT strlen
diff --git a/arch/arc/mm/Makefile b/arch/arc/mm/Makefile
new file mode 100644
index 00000000000..168dc146a8f
--- /dev/null
+++ b/arch/arc/mm/Makefile
@@ -0,0 +1,10 @@
+#
+# Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+obj-y := extable.o ioremap.o dma.o fault.o init.o
+obj-y += tlb.o tlbex.o cache_arc700.o
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
new file mode 100644
index 00000000000..88d617d8423
--- /dev/null
+++ b/arch/arc/mm/cache_arc700.c
@@ -0,0 +1,768 @@
+/*
+ * ARC700 VIPT Cache Management
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: May 2011: for Non-aliasing VIPT D-cache following can be NOPs
+ * -flush_cache_dup_mm (fork)
+ * -likewise for flush_cache_mm (exit/execve)
+ * -likewise for flush_cache_range,flush_cache_page (munmap, exit, COW-break)
+ *
+ * vineetg: Apr 2011
+ *  -Now that MMU can support larger pg sz (16K), the determination of
+ *   aliasing should not be based on the assumption of an 8k pg
+ *
+ * vineetg: Mar 2011
+ * -optimised version of flush_icache_range( ) for making I/D coherent
+ * when vaddr is available (agnostic of num of aliases)
+ *
+ * vineetg: Mar 2011
+ * -Added documentation about I-cache aliasing on ARC700 and the way it
+ * was handled up until MMU V2.
+ * -Spotted a three year old bug when killing the 4 aliases, which needs
+ * bottom 2 bits, so we need to do paddr | {0x00, 0x01, 0x02, 0x03}
+ * instead of paddr | {0x00, 0x01, 0x10, 0x11}
+ * (Rajesh you owe me one now)
+ *
+ * vineetg: Dec 2010
+ * -Off-by-one error when computing num_of_lines to flush
+ * This broke signal handling with bionic which uses synthetic sigret stub
+ *
+ * vineetg: Mar 2010
+ * -GCC can't generate ZOL for core cache flush loops.
+ *   Converted them into iteration-based loops, as opposed to while (start < end) types
+ *
+ * Vineetg: July 2009
+ * -In I-cache flush routine we used to chk for aliasing for every line INV.
+ * Instead now we setup routines per cache geometry and invoke them
+ * via function pointers.
+ *
+ * Vineetg: Jan 2009
+ * -Cache Line flush routines used to flush an extra line beyond end addr
+ * because check was while (end >= start) instead of (end > start)
+ * =Some call sites had to work around by doing -1, -4 etc to end param
+ *   =Some callers didn't care. This was especially bad in case of INV routines
+ * which would discard valid data (cause of the horrible ext2 bug
+ * in ARC IDE driver)
+ *
+ * vineetg: June 11th 2008: Fixed flush_icache_range( )
+ *  -Since ARC700 caches are not coherent (I$ doesn't snoop D$) both need
+ * to be flushed, which it was not doing.
+ * -load_module( ) passes vmalloc addr (Kernel Virtual Addr) to the API,
+ * however ARC cache maintenance OPs require PHY addr. Thus need to do
+ * vmalloc_to_phy.
+ * -Also added optimisation there, that for range > PAGE SIZE we flush the
+ * entire cache in one shot rather than line by line. For e.g. a module
+ * with Code sz 600k, old code flushed 600k worth of cache (line-by-line),
+ * while cache is only 16 or 32k.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/cache.h>
+#include <linux/mmu_context.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/cachectl.h>
+#include <asm/setup.h>
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+static void __ic_line_inv_no_alias(unsigned long, int);
+static void __ic_line_inv_2_alias(unsigned long, int);
+static void __ic_line_inv_4_alias(unsigned long, int);
+
+/* Holds the ptr to flush routine, depending on size due to aliasing issues */
+static void (*___flush_icache_rtn) (unsigned long, int);
+#endif
+
+char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ unsigned int c = smp_processor_id();
+
+#define PR_CACHE(p, enb, str) \
+{ \
+ if (!(p)->ver) \
+ n += scnprintf(buf + n, len - n, str"\t\t: N/A\n"); \
+ else \
+ n += scnprintf(buf + n, len - n, \
+ str"\t\t: (%uK) VIPT, %dway set-asc, %ub Line %s\n", \
+ TO_KB((p)->sz), (p)->assoc, (p)->line_len, \
+ enb ? "" : "DISABLED (kernel-build)"); \
+}
+
+ PR_CACHE(&cpuinfo_arc700[c].icache, __CONFIG_ARC_HAS_ICACHE, "I-Cache");
+ PR_CACHE(&cpuinfo_arc700[c].dcache, __CONFIG_ARC_HAS_DCACHE, "D-Cache");
+
+ return buf;
+}
+
+/*
+ * Read the Cache Build Configuration Registers, Decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation done here, simply read/convert the BCRs
+ */
+void __init read_decode_cache_bcr(void)
+{
+ struct bcr_cache ibcr, dbcr;
+ struct cpuinfo_arc_cache *p_ic, *p_dc;
+ unsigned int cpu = smp_processor_id();
+
+ p_ic = &cpuinfo_arc700[cpu].icache;
+ READ_BCR(ARC_REG_IC_BCR, ibcr);
+
+ if (ibcr.config == 0x3)
+ p_ic->assoc = 2;
+ p_ic->line_len = 8 << ibcr.line_len;
+ p_ic->sz = 0x200 << ibcr.sz;
+ p_ic->ver = ibcr.ver;
+
+ p_dc = &cpuinfo_arc700[cpu].dcache;
+ READ_BCR(ARC_REG_DC_BCR, dbcr);
+
+ if (dbcr.config == 0x2)
+ p_dc->assoc = 4;
+ p_dc->line_len = 16 << dbcr.line_len;
+ p_dc->sz = 0x200 << dbcr.sz;
+ p_dc->ver = dbcr.ver;
+}
+
+/*
+ * 1. Validate the Cache Geometry (compile time config matches hardware)
+ * 2. If I-cache suffers from aliasing, setup work arounds (different flush rtn)
+ *    (aliasing D-cache configurations are not supported YET)
+ * 3. Enable the Caches, setup default flush mode for D-Cache
+ * 4. Calculate the SHMLBA used by user space
+ */
+void __init arc_cache_init(void)
+{
+ unsigned int temp;
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
+ struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
+ int way_pg_ratio = way_pg_ratio;
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+ if (!ic->ver)
+ goto chk_dc;
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+ /* 1. Confirm some of I-cache params which Linux assumes */
+ if ((ic->assoc != ARC_ICACHE_WAYS) ||
+ (ic->line_len != ARC_ICACHE_LINE_LEN)) {
+ panic("Cache H/W doesn't match kernel Config");
+ }
+#if (CONFIG_ARC_MMU_VER > 2)
+ if (ic->ver != 3) {
+ if (running_on_hw)
+ panic("Cache ver doesn't match MMU ver\n");
+
+ /* For ISS - suggest the toggles to use */
+ pr_err("Use -prop=icache_version=3,-prop=dcache_version=3\n");
+
+ }
+#endif
+
+ /*
+ * if Cache way size is <= page size then no aliasing exhibited
+ * otherwise ratio determines num of aliases.
+ * e.g. 32K I$, 2 way set assoc, 8k pg size
+ * way-sz = 32k/2 = 16k
+ * way-pg-ratio = 16k/8k = 2, so 2 aliases possible
+ * (meaning 1 line could be in 2 possible locations).
+ */
+ way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE;
+ switch (way_pg_ratio) {
+ case 0:
+ case 1:
+ ___flush_icache_rtn = __ic_line_inv_no_alias;
+ break;
+ case 2:
+ ___flush_icache_rtn = __ic_line_inv_2_alias;
+ break;
+ case 4:
+ ___flush_icache_rtn = __ic_line_inv_4_alias;
+ break;
+ default:
+ panic("Unsupported I-Cache Sz\n");
+ }
+#endif
+
+ /* Enable/disable I-Cache */
+ temp = read_aux_reg(ARC_REG_IC_CTRL);
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+ temp &= ~IC_CTRL_CACHE_DISABLE;
+#else
+ temp |= IC_CTRL_CACHE_DISABLE;
+#endif
+
+ write_aux_reg(ARC_REG_IC_CTRL, temp);
+
+chk_dc:
+ if (!dc->ver)
+ return;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+ if ((dc->assoc != ARC_DCACHE_WAYS) ||
+ (dc->line_len != ARC_DCACHE_LINE_LEN)) {
+ panic("Cache H/W doesn't match kernel Config");
+ }
+
+ /* check for D-Cache aliasing */
+ if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
+ panic("D$ aliasing not handled right now\n");
+#endif
+
+	/* Set the default Invalidate Mode to "simply discard dirty lines"
+	 * as this is more frequent than flush before invalidate
+	 * Of course we toggle this default behaviour when desired
+ */
+ temp = read_aux_reg(ARC_REG_DC_CTRL);
+ temp &= ~DC_CTRL_INV_MODE_FLUSH;
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+ /* Enable D-Cache: Clear Bit 0 */
+ write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE);
+#else
+ /* Flush D cache */
+ write_aux_reg(ARC_REG_DC_FLSH, 0x1);
+ /* Disable D cache */
+ write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE);
+#endif
+
+ return;
+}
+
+#define OP_INV 0x1
+#define OP_FLUSH 0x2
+#define OP_FLUSH_N_INV 0x3
+
+#ifdef CONFIG_ARC_HAS_DCACHE
+
+/***************************************************************
+ * Machine specific helpers for Entire D-Cache or Per Line ops
+ */
+
+static inline void wait_for_flush(void)
+{
+ while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
+ ;
+}
+
+/*
+ * Operation on Entire D-Cache
+ * @cacheop = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
+ * Note that constant propagation ensures all the checks are gone
+ * in generated code
+ */
+static inline void __dc_entire_op(const int cacheop)
+{
+ unsigned long flags, tmp = tmp;
+ int aux;
+
+ local_irq_save(flags);
+
+ if (cacheop == OP_FLUSH_N_INV) {
+ /* Dcache provides 2 cmd: FLUSH or INV
+		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * Default INV sub-mode is DISCARD, which needs to be toggled
+ */
+ tmp = read_aux_reg(ARC_REG_DC_CTRL);
+ write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+ }
+
+ if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDC;
+ else
+ aux = ARC_REG_DC_FLSH;
+
+ write_aux_reg(aux, 0x1);
+
+ if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+ wait_for_flush();
+
+ /* Switch back the DISCARD ONLY Invalidate mode */
+ if (cacheop == OP_FLUSH_N_INV)
+ write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Per Line Operation on D-Cache
+ * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete
+ * Its sole purpose is to help gcc generate ZOL
+ */
+static inline void __dc_line_loop(unsigned long start, unsigned long sz,
+ int aux_reg)
+{
+ int num_lines, slack;
+
+ /* Ensure we properly floor/ceil the non-line aligned/sized requests
+ * and have @start - aligned to cache line and integral @num_lines.
+ * This however can be avoided for page sized since:
+ * -@start will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+ slack = start & ~DCACHE_LINE_MASK;
+ sz += slack;
+ start -= slack;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN);
+
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /*
+ * Just as for I$, in MMU v3, D$ ops also require
+ * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops
+ * But we pass phy addr for both. This works since Linux
+ * doesn't support aliasing configs for D$, yet.
+ * Thus paddr is enough to provide both tag and index.
+ */
+ write_aux_reg(ARC_REG_DC_PTAG, start);
+#endif
+ write_aux_reg(aux_reg, start);
+ start += ARC_DCACHE_LINE_LEN;
+ }
+}
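+
+/*
+ * Worked example of the floor/ceil above (numbers illustrative, assuming
+ * 32-byte D$ lines): a 40-byte request at 0x8000_1234 becomes
+ *	slack = 0x14, start = 0x8000_1220, sz = 0x3c
+ *	num_lines = DIV_ROUND_UP(0x3c, 32) = 2
+ * i.e. both partially covered lines are operated upon.
+ */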
+
+/*
+ * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback)
+ */
+static inline void __dc_line_op(unsigned long start, unsigned long sz,
+ const int cacheop)
+{
+ unsigned long flags, tmp = tmp;
+ int aux;
+
+ local_irq_save(flags);
+
+ if (cacheop == OP_FLUSH_N_INV) {
+ /*
+ * Dcache provides 2 cmd: FLUSH or INV
+	 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
+ * flush-n-inv is achieved by INV cmd but with IM=1
+ * Default INV sub-mode is DISCARD, which needs to be toggled
+ */
+ tmp = read_aux_reg(ARC_REG_DC_CTRL);
+ write_aux_reg(ARC_REG_DC_CTRL, tmp | DC_CTRL_INV_MODE_FLUSH);
+ }
+
+ if (cacheop & OP_INV) /* Inv / flush-n-inv use same cmd reg */
+ aux = ARC_REG_DC_IVDL;
+ else
+ aux = ARC_REG_DC_FLDL;
+
+ __dc_line_loop(start, sz, aux);
+
+ if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */
+ wait_for_flush();
+
+ /* Switch back the DISCARD ONLY Invalidate mode */
+ if (cacheop == OP_FLUSH_N_INV)
+ write_aux_reg(ARC_REG_DC_CTRL, tmp & ~DC_CTRL_INV_MODE_FLUSH);
+
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __dc_entire_op(cacheop)
+#define __dc_line_op(start, sz, cacheop)
+
+#endif /* CONFIG_ARC_HAS_DCACHE */
+
+
+#ifdef CONFIG_ARC_HAS_ICACHE
+
+/*
+ * I-Cache Aliasing in ARC700 VIPT caches
+ *
+ * For fetching code from I$, ARC700 uses vaddr (embedded in program code)
+ * to "index" into SET of cache-line and paddr from MMU to match the TAG
+ * in the WAYS of SET.
+ *
+ * However the CDU interface (to flush/inv lines) from software only takes
+ * paddr (to have simpler hardware interface). For simpler cases, using paddr
+ * alone suffices.
+ * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size):
+ * way_sz = cache_sz / num_ways = 16k/2 = 8k
+ * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits
+ * Ignoring the bottom 5 bits corresp to the off within a 32b cacheline,
+ * bits req for calc set-index = bits 12:5 (0 based). Since this range fits
+ * inside the bottom 13 bits of paddr, which are same for vaddr and paddr
+ * (with 8k pg sz), paddr alone can be safely used by CDU to unambiguously
+ * locate a cache-line.
+ *
+ * However for a differently sized cache, say 32k I$, above math yields need
+ * for 14 bits of vaddr to locate a cache line, which can't be provided by
+ * paddr, since the bit 13 (0 based) might differ between the two.
+ *
+ * This lack of extra bits needed for correct line addressing, defines the
+ * classical problem of Cache aliasing with VIPT architectures
+ * num_aliases = 1 << extra_bits
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases
+ * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases
+ * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases
+ *
+ * ------------------
+ * MMU v1/v2 (Fixed Page Size 8k)
+ * ------------------
+ * The solution was to provide CDU with these additional vaddr bits. These
+ * would be bits [x:13], x would depend on cache-geom.
+ * H/w folks chose [17:13] to be a future safe range, and moreover these 5 bits
+ * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
+ * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
+ * represent the offset within cache-line. The adv of using this "clumsy"
+ * interface for additional info was no new reg was needed in CDU.
+ *
+ * 17:13 represented the max num of bits passable, actual bits needed were
+ * fewer, based on the num-of-aliases possible.
+ * -for 2 alias possibility, only bit 13 needed (32K cache)
+ * -for 4 alias possibility, bits 14:13 needed (64K cache)
+ *
+ * Since vaddr was not available for all instances of I$ flush req by core
+ * kernel, the only safe way (non-optimal though) was to kill all possible
+ * lines which could represent an alias (even if they didn't represent one
+ * in execution).
+ * e.g. for 64K I$, 4 aliases possible, so we did
+ * flush start
+ * flush start | 0x01
+ * flush start | 0x2
+ * flush start | 0x3
+ *
+ * The penalty was invoking the operation itself, since tag match is anyways
+ * paddr based, a line which didn't represent an alias would not match the
+ * paddr, hence won't be killed
+ *
+ * Note that aliasing concerns are independent of line-sz for a given cache
+ * geometry (size + set_assoc) because the extra bits required by line-sz are
+ * reduced from the set calc.
+ * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above
+ * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit
+ * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit
+ *
+ * ------------------
+ * MMU v3
+ * ------------------
+ * This ver of MMU supports var page sizes (1k-16k) - Linux will support
+ * 8k (default), 16k and 4k.
+ * However from hardware perspective, smaller page sizes aggravate aliasing
+ * meaning more vaddr bits needed to disambiguate the cache-line-op;
+ * the existing scheme of piggybacking won't work for certain configurations.
+ * Two new registers IC_PTAG and DC_PTAG introduced.
+ * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
+ */
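+
+/*
+ * The ratio above, condensed into one expression (illustrative helper only;
+ * the actual routine selection happens at boot in arc_cache_init() from the
+ * same way-size : page-size ratio):
+ */
+#define IC_NUM_ALIASES(cache_sz, ways)	\
+	((((cache_sz) / (ways)) > PAGE_SIZE) ?	\
+		(((cache_sz) / (ways)) / PAGE_SIZE) : 1)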
+
+/***********************************************************
+ * Machine specific helpers for per line I-Cache invalidate.
+ * 3 routines to account for 1, 2, 4 aliases possible
+ */
+
+static void __ic_line_inv_no_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+#endif
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv_2_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ /*
+ * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG
+ * reg to pass the "tag" bits and existing IVIL reg only looks
+ * at bits relevant for "index" (details above)
+ * Programming Notes:
+ * -when writing tag to PTAG reg, bit chopping can be avoided,
+ * CDU ignores non-tag bits.
+ * -Ideally "index" must be computed from vaddr, but it is not
+ * avail in these rtns. So to be safe, we kill the lines in all
+ * possible indexes corresp to num of aliases possible for
+ * given cache config.
+ */
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x1 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT));
+#else
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+#endif
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv_4_alias(unsigned long start, int num_lines)
+{
+ while (num_lines-- > 0) {
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ write_aux_reg(ARC_REG_IC_PTAG, start);
+
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x3 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x2 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL,
+ start & ~(0x1 << PAGE_SHIFT));
+ write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT));
+#else
+ write_aux_reg(ARC_REG_IC_IVIL, start);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x01);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x02);
+ write_aux_reg(ARC_REG_IC_IVIL, start | 0x03);
+#endif
+ start += ARC_ICACHE_LINE_LEN;
+ }
+}
+
+static void __ic_line_inv(unsigned long start, unsigned long sz)
+{
+ unsigned long flags;
+ int num_lines, slack;
+
+ /*
+ * Ensure we properly floor/ceil the non-line aligned/sized requests
+ * and have @start - aligned to cache line, and integral @num_lines
+ * However page sized flushes can be compile time optimised.
+ * -@start will be cache-line aligned already (being page aligned)
+ * -@sz will be integral multiple of line size (being page sized).
+ */
+ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) {
+ slack = start & ~ICACHE_LINE_MASK;
+ sz += slack;
+ start -= slack;
+ }
+
+ num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+ local_irq_save(flags);
+ (*___flush_icache_rtn) (start, num_lines);
+ local_irq_restore(flags);
+}
+
+/* Unlike routines above, having vaddr for flush op (along with paddr),
+ * prevents the need to speculatively kill the lines in multiple sets
+ * based on ratio of way_sz : pg_sz
+ */
+static void __ic_line_inv_vaddr(unsigned long phy_start,
+ unsigned long vaddr, unsigned long sz)
+{
+ unsigned long flags;
+ int num_lines, slack;
+ unsigned int addr;
+
+ slack = phy_start & ~ICACHE_LINE_MASK;
+ sz += slack;
+ phy_start -= slack;
+ num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN);
+
+#if (CONFIG_ARC_MMU_VER > 2)
+ vaddr &= ~ICACHE_LINE_MASK;
+ addr = phy_start;
+#else
+ /* bits 17:13 of vaddr go as bits 4:0 of paddr */
+ addr = phy_start | ((vaddr >> 13) & 0x1F);
+#endif
+
+ local_irq_save(flags);
+ while (num_lines-- > 0) {
+#if (CONFIG_ARC_MMU_VER > 2)
+ /* tag comes from phy addr */
+ write_aux_reg(ARC_REG_IC_PTAG, addr);
+
+ /* index bits come from vaddr */
+ write_aux_reg(ARC_REG_IC_IVIL, vaddr);
+ vaddr += ARC_ICACHE_LINE_LEN;
+#else
+ /* this paddr contains vaddrs bits as needed */
+ write_aux_reg(ARC_REG_IC_IVIL, addr);
+#endif
+ addr += ARC_ICACHE_LINE_LEN;
+ }
+ local_irq_restore(flags);
+}
+
+#else
+
+#define __ic_line_inv(start, sz)
+#define __ic_line_inv_vaddr(pstart, vstart, sz)
+
+#endif /* CONFIG_ARC_HAS_ICACHE */
+
+
+/***********************************************************
+ * Exported APIs
+ */
+
+/* TBD: use pg_arch_1 to optimize this */
+void flush_dcache_page(struct page *page)
+{
+ __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH);
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+
+void dma_cache_wback_inv(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_FLUSH_N_INV);
+}
+EXPORT_SYMBOL(dma_cache_wback_inv);
+
+void dma_cache_inv(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_INV);
+}
+EXPORT_SYMBOL(dma_cache_inv);
+
+void dma_cache_wback(unsigned long start, unsigned long sz)
+{
+ __dc_line_op(start, sz, OP_FLUSH);
+}
+EXPORT_SYMBOL(dma_cache_wback);
+
+/*
+ * This is API for making I/D Caches consistent when modifying code
+ * (loadable modules, kprobes, etc)
+ * This is called on insmod, with kernel virtual address for CODE of
+ * the module. ARC cache maintenance ops require PHY address thus we
+ * need to convert vmalloc addr to PHY addr
+ */
+void flush_icache_range(unsigned long kstart, unsigned long kend)
+{
+ unsigned int tot_sz, off, sz;
+ unsigned long phy, pfn;
+ unsigned long flags;
+
+	/* printk("Kernel Cache Coherency: %lx to %lx\n",kstart, kend); */
+
+ /* This is not the right API for user virtual address */
+ if (kstart < TASK_SIZE) {
+ BUG_ON("Flush icache range for user virtual addr space");
+ return;
+ }
+
+ /* Shortcut for bigger flush ranges.
+ * Here we don't care if this was kernel virtual or phy addr
+ */
+ tot_sz = kend - kstart;
+ if (tot_sz > PAGE_SIZE) {
+ flush_cache_all();
+ return;
+ }
+
+ /* Case: Kernel Phy addr (0x8000_0000 onwards) */
+ if (likely(kstart > PAGE_OFFSET)) {
+ __ic_line_inv(kstart, kend - kstart);
+ __dc_line_op(kstart, kend - kstart, OP_FLUSH);
+ return;
+ }
+
+ /*
+ * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
+ * (1) ARC Cache Maintenance ops only take Phy addr, hence special
+ * handling of kernel vaddr.
+ *
+ * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
+ * it still needs to handle a 2 page scenario, where the range
+ * straddles across 2 virtual pages and hence need for loop
+ */
+ while (tot_sz > 0) {
+ off = kstart % PAGE_SIZE;
+ pfn = vmalloc_to_pfn((void *)kstart);
+ phy = (pfn << PAGE_SHIFT) + off;
+ sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
+ local_irq_save(flags);
+ __dc_line_op(phy, sz, OP_FLUSH);
+ __ic_line_inv(phy, sz);
+ local_irq_restore(flags);
+ kstart += sz;
+ tot_sz -= sz;
+ }
+}
+
+/*
+ * Optimised ver of flush_icache_range() with spec callers: ptrace/signals
+ * where vaddr is also available. This allows passing both vaddr and paddr
+ * bits to CDU for cache flush, short-circuiting the current pessimistic algo
+ * which kills all possible aliases.
+ * An added advantage of knowing that vaddr is a user vaddr is that it avoids
+ * various checks and handling for k-vaddr, k-paddr as done in orig ver above
+ */
+void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr,
+ int len)
+{
+ __ic_line_inv_vaddr(paddr, u_vaddr, len);
+ __dc_line_op(paddr, len, OP_FLUSH);
+}
+
+/*
+ * XXX: This also needs to be optim using pg_arch_1
+ * This is called when a page-cache page is about to be mapped into a
+ * user process' address space. It offers an opportunity for a
+ * port to ensure d-cache/i-cache coherency if necessary.
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ if (!(vma->vm_flags & VM_EXEC))
+ return;
+
+ __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE);
+}
+
+void flush_icache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ write_aux_reg(ARC_REG_IC_IVIC, 1);
+
+	/* lr will not complete till the icache inv operation is over */
+ read_aux_reg(ARC_REG_IC_CTRL);
+ local_irq_restore(flags);
+}
+
+noinline void flush_cache_all(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ flush_icache_all();
+ __dc_entire_op(OP_FLUSH_N_INV);
+
+ local_irq_restore(flags);
+
+}
+
+/**********************************************************************
+ * Explicit Cache flush request from user space via syscall
+ * Needed for JITs which generate code on the fly
+ */
+SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
+{
+ /* TBD: optimize this */
+ flush_cache_all();
+ return 0;
+}
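+
+/*
+ * Illustrative user-space usage (hypothetical; the syscall number macro is
+ * assumed to come from the exported ABI headers): a JIT that has just
+ * written instructions into @buf would do
+ *
+ *	#include <unistd.h>
+ *	#include <sys/syscall.h>
+ *
+ *	syscall(__NR_cacheflush, (unsigned long)buf, len, 0);
+ *
+ * before branching to the freshly generated code.
+ */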
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
new file mode 100644
index 00000000000..12cc6485b21
--- /dev/null
+++ b/arch/arc/mm/dma.c
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * DMA Coherent API Notes
+ *
+ * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
+ * implemented by accessing it using a kernel virtual address, with
+ * Cache bit off in the TLB entry.
+ *
+ * The default DMA address == Phy address which is 0x8000_0000 based.
+ * A platform/device can make it zero based, by over-riding
+ * plat_{dma,kernel}_addr_to_{kernel,dma}
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma-debug.h>
+#include <linux/export.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Helpers for Coherent DMA API.
+ */
+void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *paddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+ if (!paddr)
+ return NULL;
+
+ /* This is bus address, platform dependent */
+ *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+ return paddr;
+}
+EXPORT_SYMBOL(dma_alloc_noncoherent);
+
+void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle)
+{
+ free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+ size);
+}
+EXPORT_SYMBOL(dma_free_noncoherent);
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ void *paddr, *kvaddr;
+
+ /* This is linear addr (0x8000_0000 based) */
+ paddr = alloc_pages_exact(size, gfp);
+ if (!paddr)
+ return NULL;
+
+ /* This is kernel Virtual address (0x7000_0000 based) */
+ kvaddr = ioremap_nocache((unsigned long)paddr, size);
+ if (kvaddr != NULL)
+ memset(kvaddr, 0, size);
+
+ /* This is bus address, platform dependent */
+ *dma_handle = plat_kernel_addr_to_dma(dev, paddr);
+
+ return kvaddr;
+}
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
+ dma_addr_t dma_handle)
+{
+ iounmap((void __force __iomem *)kvaddr);
+
+ free_pages_exact((void *)plat_dma_addr_to_kernel(dev, dma_handle),
+ size);
+}
+EXPORT_SYMBOL(dma_free_coherent);
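+
+/*
+ * Illustrative driver-side usage of the pair above (hypothetical, not part
+ * of this patch):
+ *
+ *	dma_addr_t handle;
+ *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
+ *
+ *	if (buf) {
+ *		... program the device with "handle", CPU uses "buf" uncached ...
+ *		dma_free_coherent(dev, PAGE_SIZE, buf, handle);
+ *	}
+ */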
+
+/*
+ * Helper for streaming DMA...
+ */
+void __arc_dma_cache_sync(unsigned long paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ __inline_dma_cache_sync(paddr, size, dir);
+}
+EXPORT_SYMBOL(__arc_dma_cache_sync);
diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c
new file mode 100644
index 00000000000..014172ba843
--- /dev/null
+++ b/arch/arc/mm/extable.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Borrowed heavily from MIPS
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(instruction_pointer(regs));
+ if (fixup) {
+ regs->ret = fixup->fixup;
+
+ return 1;
+ }
+
+ return 0;
+}
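For orientation, each exception-table entry pairs the address of a potentially faulting instruction with the address of its recovery code; fixup_exception() above simply redirects the return address to the latter. A conceptual sketch of the entry layout (the generic kernel shape, shown only for illustration):

    struct exception_table_entry {
            unsigned long insn;     /* address of the instruction that may fault */
            unsigned long fixup;    /* where to resume execution if it does */
    };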
+
+#ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
+
+long arc_copy_from_user_noinline(void *to, const void __user * from,
+ unsigned long n)
+{
+ return __arc_copy_from_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_from_user_noinline);
+
+long arc_copy_to_user_noinline(void __user *to, const void *from,
+ unsigned long n)
+{
+ return __arc_copy_to_user(to, from, n);
+}
+EXPORT_SYMBOL(arc_copy_to_user_noinline);
+
+unsigned long arc_clear_user_noinline(void __user *to,
+ unsigned long n)
+{
+ return __arc_clear_user(to, n);
+}
+EXPORT_SYMBOL(arc_clear_user_noinline);
+
+long arc_strncpy_from_user_noinline (char *dst, const char __user *src,
+ long count)
+{
+ return __arc_strncpy_from_user(dst, src, count);
+}
+EXPORT_SYMBOL(arc_strncpy_from_user_noinline);
+
+long arc_strnlen_user_noinline(const char __user *src, long n)
+{
+ return __arc_strnlen_user(src, n);
+}
+EXPORT_SYMBOL(arc_strnlen_user_noinline);
+#endif
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
new file mode 100644
index 00000000000..af55aab803d
--- /dev/null
+++ b/arch/arc/mm/fault.c
@@ -0,0 +1,228 @@
+/* Page Fault Handling for ARC (TLB Miss / ProtV)
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/signal.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <asm/pgalloc.h>
+
+static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+{
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ */
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ pmd_t *pmd, *pmd_k;
+
+ pgd = pgd_offset_fast(mm, address);
+ pgd_k = pgd_offset_k(address);
+
+ if (!pgd_present(*pgd_k))
+ goto bad_area;
+
+ pud = pud_offset(pgd, address);
+ pud_k = pud_offset(pgd_k, address);
+ if (!pud_present(*pud_k))
+ goto bad_area;
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+ if (!pmd_present(*pmd_k))
+ goto bad_area;
+
+ set_pmd(pmd, *pmd_k);
+
+ /* XXX: create the TLB entry here */
+ return 0;
+
+bad_area:
+ return 1;
+}
+
+void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
+ unsigned long cause_code)
+{
+ struct vm_area_struct *vma = NULL;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ siginfo_t info;
+ int fault, ret;
+ unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+ (write ? FAULT_FLAG_WRITE : 0);
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address >= VMALLOC_START && address <= VMALLOC_END) {
+ ret = handle_vmalloc_fault(mm, address);
+ if (unlikely(ret))
+ goto bad_area_nosemaphore;
+ else
+ return;
+ }
+
+ info.si_code = SEGV_MAPERR;
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (in_atomic() || !mm)
+ goto no_context;
+
+retry:
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, address);
+ if (!vma)
+ goto bad_area;
+ if (vma->vm_start <= address)
+ goto good_area;
+ if (!(vma->vm_flags & VM_GROWSDOWN))
+ goto bad_area;
+ if (expand_stack(vma, address))
+ goto bad_area;
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ info.si_code = SEGV_ACCERR;
+
+ /* Handle protection violation, execute on heap or stack */
+
+ if (cause_code == ((ECR_V_PROTV << 16) | ECR_C_PROTV_INST_FETCH))
+ goto bad_area;
+
+ if (write) {
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+ }
+
+survive:
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(mm, vma, address, flags);
+
+ /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
+ if (unlikely(fatal_signal_pending(current))) {
+ if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
+ up_read(&mm->mmap_sem);
+ if (user_mode(regs))
+ return;
+ }
+
+ if (likely(!(fault & VM_FAULT_ERROR))) {
+ if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ /* To avoid updating stats twice for retry case */
+ if (fault & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
+ if (fault & VM_FAULT_RETRY) {
+ flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+ }
+
+ /* Fault Handled Gracefully */
+ up_read(&mm->mmap_sem);
+ return;
+ }
+
+ /* TBD: switch to pagefault_out_of_memory() */
+ if (fault & VM_FAULT_OOM)
+ goto out_of_memory;
+ else if (fault & VM_FAULT_SIGBUS)
+ goto do_sigbus;
+
+ /* no man's land */
+ BUG();
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ tsk->thread.fault_address = address;
+ tsk->thread.cause_code = cause_code;
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ /* info.si_code has been set above */
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+ return;
+ }
+
+no_context:
+ /* Are we prepared to handle this kernel fault?
+ *
+ * (The kernel has valid exception-points in the source
+ * when it accesses user-memory. When it fails in one
+ * of those points, we find it in a table and do a jump
+ * to some fixup code that loads an appropriate error
+ * code)
+ */
+ if (fixup_exception(regs))
+ return;
+
+ die("Oops", regs, address, cause_code);
+
+out_of_memory:
+ if (is_global_init(tsk)) {
+ yield();
+ goto survive;
+ }
+ up_read(&mm->mmap_sem);
+
+ if (user_mode(regs))
+ do_group_exit(SIGKILL); /* This will never return */
+
+ goto no_context;
+
+do_sigbus:
+ up_read(&mm->mmap_sem);
+
+ if (!user_mode(regs))
+ goto no_context;
+
+ tsk->thread.fault_address = address;
+ tsk->thread.cause_code = cause_code;
+ info.si_signo = SIGBUS;
+ info.si_errno = 0;
+ info.si_code = BUS_ADRERR;
+ info.si_addr = (void __user *)address;
+ force_sig_info(SIGBUS, &info, tsk);
+}
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
new file mode 100644
index 00000000000..caf797de23f
--- /dev/null
+++ b/arch/arc/mm/init.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#ifdef CONFIG_BLOCK_DEV_RAM
+#include <linux/blk.h>
+#endif
+#include <linux/swap.h>
+#include <linux/module.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/arcregs.h>
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __aligned(PAGE_SIZE);
+char empty_zero_page[PAGE_SIZE] __aligned(PAGE_SIZE);
+EXPORT_SYMBOL(empty_zero_page);
+
+/* Default tot mem from .config */
+static unsigned long arc_mem_sz = 0x20000000; /* some default */
+
+/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
+static int __init setup_mem_sz(char *str)
+{
+ arc_mem_sz = memparse(str, NULL) & PAGE_MASK;
+
+ /* early console might not be setup yet - it will show up later */
+ pr_info("\"mem=%s\": mem sz set to %ldM\n", str, TO_MB(arc_mem_sz));
+
+ return 0;
+}
+early_param("mem", setup_mem_sz);
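As a quick illustration (the value is assumed, not part of this patch), booting with

    mem=256M

on the kernel command line caps arc_mem_sz at 0x1000_0000, overriding the .config default above.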
+
+void __init early_init_dt_add_memory_arch(u64 base, u64 size)
+{
+ arc_mem_sz = size & PAGE_MASK;
+ pr_info("Memory size set via devicetree %ldM\n", TO_MB(arc_mem_sz));
+}
+
+/*
+ * First memory setup routine called from setup_arch()
+ * 1. setup swapper's mm @init_mm
+ * 2. Count the pages we have and setup bootmem allocator
+ * 3. zone setup
+ */
+void __init setup_arch_memory(void)
+{
+ unsigned long zones_size[MAX_NR_ZONES] = { 0, 0 };
+ unsigned long end_mem = CONFIG_LINUX_LINK_BASE + arc_mem_sz;
+
+ init_mm.start_code = (unsigned long)_text;
+ init_mm.end_code = (unsigned long)_etext;
+ init_mm.end_data = (unsigned long)_edata;
+ init_mm.brk = (unsigned long)_end;
+
+ /*
+ * We do it here, so that memory is correctly instantiated
+ * even if "mem=xxx" cmdline over-ride is given and/or
+ * DT has a memory node. Each causes an update to @arc_mem_sz
+ * and we finally add memory once here
+ */
+ memblock_add(CONFIG_LINUX_LINK_BASE, arc_mem_sz);
+
+ /*------------- externs in mm need setting up ---------------*/
+
+ /* first page of system - kernel .vector starts here */
+ min_low_pfn = PFN_DOWN(CONFIG_LINUX_LINK_BASE);
+
+ /* Last usable page of low mem (no HIGHMEM yet for ARC port) */
+ max_low_pfn = max_pfn = PFN_DOWN(end_mem);
+
+ max_mapnr = num_physpages = max_low_pfn - min_low_pfn;
+
+ /*------------- reserve kernel image -----------------------*/
+ memblock_reserve(CONFIG_LINUX_LINK_BASE,
+ __pa(_end) - CONFIG_LINUX_LINK_BASE);
+
+ memblock_dump_all();
+
+ /*-------------- node setup --------------------------------*/
+ memset(zones_size, 0, sizeof(zones_size));
+ zones_size[ZONE_NORMAL] = num_physpages;
+
+ /*
+ * We can't use the helper free_area_init(zones[]) because it uses
+ * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
+ * when our kernel doesn't start at PAGE_OFFSET, i.e.
+ * PAGE_OFFSET != CONFIG_LINUX_LINK_BASE
+ */
+ free_area_init_node(0, /* node-id */
+ zones_size, /* num pages per zone */
+ min_low_pfn, /* first pfn of node */
+ NULL); /* NO holes */
+}
+
+/*
+ * mem_init - initializes memory
+ *
+ * Frees up bootmem
+ * Calculates and displays memory available/used
+ */
+void __init mem_init(void)
+{
+ int codesize, datasize, initsize, reserved_pages, free_pages;
+ int tmp;
+
+ high_memory = (void *)(CONFIG_LINUX_LINK_BASE + arc_mem_sz);
+
+ totalram_pages = free_all_bootmem();
+
+ /* count all reserved pages [kernel code/data/mem_map..] */
+ reserved_pages = 0;
+ for (tmp = 0; tmp < max_mapnr; tmp++)
+ if (PageReserved(mem_map + tmp))
+ reserved_pages++;
+
+ /* XXX: nr_free_pages() is equivalent */
+ free_pages = max_mapnr - reserved_pages;
+
+ /*
+ * For the display below, exclude the kernel code/data from "reserved"
+ * since it is already shown explicitly;
+ * show only any other reservations (mem_map[ ] et al)
+ */
+ reserved_pages -= (((unsigned int)_end - CONFIG_LINUX_LINK_BASE) >>
+ PAGE_SHIFT);
+
+ codesize = _etext - _text;
+ datasize = _end - _etext;
+ initsize = __init_end - __init_begin;
+
+ pr_info("Memory Available: %dM / %ldM (%dK code, %dK data, %dK init, %dK reserv)\n",
+ PAGES_TO_MB(free_pages),
+ TO_MB(arc_mem_sz),
+ TO_KB(codesize), TO_KB(datasize), TO_KB(initsize),
+ PAGES_TO_KB(reserved_pages));
+}
+
+static void __init free_init_pages(const char *what, unsigned long begin,
+ unsigned long end)
+{
+ unsigned long addr;
+
+ pr_info("Freeing %s: %ldk [%lx] to [%lx]\n",
+ what, TO_KB(end - begin), begin, end);
+
+ /* need to check that the page we free is not a partial page */
+ for (addr = begin; addr + PAGE_SIZE <= end; addr += PAGE_SIZE) {
+ ClearPageReserved(virt_to_page(addr));
+ init_page_count(virt_to_page(addr));
+ free_page(addr);
+ totalram_pages++;
+ }
+}
+
+/*
+ * free_initmem: Free all the __init memory.
+ */
+void __init_refok free_initmem(void)
+{
+ free_init_pages("unused kernel memory",
+ (unsigned long)__init_begin,
+ (unsigned long)__init_end);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+ free_init_pages("initrd memory", start, end);
+}
+#endif
+
+#ifdef CONFIG_OF_FLATTREE
+void __init early_init_dt_setup_initrd_arch(unsigned long start,
+ unsigned long end)
+{
+ pr_err("%s(%lx, %lx)\n", __func__, start, end);
+}
+#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arc/mm/ioremap.c b/arch/arc/mm/ioremap.c
new file mode 100644
index 00000000000..3e5c92c7993
--- /dev/null
+++ b/arch/arc/mm/ioremap.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <asm/cache.h>
+
+void __iomem *ioremap(unsigned long paddr, unsigned long size)
+{
+ unsigned long end;
+
+ /* Don't allow wraparound or zero size */
+ end = paddr + size - 1;
+ if (!size || (end < paddr))
+ return NULL;
+
+ /* If the region is h/w uncached, avoid MMU mappings */
+ if (paddr >= ARC_UNCACHED_ADDR_SPACE)
+ return (void __iomem *)paddr;
+
+ return ioremap_prot(paddr, size, PAGE_KERNEL_NO_CACHE);
+}
+EXPORT_SYMBOL(ioremap);
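A minimal consumer-side sketch (the base address and register offsets are made-up values for illustration):

    void __iomem *regs = ioremap(0xC0FC1000, 0x100);    /* e.g. a UART block */

    if (regs) {
            u32 v = readl(regs);          /* read a device register */
            writel(v | 0x1, regs + 0x4);  /* poke a control register */
            iounmap(regs);
    }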
+
+/*
+ * ioremap with access flags
+ * Cache semantics wise it is the same as ioremap - "forced" uncached.
+ * However unlike vanilla ioremap, which bypasses the ARC MMU for addresses
+ * in the ARC hardware uncached region, this one still goes thru the MMU as
+ * the caller might need finer access control (R/W/X)
+ */
+void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
+ unsigned long flags)
+{
+ void __iomem *vaddr;
+ struct vm_struct *area;
+ unsigned long off, end;
+ pgprot_t prot = __pgprot(flags);
+
+ /* Don't allow wraparound, zero size */
+ end = paddr + size - 1;
+ if ((!size) || (end < paddr))
+ return NULL;
+
+ /* An early platform driver might end up here */
+ if (!slab_is_available())
+ return NULL;
+
+ /* force uncached */
+ prot = pgprot_noncached(prot);
+
+ /* Mappings have to be page-aligned */
+ off = paddr & ~PAGE_MASK;
+ paddr &= PAGE_MASK;
+ size = PAGE_ALIGN(end + 1) - paddr;
+
+ /*
+ * Ok, go for it..
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+ area->phys_addr = paddr;
+ vaddr = (void __iomem *)area->addr;
+ if (ioremap_page_range((unsigned long)vaddr,
+ (unsigned long)vaddr + size, paddr, prot)) {
+ vunmap((void __force *)vaddr);
+ return NULL;
+ }
+ return (void __iomem *)(off + (char __iomem *)vaddr);
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+
+void iounmap(const void __iomem *addr)
+{
+ if (addr >= (void __force __iomem *)ARC_UNCACHED_ADDR_SPACE)
+ return;
+
+ vfree((void *)(PAGE_MASK & (unsigned long __force)addr));
+}
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
new file mode 100644
index 00000000000..9b9ce23f4ec
--- /dev/null
+++ b/arch/arc/mm/tlb.c
@@ -0,0 +1,645 @@
+/*
+ * TLB Management (flush/create/diagnostics) for ARC700
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Aug 2011
+ * -Reintroduce duplicate PD fixup - some customer chips still have the issue
+ *
+ * vineetg: May 2011
+ * -No need to flush_cache_page( ) for each call to update_mmu_cache()
+ * some of the LMBench tests improved amazingly
+ * = page-fault thrice as fast (75 usec to 28 usec)
+ * = mmap twice as fast (9.6 msec to 4.6 msec),
+ * = fork (5.3 msec to 3.7 msec)
+ *
+ * vineetg: April 2011 :
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * vineetg: April 2011 : Preparing for MMU V3
+ * -MMU v2/v3 BCRs decoded differently
+ * -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
+ * -tlb_entry_erase( ) can be void
+ * -local_flush_tlb_range( ):
+ * = need not "ceil" @end
+ * = walks MMU only if range spans < 32 entries, as opposed to 256
+ *
+ * Vineetg: Sept 10th 2008
+ * -Changes related to MMU v2 (Rel 4.8)
+ *
+ * Vineetg: Aug 29th 2008
+ * -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
+ * flush Micro-TLBS. If TLB Index Reg is invalid prior to TLBIVUTLB cmd,
+ * it fails. Thus need to load it with ANY valid value before invoking
+ * TLBIVUTLB cmd
+ *
+ * Vineetg: Aug 21th 2008:
+ * -Reduced the duration of IRQ lockouts in TLB Flush routines
+ * -Multiple copies of TLB erase code separated into a "single" function
+ * -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
+ * in interrupt-safe region.
+ *
+ * Vineetg: April 23rd Bug #93131
+ * Problem: tlb_flush_kernel_range() doesn't do anything if the range to
+ * flush is more than the size of TLB itself.
+ *
+ * Rahul Trivedi : Codito Technologies 2004
+ */
+
+#include <linux/module.h>
+#include <asm/arcregs.h>
+#include <asm/setup.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* Need for ARC MMU v2
+ *
+ * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
+ * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
+ * map into same set, there would be contention for the 2 ways causing severe
+ * Thrashing.
+ *
+ * Although J-TLB is 2 way set assoc, ARC700 caches J-TLB into uTLBS which has
+ * much higher associativity. u-D-TLB is 8 ways, u-I-TLB is 4 ways.
+ * Given this, the thrashing problem should never happen because once the 3
+ * J-TLB entries are created (even though 3rd will knock out one of the prev
+ * two), the u-D-TLB and u-I-TLB will have what is required to accomplish memcpy
+ *
+ * Yet we still see the Thrashing because a J-TLB Write causes a flush of u-TLBs.
+ * This is a simple design for keeping them in sync. So what do we do?
+ * The solution which James came up with was pretty neat. It utilised the
+ * assoc of uTLBs by not always invalidating them, but only when necessary.
+ *
+ * - Existing TLB commands work as before
+ * - New command (TLBWriteNI) for TLB write without clearing uTLBs
+ * - New command (TLBIVUTLB) to invalidate uTLBs.
+ *
+ * The uTLBs need only be invalidated when pages are being removed from the
+ * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
+ * as a result of a miss, the removed entry is still allowed to exist in the
+ * uTLBs as it is still valid and present in the OS page table. This allows the
+ * full associativity of the uTLBs to hide the limited associativity of the main
+ * TLB.
+ *
+ * During a miss handler, the new "TLBWriteNI" command is used to load
+ * entries without clearing the uTLBs.
+ *
+ * When the OS page table is updated, TLB entries that may be associated with a
+ * removed page are removed (flushed) from the TLB using TLBWrite. In this
+ * circumstance, the uTLBs must also be cleared. This is done by using the
+ * existing TLBWrite command. An explicit IVUTLB is also required for those
+ * corner cases when TLBWrite was not executed at all because the corresp
+ * J-TLB entry got evicted/replaced.
+ */
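A condensed sketch of the two paths just described (the aux register and command names are the ones used later in this file; 'vaddr_n_asid' and 'paddr_n_flags' are placeholder values):

    /* TLB refill on a miss: uTLB contents are still valid, keep them */
    write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
    write_aux_reg(ARC_REG_TLBPD1, paddr_n_flags);
    write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);

    /* Page unmapped from the OS page table: uTLBs must be cleared too */
    write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);   /* write + uTLB clear */
    write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);  /* for the corner cases above */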
+
+/* A copy of the ASID from the PID reg is kept in asid_cache */
+int asid_cache = FIRST_ASID;
+
+/* ASID to mm struct mapping. We have one extra entry corresponding to
+ * NO_ASID to save us a compare when clearing the mm entry for old asid
+ * see get_new_mmu_context (asm-arc/mmu_context.h)
+ */
+struct mm_struct *asid_mm_map[NUM_ASID + 1];
+
+/*
+ * Utility Routine to erase a J-TLB entry
+ * The procedure is to look it up in the MMU. If found, ERASE it by
+ * issuing a TlbWrite CMD with PD0 = PD1 = 0
+ */
+
+static void __tlb_entry_erase(void)
+{
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+ unsigned int idx;
+
+ /* Locate the TLB entry for this vaddr + ASID */
+ write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* No error means entry found, zero it out */
+ if (likely(!(idx & TLB_LKUP_ERR))) {
+ __tlb_entry_erase();
+ } else { /* Some sort of Error */
+
+ /* Duplicate entry error */
+ if (idx & 0x1) {
+ /* TODO we need to handle this case too */
+ pr_emerg("unhandled Duplicate flush for %x\n",
+ vaddr_n_asid);
+ }
+ /* else entry not found so nothing to do */
+ }
+}
+
+/****************************************************************************
+ * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
+ *
+ * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
+ *
+ * utlb_invalidate ( )
+ * -For v2 MMU calls Flush uTLB Cmd
+ * -For v1 MMU does nothing (except for Metal Fix v1 MMU)
+ * This is because in v1 TLBWrite itself invalidates uTLBs
+ ***************************************************************************/
+
+static void utlb_invalidate(void)
+{
+#if (CONFIG_ARC_MMU_VER >= 2)
+
+#if (CONFIG_ARC_MMU_VER < 3)
+ /* MMU v2 introduced the uTLB Flush command.
+ * There was however an obscure hardware bug, where uTLB flush would
+ * fail when a prior probe for J-TLB (both totally unrelated) would
+ * return lkup err - because the entry didn't exist in MMU.
+ * The Workaround was to set Index reg with some valid value, prior to
+ * flush. This was fixed in MMU v3 hence not needed any more
+ */
+ unsigned int idx;
+
+ /* make sure INDEX Reg is valid */
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /* If not write some dummy val */
+ if (unlikely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBINDEX, 0xa);
+#endif
+
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
+#endif
+
+}
+
+/*
+ * Un-conditionally (without lookup) erase the entire MMU contents
+ */
+
+noinline void local_flush_tlb_all(void)
+{
+ unsigned long flags;
+ unsigned int entry;
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ local_irq_save(flags);
+
+ /* Load PD0 and PD1 with template for a Blank Entry */
+ write_aux_reg(ARC_REG_TLBPD1, 0);
+ write_aux_reg(ARC_REG_TLBPD0, 0);
+
+ for (entry = 0; entry < mmu->num_tlb; entry++) {
+ /* write this entry to the TLB */
+ write_aux_reg(ARC_REG_TLBINDEX, entry);
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Flush the entire MM for userland. The fastest way is to move to Next ASID
+ */
+noinline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ /*
+ * Small optimisation courtesy IA64
+ * flush_mm called during fork,exit,munmap etc, multiple times as well.
+ * Only for fork( ) do we need to move parent to a new MMU ctxt,
+ * all other cases are NOPs, hence this check.
+ */
+ if (atomic_read(&mm->mm_users) == 0)
+ return;
+
+ /*
+ * Workaround for Android weirdism:
+ * A binder VMA could end up in a task such that vma->mm != tsk->mm
+ * old code would cause h/w - s/w ASID to get out of sync
+ */
+ if (current->mm != mm)
+ destroy_context(mm);
+ else
+ get_new_mmu_context(mm);
+}
+
+/*
+ * Flush a Range of TLB entries for userland.
+ * @start is inclusive, while @end is exclusive
+ * Difference between this and Kernel Range Flush is
+ * -Here the fastest way (if range is too large) is to move to next ASID
+ * without doing any explicit Shootdown
+ * -In case of kernel Flush, entry has to be shot down explicitly
+ */
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+ unsigned int asid;
+
+ /* If range @start to @end is more than 32 TLB entries deep,
+ * it's better to move to a new ASID rather than searching for
+ * individual entries and then shooting them down
+ *
+ * The calc above is rough, doesn't account for unaligned parts,
+ * since this is heuristics based anyways
+ */
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_mm(vma->vm_mm);
+ return;
+ }
+
+ /*
+ * @start moved to page start: this alone suffices for checking
+ * loop end condition below, w/o need for aligning @end to end
+ * e.g. 2000 to 4001 will anyhow loop twice
+ */
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ asid = vma->vm_mm->context.asid;
+
+ if (asid != NO_ASID) {
+ while (start < end) {
+ tlb_entry_erase(start | (asid & 0xff));
+ start += PAGE_SIZE;
+ }
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
+ * @start, @end interpreted as kvaddr
+ * Interestingly, shared TLB entries can also be flushed using just
+ * @start,@end alone (interpreted as user vaddr), although technically SASID
+ * is also needed. However our smart TLBProbe lookup takes care of that.
+ */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+
+ /* exactly same as above, except for TLB entry not taking ASID */
+
+ if (unlikely((end - start) >= PAGE_SIZE * 32)) {
+ local_flush_tlb_all();
+ return;
+ }
+
+ start &= PAGE_MASK;
+
+ local_irq_save(flags);
+ while (start < end) {
+ tlb_entry_erase(start);
+ start += PAGE_SIZE;
+ }
+
+ utlb_invalidate();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Delete TLB entry in MMU for a given page (??? address)
+ * NOTE One TLB entry contains translation for single PAGE
+ */
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ unsigned long flags;
+
+ /* Note that it is critical that interrupts are DISABLED between
+ * checking the ASID and using it to flush the TLB entry
+ */
+ local_irq_save(flags);
+
+ if (vma->vm_mm->context.asid != NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) |
+ (vma->vm_mm->context.asid & 0xff));
+ utlb_invalidate();
+ }
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Routine to create a TLB entry
+ */
+void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ unsigned long flags;
+ unsigned int idx, asid_or_sasid;
+ unsigned long pd0_flags;
+
+ /*
+ * create_tlb() assumes that current->mm == vma->mm, since
+ * -the ASID for the TLB entry is fetched from MMU ASID reg (valid for curr)
+ * -completes the lazy write to SASID reg (again valid for curr tsk)
+ *
+ * Removing the assumption involves
+ * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
+ * -Fix the TLB paranoid debug code to not trigger false negatives.
+ * -More importantly it makes this handler inconsistent with fast-path
+ * TLB Refill handler which always deals with "current"
+ *
+ * Lets see the use cases when current->mm != vma->mm and we land here
+ * 1. execve->copy_strings()->__get_user_pages->handle_mm_fault
+ * Here VM wants to pre-install a TLB entry for user stack while
+ * current->mm still points to pre-execve mm (hence the condition).
+ * However the stack vaddr is soon relocated (randomization) and
+ * move_page_tables() tries to undo that TLB entry.
+ * Thus not creating TLB entry is not any worse.
+ *
+ * 2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
+ * breakpoint in debugged task. Not creating a TLB now is not
+ * performance critical.
+ *
+ * Both the cases above are not good enough for code churn.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ local_irq_save(flags);
+
+ tlb_paranoid_check(vma->vm_mm->context.asid, address);
+
+ address &= PAGE_MASK;
+
+ /* update this PTE credentials */
+ pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
+
+ /* Create HW TLB entry Flags (in PD0) from PTE Flags */
+#if (CONFIG_ARC_MMU_VER <= 2)
+ pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
+#else
+ pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
+#endif
+
+ /* ASID for this task */
+ asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+
+ /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
+ write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+
+ /* First verify if entry for this vaddr+ASID already exists */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
+ idx = read_aux_reg(ARC_REG_TLBINDEX);
+
+ /*
+ * If Not already present get a free slot from MMU.
+ * Otherwise, Probe would have located the entry and set INDEX Reg
+ * with existing location. This will cause Write CMD to over-write
+ * existing entry with new PD0 and PD1
+ */
+ if (likely(idx & TLB_LKUP_ERR))
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+ /*
+ * Commit the Entry to MMU
+ * It doesn't sound safe to use the TLBWriteNI cmd here
+ * which doesn't flush uTLBs. I'd rather be safe than sorry.
+ */
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+
+ local_irq_restore(flags);
+}
+
+/* arch hook called by core VM at the end of handle_mm_fault( ),
+ * when a new PTE is entered in Page Tables or an existing one
+ * is modified. We aggressively pre-install a TLB entry
+ */
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress,
+ pte_t *ptep)
+{
+
+ create_tlb(vma, vaddress, ptep);
+}
+
+/* Read the MMU Build Configuration Registers, decode them and save into
+ * the cpuinfo structure for later use.
+ * No Validation is done here, simply read/convert the BCRs
+ */
+void __init read_decode_mmu_bcr(void)
+{
+ unsigned int tmp;
+ struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */
+ struct bcr_mmu_3 *mmu3; /* encoded MMU3 attr */
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ tmp = read_aux_reg(ARC_REG_MMU_BCR);
+ mmu->ver = (tmp >> 24);
+
+ if (mmu->ver <= 2) {
+ mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+ mmu->pg_sz = PAGE_SIZE;
+ mmu->sets = 1 << mmu2->sets;
+ mmu->ways = 1 << mmu2->ways;
+ mmu->u_dtlb = mmu2->u_dtlb;
+ mmu->u_itlb = mmu2->u_itlb;
+ } else {
+ mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu->pg_sz = 512 << mmu3->pg_sz;
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ mmu->u_dtlb = mmu3->u_dtlb;
+ mmu->u_itlb = mmu3->u_itlb;
+ }
+
+ mmu->num_tlb = mmu->sets * mmu->ways;
+}
+
+char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
+{
+ int n = 0;
+ struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
+ p_mmu->ver, TO_KB(p_mmu->pg_sz));
+
+ n += scnprintf(buf + n, len - n,
+ "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
+ p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
+ p_mmu->u_dtlb, p_mmu->u_itlb,
+ __CONFIG_ARC_MMU_SASID_VAL ? "SASID" : "");
+
+ return buf;
+}
+
+void __init arc_mmu_init(void)
+{
+ char str[256];
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));
+
+ /* For efficiency's sake, the kernel is compile-time built for one MMU ver
+ * This must match the hardware it is running on.
+ * Linux built for MMU V2, if run on MMU V1 will break down because V1
+ * hardware doesn't understand cmds such as WriteNI, or IVUTLB
+ * On the other hand, Linux built for V1 if run on MMU V2 will do
+ * un-needed workarounds to prevent memcpy thrashing.
+ * Similarly MMU V3 has new features which won't work on older MMU
+ */
+ if (mmu->ver != CONFIG_ARC_MMU_VER) {
+ panic("MMU ver %d doesn't match kernel built for %d...\n",
+ mmu->ver, CONFIG_ARC_MMU_VER);
+ }
+
+ if (mmu->pg_sz != PAGE_SIZE)
+ panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));
+
+ /*
+ * ASID mgmt data structures are compile time init
+ * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
+ */
+
+ local_flush_tlb_all();
+
+ /* Enable the MMU */
+ write_aux_reg(ARC_REG_PID, MMU_ENABLE);
+
+ /* In smp we use this reg for interrupt 1 scratch */
+#ifndef CONFIG_SMP
+ /* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
+ write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
+#endif
+}
+
+/*
+ * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
+ * The mapping is Column-first.
+ * --------------------- -----------
+ * |way0|way1|way2|way3| |way0|way1|
+ * --------------------- -----------
+ * [set0] | 0 | 1 | 2 | 3 | | 0 | 1 |
+ * [set1] | 4 | 5 | 6 | 7 | | 2 | 3 |
+ * ~ ~ ~ ~
+ * [set127] | 508| 509| 510| 511| | 254| 255|
+ * --------------------- -----------
+ * For normal operations we don't(must not) care how above works since
+ * MMU cmd getIndex(vaddr) abstracts that out.
+ * However for walking WAYS of a SET, we need to know this
+ */
+#define SET_WAY_TO_IDX(mmu, set, way) ((set) * mmu->ways + (way))
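A quick sanity check of the mapping (values picked for illustration, assuming a 128-set x 4-way configuration so mmu->ways == 4):

    SET_WAY_TO_IDX(mmu, 0, 3);    /* ->   3 */
    SET_WAY_TO_IDX(mmu, 1, 0);    /* ->   4 */
    SET_WAY_TO_IDX(mmu, 127, 3);  /* -> 511, the last linear index */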
+
+/* Handling of Duplicate PD (TLB entry) in MMU.
+ * -Could be due to buggy customer tapeouts or obscure kernel bugs
+ * -MMU complains not at the time of duplicate PD installation, but at the
+ * time of lookup matching multiple ways.
+ * -Ideally these should never happen - but if they do - workaround by deleting
+ * the duplicate one.
+ * -Knob to be verbose about it. (TODO: hook them up to debugfs)
+ */
+volatile int dup_pd_verbose = 1;/* Be silent about it or complain (default) */
+
+void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+ struct pt_regs *regs)
+{
+ int set, way, n;
+ unsigned int pd0[4], pd1[4]; /* assume max 4 ways */
+ unsigned long flags, is_valid;
+ struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+
+ local_irq_save(flags);
+
+ /* re-enable the MMU */
+ write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+
+ /* loop thru all sets of TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
+ /* read out all the ways of current set */
+ for (way = 0, is_valid = 0; way < mmu->ways; way++) {
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
+ pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
+ pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
+ is_valid |= pd0[way] & _PAGE_PRESENT;
+ }
+
+ /* If all the WAYS in SET are empty, skip to next SET */
+ if (!is_valid)
+ continue;
+
+ /* Scan the set for duplicate ways: needs a nested loop */
+ for (way = 0; way < mmu->ways; way++) {
+ if (!pd0[way])
+ continue;
+
+ for (n = way + 1; n < mmu->ways; n++) {
+ if ((pd0[way] & PAGE_MASK) ==
+ (pd0[n] & PAGE_MASK)) {
+
+ if (dup_pd_verbose) {
+ pr_info("Duplicate PD's @"
+ "[%d:%d]/[%d:%d]\n",
+ set, way, set, n);
+ pr_info("TLBPD0[%u]: %08x\n",
+ way, pd0[way]);
+ }
+
+ /*
+ * clear entry @way and not @n. This is
+ * critical to our optimised loop
+ */
+ pd0[way] = pd1[way] = 0;
+ write_aux_reg(ARC_REG_TLBINDEX,
+ SET_WAY_TO_IDX(mmu, set, way));
+ __tlb_entry_erase();
+ }
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+/***********************************************************************
+ * Diagnostic Routines
+ * -Called from Low Level TLB Handlers if things don't look good
+ **********************************************************************/
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+/*
+ * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
+ * don't match
+ */
+void print_asid_mismatch(int is_fast_path)
+{
+ int pid_sw, pid_hw;
+ pid_sw = current->active_mm->context.asid;
+ pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
+ is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+
+ __asm__ __volatile__("flag 1");
+}
+
+void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+{
+ unsigned int pid_hw;
+
+ pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+
+ if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
+ print_asid_mismatch(0);
+}
+#endif
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
new file mode 100644
index 00000000000..9df765dc7c3
--- /dev/null
+++ b/arch/arc/mm/tlbex.S
@@ -0,0 +1,408 @@
+/*
+ * TLB Exception Handling for ARC
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: April 2011 :
+ * -MMU v1: moved out legacy code into a separate file
+ * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
+ * helps avoid a shift when preparing PD0 from PTE
+ *
+ * Vineetg: July 2009
+ * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
+ * entry, so that it doesn't knock out its I-TLB entry
+ * -Some more fine tuning:
+ * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc
+ *
+ * Vineetg: July 2009
+ * -Practically rewrote the I/D TLB Miss handlers
+ * Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
+ * Hence Leaner by 1.5 K
+ * Used Conditional arithmetic to replace excessive branching
+ * Also used short instructions wherever possible
+ *
+ * Vineetg: Aug 13th 2008
+ * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing
+ * more information in case of a Fatality
+ *
+ * Vineetg: March 25th Bug #92690
+ * -Added Debug Code to check if sw-ASID == hw-ASID
+
+ * Rahul Trivedi, Amit Bhor: Codito Technologies 2004
+ */
+
+ .cpu A7
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/tlb.h>
+#include <asm/pgtable.h>
+#include <asm/arcregs.h>
+#include <asm/cache.h>
+#include <asm/processor.h>
+#if (CONFIG_ARC_MMU_VER == 1)
+#include <asm/tlb-mmu1.h>
+#endif
+
+;--------------------------------------------------------------------------
+; scratch memory to save the registers (r0-r3) used to code TLB refill Handler
+; For details refer to comments before TLBMISS_FREEUP_REGS below
+;--------------------------------------------------------------------------
+
+ARCFP_DATA ex_saved_reg1
+ .align 1 << L1_CACHE_SHIFT ; IMP: Must be Cache Line aligned
+ .type ex_saved_reg1, @object
+#ifdef CONFIG_SMP
+ .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+ex_saved_reg1:
+ .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT)
+#else
+ .size ex_saved_reg1, 16
+ex_saved_reg1:
+ .zero 16
+#endif
+
+;============================================================================
+; Troubleshooting Stuff
+;============================================================================
+
+; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid
+; When Creating TLB Entries, instead of doing 3 dependent loads from memory,
+; we use the MMU PID Reg to get current ASID.
+; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
+; So we try to detect this in the TLB Miss handler
+
+
+.macro DBG_ASID_MISMATCH
+
+#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
+
+ ; make sure h/w ASID is same as s/w ASID
+
+ GET_CURR_TASK_ON_CPU r3
+ ld r0, [r3, TASK_ACT_MM]
+ ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+
+ lr r1, [ARC_REG_PID]
+ and r1, r1, 0xFF
+ breq r1, r0, 5f
+
+ ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
+ lr r0, [erstatus]
+ bbit0 r0, STATUS_U_BIT, 5f
+
+ ; We sure are in troubled waters, Flag the error, but to do so
+ ; need to switch to kernel mode stack to call error routine
+ GET_TSK_STACK_BASE r3, sp
+
+ ; Call printk to shoutout aloud
+ mov r0, 1
+ j print_asid_mismatch
+
+5: ; ASIDs match so proceed normally
+ nop
+
+#endif
+
+.endm
+
+;============================================================================
+;TLB Miss handling Code
+;============================================================================
+
+;-----------------------------------------------------------------------------
+; This macro does the page-table lookup for the faulting address.
+; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address
+.macro LOAD_FAULT_PTE
+
+ lr r2, [efa]
+
+#ifndef CONFIG_SMP
+ lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd
+#else
+ GET_CURR_TASK_ON_CPU r1
+ ld r1, [r1, TASK_ACT_MM]
+ ld r1, [r1, MM_PGD]
+#endif
+
+ lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD
+ ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr
+ and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags
+ ; contains Ptr to Page Table
+ bz.d do_slow_path_pf ; if no Page Table, do page fault
+
+ ; Get the PTE entry: The idea is
+ ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr
+ ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index
+ ; (3) z = pgtbl[y]
+ ; To avoid a multiply (by 4, the PTE size) at the end, we do the -2, <<2 below
+
+ lsr r0, r2, (PAGE_SHIFT - 2)
+ and r0, r0, ( (PTRS_PER_PTE - 1) << 2)
+ ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ and.f 0, r0, _PAGE_PRESENT
+ bz 1f
+ ld r2, [num_pte_not_present]
+ add r2, r2, 1
+ st r2, [num_pte_not_present]
+1:
+#endif
+
+.endm
+
+;-----------------------------------------------------------------
+; Convert Linux PTE entry into TLB entry
+; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu
+; IN: r0 = PTE, r1 = ptr to PTE
+
+.macro CONV_PTE_TO_TLB
+ and r3, r0, PTE_BITS_IN_PD1 ; Extract permission flags+PFN from PTE
+ sr r3, [ARC_REG_TLBPD1] ; these go in PD1
+
+ and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb
+#if (CONFIG_ARC_MMU_VER <= 2) /* Need not be done with v3 onwards */
+ lsr r2, r2 ; shift PTE flags to match layout in PD0
+#endif
+
+ lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid
+
+ or r3, r3, r2 ; S | vaddr | {sasid|asid}
+ sr r3,[ARC_REG_TLBPD0] ; rewrite PD0
+.endm
+
+;-----------------------------------------------------------------
+; Commit the TLB entry into MMU
+
+.macro COMMIT_ENTRY_TO_MMU
+
+ /* Get free TLB slot: Set = computed from vaddr, way = random */
+ sr TLBGetIndex, [ARC_REG_TLBCOMMAND]
+
+ /* Commit the Write */
+#if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */
+ sr TLBWriteNI, [ARC_REG_TLBCOMMAND]
+#else
+ sr TLBWrite, [ARC_REG_TLBCOMMAND]
+#endif
+.endm
+
+;-----------------------------------------------------------------
+; ARC700 Exception Handling doesn't auto-switch stack and it only provides
+; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0"
+;
+; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a
+; "global" is used to free-up FIRST core reg to be able to code the rest of
+; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe).
+; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3
+; need to be saved as well by extending the "global" to be 4 words. Hence
+; ".size ex_saved_reg1, 16"
+; [All of this dance is to avoid stack switching for each TLB Miss, since we
+; only need to save only a handful of regs, as opposed to complete reg file]
+;
+; For ARC700 SMP, the "global" obviously can't be used to free up the FIRST
+; core reg as it will not be SMP safe.
+; Thus scratch AUX reg is used (and no longer used to cache task PGD).
+; To save the rest of 3 regs - per cpu, the global is made "per-cpu".
+; Epilogue thus has to locate the "per-cpu" storage for regs.
+; To avoid cache line bouncing the per-cpu global is aligned/sized per
+; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence
+; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)"
+
+; As simple as that....
+
+.macro TLBMISS_FREEUP_REGS
+#ifdef CONFIG_SMP
+ sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with
+ GET_CPU_ID r0 ; get to per cpu scratch mem,
+ lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu
+ add r0, @ex_saved_reg1, r0
+#else
+ st r0, [@ex_saved_reg1]
+ mov_s r0, @ex_saved_reg1
+#endif
+ st_s r1, [r0, 4]
+ st_s r2, [r0, 8]
+ st_s r3, [r0, 12]
+
+ ; VERIFY if the ASID in MMU-PID Reg is same as
+ ; one in Linux data structures
+
+ DBG_ASID_MISMATCH
+.endm
+
+;-----------------------------------------------------------------
+.macro TLBMISS_RESTORE_REGS
+#ifdef CONFIG_SMP
+ GET_CPU_ID r0 ; get to per cpu scratch mem
+ lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide
+ add r0, @ex_saved_reg1, r0
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ lr r0, [ARC_REG_SCRATCH_DATA0]
+#else
+ mov_s r0, @ex_saved_reg1
+ ld_s r3, [r0,12]
+ ld_s r2, [r0, 8]
+ ld_s r1, [r0, 4]
+ ld_s r0, [r0]
+#endif
+.endm
+
+ARCFP_CODE ;Fast Path Code, candidate for ICCM
+
+;-----------------------------------------------------------------------------
+; I-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissI
+
+ TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ ld r0, [@numitlb]
+ add r0, r0, 1
+ st r0, [@numitlb]
+#endif
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Check if PTE permissions approp for executing code
+ cmp_s r2, VMALLOC_START
+ mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE)
+ mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
+
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test )
+ bnz do_slow_path_pf
+
+ ; Let Linux VM know that the page was accessed
+ or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; set Accessed Bit
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+ rtie
+
+ARC_EXIT EV_TLBMissI
+
+;-----------------------------------------------------------------------------
+; D-TLB Miss Exception Handler
+;-----------------------------------------------------------------------------
+
+ARC_ENTRY EV_TLBMissD
+
+ TLBMISS_FREEUP_REGS
+
+#ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT
+ ld r0, [@numdtlb]
+ add r0, r0, 1
+ st r0, [@numdtlb]
+#endif
+
+ ;----------------------------------------------------------------
+ ; Get the PTE corresponding to V-addr accessed
+ ; If PTE exists, it will set up r0 = PTE, r1 = ptr to PTE
+ LOAD_FAULT_PTE
+
+ ;----------------------------------------------------------------
+ ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
+
+ mov_s r2, 0
+ lr r3, [ecr]
+ btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
+ or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
+ or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE
+ ; Above laddering takes care of XCHG access
+ ; which is both Read and Write
+
+ ; If kernel mode access, make _PAGE_xx flags as _PAGE_K_xx
+ ; For copy_(to|from)_user, despite exception taken in kernel mode,
+ ; this code is not hit, because EFA would still be the user mode
+ ; address (EFA < 0x6000_0000).
+ ; This code is for legit kernel mode faults, vmalloc specifically
+ ; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
+
+ lr r3, [efa]
+ cmp r3, VMALLOC_START - 1 ; If kernel mode access
+ asl.hi r2, r2, 3 ; make _PAGE_xx flags as _PAGE_K_xx
+ or r2, r2, _PAGE_PRESENT ; Common flag for K/U mode
+
+ ; By now, r2 setup with all the Flags we need to check in PTE
+ and r3, r0, r2 ; Mask out NON Flag bits from PTE
+ brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test)
+
+ ;----------------------------------------------------------------
+ ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty
+ lr r3, [ecr]
+ or r0, r0, (_PAGE_PRESENT | _PAGE_ACCESSED) ; Accessed bit always
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ?
+ or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well
+ st_s r0, [r1] ; Write back PTE
+
+ CONV_PTE_TO_TLB
+
+#if (CONFIG_ARC_MMU_VER == 1)
+ ; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of
+ ; memcpy where 3 parties contend for 2 ways, resulting in a livelock.
+ ; But only for old MMU or one with Metal Fix
+ TLB_WRITE_HEURISTICS
+#endif
+
+ COMMIT_ENTRY_TO_MMU
+ TLBMISS_RESTORE_REGS
+ rtie
+
+;-------- Common routine to call Linux Page Fault Handler -----------
+do_slow_path_pf:
+
+ ; Restore the 4-scratch regs saved by fast path miss handler
+ TLBMISS_RESTORE_REGS
+
+ ; Slow path TLB Miss handled as a regular ARC Exception
+ ; (stack switching / save the complete reg-file).
+ ; That requires freeing up r9
+ EXCPN_PROLOG_FREEUP_REG r9
+
+ lr r9, [erstatus]
+
+ SWITCH_TO_KERNEL_STK
+ SAVE_ALL_SYS
+
+ ; ------- setup args for Linux Page fault Handler ---------
+ mov_s r0, sp
+ lr r2, [efa]
+ lr r3, [ecr]
+
+ ; Both st and ex imply WRITE access of some sort, hence do_page_fault( )
+ ; invoked with write=1 for DTLB-st/ex Miss and write=0 for ITLB miss or
+ ; DTLB-ld Miss
+ ; DTLB Miss Cause code is ld = 0x01 , st = 0x02, ex = 0x03
+ ; Following code uses that fact that st/ex have one bit in common
+
+ btst_s r3, ECR_C_BIT_DTLB_ST_MISS
+ mov.z r1, 0
+ mov.nz r1, 1
+
+ ; We don't want exceptions to be disabled while the fault is handled.
+ ; Now that we have saved the context we return from exception hence
+ ; exceptions get re-enable
+
+ FAKE_RET_FROM_EXCPN r9
+
+ bl do_page_fault
+ b ret_from_exception
+
+ARC_EXIT EV_TLBMissD
+
+ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr
diff --git a/arch/arc/oprofile/Makefile b/arch/arc/oprofile/Makefile
new file mode 100644
index 00000000000..ce417a6e70b
--- /dev/null
+++ b/arch/arc/oprofile/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_OPROFILE) += oprofile.o
+
+DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
+ oprof.o cpu_buffer.o buffer_sync.o \
+ event_buffer.o oprofile_files.o \
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
+oprofile-y := $(DRIVER_OBJS) common.o
diff --git a/arch/arc/oprofile/common.c b/arch/arc/oprofile/common.c
new file mode 100644
index 00000000000..c80fcad4a5a
--- /dev/null
+++ b/arch/arc/oprofile/common.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on orig code from @author John Levon <levon@movementarian.org>
+ */
+
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+int __init oprofile_arch_init(struct oprofile_operations *ops)
+{
+ /*
+ * A failure here forces the oprofile core to switch to Timer based PC
+ * sampling, which will happen if say perf is not enabled/available
+ */
+ return oprofile_perf_init(ops);
+}
+
+void oprofile_arch_exit(void)
+{
+ oprofile_perf_exit();
+}
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
new file mode 100644
index 00000000000..b41e786cdbc
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -0,0 +1,84 @@
+#
+# Copyright (C) 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+menuconfig ARC_PLAT_FPGA_LEGACY
+ bool "\"Legacy\" ARC FPGA dev Boards"
+ select ISS_SMP_EXTN if SMP
+ help
+ Support for ARC development boards, provided by Synopsys.
+ These are based on FPGA or ISS. e.g.
+ - ARCAngel4
+ - ML509
+ - MetaWare ISS
+
+if ARC_PLAT_FPGA_LEGACY
+
+config ARC_BOARD_ANGEL4
+ bool "ARC Angel4"
+ default y
+ help
+ ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based)
+
+config ARC_BOARD_ML509
+ bool "ML509"
+ help
+ ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based)
+
+config ISS_SMP_EXTN
+ bool "ARC SMP Extensions (ISS Models only)"
+ default n
+ depends on SMP
+ select ARC_HAS_COH_RTSC
+ help
+ SMP Extensions to ARC700, in a "simulation only" Model, supported in
+ ARC ISS (Instruction Set Simulator).
+ The SMP extensions include:
+ -IDU (Interrupt Distribution Unit)
+ -XTL (To enable CPU start/stop/set-PC for another CPU)
+ It doesn't provide coherent Caches and/or Atomic Ops (LLOCK/SCOND)
+
+config ARC_SERIAL_BAUD
+ int "UART Baud rate"
+ default "115200"
+ depends on SERIAL_ARC || SERIAL_ARC_CONSOLE
+ help
+ Baud rate for the ARC UART
+
+menuconfig ARC_HAS_BVCI_LAT_UNIT
+ bool "BVCI Bus Latency Unit"
+ depends on ARC_BOARD_ML509 || ARC_BOARD_ANGEL4
+ help
+ IP to add artificial latency to BVCI Bus Based FPGA builds.
+ The default latency (even worst case) for FPGA is non-realistic
+ (~10 SDRAM, ~5 SSRAM).
+
+config BVCI_LAT_UNITS
+ hex "Latency Unit(s) Bitmap"
+ default "0x0"
+ depends on ARC_HAS_BVCI_LAT_UNIT
+ help
+ There are multiple Latency Units corresponding to the many
+ interfaces of the system bus arbiter (both CPU side as well as
+ the peripheral side).
+ To add latency to ALL memory transactions, choose Unit 0; otherwise,
+ for finer-grained, per-interface latency, specify a bitmap (1 bit
+ per unit) of all units. e.g. 1,2,12 will be 0x1003
+
+ Unit 0 - System Arb and Mem Controller
+ Unit 1 - I$ and System Bus
+ Unit 2 - D$ and System Bus
+ ..
+ Unit 12 - IDE Disk controller and System Bus
+
+config BVCI_LAT_CYCLES
+ int "Latency Value in cycles"
+ range 0 63
+ default "30"
+ depends on ARC_HAS_BVCI_LAT_UNIT
+
+endif
diff --git a/arch/arc/plat-arcfpga/Makefile b/arch/arc/plat-arcfpga/Makefile
new file mode 100644
index 00000000000..a44e22ebc1b
--- /dev/null
+++ b/arch/arc/plat-arcfpga/Makefile
@@ -0,0 +1,12 @@
+#
+# Copyright (C) 2011-2012 Synopsys, Inc. (www.synopsys.com)
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+
+KBUILD_CFLAGS += -Iarch/arc/plat-arcfpga/include
+
+obj-y := platform.o irq.o
+obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
new file mode 100644
index 00000000000..41e335670f6
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/irq.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ * -For AA4 board, IRQ assignments to peripherals
+ */
+
+#ifndef __PLAT_IRQ_H
+#define __PLAT_IRQ_H
+
+#define UART0_IRQ 5
+#define UART1_IRQ 10
+#define UART2_IRQ 11
+
+#define VMAC_IRQ 6
+
+#define IDE_IRQ 13
+#define PCI_IRQ 14
+#define PS2_IRQ 15
+
+#ifdef CONFIG_SMP
+#define IDU_INTERRUPT_0 16
+#endif
+
+extern void __init plat_fpga_init_IRQ(void);
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h
new file mode 100644
index 00000000000..1663f338808
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/memmap.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * vineetg: Feb 2009
+ * -For AA4 board, System Memory Map for Peripherals etc
+ */
+
+#ifndef __PLAT_MEMMAP_H
+#define __PLAT_MEMMAP_H
+
+#define UART0_BASE 0xC0FC1000
+#define UART1_BASE 0xC0FC1100
+
+#define VMAC_REG_BASEADDR 0xC0FC2000
+
+#define IDE_CONTROLLER_BASE 0xC0FC9000
+
+#define AHB_PCI_HOST_BRG_BASE 0xC0FD0000
+
+#define PGU_BASEADDR 0xC0FC8000
+#define VLCK_ADDR 0xC0FCF028
+
+#define BVCI_LAT_UNIT_BASE 0xC0FED000
+
+#define PS2_BASE_ADDR 0xC0FCC000
+
+#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/smp.h b/arch/arc/plat-arcfpga/include/plat/smp.h
new file mode 100644
index 00000000000..c09eb4cfc77
--- /dev/null
+++ b/arch/arc/plat-arcfpga/include/plat/smp.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Rajeshwar Ranga: Interrupt Distribution Unit API's
+ */
+
+#ifndef __PLAT_ARCFPGA_SMP_H
+#define __PLAT_ARCFPGA_SMP_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/types.h>
+#include <asm/arcregs.h>
+
+#define ARC_AUX_IDU_REG_CMD 0x2000
+#define ARC_AUX_IDU_REG_PARAM 0x2001
+
+#define ARC_AUX_XTL_REG_CMD 0x2002
+#define ARC_AUX_XTL_REG_PARAM 0x2003
+
+#define ARC_REG_MP_BCR 0x2021
+
+#define ARC_XTL_CMD_WRITE_PC 0x04
+#define ARC_XTL_CMD_CLEAR_HALT 0x02
+
+/*
+ * Build Configuration Register which identifies the sub-components
+ */
+struct bcr_mp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int mp_arch:16, pad:5, sdu:1, idu:1, scu:1, ver:8;
+#else
+ unsigned int ver:8, scu:1, idu:1, sdu:1, pad:5, mp_arch:16;
+#endif
+};
+
+/* IDU supports 256 common interrupts */
+#define NR_IDU_IRQS 256
+
+/*
+ * The Aux Regs layout is the same bit-by-bit in both BE/LE modes.
+ * However, when cast as a bitfield-encoded "C" struct, gcc treats it as
+ * memory, generating different code for BE/LE, requiring structure
+ * adjustment (see include/asm/arcregs.h)
+ *
+ * When manually "carving" the value for an Aux reg, though, no special
+ * handling of BE is needed because of the property described above
+ */
+#define IDU_SET_COMMAND(irq, cmd) \
+do { \
+ uint32_t __val; \
+ __val = (((irq & 0xFF) << 8) | (cmd & 0xFF)); \
+ write_aux_reg(ARC_AUX_IDU_REG_CMD, __val); \
+} while (0)
+
+#define IDU_SET_PARAM(par) write_aux_reg(ARC_AUX_IDU_REG_PARAM, par)
+#define IDU_GET_PARAM() read_aux_reg(ARC_AUX_IDU_REG_PARAM)
+
+/* IDU Commands */
+#define IDU_DISABLE 0x00
+#define IDU_ENABLE 0x01
+#define IDU_IRQ_CLEAR 0x02
+#define IDU_IRQ_ASSERT 0x03
+#define IDU_IRQ_WMODE 0x04
+#define IDU_IRQ_STATUS 0x05
+#define IDU_IRQ_ACK 0x06
+#define IDU_IRQ_PEND 0x07
+#define IDU_IRQ_RMODE 0x08
+#define IDU_IRQ_WBITMASK 0x09
+#define IDU_IRQ_RBITMASK 0x0A
+
+#define idu_enable() IDU_SET_COMMAND(0, IDU_ENABLE)
+#define idu_disable() IDU_SET_COMMAND(0, IDU_DISABLE)
+
+#define idu_irq_assert(irq) IDU_SET_COMMAND((irq), IDU_IRQ_ASSERT)
+#define idu_irq_clear(irq) IDU_SET_COMMAND((irq), IDU_IRQ_CLEAR)
+
+/* IDU Interrupt Mode - Destination Encoding */
+#define IDU_IRQ_MOD_DISABLE 0x00
+#define IDU_IRQ_MOD_ROUND_RECP 0x01
+#define IDU_IRQ_MOD_TCPU_FIRSTRECP 0x02
+#define IDU_IRQ_MOD_TCPU_ALLRECP 0x03
+
+/* IDU Interrupt Mode - Triggering Mode */
+#define IDU_IRQ_MODE_LEVEL_TRIG 0x00
+#define IDU_IRQ_MODE_PULSE_TRIG 0x01
+
+#define IDU_IRQ_MODE_PARAM(dest_mode, trig_mode) \
+ (((trig_mode & 0x01) << 15) | (dest_mode & 0xFF))
+
+struct idu_irq_config {
+ uint8_t irq;
+ uint8_t dest_mode;
+ uint8_t trig_mode;
+};
+
+struct idu_irq_status {
+ uint8_t irq;
+ bool enabled;
+ bool status;
+ bool ack;
+ bool pend;
+ uint8_t next_rr;
+};
+
+extern void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask);
+extern void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode);
+
+extern void iss_model_init_smp(unsigned int cpu);
+extern void iss_model_init_early_smp(void);
+
+#endif /* CONFIG_SMP */
+
+#endif
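
Editor's note: the command/parameter protocol above is "write the parameter, then write the command". A minimal usage sketch (illustrative only, not part of this patch), routing common IRQ 3 to CPU 2 as a pulse-triggered interrupt using the macros and encodings from this header:

	/* hypothetical helper, for illustration only */
	static void example_route_common_irq3_to_cpu2(void)
	{
		/* target bitmask first: bit 2 set => only CPU 2 receives IRQ 3 */
		IDU_SET_PARAM(1 << 2);
		IDU_SET_COMMAND(3, IDU_IRQ_WBITMASK);

		/* then the delivery mode: deliver to all recipients, pulse triggered */
		IDU_SET_PARAM(IDU_IRQ_MODE_PARAM(IDU_IRQ_MOD_TCPU_ALLRECP,
						 IDU_IRQ_MODE_PULSE_TRIG));
		IDU_SET_COMMAND(3, IDU_IRQ_WMODE);

		idu_enable();
	}

This mirrors what idu_irq_set_tgtcpu() and idu_irq_set_mode() in plat-arcfpga/smp.c do: one parameter/command register pair per IDU operation.
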
diff --git a/arch/arc/plat-arcfpga/irq.c b/arch/arc/plat-arcfpga/irq.c
new file mode 100644
index 00000000000..d2215fd889c
--- /dev/null
+++ b/arch/arc/plat-arcfpga/irq.c
@@ -0,0 +1,25 @@
+/*
+ * ARC FPGA Platform IRQ hookups
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <plat/irq.h>
+
+void __init plat_fpga_init_IRQ(void)
+{
+ /*
+ * SMP hack: the UART IRQ is hardwired to cpu0 (boot-cpu), but if the
+ * request_irq() comes from any other CPU, the low-level IRQ unmasking
+ * essential for getting interrupts won't be enabled on cpu0, locking
+ * up the UART state machine.
+ */
+#ifdef CONFIG_SMP
+ arch_unmask_irq(UART0_IRQ);
+#endif
+}
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
new file mode 100644
index 00000000000..4e20a1a5104
--- /dev/null
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -0,0 +1,226 @@
+/*
+ * ARC FPGA Platform support code
+ *
+ * Copyright (C) 2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/of_platform.h>
+#include <asm/setup.h>
+#include <asm/clk.h>
+#include <asm/mach_desc.h>
+#include <plat/memmap.h>
+#include <plat/smp.h>
+#include <plat/irq.h>
+
+/*-----------------------BVCI Latency Unit -----------------------------*/
+
+#ifdef CONFIG_ARC_HAS_BVCI_LAT_UNIT
+
+int lat_cycles = CONFIG_BVCI_LAT_CYCLES;
+
+/* BVCI Bus Profiler: Latency Unit */
+static void __init setup_bvci_lat_unit(void)
+{
+#define MAX_BVCI_UNITS 12
+
+ unsigned int i;
+ unsigned int *base = (unsigned int *)BVCI_LAT_UNIT_BASE;
+ const unsigned long units_req = CONFIG_BVCI_LAT_UNITS;
+ const unsigned int REG_UNIT = 21;
+ const unsigned int REG_VAL = 22;
+
+ /*
+ * There are multiple Latency Units corresponding to the many
+ * interfaces of the system bus arbiter (both CPU side as well as
+ * the peripheral side).
+ *
+ * Unit 0 - System Arb and Mem Controller - adds latency to all
+ * memory transactions
+ * Unit 1 - I$ and System Bus
+ * Unit 2 - D$ and System Bus
+ * ..
+ * Unit 12 - IDE Disk controller and System Bus
+ *
+ * The programmer's model requires writing to the lat_unit reg first
+ * and then the latency value (cycles) to lat_value reg
+ */
+
+ if (CONFIG_BVCI_LAT_UNITS == 0) {
+ writel(0, base + REG_UNIT);
+ writel(lat_cycles, base + REG_VAL);
+ pr_info("BVCI Latency for all Memory Transactions %d cycles\n",
+ lat_cycles);
+ } else {
+ for_each_set_bit(i, &units_req, MAX_BVCI_UNITS) {
+ writel(i + 1, base + REG_UNIT); /* loop is 0 based */
+ writel(lat_cycles, base + REG_VAL);
+ pr_info("BVCI Latency for Unit[%d] = %d cycles\n",
+ (i + 1), lat_cycles);
+ }
+ }
+}
+#else
+static void __init setup_bvci_lat_unit(void)
+{
+}
+#endif
+
+/*----------------------- Platform Devices -----------------------------*/
+
+static unsigned long arc_uart_info[] = {
+ 0, /* uart->is_emulated (runtime @running_on_hw) */
+ 0, /* uart->port.uartclk */
+ 0, /* uart->baud */
+ 0
+};
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+/*
+ * static platform data - but only for early serial
+ * TBD: derive this from a special DT node
+ */
+static struct resource arc_uart0_res[] = {
+ {
+ .start = UART0_BASE,
+ .end = UART0_BASE + 0xFF,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = UART0_IRQ,
+ .end = UART0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device arc_uart0_dev = {
+ .name = "arc-uart",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(arc_uart0_res),
+ .resource = arc_uart0_res,
+ .dev = {
+ .platform_data = &arc_uart_info,
+ },
+};
+
+static struct platform_device *fpga_early_devs[] __initdata = {
+ &arc_uart0_dev,
+};
+#endif
+
+static void arc_fpga_serial_init(void)
+{
+ /* Let the driver work around an ISS bug: baudh Reg can't be set to 0 */
+ arc_uart_info[0] = !running_on_hw;
+
+ arc_uart_info[1] = arc_get_core_freq();
+
+ arc_uart_info[2] = CONFIG_ARC_SERIAL_BAUD;
+
+#if defined(CONFIG_SERIAL_ARC_CONSOLE)
+ early_platform_add_devices(fpga_early_devs,
+ ARRAY_SIZE(fpga_early_devs));
+
+ /*
+ * ARC console driver registers itself as an early platform driver
+ * of class "earlyprintk".
+ * Install it here, followed by probe of devices.
+ * The installation here doesn't require earlyprintk in the command line.
+ * To require it, however, replace the lines below with
+ * parse_early_param();
+ * early_platform_driver_probe("earlyprintk", 1, 1);
+ * ^^
+ */
+ early_platform_driver_register_all("earlyprintk");
+ early_platform_driver_probe("earlyprintk", 1, 0);
+
+ /*
+ * This is to make sure that the arc uart is the preferred console
+ * despite one or more of the following:
+ * -command line lacked "console=ttyARC0" or
+ * -CONFIG_VT_CONSOLE was enabled (for no reason whatsoever)
+ * Note that this needs to be done after the early console above is
+ * registered, otherwise the early console never gets a chance to run.
+ */
+ add_preferred_console("ttyARC", 0, "115200");
+#endif
+}
+
+static void __init plat_fpga_early_init(void)
+{
+ pr_info("[plat-arcfpga]: registering early dev resources\n");
+
+ setup_bvci_lat_unit();
+
+ arc_fpga_serial_init();
+
+#ifdef CONFIG_SMP
+ iss_model_init_early_smp();
+#endif
+}
+
+static struct of_dev_auxdata plat_auxdata_lookup[] __initdata = {
+#if defined(CONFIG_SERIAL_ARC) || defined(CONFIG_SERIAL_ARC_MODULE)
+ OF_DEV_AUXDATA("snps,arc-uart", UART0_BASE, "arc-uart", arc_uart_info),
+#endif
+ {}
+};
+
+static void __init plat_fpga_populate_dev(void)
+{
+ pr_info("[plat-arcfpga]: registering device resources\n");
+
+ /*
+ * Traverses flattened DeviceTree - registering platform devices
+ * complete with their resources
+ */
+ of_platform_populate(NULL, of_default_bus_match_table,
+ plat_auxdata_lookup, NULL);
+}
+
+/*----------------------- Machine Descriptions ------------------------------
+ *
+ * A machine description is simply a set of platform/board specific callbacks.
+ * This is not directly related to DeviceTree based dynamic device creation;
+ * however, as part of the early device tree scan, we also select the right
+ * callback set by matching the DT compatible name.
+ */
+
+static const char *aa4_compat[] __initdata = {
+ "snps,arc-angel4",
+ NULL,
+};
+
+MACHINE_START(ANGEL4, "angel4")
+ .dt_compat = aa4_compat,
+ .init_early = plat_fpga_early_init,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+ .init_smp = iss_model_init_smp,
+#endif
+MACHINE_END
+
+static const char *ml509_compat[] __initdata = {
+ "snps,arc-ml509",
+ NULL,
+};
+
+MACHINE_START(ML509, "ml509")
+ .dt_compat = ml509_compat,
+ .init_early = plat_fpga_early_init,
+ .init_machine = plat_fpga_populate_dev,
+ .init_irq = plat_fpga_init_IRQ,
+#ifdef CONFIG_SMP
+ .init_smp = iss_model_init_smp,
+#endif
+MACHINE_END
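
Editor's note: supporting an additional board therefore amounts to one more callback set plus its DT compatible string; the kernel selects the matching entry during the early device-tree scan. A sketch for a hypothetical board (names invented for illustration, not part of this patch):

	static const char *myboard_compat[] __initdata = {
		"snps,arc-myboard",	/* hypothetical compatible string */
		NULL,
	};

	MACHINE_START(MYBOARD, "myboard")
		.dt_compat	= myboard_compat,
		.init_early	= plat_fpga_early_init,
		.init_machine	= plat_fpga_populate_dev,
		.init_irq	= plat_fpga_init_IRQ,
	MACHINE_END
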
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
new file mode 100644
index 00000000000..91b55349a5f
--- /dev/null
+++ b/arch/arc/plat-arcfpga/smp.c
@@ -0,0 +1,171 @@
+/*
+ * ARC700 Simulation-only Extensions for SMP
+ *
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineet Gupta - 2012 : split off arch common and plat specific SMP
+ * Rajeshwar Ranga - 2007 : Interrupt Distribution Unit APIs
+ */
+
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <plat/irq.h>
+#include <plat/smp.h>
+
+static char smp_cpuinfo_buf[128];
+
+/*
+ *-------------------------------------------------------------------
+ * Platform specific callbacks expected by arch SMP code
+ *-------------------------------------------------------------------
+ */
+
+/*
+ * Master kick-starting another CPU
+ */
+static void iss_model_smp_wakeup_cpu(int cpu, unsigned long pc)
+{
+ /* setup the start PC */
+ write_aux_reg(ARC_AUX_XTL_REG_PARAM, pc);
+
+ /* Trigger WRITE_PC cmd for this cpu */
+ write_aux_reg(ARC_AUX_XTL_REG_CMD,
+ (ARC_XTL_CMD_WRITE_PC | (cpu << 8)));
+
+ /* Take the cpu out of Halt */
+ write_aux_reg(ARC_AUX_XTL_REG_CMD,
+ (ARC_XTL_CMD_CLEAR_HALT | (cpu << 8)));
+
+}
+
+/*
+ * Any SMP-specific init a CPU does when it comes up.
+ * Here we set up the CPU to enable Inter-Processor-Interrupts.
+ * Called for each CPU:
+ * -Master : init_IRQ()
+ * -Other(s) : start_kernel_secondary()
+ */
+void iss_model_init_smp(unsigned int cpu)
+{
+ /* Check if CPU is configured for more than 16 interrupts */
+ if (NR_IRQS <= 16 || get_hw_config_num_irq() <= 16)
+ panic("[arcfpga] IRQ system can't support IDU IPI\n");
+
+ idu_disable();
+
+ /****************************************************************
+ * IDU provides a set of Common IRQs, each of which can be dynamically
+ * attached to (1|many|all) CPUs.
+ * The Common IRQs [0-15] are mapped as CPU pvt [16-31]
+ *
+ * Here we use a simple 1:1 mapping:
+ * A CPU 'x' is wired to Common IRQ 'x'.
+ * So an IDU ASSERT on IRQ 'x' will trigger an Interrupt on CPU 'x', which
+ * makes up for our simple IPI plumbing.
+ *
+ * TBD: Have a dedicated multicast IRQ for sending IPIs to all CPUs
+ * w/o having to do one-at-a-time
+ ******************************************************************/
+
+ /*
+ * Claim an IRQ which would trigger IPI on this CPU.
+ * In IDU parlance it involves setting up a cpu bitmask for the IRQ.
+ * The bitmask here contains only 1 CPU (self).
+ */
+ idu_irq_set_tgtcpu(cpu, 0x1 << cpu);
+
+ /* Set the IRQ destination to use the bitmask above */
+ idu_irq_set_mode(cpu, 7, /* XXX: IDU_IRQ_MOD_TCPU_ALLRECP: ISS bug */
+ IDU_IRQ_MODE_PULSE_TRIG);
+
+ idu_enable();
+
+ /* Attach the arch-common IPI ISR to our IDU IRQ */
+ smp_ipi_irq_setup(cpu, IDU_INTERRUPT_0 + cpu);
+}
+
+static void iss_model_ipi_send(void *arg)
+{
+ struct cpumask *callmap = arg;
+ unsigned int cpu;
+
+ for_each_cpu(cpu, callmap)
+ idu_irq_assert(cpu);
+}
+
+static void iss_model_ipi_clear(int cpu, int irq)
+{
+ idu_irq_clear(IDU_INTERRUPT_0 + cpu);
+}
+
+void iss_model_init_early_smp(void)
+{
+#define IS_AVAIL1(var, str) ((var) ? str : "")
+
+ struct bcr_mp mp;
+
+ READ_BCR(ARC_REG_MP_BCR, mp);
+
+ sprintf(smp_cpuinfo_buf, "Extn [ISS-SMP]: v%d, arch(%d) %s %s %s\n",
+ mp.ver, mp.mp_arch, IS_AVAIL1(mp.scu, "SCU"),
+ IS_AVAIL1(mp.idu, "IDU"), IS_AVAIL1(mp.sdu, "SDU"));
+
+ plat_smp_ops.info = smp_cpuinfo_buf;
+
+ plat_smp_ops.cpu_kick = iss_model_smp_wakeup_cpu;
+ plat_smp_ops.ipi_send = iss_model_ipi_send;
+ plat_smp_ops.ipi_clear = iss_model_ipi_clear;
+}
+
+/*
+ *-------------------------------------------------------------------
+ * Low level Platform IPI Providers
+ *-------------------------------------------------------------------
+ */
+
+/* Set the Mode for the Common IRQ */
+void idu_irq_set_mode(uint8_t irq, uint8_t dest_mode, uint8_t trig_mode)
+{
+ uint32_t par = IDU_IRQ_MODE_PARAM(dest_mode, trig_mode);
+
+ IDU_SET_PARAM(par);
+ IDU_SET_COMMAND(irq, IDU_IRQ_WMODE);
+}
+
+/* Set the target cpu Bitmask for Common IRQ */
+void idu_irq_set_tgtcpu(uint8_t irq, uint32_t mask)
+{
+ IDU_SET_PARAM(mask);
+ IDU_SET_COMMAND(irq, IDU_IRQ_WBITMASK);
+}
+
+/* Get the Interrupt Acknowledged status for IRQ (as CPU Bitmask) */
+bool idu_irq_get_ack(uint8_t irq)
+{
+ uint32_t val;
+
+ IDU_SET_COMMAND(irq, IDU_IRQ_ACK);
+ val = IDU_GET_PARAM();
+
+ return val & (1 << irq);
+}
+
+/*
+ * Get the Interrupt Pending status for IRQ (as CPU Bitmask)
+ * -Pending means the CPU has not yet noticed the IRQ (e.g. disabled)
+ * -After the Interrupt has been taken, the IPI explicitly needs to be
+ *  cleared to be acknowledged.
+ */
+bool idu_irq_get_pend(uint8_t irq)
+{
+ uint32_t val;
+
+ IDU_SET_COMMAND(irq, IDU_IRQ_PEND);
+ val = IDU_GET_PARAM();
+
+ return val & (1 << irq);
+}
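
Editor's note: the three hooks installed in iss_model_init_early_smp() are how the arch-common SMP layer reaches this platform code. A sketch of the expected call pattern, assuming plat_smp_ops simply carries the cpu_kick/ipi_send/ipi_clear function pointers assigned above (the struct itself lives in the arch code and is not shown in this patch):

	/* illustrative only: how the arch layer would drive the hooks above */
	static void example_send_ipi(struct cpumask *targets)
	{
		/*
		 * With the 1:1 mapping from iss_model_init_smp(), asserting
		 * common IRQ 'x' interrupts CPU 'x', so handing the cpumask
		 * to ipi_send() reaches every CPU in 'targets'.
		 */
		plat_smp_ops.ipi_send(targets);
	}
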
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index e60e386178d..12f22492df4 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -40,7 +40,7 @@ __SYSCALL(15, sys_chmod)
__SYSCALL(16, sys_lchown16)
__SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */
__SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */
-__SYSCALL(19, compat_sys_lseek_wrapper)
+__SYSCALL(19, compat_sys_lseek)
__SYSCALL(20, sys_getpid)
__SYSCALL(21, compat_sys_mount)
__SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */
@@ -113,8 +113,8 @@ __SYSCALL(88, sys_reboot)
__SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */
__SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */
__SYSCALL(91, sys_munmap)
-__SYSCALL(92, sys_truncate)
-__SYSCALL(93, sys_ftruncate)
+__SYSCALL(92, compat_sys_truncate)
+__SYSCALL(93, compat_sys_ftruncate)
__SYSCALL(94, sys_fchmod)
__SYSCALL(95, sys_fchown16)
__SYSCALL(96, sys_getpriority)
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S
index 6abb0572161..9416d045a68 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/sys32.S
@@ -58,11 +58,6 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
* in registers or that take 32-bit parameters which require sign
* extension.
*/
-compat_sys_lseek_wrapper:
- sxtw x1, w1
- b sys_lseek
-ENDPROC(compat_sys_lseek_wrapper)
-
compat_sys_pread64_wrapper:
orr x3, x4, x5, lsl #32
b sys_pread64
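
Editor's note: the removed wrapper's only job was the "sxtw x1, w1", i.e. sign-extending the 32-bit offset argument before tail-calling sys_lseek; the generic compat_sys_lseek entry point performs the equivalent widening in C, so the asm stub became redundant. In rough C terms (a sketch of the required behaviour, not the kernel's actual definition):

	/* illustration: what the compat entry must guarantee for a 32-bit caller */
	long example_compat_lseek(unsigned int fd, int offset32, unsigned int whence)
	{
		/*
		 * Widen the signed 32-bit offset to the native 64-bit one --
		 * exactly what "sxtw x1, w1" did in the removed wrapper --
		 * before invoking the native lseek implementation.
		 */
		long offset = (long)offset32;

		return native_lseek(fd, offset, whence);	/* hypothetical callee */
	}
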
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 91b9d69f465..4b597d91a8d 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -18,10 +18,10 @@ platforms += loongson1
platforms += mti-malta
platforms += mti-sead3
platforms += netlogic
-platforms += pmc-sierra
+platforms += pmcs-msp71xx
platforms += pnx833x
-platforms += pnx8550
platforms += powertv
+platforms += ralink
platforms += rb532
platforms += sgi-ip22
platforms += sgi-ip27
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 1eabe575321..ae9c716c46b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -108,12 +108,14 @@ config ATH79
config BCM47XX
bool "Broadcom BCM47XX based boards"
select ARCH_WANT_OPTIONAL_GPIOLIB
+ select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
select FW_CFE
select HW_HAS_PCI
select IRQ_CPU
+ select NO_EXCEPT_FILL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_HAS_EARLY_PRINTK
@@ -295,6 +297,7 @@ config MIPS_MALTA
select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
+ select CSRC_GIC
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
@@ -354,6 +357,7 @@ config MIPS_SEAD3
select USB_ARCH_HAS_EHCI
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
+ select USE_OF
help
This enables support for the MIPS Technologies SEAD3 evaluation
board.
@@ -385,16 +389,6 @@ config NXP_STB225
help
Support for NXP Semiconductors STB225 Development Board.
-config PNX8550_JBS
- bool "NXP PNX8550 based JBS board"
- select PNX8550
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
-config PNX8550_STB810
- bool "NXP PNX8550 based STB810 board"
- select PNX8550
- select SYS_SUPPORTS_LITTLE_ENDIAN
-
config PMC_MSP
bool "PMC-Sierra MSP chipsets"
select CEVT_R4K
@@ -434,6 +428,22 @@ config POWERTV
help
This enables support for the Cisco PowerTV Platform.
+config RALINK
+ bool "Ralink based machines"
+ select CEVT_R4K
+ select CSRC_R4K
+ select BOOT_RAW
+ select DMA_NONCOHERENT
+ select IRQ_CPU
+ select USE_OF
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_HAS_EARLY_PRINTK
+ select HAVE_MACH_CLKDEV
+ select CLKDEV_LOOKUP
+
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
select FW_ARC
@@ -835,8 +845,9 @@ source "arch/mips/jazz/Kconfig"
source "arch/mips/jz4740/Kconfig"
source "arch/mips/lantiq/Kconfig"
source "arch/mips/lasat/Kconfig"
-source "arch/mips/pmc-sierra/Kconfig"
+source "arch/mips/pmcs-msp71xx/Kconfig"
source "arch/mips/powertv/Kconfig"
+source "arch/mips/ralink/Kconfig"
source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
@@ -917,6 +928,9 @@ config CSRC_POWERTV
config CSRC_R4K
bool
+config CSRC_GIC
+ bool
+
config CSRC_SB1250
bool
@@ -1103,19 +1117,6 @@ config SOC_PNX8335
bool
select SOC_PNX833X
-config PNX8550
- bool
- select SOC_PNX8550
-
-config SOC_PNX8550
- bool
- select DMA_NONCOHERENT
- select HW_HAS_PCI
- select SYS_HAS_CPU_MIPS32_R1
- select SYS_HAS_EARLY_PRINTK
- select SYS_SUPPORTS_32BIT_KERNEL
- select GENERIC_GPIO
-
config SWAP_IO_SPACE
bool
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index f2dfd404550..6f7978f9509 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -191,7 +191,7 @@ endif
include $(srctree)/arch/mips/Kbuild.platforms
ifdef CONFIG_PHYSICAL_START
-load-y = $(CONFIG_PHYSICAL_START)
+load-y = $(CONFIG_PHYSICAL_START)
endif
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
diff --git a/arch/mips/alchemy/Platform b/arch/mips/alchemy/Platform
index 942c5800a68..fa1bdd1aea1 100644
--- a/arch/mips/alchemy/Platform
+++ b/arch/mips/alchemy/Platform
@@ -1,7 +1,7 @@
#
# Core Alchemy code
#
-platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
+platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/
#
@@ -45,7 +45,7 @@ load-$(CONFIG_MIPS_MTX1) += 0xffffffff80100000
#
# MyCable eval board
#
-platform-$(CONFIG_MIPS_XXS1500) += alchemy/
+platform-$(CONFIG_MIPS_XXS1500) += alchemy/
load-$(CONFIG_MIPS_XXS1500) += 0xffffffff80100000
#
@@ -56,7 +56,7 @@ load-$(CONFIG_MIPS_GPR) += 0xffffffff80100000
# boards can specify their own <gpio.h> in one of their include dirs.
# If they do, placing this line here at the end will make sure the
-# compiler picks the board one. If they don't, it will make sure
+# compiler picks the board one. If they don't, it will make sure
# the alchemy generic gpio header is picked up.
cflags-$(CONFIG_MIPS_ALCHEMY) += -I$(srctree)/arch/mips/include/asm/mach-au1x00
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index ba3259086b9..cb0f6afb738 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -135,33 +135,33 @@ static struct mtd_partition gpr_mtd_partitions[] = {
{
.name = "kernel",
.size = 0x00200000,
- .offset = 0,
+ .offset = 0,
},
{
.name = "rootfs",
.size = 0x00800000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
},
{
.name = "config",
.size = 0x00200000,
- .offset = 0x01d00000,
+ .offset = 0x01d00000,
},
{
.name = "yamon",
.size = 0x00100000,
- .offset = 0x01c00000,
+ .offset = 0x01c00000,
},
{
.name = "yamon env vars",
.size = 0x00040000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
},
{
.name = "kernel+rootfs",
.size = 0x00a00000,
- .offset = 0,
+ .offset = 0,
},
};
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index a124c251c0c..4a9baa9f633 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -173,23 +173,23 @@ static struct mtd_partition mtx1_mtd_partitions[] = {
{
.name = "filesystem",
.size = 0x01C00000,
- .offset = 0,
+ .offset = 0,
},
{
.name = "yamon",
.size = 0x00100000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.mask_flags = MTD_WRITEABLE,
},
{
.name = "kernel",
.size = 0x002c0000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
},
{
.name = "yamon env",
.size = 0x00040000,
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
},
};
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index cf02d7dc2df..19d5642c16d 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -252,7 +252,7 @@ EXPORT_SYMBOL(au1xxx_ddma_del_device);
u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
void (*callback)(int, void *), void *callparam)
{
- unsigned long flags;
+ unsigned long flags;
u32 used, chan;
u32 dcp;
int i;
@@ -512,7 +512,7 @@ u32 au1xxx_dbdma_ring_alloc(u32 chanid, int entries)
break;
}
- /* If source input is FIFO, set static address. */
+ /* If source input is FIFO, set static address. */
if (stp->dev_flags & DEV_FLAGS_IN) {
if (stp->dev_flags & DEV_FLAGS_BURSTABLE)
src1 |= DSCR_SRC1_SAM(DSCR_xAM_BURST);
@@ -635,7 +635,7 @@ u32 au1xxx_dbdma_put_source(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
ctp->chan_ptr->ddma_dbell = 0;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
@@ -697,7 +697,7 @@ u32 au1xxx_dbdma_put_dest(u32 chanid, dma_addr_t buf, int nbytes, u32 flags)
dma_cache_wback_inv((unsigned long)dp, sizeof(*dp));
ctp->chan_ptr->ddma_dbell = 0;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
@@ -742,7 +742,7 @@ u32 au1xxx_dbdma_get_dest(u32 chanid, void **buf, int *nbytes)
*nbytes = dp->dscr_cmd1;
rv = dp->dscr_stat;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->get_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
@@ -891,7 +891,7 @@ void au1xxx_dbdma_dump(u32 chanid)
chan_tab_t *ctp;
au1x_ddma_desc_t *dp;
dbdev_tab_t *stp, *dtp;
- au1x_dma_chan_t *cp;
+ au1x_dma_chan_t *cp;
u32 i = 0;
ctp = *((chan_tab_t **)chanid);
@@ -969,7 +969,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
ctp->chan_ptr->ddma_dbell = 0;
- /* Get next descriptor pointer. */
+ /* Get next descriptor pointer. */
ctp->put_ptr = phys_to_virt(DSCR_GET_NXTPTR(dp->dscr_nxtptr));
/* Return something non-zero. */
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
index f1b50f0c01d..f9bc4f52044 100644
--- a/arch/mips/alchemy/common/gpiolib.c
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -106,14 +106,14 @@ struct gpio_chip alchemy_gpio_chip[] = {
.ngpio = ALCHEMY_GPIO1_NUM,
},
[1] = {
- .label = "alchemy-gpio2",
- .direction_input = gpio2_direction_input,
- .direction_output = gpio2_direction_output,
- .get = gpio2_get,
- .set = gpio2_set,
+ .label = "alchemy-gpio2",
+ .direction_input = gpio2_direction_input,
+ .direction_output = gpio2_direction_output,
+ .get = gpio2_get,
+ .set = gpio2_set,
.to_irq = gpio2_to_irq,
- .base = ALCHEMY_GPIO2_BASE,
- .ngpio = ALCHEMY_GPIO2_NUM,
+ .base = ALCHEMY_GPIO2_BASE,
+ .ngpio = ALCHEMY_GPIO2_NUM,
},
};
diff --git a/arch/mips/alchemy/common/irq.c b/arch/mips/alchemy/common/irq.c
index 94fbcd19eb9..63a71817a00 100644
--- a/arch/mips/alchemy/common/irq.c
+++ b/arch/mips/alchemy/common/irq.c
@@ -84,20 +84,20 @@ static int au1300_gpic_settype(struct irq_data *d, unsigned int type);
* needs the highest priority.
*/
struct alchemy_irqmap au1000_irqmap[] __initdata = {
- { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -106,33 +106,33 @@ struct alchemy_irqmap au1000_irqmap[] __initdata = {
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
- { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1500_irqmap[] __initdata = {
- { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -141,31 +141,31 @@ struct alchemy_irqmap au1500_irqmap[] __initdata = {
{ AU1500_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1500_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
- { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1500_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1500_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1500_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1500_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1500_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1100_irqmap[] __initdata = {
- { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -174,33 +174,33 @@ struct alchemy_irqmap au1100_irqmap[] __initdata = {
{ AU1100_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1100_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
- { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1100_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1100_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1100_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
{ AU1100_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1100_AC97C_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1550_irqmap[] __initdata = {
- { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1550_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -210,26 +210,26 @@ struct alchemy_irqmap au1550_irqmap[] __initdata = {
{ AU1550_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1550_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
+ { AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0, 0 },
{ AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
- { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 1, 0 },
+ { AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ -1, },
};
struct alchemy_irqmap au1200_irqmap[] __initdata = {
- { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ AU1200_TOY_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
@@ -239,9 +239,9 @@ struct alchemy_irqmap au1200_irqmap[] __initdata = {
{ AU1200_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
{ AU1200_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0, 0 },
{ AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 1, 0 },
- { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
- { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
+ { AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 1, 0 },
{ -1, },
};
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 7af941d8e71..9837a134a6d 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -53,7 +53,7 @@ static void alchemy_8250_pm(struct uart_port *port, unsigned int state,
.irq = _irq, \
.regshift = 2, \
.iotype = UPIO_AU, \
- .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
+ .flags = UPF_SKIP_TEST | UPF_IOREMAP | \
UPF_FIXED_TYPE, \
.type = PORT_16550A, \
.pm = alchemy_8250_pm, \
@@ -137,7 +137,7 @@ static void alchemy_ehci_power_off(struct platform_device *pdev)
}
static struct usb_ehci_pdata alchemy_ehci_pdata = {
- .no_io_watchdog = 1,
+ .no_io_watchdog = 1,
.power_on = alchemy_ehci_power_on,
.power_off = alchemy_ehci_power_off,
.power_suspend = alchemy_ehci_power_off,
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 37ffd997c61..62b4e7bbeab 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -59,7 +59,7 @@ void __init plat_mem_setup(void)
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
- board_setup(); /* board specific setup */
+ board_setup(); /* board specific setup */
/* IO/MEM resources. */
set_io_port_base(0);
diff --git a/arch/mips/alchemy/common/sleeper.S b/arch/mips/alchemy/common/sleeper.S
index c7bcc7e5c82..706d933e008 100644
--- a/arch/mips/alchemy/common/sleeper.S
+++ b/arch/mips/alchemy/common/sleeper.S
@@ -102,12 +102,12 @@ LEAF(alchemy_sleep_au1000)
cache 0x14, 96(t0)
.set mips0
-1: lui a0, 0xb400 /* mem_xxx */
- sw zero, 0x001c(a0) /* Precharge */
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x001c(a0) /* Precharge */
sync
sw zero, 0x0020(a0) /* Auto Refresh */
sync
- sw zero, 0x0030(a0) /* Sleep */
+ sw zero, 0x0030(a0) /* Sleep */
sync
DO_SLEEP
@@ -128,15 +128,15 @@ LEAF(alchemy_sleep_au1550)
cache 0x14, 96(t0)
.set mips0
-1: lui a0, 0xb400 /* mem_xxx */
- sw zero, 0x08c0(a0) /* Precharge */
+1: lui a0, 0xb400 /* mem_xxx */
+ sw zero, 0x08c0(a0) /* Precharge */
sync
sw zero, 0x08d0(a0) /* Self Refresh */
sync
/* wait for sdram to enter self-refresh mode */
- lui t0, 0x0100
-2: lw t1, 0x0850(a0) /* mem_sdstat */
+ lui t0, 0x0100
+2: lw t1, 0x0850(a0) /* mem_sdstat */
and t2, t1, t0
beq t2, zero, 2b
nop
@@ -144,9 +144,9 @@ LEAF(alchemy_sleep_au1550)
/* disable SDRAM clocks */
lui t0, 0xcfff
ori t0, t0, 0xffff
- lw t1, 0x0840(a0) /* mem_sdconfiga */
- and t1, t0, t1 /* clear CE[1:0] */
- sw t1, 0x0840(a0) /* mem_sdconfiga */
+ lw t1, 0x0840(a0) /* mem_sdconfiga */
+ and t1, t0, t1 /* clear CE[1:0] */
+ sw t1, 0x0840(a0) /* mem_sdconfiga */
sync
DO_SLEEP
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index b67930d1932..38afb11ba2c 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -85,7 +85,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 1500,
- .set_next_event = au1x_rtcmatch2_set_next_event,
+ .set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
.cpumask = cpu_all_mask,
};
diff --git a/arch/mips/alchemy/common/usb.c b/arch/mips/alchemy/common/usb.c
index 936af8359fb..fcc69562611 100644
--- a/arch/mips/alchemy/common/usb.c
+++ b/arch/mips/alchemy/common/usb.c
@@ -122,7 +122,7 @@ static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
unsigned long r;
if (enable) {
- __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
+ __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
wmb();
r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index f2039ef2c29..c98c9ea3372 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -20,7 +20,7 @@ static struct bcsr_reg {
spinlock_t lock;
} bcsr_regs[BCSR_CNT];
-static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
+static void __iomem *bcsr_virt; /* KSEG1 addr of BCSR base */
static int bcsr_csc_base; /* linux-irq of first cascaded irq */
void __init bcsr_init(unsigned long bcsr1_phys, unsigned long bcsr2_phys)
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 8187845650f..11f3ad20321 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -276,7 +276,7 @@ static void db1100_mmcled_set(struct led_classdev *led, enum led_brightness b)
}
static struct led_classdev db1100_mmc_led = {
- .brightness_set = db1100_mmcled_set,
+ .brightness_set = db1100_mmcled_set,
};
static int db1100_mmc1_card_readonly(void *mmc_host)
@@ -314,7 +314,7 @@ static void db1100_mmc1led_set(struct led_classdev *led, enum led_brightness b)
}
static struct led_classdev db1100_mmc1_led = {
- .brightness_set = db1100_mmc1led_set,
+ .brightness_set = db1100_mmc1led_set,
};
static struct au1xmmc_platform_data db1100_mmc_platdata[2] = {
@@ -357,7 +357,7 @@ static struct resource au1100_mmc0_resources[] = {
}
};
-static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
+static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
static struct platform_device db1100_mmc0_dev = {
.name = "au1xxx-mmc",
@@ -482,7 +482,7 @@ static struct spi_board_info db1100_spi_info[] __initdata = {
.mode = 0,
.irq = AU1100_GPIO21_INT,
.platform_data = &db1100_touch_pd,
- .controller_data = (void *)210, /* for spi_gpio: CS# GPIO210 */
+ .controller_data = (void *)210, /* for spi_gpio: CS# GPIO210 */
},
};
@@ -572,7 +572,7 @@ static int __init db1000_dev_init(void)
irq_set_irq_type(AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW);
/* EPSON S1D13806 0x1b000000
- * SRAM 1MB/2MB 0x1a000000
+ * SRAM 1MB/2MB 0x1a000000
* DS1693 RTC 0x0c000000
*/
} else if (board == BCSR_WHOAMI_PB1100) {
@@ -586,7 +586,7 @@ static int __init db1000_dev_init(void)
irq_set_irq_type(AU1100_GPIO12_INT, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(AU1100_GPIO13_INT, IRQ_TYPE_LEVEL_LOW);
/* EPSON S1D13806 0x1b000000
- * SRAM 1MB/2MB 0x1a000000
+ * SRAM 1MB/2MB 0x1a000000
* DiskOnChip 0x0d000000
* DS1693 RTC 0x0c000000
*/
@@ -605,7 +605,7 @@ static int __init db1000_dev_init(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
- c0, d0, /*s0*/0, 0, 0);
+ c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
@@ -619,7 +619,7 @@ static int __init db1000_dev_init(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
- c1, d1, /*s1*/0, 0, 1);
+ c1, d1, /*s1*/0, 0, 1);
}
platform_add_devices(db1x00_devs, ARRAY_SIZE(db1x00_devs));
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index 299b7d202be..a84d98b8f96 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -90,14 +90,14 @@ int __init db1200_board_setup(void)
whoami = bcsr_read(BCSR_WHOAMI);
printk(KERN_INFO "Alchemy/AMD/RMI %s Board, CPLD Rev %d"
- " Board-ID %d Daughtercard ID %d\n", get_system_type(),
+ " Board-ID %d Daughtercard ID %d\n", get_system_type(),
(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
/* SMBus/SPI on PSC0, Audio on PSC1 */
pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
pfc &= ~(SYS_PINFUNC_P0A | SYS_PINFUNC_P0B);
pfc &= ~(SYS_PINFUNC_P1A | SYS_PINFUNC_P1B | SYS_PINFUNC_FS3);
- pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
+ pfc |= SYS_PINFUNC_P1C; /* SPI is configured later */
__raw_writel(pfc, (void __iomem *)SYS_PINFUNC);
wmb();
@@ -129,7 +129,7 @@ int __init db1200_board_setup(void)
static struct mtd_partition db1200_spiflash_parts[] = {
{
.name = "spi_flash",
- .offset = 0,
+ .offset = 0,
.size = MTDPART_SIZ_FULL,
},
};
@@ -200,12 +200,12 @@ static int au1200_nand_device_ready(struct mtd_info *mtd)
static struct mtd_partition db1200_nand_parts[] = {
{
.name = "NAND FS 0",
- .offset = 0,
+ .offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
@@ -395,7 +395,7 @@ static void db1200_mmcled_set(struct led_classdev *led,
}
static struct led_classdev db1200_mmc_led = {
- .brightness_set = db1200_mmcled_set,
+ .brightness_set = db1200_mmcled_set,
};
/* -- */
@@ -463,7 +463,7 @@ static void pb1200_mmc1led_set(struct led_classdev *led,
}
static struct led_classdev pb1200_mmc1_led = {
- .brightness_set = pb1200_mmc1led_set,
+ .brightness_set = pb1200_mmc1led_set,
};
static void pb1200_mmc1_set_power(void *mmc_host, int state)
@@ -526,7 +526,7 @@ static struct resource au1200_mmc0_resources[] = {
}
};
-static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
+static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32);
static struct platform_device db1200_mmc0_dev = {
.name = "au1xxx-mmc",
@@ -601,7 +601,7 @@ static int db1200fb_panel_shutdown(void)
static struct au1200fb_platdata db1200fb_pd = {
.panel_index = db1200fb_panel_index,
.panel_init = db1200fb_panel_init,
- .panel_shutdown = db1200fb_panel_shutdown,
+ .panel_shutdown = db1200fb_panel_shutdown,
};
static struct resource au1200_lcd_res[] = {
@@ -772,11 +772,11 @@ static int __init pb1200_res_fixup(void)
}
db1200_nand_res[0].start = PB1200_NAND_PHYS_ADDR;
- db1200_nand_res[0].end = PB1200_NAND_PHYS_ADDR + 0xff;
+ db1200_nand_res[0].end = PB1200_NAND_PHYS_ADDR + 0xff;
db1200_ide_res[0].start = PB1200_IDE_PHYS_ADDR;
- db1200_ide_res[0].end = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
+ db1200_ide_res[0].end = PB1200_IDE_PHYS_ADDR + DB1200_IDE_PHYS_LEN - 1;
db1200_eth_res[0].start = PB1200_ETH_PHYS_ADDR;
- db1200_eth_res[0].end = PB1200_ETH_PHYS_ADDR + 0xff;
+ db1200_eth_res[0].end = PB1200_ETH_PHYS_ADDR + 0xff;
return 0;
}
@@ -797,7 +797,7 @@ int __init db1200_dev_setup(void)
irq_set_irq_type(AU1200_GPIO7_INT, IRQ_TYPE_LEVEL_LOW);
bcsr_init_irq(DB1200_INT_BEGIN, DB1200_INT_END, AU1200_GPIO7_INT);
- /* insert/eject pairs: one of both is always screaming. To avoid
+ /* insert/eject pairs: one of both is always screaming. To avoid
* issues they must not be automatically enabled when initially
* requested.
*/
@@ -813,7 +813,7 @@ int __init db1200_dev_setup(void)
spi_register_board_info(db1200_spi_devs,
ARRAY_SIZE(db1200_i2c_devs));
- /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
+ /* SWITCHES: S6.8 I2C/SPI selector (OFF=I2C ON=SPI)
* S6.7 AC97/I2S selector (OFF=AC97 ON=I2S)
* or S12 on the PB1200.
*/
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index cdf37cbd3d1..6167e73eef9 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -80,7 +80,7 @@ static int db1300_dev_pins[] __initdata = {
AU1300_PIN_PSC0D1,
AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
AU1300_PIN_PSC1D1,
- AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
+ AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
AU1300_PIN_PSC2D1,
AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
AU1300_PIN_PSC3D1,
@@ -143,12 +143,12 @@ static int au1300_nand_device_ready(struct mtd_info *mtd)
static struct mtd_partition db1300_nand_parts[] = {
{
.name = "NAND FS 0",
- .offset = 0,
+ .offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
@@ -487,7 +487,7 @@ static void db1300_mmcled_set(struct led_classdev *led,
}
static struct led_classdev db1300_mmc_led = {
- .brightness_set = db1300_mmcled_set,
+ .brightness_set = db1300_mmcled_set,
};
struct au1xmmc_platform_data db1300_sd1_platdata = {
@@ -646,7 +646,7 @@ static int db1300fb_panel_shutdown(void)
static struct au1200fb_platdata db1300fb_pd = {
.panel_index = db1300fb_panel_index,
.panel_init = db1300fb_panel_init,
- .panel_shutdown = db1300fb_panel_shutdown,
+ .panel_shutdown = db1300fb_panel_shutdown,
};
static struct resource au1300_lcd_res[] = {
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 5a9ae609542..016cddacd7e 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -67,7 +67,7 @@ int __init db1550_board_setup(void)
bcsr_init(PB1550_BCSR_PHYS_ADDR,
PB1550_BCSR_PHYS_ADDR + PB1550_BCSR_HEXLED_OFS);
- pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
+ pr_info("Alchemy/AMD %s Board, CPLD Rev %d Board-ID %d " \
"Daughtercard ID %d\n", get_system_type(),
(whoami >> 4) & 0xf, (whoami >> 8) & 0xf, whoami & 0xf);
@@ -80,7 +80,7 @@ int __init db1550_board_setup(void)
static struct mtd_partition db1550_spiflash_parts[] = {
{
.name = "spi_flash",
- .offset = 0,
+ .offset = 0,
.size = MTDPART_SIZ_FULL,
},
};
@@ -151,12 +151,12 @@ static int au1550_nand_device_ready(struct mtd_info *mtd)
static struct mtd_partition db1550_nand_parts[] = {
{
.name = "NAND FS 0",
- .offset = 0,
+ .offset = 0,
.size = 8 * 1024 * 1024,
},
{
.name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
+ .offset = MTDPART_OFS_APPEND,
.size = MTDPART_SIZ_FULL
},
};
@@ -495,10 +495,10 @@ static void __init db1550_devices(void)
{
alchemy_gpio_direction_output(203, 0); /* red led on */
- irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH); /* CD0# */
- irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH); /* CD1# */
- irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW); /* CARD0# */
- irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW); /* CARD1# */
+ irq_set_irq_type(AU1550_GPIO0_INT, IRQ_TYPE_EDGE_BOTH); /* CD0# */
+ irq_set_irq_type(AU1550_GPIO1_INT, IRQ_TYPE_EDGE_BOTH); /* CD1# */
+ irq_set_irq_type(AU1550_GPIO3_INT, IRQ_TYPE_LEVEL_LOW); /* CARD0# */
+ irq_set_irq_type(AU1550_GPIO5_INT, IRQ_TYPE_LEVEL_LOW); /* CARD1# */
irq_set_irq_type(AU1550_GPIO21_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG0# */
irq_set_irq_type(AU1550_GPIO22_INT, IRQ_TYPE_LEVEL_LOW); /* STSCHG1# */
@@ -539,7 +539,7 @@ static void __init pb1550_devices(void)
/* Pb1550, like all others, also has statuschange irqs; however they're
* wired up on one of the Au1550's shared GPIO201_205 line, which also
- * services the PCMCIA card interrupts. So we ignore statuschange and
+ * services the PCMCIA card interrupts. So we ignore statuschange and
* use the GPIO201_205 exclusively for card interrupts, since a) pcmcia
* drivers are used to shared irqs and b) statuschange isn't really use-
* ful anyway.
diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
index acaf91b5e46..b86bff31d1d 100644
--- a/arch/mips/alchemy/devboards/pm.c
+++ b/arch/mips/alchemy/devboards/pm.c
@@ -194,7 +194,7 @@ static ssize_t db1x_pmattr_store(struct kobject *kobj,
}
#define ATTR(x) \
- static struct kobj_attribute x##_attribute = \
+ static struct kobj_attribute x##_attribute = \
__ATTR(x, 0664, db1x_pmattr_show, \
db1x_pmattr_store);
diff --git a/arch/mips/ar7/Platform b/arch/mips/ar7/Platform
index 0bf85c416c6..21f9102d533 100644
--- a/arch/mips/ar7/Platform
+++ b/arch/mips/ar7/Platform
@@ -1,6 +1,6 @@
#
# Texas Instruments AR7
#
-platform-$(CONFIG_AR7) += ar7/
-cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
-load-$(CONFIG_AR7) += 0xffffffff94100000
+platform-$(CONFIG_AR7) += ar7/
+cflags-$(CONFIG_AR7) += -I$(srctree)/arch/mips/include/asm/mach-ar7
+load-$(CONFIG_AR7) += 0xffffffff94100000
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 7477fd2127a..7e2356fd5fd 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -492,11 +492,11 @@ static struct gpio_led gt701_leds[] = {
.active_low = 1,
.default_trigger = "default-on",
},
- {
- .name = "ethernet",
- .gpio = 10,
- .active_low = 1,
- },
+ {
+ .name = "ethernet",
+ .gpio = 10,
+ .active_low = 1,
+ },
};
static struct gpio_led_platform_data ar7_led_data;
@@ -512,7 +512,7 @@ static void __init detect_leds(void)
{
char *prid, *usb_prod;
- /* Default LEDs */
+ /* Default LEDs */
ar7_led_data.num_leds = ARRAY_SIZE(default_leds);
ar7_led_data.leds = default_leds;
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index f44feee2d67..3995e31a73e 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -14,6 +14,18 @@ config ATH79_MACH_AP121
Say 'Y' here if you want your kernel to support the
Atheros AP121 reference board.
+config ATH79_MACH_AP136
+ bool "Atheros AP136 reference board"
+ select SOC_QCA955X
+ select ATH79_DEV_GPIO_BUTTONS
+ select ATH79_DEV_LEDS_GPIO
+ select ATH79_DEV_SPI
+ select ATH79_DEV_USB
+ select ATH79_DEV_WMAC
+ help
+ Say 'Y' here if you want your kernel to support the
+ Atheros AP136 reference board.
+
config ATH79_MACH_AP81
bool "Atheros AP81 reference board"
select SOC_AR913X
@@ -88,6 +100,12 @@ config SOC_AR934X
select PCI_AR724X if PCI
def_bool n
+config SOC_QCA955X
+ select USB_ARCH_HAS_EHCI
+ select HW_HAS_PCI
+ select PCI_AR724X if PCI
+ def_bool n
+
config PCI_AR724X
def_bool n
@@ -104,7 +122,7 @@ config ATH79_DEV_USB
def_bool n
config ATH79_DEV_WMAC
- depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
+ depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X || SOC_QCA955X)
def_bool n
endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 2b54d98263f..5c9ff692ff3 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
# Machines
#
obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
+obj-$(CONFIG_ATH79_MACH_AP136) += mach-ap136.o
obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 579f452c0b4..765ef30e3e1 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -198,7 +198,7 @@ static void __init ar934x_clocks_init(void)
dpll_base = ioremap(AR934X_SRIF_BASE, AR934X_SRIF_SIZE);
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
+ if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
ath79_ref_clk.rate = 40 * 1000 * 1000;
else
ath79_ref_clk.rate = 25 * 1000 * 1000;
@@ -295,6 +295,82 @@ static void __init ar934x_clocks_init(void)
iounmap(dpll_base);
}
+static void __init qca955x_clocks_init(void)
+{
+ u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
+ u32 cpu_pll, ddr_pll;
+ u32 bootstrap;
+
+ bootstrap = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+ if (bootstrap & QCA955X_BOOTSTRAP_REF_CLK_40)
+ ath79_ref_clk.rate = 40 * 1000 * 1000;
+ else
+ ath79_ref_clk.rate = 25 * 1000 * 1000;
+
+ pll = ath79_pll_rr(QCA955X_PLL_CPU_CONFIG_REG);
+ out_div = (pll >> QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA955X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_NINT_MASK;
+ frac = (pll >> QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
+ QCA955X_PLL_CPU_CONFIG_NFRAC_MASK;
+
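+ /* CPU PLL rate = ref * (NINT + NFRAC / 2^6) / (REFDIV * 2^OUTDIV) */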
+ cpu_pll = nint * ath79_ref_clk.rate / ref_div;
+ cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
+ cpu_pll /= (1 << out_div);
+
+ pll = ath79_pll_rr(QCA955X_PLL_DDR_CONFIG_REG);
+ out_div = (pll >> QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK;
+ ref_div = (pll >> QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_REFDIV_MASK;
+ nint = (pll >> QCA955X_PLL_DDR_CONFIG_NINT_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_NINT_MASK;
+ frac = (pll >> QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
+ QCA955X_PLL_DDR_CONFIG_NFRAC_MASK;
+
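+ /* DDR PLL rate = ref * (NINT + NFRAC / 2^10) / (REFDIV * 2^OUTDIV) */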
+ ddr_pll = nint * ath79_ref_clk.rate / ref_div;
+ ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
+ ddr_pll /= (1 << out_div);
+
+ clk_ctrl = ath79_pll_rr(QCA955X_PLL_CLK_CTRL_REG);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS)
+ ath79_cpu_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL)
+ ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS)
+ ath79_ddr_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL)
+ ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
+ else
+ ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
+
+ postdiv = (clk_ctrl >> QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT) &
+ QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK;
+
+ if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS)
+ ath79_ahb_clk.rate = ath79_ref_clk.rate;
+ else if (clk_ctrl & QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL)
+ ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
+ else
+ ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
+
+ ath79_wdt_clk.rate = ath79_ref_clk.rate;
+ ath79_uart_clk.rate = ath79_ref_clk.rate;
+}
+
void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
@@ -307,6 +383,8 @@ void __init ath79_clocks_init(void)
ar933x_clocks_init();
else if (soc_is_ar934x())
ar934x_clocks_init();
+ else if (soc_is_qca955x())
+ qca955x_clocks_init();
else
BUG();
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 5a4adfc9d79..eb3966cd8cf 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -72,6 +72,8 @@ void ath79_device_reset_set(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca955x())
+ reg = QCA955X_RESET_REG_RESET_MODULE;
else
BUG();
@@ -98,6 +100,8 @@ void ath79_device_reset_clear(u32 mask)
reg = AR933X_RESET_REG_RESET_MODULE;
else if (soc_is_ar934x())
reg = AR934X_RESET_REG_RESET_MODULE;
+ else if (soc_is_qca955x())
+ reg = QCA955X_RESET_REG_RESET_MODULE;
else
BUG();
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index ea3a8140e72..a3a2741d068 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -36,7 +36,7 @@ static struct resource ath79_uart_resources[] = {
static struct plat_serial8250_port ath79_uart_data[] = {
{
.mapbase = AR71XX_UART_BASE,
- .irq = ATH79_MISC_IRQ_UART,
+ .irq = ATH79_MISC_IRQ(3),
.flags = AR71XX_UART_FLAGS,
.iotype = UPIO_MEM32,
.regshift = 2,
@@ -62,8 +62,8 @@ static struct resource ar933x_uart_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = ATH79_MISC_IRQ_UART,
- .end = ATH79_MISC_IRQ_UART,
+ .start = ATH79_MISC_IRQ(3),
+ .end = ATH79_MISC_IRQ(3),
.flags = IORESOURCE_IRQ,
},
};
@@ -90,7 +90,8 @@ void __init ath79_register_uart(void)
if (soc_is_ar71xx() ||
soc_is_ar724x() ||
soc_is_ar913x() ||
- soc_is_ar934x()) {
+ soc_is_ar934x() ||
+ soc_is_qca955x()) {
ath79_uart_data[0].uartclk = clk_get_rate(clk);
platform_device_register(&ath79_uart_device);
} else if (soc_is_ar933x()) {
diff --git a/arch/mips/ath79/dev-usb.c b/arch/mips/ath79/dev-usb.c
index bd2bc108e1b..8227265bcc2 100644
--- a/arch/mips/ath79/dev-usb.c
+++ b/arch/mips/ath79/dev-usb.c
@@ -25,29 +25,11 @@
#include "common.h"
#include "dev-usb.h"
-static struct resource ath79_ohci_resources[2];
-
-static u64 ath79_ohci_dmamask = DMA_BIT_MASK(32);
+static u64 ath79_usb_dmamask = DMA_BIT_MASK(32);
static struct usb_ohci_pdata ath79_ohci_pdata = {
};
-static struct platform_device ath79_ohci_device = {
- .name = "ohci-platform",
- .id = -1,
- .resource = ath79_ohci_resources,
- .num_resources = ARRAY_SIZE(ath79_ohci_resources),
- .dev = {
- .dma_mask = &ath79_ohci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &ath79_ohci_pdata,
- },
-};
-
-static struct resource ath79_ehci_resources[2];
-
-static u64 ath79_ehci_dmamask = DMA_BIT_MASK(32);
-
static struct usb_ehci_pdata ath79_ehci_pdata_v1 = {
.has_synopsys_hc_bug = 1,
};
@@ -57,22 +39,16 @@ static struct usb_ehci_pdata ath79_ehci_pdata_v2 = {
.has_tt = 1,
};
-static struct platform_device ath79_ehci_device = {
- .name = "ehci-platform",
- .id = -1,
- .resource = ath79_ehci_resources,
- .num_resources = ARRAY_SIZE(ath79_ehci_resources),
- .dev = {
- .dma_mask = &ath79_ehci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static void __init ath79_usb_init_resource(struct resource res[2],
- unsigned long base,
- unsigned long size,
- int irq)
+static void __init ath79_usb_register(const char *name, int id,
+ unsigned long base, unsigned long size,
+ int irq, const void *data,
+ size_t data_size)
{
+ struct resource res[2];
+ struct platform_device *pdev;
+
+ memset(res, 0, sizeof(res));
+
res[0].flags = IORESOURCE_MEM;
res[0].start = base;
res[0].end = base + size - 1;
@@ -80,6 +56,19 @@ static void __init ath79_usb_init_resource(struct resource res[2],
res[1].flags = IORESOURCE_IRQ;
res[1].start = irq;
res[1].end = irq;
+
+ pdev = platform_device_register_resndata(NULL, name, id,
+ res, ARRAY_SIZE(res),
+ data, data_size);
+
+ if (IS_ERR(pdev)) {
+ pr_err("ath79: unable to register USB at %08lx, err=%d\n",
+ base, (int) PTR_ERR(pdev));
+ return;
+ }
+
+ pdev->dev.dma_mask = &ath79_usb_dmamask;
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
#define AR71XX_USB_RESET_MASK (AR71XX_RESET_USB_HOST | \
@@ -106,14 +95,15 @@ static void __init ath79_usb_setup(void)
mdelay(900);
- ath79_usb_init_resource(ath79_ohci_resources, AR71XX_OHCI_BASE,
- AR71XX_OHCI_SIZE, ATH79_MISC_IRQ_OHCI);
- platform_device_register(&ath79_ohci_device);
+ ath79_usb_register("ohci-platform", -1,
+ AR71XX_OHCI_BASE, AR71XX_OHCI_SIZE,
+ ATH79_MISC_IRQ(6),
+ &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
- ath79_usb_init_resource(ath79_ehci_resources, AR71XX_EHCI_BASE,
- AR71XX_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v1;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR71XX_EHCI_BASE, AR71XX_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v1, sizeof(ath79_ehci_pdata_v1));
}
static void __init ar7240_usb_setup(void)
@@ -135,9 +125,10 @@ static void __init ar7240_usb_setup(void)
iounmap(usb_ctrl_base);
- ath79_usb_init_resource(ath79_ohci_resources, AR7240_OHCI_BASE,
- AR7240_OHCI_SIZE, ATH79_CPU_IRQ_USB);
- platform_device_register(&ath79_ohci_device);
+ ath79_usb_register("ohci-platform", -1,
+ AR7240_OHCI_BASE, AR7240_OHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ohci_pdata, sizeof(ath79_ohci_pdata));
}
static void __init ar724x_usb_setup(void)
@@ -151,10 +142,10 @@ static void __init ar724x_usb_setup(void)
ath79_device_reset_clear(AR724X_RESET_USB_PHY);
mdelay(10);
- ath79_usb_init_resource(ath79_ehci_resources, AR724X_EHCI_BASE,
- AR724X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR724X_EHCI_BASE, AR724X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar913x_usb_setup(void)
@@ -168,10 +159,10 @@ static void __init ar913x_usb_setup(void)
ath79_device_reset_clear(AR913X_RESET_USB_PHY);
mdelay(10);
- ath79_usb_init_resource(ath79_ehci_resources, AR913X_EHCI_BASE,
- AR913X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR913X_EHCI_BASE, AR913X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar933x_usb_setup(void)
@@ -185,10 +176,10 @@ static void __init ar933x_usb_setup(void)
ath79_device_reset_clear(AR933X_RESET_USB_PHY);
mdelay(10);
- ath79_usb_init_resource(ath79_ehci_resources, AR933X_EHCI_BASE,
- AR933X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR933X_EHCI_BASE, AR933X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
static void __init ar934x_usb_setup(void)
@@ -211,10 +202,23 @@ static void __init ar934x_usb_setup(void)
ath79_device_reset_clear(AR934X_RESET_USB_HOST);
udelay(1000);
- ath79_usb_init_resource(ath79_ehci_resources, AR934X_EHCI_BASE,
- AR934X_EHCI_SIZE, ATH79_CPU_IRQ_USB);
- ath79_ehci_device.dev.platform_data = &ath79_ehci_pdata_v2;
- platform_device_register(&ath79_ehci_device);
+ ath79_usb_register("ehci-platform", -1,
+ AR934X_EHCI_BASE, AR934X_EHCI_SIZE,
+ ATH79_CPU_IRQ(3),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
+}
+
+static void __init qca955x_usb_setup(void)
+{
+ ath79_usb_register("ehci-platform", 0,
+ QCA955X_EHCI0_BASE, QCA955X_EHCI_SIZE,
+ ATH79_IP3_IRQ(0),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
+
+ ath79_usb_register("ehci-platform", 1,
+ QCA955X_EHCI1_BASE, QCA955X_EHCI_SIZE,
+ ATH79_IP3_IRQ(1),
+ &ath79_ehci_pdata_v2, sizeof(ath79_ehci_pdata_v2));
}
void __init ath79_register_usb(void)
@@ -231,6 +235,8 @@ void __init ath79_register_usb(void)
ar933x_usb_setup();
else if (soc_is_ar934x())
ar934x_usb_setup();
+ else if (soc_is_qca955x())
+ qca955x_usb_setup();
else
BUG();
}
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index d6d893c16ad..da190b1b87c 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -55,8 +55,8 @@ static void __init ar913x_wmac_setup(void)
ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
- ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
}
@@ -83,8 +83,8 @@ static void __init ar933x_wmac_setup(void)
ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
- ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
- ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
+ ath79_wmac_resources[1].start = ATH79_CPU_IRQ(2);
+ ath79_wmac_resources[1].end = ATH79_CPU_IRQ(2);
t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -107,7 +107,7 @@ static void ar934x_wmac_setup(void)
ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
- ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+ ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
if (t & AR934X_BOOTSTRAP_REF_CLK_40)
@@ -116,6 +116,24 @@ static void ar934x_wmac_setup(void)
ath79_wmac_data.is_clk_25mhz = true;
}
+static void qca955x_wmac_setup(void)
+{
+ u32 t;
+
+ ath79_wmac_device.name = "qca955x_wmac";
+
+ ath79_wmac_resources[0].start = QCA955X_WMAC_BASE;
+ ath79_wmac_resources[0].end = QCA955X_WMAC_BASE + QCA955X_WMAC_SIZE - 1;
+ ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
+ ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
+
+ t = ath79_reset_rr(QCA955X_RESET_REG_BOOTSTRAP);
+ if (t & QCA955X_BOOTSTRAP_REF_CLK_40)
+ ath79_wmac_data.is_clk_25mhz = false;
+ else
+ ath79_wmac_data.is_clk_25mhz = true;
+}
+
void __init ath79_register_wmac(u8 *cal_data)
{
if (soc_is_ar913x())
@@ -124,6 +142,8 @@ void __init ath79_register_wmac(u8 *cal_data)
ar933x_wmac_setup();
else if (soc_is_ar934x())
ar934x_wmac_setup();
+ else if (soc_is_qca955x())
+ qca955x_wmac_setup();
else
BUG();
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index dc938cb2ba5..b955fafc58b 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -74,6 +74,8 @@ static void prom_putchar_init(void)
case REV_ID_MAJOR_AR9341:
case REV_ID_MAJOR_AR9342:
case REV_ID_MAJOR_AR9344:
+ case REV_ID_MAJOR_QCA9556:
+ case REV_ID_MAJOR_QCA9558:
_prom_putchar = prom_putchar_ar71xx;
break;
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index 48fe762d252..8d025b028bb 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -137,49 +137,45 @@ static struct gpio_chip ath79_gpio_chip = {
.base = 0,
};
-void ath79_gpio_function_enable(u32 mask)
+static void __iomem *ath79_gpio_get_function_reg(void)
{
- void __iomem *base = ath79_gpio_base;
- unsigned long flags;
+ u32 reg = 0;
- spin_lock_irqsave(&ath79_gpio_lock, flags);
-
- __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) | mask,
- base + AR71XX_GPIO_REG_FUNC);
- /* flush write */
- __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+ if (soc_is_ar71xx() ||
+ soc_is_ar724x() ||
+ soc_is_ar913x() ||
+ soc_is_ar933x())
+ reg = AR71XX_GPIO_REG_FUNC;
+ else if (soc_is_ar934x())
+ reg = AR934X_GPIO_REG_FUNC;
+ else
+ BUG();
- spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+ return ath79_gpio_base + reg;
}
-void ath79_gpio_function_disable(u32 mask)
+void ath79_gpio_function_setup(u32 set, u32 clear)
{
- void __iomem *base = ath79_gpio_base;
+ void __iomem *reg = ath79_gpio_get_function_reg();
unsigned long flags;
spin_lock_irqsave(&ath79_gpio_lock, flags);
- __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~mask,
- base + AR71XX_GPIO_REG_FUNC);
+ __raw_writel((__raw_readl(reg) & ~clear) | set, reg);
/* flush write */
- __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+ __raw_readl(reg);
spin_unlock_irqrestore(&ath79_gpio_lock, flags);
}
-void ath79_gpio_function_setup(u32 set, u32 clear)
+void ath79_gpio_function_enable(u32 mask)
{
- void __iomem *base = ath79_gpio_base;
- unsigned long flags;
-
- spin_lock_irqsave(&ath79_gpio_lock, flags);
-
- __raw_writel((__raw_readl(base + AR71XX_GPIO_REG_FUNC) & ~clear) | set,
- base + AR71XX_GPIO_REG_FUNC);
- /* flush write */
- __raw_readl(base + AR71XX_GPIO_REG_FUNC);
+ ath79_gpio_function_setup(mask, 0);
+}
- spin_unlock_irqrestore(&ath79_gpio_lock, flags);
+void ath79_gpio_function_disable(u32 mask)
+{
+ ath79_gpio_function_setup(0, mask);
}
void __init ath79_gpio_init(void)
@@ -198,12 +194,14 @@ void __init ath79_gpio_init(void)
ath79_gpio_count = AR933X_GPIO_COUNT;
else if (soc_is_ar934x())
ath79_gpio_count = AR934X_GPIO_COUNT;
+ else if (soc_is_qca955x())
+ ath79_gpio_count = QCA955X_GPIO_COUNT;
else
BUG();
ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
ath79_gpio_chip.ngpio = ath79_gpio_count;
- if (soc_is_ar934x()) {
+ if (soc_is_ar934x() || soc_is_qca955x()) {
ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
}
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 90d09fc1539..9c0e1761773 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -35,44 +35,17 @@ static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
pending = __raw_readl(base + AR71XX_RESET_REG_MISC_INT_STATUS) &
__raw_readl(base + AR71XX_RESET_REG_MISC_INT_ENABLE);
- if (pending & MISC_INT_UART)
- generic_handle_irq(ATH79_MISC_IRQ_UART);
-
- else if (pending & MISC_INT_DMA)
- generic_handle_irq(ATH79_MISC_IRQ_DMA);
-
- else if (pending & MISC_INT_PERFC)
- generic_handle_irq(ATH79_MISC_IRQ_PERFC);
-
- else if (pending & MISC_INT_TIMER)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER);
-
- else if (pending & MISC_INT_TIMER2)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER2);
-
- else if (pending & MISC_INT_TIMER3)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER3);
-
- else if (pending & MISC_INT_TIMER4)
- generic_handle_irq(ATH79_MISC_IRQ_TIMER4);
-
- else if (pending & MISC_INT_OHCI)
- generic_handle_irq(ATH79_MISC_IRQ_OHCI);
-
- else if (pending & MISC_INT_ERROR)
- generic_handle_irq(ATH79_MISC_IRQ_ERROR);
-
- else if (pending & MISC_INT_GPIO)
- generic_handle_irq(ATH79_MISC_IRQ_GPIO);
-
- else if (pending & MISC_INT_WDOG)
- generic_handle_irq(ATH79_MISC_IRQ_WDOG);
+ if (!pending) {
+ spurious_interrupt();
+ return;
+ }
- else if (pending & MISC_INT_ETHSW)
- generic_handle_irq(ATH79_MISC_IRQ_ETHSW);
+ while (pending) {
+ int bit = __ffs(pending);
- else
- spurious_interrupt();
+ generic_handle_irq(ATH79_MISC_IRQ(bit));
+ pending &= ~BIT(bit);
+ }
}
static void ar71xx_misc_irq_unmask(struct irq_data *d)
@@ -130,7 +103,10 @@ static void __init ath79_misc_irq_init(void)
if (soc_is_ar71xx() || soc_is_ar913x())
ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
- else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
+ else if (soc_is_ar724x() ||
+ soc_is_ar933x() ||
+ soc_is_ar934x() ||
+ soc_is_qca955x())
ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
else
BUG();
@@ -141,7 +117,7 @@ static void __init ath79_misc_irq_init(void)
handle_level_irq);
}
- irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
+ irq_set_chained_handler(ATH79_CPU_IRQ(6), ath79_misc_irq_handler);
}
static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
@@ -174,7 +150,89 @@ static void ar934x_ip2_irq_init(void)
irq_set_chip_and_handler(i, &dummy_irq_chip,
handle_level_irq);
- irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
+ irq_set_chained_handler(ATH79_CPU_IRQ(2), ar934x_ip2_irq_dispatch);
+}
+
+static void qca955x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ u32 status;
+
+ disable_irq_nosync(irq);
+
+ status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+ status &= QCA955X_EXT_INT_PCIE_RC1_ALL | QCA955X_EXT_INT_WMAC_ALL;
+
+ if (status == 0) {
+ spurious_interrupt();
+ goto enable;
+ }
+
+ if (status & QCA955X_EXT_INT_PCIE_RC1_ALL) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP2_IRQ(0));
+ }
+
+ if (status & QCA955X_EXT_INT_WMAC_ALL) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP2_IRQ(1));
+ }
+
+enable:
+ enable_irq(irq);
+}
+
+static void qca955x_ip3_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+{
+ u32 status;
+
+ disable_irq_nosync(irq);
+
+ status = ath79_reset_rr(QCA955X_RESET_REG_EXT_INT_STATUS);
+ status &= QCA955X_EXT_INT_PCIE_RC2_ALL |
+ QCA955X_EXT_INT_USB1 |
+ QCA955X_EXT_INT_USB2;
+
+ if (status == 0) {
+ spurious_interrupt();
+ goto enable;
+ }
+
+ if (status & QCA955X_EXT_INT_USB1) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP3_IRQ(0));
+ }
+
+ if (status & QCA955X_EXT_INT_USB2) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP3_IRQ(1));
+ }
+
+ if (status & QCA955X_EXT_INT_PCIE_RC2_ALL) {
+ /* TODO: flush DDR? */
+ generic_handle_irq(ATH79_IP3_IRQ(2));
+ }
+
+enable:
+ enable_irq(irq);
+}
+
+static void qca955x_irq_init(void)
+{
+ int i;
+
+ for (i = ATH79_IP2_IRQ_BASE;
+ i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &dummy_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ(2), qca955x_ip2_irq_dispatch);
+
+ for (i = ATH79_IP3_IRQ_BASE;
+ i < ATH79_IP3_IRQ_BASE + ATH79_IP3_IRQ_COUNT; i++)
+ irq_set_chip_and_handler(i, &dummy_irq_chip,
+ handle_level_irq);
+
+ irq_set_chained_handler(ATH79_CPU_IRQ(3), qca955x_ip3_irq_dispatch);
}
asmlinkage void plat_irq_dispatch(void)
@@ -184,22 +242,22 @@ asmlinkage void plat_irq_dispatch(void)
pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7)
- do_IRQ(ATH79_CPU_IRQ_TIMER);
+ do_IRQ(ATH79_CPU_IRQ(7));
else if (pending & STATUSF_IP2)
ath79_ip2_handler();
else if (pending & STATUSF_IP4)
- do_IRQ(ATH79_CPU_IRQ_GE0);
+ do_IRQ(ATH79_CPU_IRQ(4));
else if (pending & STATUSF_IP5)
- do_IRQ(ATH79_CPU_IRQ_GE1);
+ do_IRQ(ATH79_CPU_IRQ(5));
else if (pending & STATUSF_IP3)
ath79_ip3_handler();
else if (pending & STATUSF_IP6)
- do_IRQ(ATH79_CPU_IRQ_MISC);
+ do_IRQ(ATH79_CPU_IRQ(6));
else
spurious_interrupt();
@@ -212,63 +270,69 @@ asmlinkage void plat_irq_dispatch(void)
* Issue a flush in the handlers to ensure that the driver sees
* the update.
*/
+
+static void ath79_default_ip2_handler(void)
+{
+ do_IRQ(ATH79_CPU_IRQ(2));
+}
+
+static void ath79_default_ip3_handler(void)
+{
+ do_IRQ(ATH79_CPU_IRQ(3));
+}
+
static void ar71xx_ip2_handler(void)
{
ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar724x_ip2_handler(void)
{
ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar913x_ip2_handler(void)
{
ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar933x_ip2_handler(void)
{
ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
- do_IRQ(ATH79_CPU_IRQ_IP2);
-}
-
-static void ar934x_ip2_handler(void)
-{
- do_IRQ(ATH79_CPU_IRQ_IP2);
+ do_IRQ(ATH79_CPU_IRQ(2));
}
static void ar71xx_ip3_handler(void)
{
ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar724x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar913x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar933x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
static void ar934x_ip3_handler(void)
{
ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
- do_IRQ(ATH79_CPU_IRQ_USB);
+ do_IRQ(ATH79_CPU_IRQ(3));
}
void __init arch_init_irq(void)
@@ -286,16 +350,21 @@ void __init arch_init_irq(void)
ath79_ip2_handler = ar933x_ip2_handler;
ath79_ip3_handler = ar933x_ip3_handler;
} else if (soc_is_ar934x()) {
- ath79_ip2_handler = ar934x_ip2_handler;
+ ath79_ip2_handler = ath79_default_ip2_handler;
ath79_ip3_handler = ar934x_ip3_handler;
+ } else if (soc_is_qca955x()) {
+ ath79_ip2_handler = ath79_default_ip2_handler;
+ ath79_ip3_handler = ath79_default_ip3_handler;
} else {
BUG();
}
- cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
+ cp0_perfcount_irq = ATH79_MISC_IRQ(5);
mips_cpu_irq_init();
ath79_misc_irq_init();
if (soc_is_ar934x())
ar934x_ip2_irq_init();
+ else if (soc_is_qca955x())
+ qca955x_irq_init();
}
diff --git a/arch/mips/ath79/mach-ap121.c b/arch/mips/ath79/mach-ap121.c
index 4c20200d7c7..1bf73f2a069 100644
--- a/arch/mips/ath79/mach-ap121.c
+++ b/arch/mips/ath79/mach-ap121.c
@@ -69,7 +69,7 @@ static struct spi_board_info ap121_spi_info[] = {
static struct ath79_spi_platform_data ap121_spi_data = {
.bus_num = 0,
- .num_chipselect = 1,
+ .num_chipselect = 1,
};
static void __init ap121_setup(void)
diff --git a/arch/mips/ath79/mach-ap136.c b/arch/mips/ath79/mach-ap136.c
new file mode 100644
index 00000000000..479dd4b1d0d
--- /dev/null
+++ b/arch/mips/ath79/mach-ap136.c
@@ -0,0 +1,156 @@
+/*
+ * Qualcomm Atheros AP136 reference board support
+ *
+ * Copyright (c) 2012 Qualcomm Atheros
+ * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/ath9k_platform.h>
+
+#include "machtypes.h"
+#include "dev-gpio-buttons.h"
+#include "dev-leds-gpio.h"
+#include "dev-spi.h"
+#include "dev-usb.h"
+#include "dev-wmac.h"
+#include "pci.h"
+
+#define AP136_GPIO_LED_STATUS_RED 14
+#define AP136_GPIO_LED_STATUS_GREEN 19
+#define AP136_GPIO_LED_USB 4
+#define AP136_GPIO_LED_WLAN_2G 13
+#define AP136_GPIO_LED_WLAN_5G 12
+#define AP136_GPIO_LED_WPS_RED 15
+#define AP136_GPIO_LED_WPS_GREEN 20
+
+#define AP136_GPIO_BTN_WPS 16
+#define AP136_GPIO_BTN_RFKILL 21
+
+#define AP136_KEYS_POLL_INTERVAL 20 /* msecs */
+#define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL)
+
+#define AP136_WMAC_CALDATA_OFFSET 0x1000
+#define AP136_PCIE_CALDATA_OFFSET 0x5000
+
+static struct gpio_led ap136_leds_gpio[] __initdata = {
+ {
+ .name = "qca:green:status",
+ .gpio = AP136_GPIO_LED_STATUS_GREEN,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:status",
+ .gpio = AP136_GPIO_LED_STATUS_RED,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:green:wps",
+ .gpio = AP136_GPIO_LED_WPS_GREEN,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:wps",
+ .gpio = AP136_GPIO_LED_WPS_RED,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:wlan-2g",
+ .gpio = AP136_GPIO_LED_WLAN_2G,
+ .active_low = 1,
+ },
+ {
+ .name = "qca:red:usb",
+ .gpio = AP136_GPIO_LED_USB,
+ .active_low = 1,
+ }
+};
+
+static struct gpio_keys_button ap136_gpio_keys[] __initdata = {
+ {
+ .desc = "WPS button",
+ .type = EV_KEY,
+ .code = KEY_WPS_BUTTON,
+ .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = AP136_GPIO_BTN_WPS,
+ .active_low = 1,
+ },
+ {
+ .desc = "RFKILL button",
+ .type = EV_KEY,
+ .code = KEY_RFKILL,
+ .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL,
+ .gpio = AP136_GPIO_BTN_RFKILL,
+ .active_low = 1,
+ },
+};
+
+static struct spi_board_info ap136_spi_info[] = {
+ {
+ .bus_num = 0,
+ .chip_select = 0,
+ .max_speed_hz = 25000000,
+ .modalias = "mx25l6405d",
+ }
+};
+
+static struct ath79_spi_platform_data ap136_spi_data = {
+ .bus_num = 0,
+ .num_chipselect = 1,
+};
+
+#ifdef CONFIG_PCI
+static struct ath9k_platform_data ap136_ath9k_data;
+
+static int ap136_pci_plat_dev_init(struct pci_dev *dev)
+{
+ if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0)
+ dev->dev.platform_data = &ap136_ath9k_data;
+
+ return 0;
+}
+
+static void __init ap136_pci_init(u8 *eeprom)
+{
+ memcpy(ap136_ath9k_data.eeprom_data, eeprom,
+ sizeof(ap136_ath9k_data.eeprom_data));
+
+ ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init);
+ ath79_register_pci();
+}
+#else
+static inline void ap136_pci_init(u8 *eeprom) {}
+#endif /* CONFIG_PCI */
+
+static void __init ap136_setup(void)
+{
+ u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
+
+ ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio),
+ ap136_leds_gpio);
+ ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL,
+ ARRAY_SIZE(ap136_gpio_keys),
+ ap136_gpio_keys);
+ ath79_register_spi(&ap136_spi_data, ap136_spi_info,
+ ARRAY_SIZE(ap136_spi_info));
+ ath79_register_usb();
+ ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET);
+ ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET);
+}
+
+MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010",
+ "Atheros AP136-010 reference board",
+ ap136_setup);
diff --git a/arch/mips/ath79/mach-ap81.c b/arch/mips/ath79/mach-ap81.c
index abe19836331..1c78d497f93 100644
--- a/arch/mips/ath79/mach-ap81.c
+++ b/arch/mips/ath79/mach-ap81.c
@@ -78,7 +78,7 @@ static struct spi_board_info ap81_spi_info[] = {
static struct ath79_spi_platform_data ap81_spi_data = {
.bus_num = 0,
- .num_chipselect = 1,
+ .num_chipselect = 1,
};
static void __init ap81_setup(void)
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
index 42f540a724f..4d661a1d2da 100644
--- a/arch/mips/ath79/mach-db120.c
+++ b/arch/mips/ath79/mach-db120.c
@@ -87,7 +87,7 @@ static struct spi_board_info db120_spi_info[] = {
static struct ath79_spi_platform_data db120_spi_data = {
.bus_num = 0,
- .num_chipselect = 1,
+ .num_chipselect = 1,
};
#ifdef CONFIG_PCI
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index c5f0ea5e00c..67b980d94fb 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -34,8 +34,8 @@
#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
static struct i2c_gpio_platform_data pb44_i2c_gpio_data = {
- .sda_pin = PB44_GPIO_I2C_SDA,
- .scl_pin = PB44_GPIO_I2C_SCL,
+ .sda_pin = PB44_GPIO_I2C_SDA,
+ .scl_pin = PB44_GPIO_I2C_SCL,
};
static struct platform_device pb44_i2c_gpio_device = {
@@ -53,7 +53,7 @@ static struct pcf857x_platform_data pb44_pcf857x_data = {
static struct i2c_board_info pb44_i2c_board_info[] __initdata = {
{
I2C_BOARD_INFO("pcf8575", 0x20),
- .platform_data = &pb44_pcf857x_data,
+ .platform_data = &pb44_pcf857x_data,
},
};
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
index af92e5c30d6..26254058c54 100644
--- a/arch/mips/ath79/machtypes.h
+++ b/arch/mips/ath79/machtypes.h
@@ -17,6 +17,7 @@
enum ath79_mach_type {
ATH79_MACH_GENERIC = 0,
ATH79_MACH_AP121, /* Atheros AP121 reference board */
+ ATH79_MACH_AP136_010, /* Atheros AP136-010 reference board */
ATH79_MACH_AP81, /* Atheros AP81 reference board */
ATH79_MACH_DB120, /* Atheros DB120 reference board */
ATH79_MACH_PB44, /* Atheros PB44 reference board */
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
index ca83abd9d31..730c0b03060 100644
--- a/arch/mips/ath79/pci.c
+++ b/arch/mips/ath79/pci.c
@@ -14,10 +14,11 @@
#include <linux/init.h>
#include <linux/pci.h>
+#include <linux/resource.h>
+#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/irq.h>
-#include <asm/mach-ath79/pci.h>
#include "pci.h"
static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
@@ -48,6 +49,21 @@ static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
}
};
+static const struct ath79_pci_irq qca955x_pci_irq_map[] __initconst = {
+ {
+ .bus = 0,
+ .slot = 0,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(0),
+ },
+ {
+ .bus = 1,
+ .slot = 0,
+ .pin = 1,
+ .irq = ATH79_PCI_IRQ(1),
+ },
+};
+
int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
{
int irq = -1;
@@ -63,6 +79,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
soc_is_ar9344()) {
ath79_pci_irq_map = ar724x_pci_irq_map;
ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
+ } else if (soc_is_qca955x()) {
+ ath79_pci_irq_map = qca955x_pci_irq_map;
+ ath79_pci_nr_irqs = ARRAY_SIZE(qca955x_pci_irq_map);
} else {
pr_crit("pci %s: invalid irq map\n",
pci_name((struct pci_dev *) dev));
@@ -74,7 +93,9 @@ int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
const struct ath79_pci_irq *entry;
entry = &ath79_pci_irq_map[i];
- if (entry->slot == slot && entry->pin == pin) {
+ if (entry->bus == dev->bus->number &&
+ entry->slot == slot &&
+ entry->pin == pin) {
irq = entry->irq;
break;
}
@@ -110,21 +131,143 @@ void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
ath79_pci_plat_dev_init = func;
}
-int __init ath79_register_pci(void)
+static struct platform_device *
+ath79_register_pci_ar71xx(void)
+{
+ struct platform_device *pdev;
+ struct resource res[4];
+
+ memset(res, 0, sizeof(res));
+
+ res[0].name = "cfg_base";
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = AR71XX_PCI_CFG_BASE;
+ res[0].end = AR71XX_PCI_CFG_BASE + AR71XX_PCI_CFG_SIZE - 1;
+
+ res[1].flags = IORESOURCE_IRQ;
+ res[1].start = ATH79_CPU_IRQ(2);
+ res[1].end = ATH79_CPU_IRQ(2);
+
+ res[2].name = "io_base";
+ res[2].flags = IORESOURCE_IO;
+ res[2].start = 0;
+ res[2].end = 0;
+
+ res[3].name = "mem_base";
+ res[3].flags = IORESOURCE_MEM;
+ res[3].start = AR71XX_PCI_MEM_BASE;
+ res[3].end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1;
+
+ pdev = platform_device_register_simple("ar71xx-pci", -1,
+ res, ARRAY_SIZE(res));
+ return pdev;
+}
+
+static struct platform_device *
+ath79_register_pci_ar724x(int id,
+ unsigned long cfg_base,
+ unsigned long ctrl_base,
+ unsigned long crp_base,
+ unsigned long mem_base,
+ unsigned long mem_size,
+ unsigned long io_base,
+ int irq)
{
- if (soc_is_ar71xx())
- return ar71xx_pcibios_init();
+ struct platform_device *pdev;
+ struct resource res[6];
+
+ memset(res, 0, sizeof(res));
+
+ res[0].name = "cfg_base";
+ res[0].flags = IORESOURCE_MEM;
+ res[0].start = cfg_base;
+ res[0].end = cfg_base + AR724X_PCI_CFG_SIZE - 1;
+
+ res[1].name = "ctrl_base";
+ res[1].flags = IORESOURCE_MEM;
+ res[1].start = ctrl_base;
+ res[1].end = ctrl_base + AR724X_PCI_CTRL_SIZE - 1;
+
+ res[2].flags = IORESOURCE_IRQ;
+ res[2].start = irq;
+ res[2].end = irq;
+
+ res[3].name = "mem_base";
+ res[3].flags = IORESOURCE_MEM;
+ res[3].start = mem_base;
+ res[3].end = mem_base + mem_size - 1;
+
+ res[4].name = "io_base";
+ res[4].flags = IORESOURCE_IO;
+ res[4].start = io_base;
+ res[4].end = io_base;
- if (soc_is_ar724x())
- return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
+ res[5].name = "crp_base";
+ res[5].flags = IORESOURCE_MEM;
+ res[5].start = crp_base;
+ res[5].end = crp_base + AR724X_PCI_CRP_SIZE - 1;
- if (soc_is_ar9342() || soc_is_ar9344()) {
+ pdev = platform_device_register_simple("ar724x-pci", id,
+ res, ARRAY_SIZE(res));
+ return pdev;
+}
+
+int __init ath79_register_pci(void)
+{
+ struct platform_device *pdev = NULL;
+
+ if (soc_is_ar71xx()) {
+ pdev = ath79_register_pci_ar71xx();
+ } else if (soc_is_ar724x()) {
+ pdev = ath79_register_pci_ar724x(-1,
+ AR724X_PCI_CFG_BASE,
+ AR724X_PCI_CTRL_BASE,
+ AR724X_PCI_CRP_BASE,
+ AR724X_PCI_MEM_BASE,
+ AR724X_PCI_MEM_SIZE,
+ 0,
+ ATH79_CPU_IRQ(2));
+ } else if (soc_is_ar9342() ||
+ soc_is_ar9344()) {
u32 bootstrap;
bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
- if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
- return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
+ if ((bootstrap & AR934X_BOOTSTRAP_PCIE_RC) == 0)
+ return -ENODEV;
+
+ pdev = ath79_register_pci_ar724x(-1,
+ AR724X_PCI_CFG_BASE,
+ AR724X_PCI_CTRL_BASE,
+ AR724X_PCI_CRP_BASE,
+ AR724X_PCI_MEM_BASE,
+ AR724X_PCI_MEM_SIZE,
+ 0,
+ ATH79_IP2_IRQ(0));
+ } else if (soc_is_qca9558()) {
+ pdev = ath79_register_pci_ar724x(0,
+ QCA955X_PCI_CFG_BASE0,
+ QCA955X_PCI_CTRL_BASE0,
+ QCA955X_PCI_CRP_BASE0,
+ QCA955X_PCI_MEM_BASE0,
+ QCA955X_PCI_MEM_SIZE,
+ 0,
+ ATH79_IP2_IRQ(0));
+
+ pdev = ath79_register_pci_ar724x(1,
+ QCA955X_PCI_CFG_BASE1,
+ QCA955X_PCI_CTRL_BASE1,
+ QCA955X_PCI_CRP_BASE1,
+ QCA955X_PCI_MEM_BASE1,
+ QCA955X_PCI_MEM_SIZE,
+ 1,
+ ATH79_IP3_IRQ(2));
+ } else {
+ /* No PCI support */
+ return -ENODEV;
}
- return -ENODEV;
+ if (!pdev)
+ pr_err("unable to register PCI controller device\n");
+
+ return pdev ? 0 : -ENODEV;
}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
index 51c6625dcc6..1d00a3803c3 100644
--- a/arch/mips/ath79/pci.h
+++ b/arch/mips/ath79/pci.h
@@ -14,6 +14,7 @@
#define _ATH79_PCI_H
struct ath79_pci_irq {
+ int bus;
u8 slot;
u8 pin;
int irq;
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 60d212ef862..d5b3c905701 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -164,13 +164,29 @@ static void __init ath79_detect_sys_type(void)
rev = id & AR934X_REV_ID_REVISION_MASK;
break;
+ case REV_ID_MAJOR_QCA9556:
+ ath79_soc = ATH79_SOC_QCA9556;
+ chip = "9556";
+ rev = id & QCA955X_REV_ID_REVISION_MASK;
+ break;
+
+ case REV_ID_MAJOR_QCA9558:
+ ath79_soc = ATH79_SOC_QCA9558;
+ chip = "9558";
+ rev = id & QCA955X_REV_ID_REVISION_MASK;
+ break;
+
default:
panic("ath79: unknown SoC, id:0x%08x", id);
}
ath79_soc_rev = rev;
- sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
+ if (soc_is_qca955x())
+ sprintf(ath79_sys_type, "Qualcomm Atheros QCA%s rev %u",
+ chip, rev);
+ else
+ sprintf(ath79_sys_type, "Atheros AR%s rev %u", chip, rev);
pr_info("SoC: %s\n", ath79_sys_type);
}
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 1a3567f07e7..f3bf6d5bfb9 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,5 +3,5 @@
# under Linux.
#
-obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
+obj-y += irq.o nvram.o prom.o serial.o setup.o time.o sprom.o
obj-$(CONFIG_BCM47XX_SSB) += wgt634u.o
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 48a4c70b384..cc40b74940f 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -3,10 +3,10 @@
*
* Copyright (C) 2005 Broadcom Corporation
* Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2010-2011 Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -18,83 +18,160 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/addrspace.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
static char nvram_buf[NVRAM_SPACE];
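+/* Look for an NVRAM header at each of the known partition sizes below @end. */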
+static u32 find_nvram_size(u32 end)
+{
+ struct nvram_header *header;
+ u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
+ header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]);
+ if (header->magic == NVRAM_HEADER)
+ return nvram_sizes[i];
+ }
+
+ return 0;
+}
+
/* Probe for NVRAM header */
-static void early_nvram_init(void)
+static int nvram_find_and_copy(u32 base, u32 lim)
{
-#ifdef CONFIG_BCM47XX_SSB
- struct ssb_mipscore *mcore_ssb;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- struct bcma_drv_cc *bcma_cc;
-#endif
struct nvram_header *header;
int i;
- u32 base = 0;
- u32 lim = 0;
u32 off;
u32 *src, *dst;
+ u32 size;
- switch (bcm47xx_bus_type) {
-#ifdef CONFIG_BCM47XX_SSB
- case BCM47XX_BUS_TYPE_SSB:
- mcore_ssb = &bcm47xx_bus.ssb.mipscore;
- base = mcore_ssb->pflash.window;
- lim = mcore_ssb->pflash.window_size;
- break;
-#endif
-#ifdef CONFIG_BCM47XX_BCMA
- case BCM47XX_BUS_TYPE_BCMA:
- bcma_cc = &bcm47xx_bus.bcma.bus.drv_cc;
- base = bcma_cc->pflash.window;
- lim = bcma_cc->pflash.window_size;
- break;
-#endif
- }
-
+ /* TODO: when nvram is on nand flash check for bad blocks first. */
off = FLASH_MIN;
while (off <= lim) {
/* Windowed flash access */
- header = (struct nvram_header *)
- KSEG1ADDR(base + off - NVRAM_SPACE);
- if (header->magic == NVRAM_HEADER)
+ size = find_nvram_size(base + off);
+ if (size) {
+ header = (struct nvram_header *)KSEG1ADDR(base + off -
+ size);
goto found;
+ }
off <<= 1;
}
/* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
header = (struct nvram_header *) KSEG1ADDR(base + 4096);
- if (header->magic == NVRAM_HEADER)
+ if (header->magic == NVRAM_HEADER) {
+ size = NVRAM_SPACE;
goto found;
+ }
header = (struct nvram_header *) KSEG1ADDR(base + 1024);
- if (header->magic == NVRAM_HEADER)
+ if (header->magic == NVRAM_HEADER) {
+ size = NVRAM_SPACE;
goto found;
+ }
- return;
+ pr_err("no nvram found\n");
+ return -ENXIO;
found:
+
+ if (header->len > size)
+ pr_err("The nvram size accoridng to the header seems to be bigger than the partition on flash\n");
+ if (header->len > NVRAM_SPACE)
+ pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
+ header->len, NVRAM_SPACE);
+
src = (u32 *) header;
dst = (u32 *) nvram_buf;
for (i = 0; i < sizeof(struct nvram_header); i += 4)
*dst++ = *src++;
- for (; i < header->len && i < NVRAM_SPACE; i += 4)
+ for (; i < header->len && i < NVRAM_SPACE && i < size; i += 4)
*dst++ = le32_to_cpu(*src++);
+ memset(dst, 0x0, NVRAM_SPACE - i);
+
+ return 0;
}
-int nvram_getenv(char *name, char *val, size_t val_len)
+#ifdef CONFIG_BCM47XX_SSB
+static int nvram_init_ssb(void)
+{
+ struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore;
+ u32 base;
+ u32 lim;
+
+ if (mcore->pflash.present) {
+ base = mcore->pflash.window;
+ lim = mcore->pflash.window_size;
+ } else {
+ pr_err("Couldn't find supported flash memory\n");
+ return -ENXIO;
+ }
+
+ return nvram_find_and_copy(base, lim);
+}
+#endif
+
+#ifdef CONFIG_BCM47XX_BCMA
+static int nvram_init_bcma(void)
+{
+ struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc;
+ u32 base;
+ u32 lim;
+
+#ifdef CONFIG_BCMA_NFLASH
+ if (cc->nflash.boot) {
+ base = BCMA_SOC_FLASH1;
+ lim = BCMA_SOC_FLASH1_SZ;
+ } else
+#endif
+ if (cc->pflash.present) {
+ base = cc->pflash.window;
+ lim = cc->pflash.window_size;
+#ifdef CONFIG_BCMA_SFLASH
+ } else if (cc->sflash.present) {
+ base = cc->sflash.window;
+ lim = cc->sflash.size;
+#endif
+ } else {
+ pr_err("Couldn't find supported flash memory\n");
+ return -ENXIO;
+ }
+
+ return nvram_find_and_copy(base, lim);
+}
+#endif
+
+static int nvram_init(void)
+{
+ switch (bcm47xx_bus_type) {
+#ifdef CONFIG_BCM47XX_SSB
+ case BCM47XX_BUS_TYPE_SSB:
+ return nvram_init_ssb();
+#endif
+#ifdef CONFIG_BCM47XX_BCMA
+ case BCM47XX_BUS_TYPE_BCMA:
+ return nvram_init_bcma();
+#endif
+ }
+ return -ENXIO;
+}
+
+int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len)
{
char *var, *value, *end, *eq;
+ int err;
if (!name)
- return NVRAM_ERR_INV_PARAM;
+ return -EINVAL;
- if (!nvram_buf[0])
- early_nvram_init();
+ if (!nvram_buf[0]) {
+ err = nvram_init();
+ if (err)
+ return err;
+ }
/* Look for name=value and return value */
var = &nvram_buf[sizeof(struct nvram_header)];
@@ -110,6 +187,6 @@ int nvram_getenv(char *name, char *val, size_t val_len)
return snprintf(val, val_len, "%s", value);
}
}
- return NVRAM_ERR_ENVNOTFOUND;
+ return -ENOENT;
}
-EXPORT_SYMBOL(nvram_getenv);
+EXPORT_SYMBOL(bcm47xx_nvram_getenv);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 4d54b58dbd3..b2246cd9ca1 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -35,7 +35,7 @@
#include <asm/reboot.h>
#include <asm/time.h>
#include <bcm47xx.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
union bcm47xx_bus bcm47xx_bus;
EXPORT_SYMBOL(bcm47xx_bus);
@@ -115,7 +115,7 @@ static int bcm47xx_get_invariants(struct ssb_bus *bus,
memset(&iv->sprom, 0, sizeof(struct ssb_sprom));
bcm47xx_fill_sprom(&iv->sprom, NULL, false);
- if (nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
+ if (bcm47xx_nvram_getenv("cardbus", buf, sizeof(buf)) >= 0)
iv->has_cardbus_slot = !!simple_strtoul(buf, NULL, 10);
return 0;
@@ -138,7 +138,7 @@ static void __init bcm47xx_register_ssb(void)
panic("Failed to initialize SSB bus (err %d)", err);
mcore = &bcm47xx_bus.ssb.mipscore;
- if (nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
+ if (bcm47xx_nvram_getenv("kernel_args", buf, sizeof(buf)) >= 0) {
if (strstr(buf, "console=ttyS1")) {
struct ssb_serial_port port;
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 289cc0a3863..ad03c931b90 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -27,7 +27,7 @@
*/
#include <bcm47xx.h>
-#include <nvram.h>
+#include <bcm47xx_nvram.h>
static void create_key(const char *prefix, const char *postfix,
const char *name, char *buf, int len)
@@ -50,18 +50,18 @@ static int get_nvram_var(const char *prefix, const char *postfix,
create_key(prefix, postfix, name, key, sizeof(key));
- err = nvram_getenv(key, buf, len);
- if (fallback && err == NVRAM_ERR_ENVNOTFOUND && prefix) {
+ err = bcm47xx_nvram_getenv(key, buf, len);
+ if (fallback && err == -ENOENT && prefix) {
create_key(NULL, postfix, name, key, sizeof(key));
- err = nvram_getenv(key, buf, len);
+ err = bcm47xx_nvram_getenv(key, buf, len);
}
return err;
}
#define NVRAM_READ_VAL(type) \
static void nvram_read_ ## type (const char *prefix, \
- const char *postfix, const char *name, \
- type *val, type allset, bool fallback) \
+ const char *postfix, const char *name, \
+ type *val, type allset, bool fallback) \
{ \
char buf[100]; \
int err; \
@@ -71,7 +71,7 @@ static void nvram_read_ ## type (const char *prefix, \
fallback); \
if (err < 0) \
return; \
- err = kstrto ## type (buf, 0, &var); \
+ err = kstrto ## type(strim(buf), 0, &var); \
if (err) { \
pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
prefix, name, postfix, buf, err); \
@@ -99,7 +99,7 @@ static void nvram_read_u32_2(const char *prefix, const char *name,
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
- err = kstrtou32(buf, 0, &val);
+ err = kstrtou32(strim(buf), 0, &val);
if (err) {
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
@@ -120,7 +120,7 @@ static void nvram_read_leddc(const char *prefix, const char *name,
err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
if (err < 0)
return;
- err = kstrtou32(buf, 0, &val);
+ err = kstrtou32(strim(buf), 0, &val);
if (err) {
pr_warn("can not parse nvram name %s%s with value %s got %i\n",
prefix, name, buf, err);
@@ -144,7 +144,7 @@ static void nvram_read_macaddr(const char *prefix, const char *name,
if (err < 0)
return;
- nvram_parse_macaddr(buf, *val);
+ bcm47xx_nvram_parse_macaddr(buf, *val);
}
static void nvram_read_alpha2(const char *prefix, const char *name,
@@ -652,12 +652,10 @@ static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
bool fallback)
{
- nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0,
- fallback);
+ nvram_read_u16(prefix, NULL, "boardrev", &sprom->board_rev, 0, true);
nvram_read_u16(prefix, NULL, "boardnum", &sprom->board_num, 0,
fallback);
- nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0,
- fallback);
+ nvram_read_u16(prefix, NULL, "boardtype", &sprom->board_type, 0, true);
nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
&sprom->boardflags_hi, fallback);
nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
diff --git a/arch/mips/bcm47xx/wgt634u.c b/arch/mips/bcm47xx/wgt634u.c
index 9d111e8087e..c63a4c287b5 100644
--- a/arch/mips/bcm47xx/wgt634u.c
+++ b/arch/mips/bcm47xx/wgt634u.c
@@ -36,13 +36,13 @@ static struct gpio_led wgt634u_leds[] = {
};
static struct gpio_led_platform_data wgt634u_led_data = {
- .num_leds = ARRAY_SIZE(wgt634u_leds),
- .leds = wgt634u_leds,
+ .num_leds = ARRAY_SIZE(wgt634u_leds),
+ .leds = wgt634u_leds,
};
static struct platform_device wgt634u_gpio_leds = {
- .name = "leds-gpio",
- .id = -1,
+ .name = "leds-gpio",
+ .id = -1,
.dev = {
.platform_data = &wgt634u_led_data,
}
@@ -53,35 +53,35 @@ static struct platform_device wgt634u_gpio_leds = {
firmware. */
static struct mtd_partition wgt634u_partitions[] = {
{
- .name = "cfe",
- .offset = 0,
- .size = 0x60000, /* 384k */
- .mask_flags = MTD_WRITEABLE /* force read-only */
+ .name = "cfe",
+ .offset = 0,
+ .size = 0x60000, /* 384k */
+ .mask_flags = MTD_WRITEABLE /* force read-only */
},
{
- .name = "config",
+ .name = "config",
.offset = 0x60000,
- .size = 0x20000 /* 128k */
+ .size = 0x20000 /* 128k */
},
{
- .name = "linux",
+ .name = "linux",
.offset = 0x80000,
- .size = 0x140000 /* 1280k */
+ .size = 0x140000 /* 1280k */
},
{
- .name = "jffs",
+ .name = "jffs",
.offset = 0x1c0000,
- .size = 0x620000 /* 6272k */
+ .size = 0x620000 /* 6272k */
},
{
- .name = "nvram",
+ .name = "nvram",
.offset = 0x7e0000,
- .size = 0x20000 /* 128k */
+ .size = 0x20000 /* 128k */
},
};
static struct physmap_flash_data wgt634u_flash_data = {
- .parts = wgt634u_partitions,
+ .parts = wgt634u_partitions,
.nr_parts = ARRAY_SIZE(wgt634u_partitions)
};
@@ -90,9 +90,9 @@ static struct resource wgt634u_flash_resource = {
};
static struct platform_device wgt634u_flash = {
- .name = "physmap-flash",
- .id = 0,
- .dev = { .platform_data = &wgt634u_flash_data, },
+ .name = "physmap-flash",
+ .id = 0,
+ .dev = { .platform_data = &wgt634u_flash_data, },
.resource = &wgt634u_flash_resource,
.num_resources = 1,
};
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 73be9b34969..ed1949c2950 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -406,9 +406,9 @@ static struct board_info __initdata board_FAST2404 = {
.expected_cpu_id = 0x6348,
.has_uart0 = 1,
- .has_enet0 = 1,
- .has_enet1 = 1,
- .has_pci = 1,
+ .has_enet0 = 1,
+ .has_enet1 = 1,
+ .has_pci = 1,
.enet0 = {
.has_phy = 1,
@@ -591,22 +591,22 @@ static struct board_info __initdata board_96358vw2 = {
};
static struct board_info __initdata board_AGPFS0 = {
- .name = "AGPF-S0",
- .expected_cpu_id = 0x6358,
+ .name = "AGPF-S0",
+ .expected_cpu_id = 0x6358,
.has_uart0 = 1,
- .has_enet0 = 1,
- .has_enet1 = 1,
- .has_pci = 1,
+ .has_enet0 = 1,
+ .has_enet1 = 1,
+ .has_pci = 1,
.enet0 = {
- .has_phy = 1,
- .use_internal_phy = 1,
+ .has_phy = 1,
+ .use_internal_phy = 1,
},
.enet1 = {
- .force_speed_100 = 1,
- .force_duplex_full = 1,
+ .force_speed_100 = 1,
+ .force_duplex_full = 1,
},
.has_ohci0 = 1,
@@ -677,7 +677,7 @@ static struct ssb_sprom bcm63xx_sprom = {
.revision = 0x02,
.board_rev = 0x17,
.country_code = 0x0,
- .ant_available_bg = 0x3,
+ .ant_available_bg = 0x3,
.pa0b0 = 0x15ae,
.pa0b1 = 0xfa85,
.pa0b2 = 0xfe8d,
diff --git a/arch/mips/bcm63xx/early_printk.c b/arch/mips/bcm63xx/early_printk.c
index bf353c937df..aa8f7f9cc7a 100644
--- a/arch/mips/bcm63xx/early_printk.c
+++ b/arch/mips/bcm63xx/early_printk.c
@@ -10,7 +10,7 @@
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
-static void __init wait_xfered(void)
+static void wait_xfered(void)
{
unsigned int val;
@@ -22,7 +22,7 @@ static void __init wait_xfered(void)
} while (1);
}
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
wait_xfered();
bcm_uart0_writel(c, UART_FIFO_REG);
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 85bcb5adc7c..851261e9fdc 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -24,7 +24,7 @@ strip-flags := $(addprefix --remove-section=,$(drop-sections))
hostprogs-y := elf2ecoff
targets := vmlinux.ecoff
-quiet_cmd_ecoff = ECOFF $@
+quiet_cmd_ecoff = ECOFF $@
cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
$(obj)/vmlinux.ecoff: $(obj)/elf2ecoff $(VMLINUX) FORCE
$(call if_changed,ecoff)
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index c2a3fb0ffc8..bbaa1d4beb6 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -51,7 +51,7 @@ $(obj)/vmlinux.bin.z: $(obj)/vmlinux.bin FORCE
targets += piggy.o
OBJCOPYFLAGS_piggy.o := --add-section=.image=$(obj)/vmlinux.bin.z \
- --set-section-flags=.image=contents,alloc,load,readonly,data
+ --set-section-flags=.image=contents,alloc,load,readonly,data
$(obj)/piggy.o: $(obj)/dummy.o $(obj)/vmlinux.bin.z FORCE
$(call if_changed,objcopy)
@@ -67,9 +67,9 @@ endif
vmlinuzobjs-y += $(obj)/piggy.o
-quiet_cmd_zld = LD $@
+quiet_cmd_zld = LD $@
cmd_zld = $(LD) $(LDFLAGS) -Ttext $(VMLINUZ_LOAD_ADDRESS) -T $< $(vmlinuzobjs-y) -o $@
-quiet_cmd_strip = STRIP $@
+quiet_cmd_strip = STRIP $@
cmd_strip = $(STRIP) -s $@
vmlinuz: $(src)/ld.script $(vmlinuzobjs-y) $(obj)/calc_vmlinuz_load_addr
$(call cmd,zld)
@@ -96,7 +96,7 @@ quiet_cmd_32 = OBJCOPY $@
vmlinuz.32: vmlinuz
$(call cmd,32)
-quiet_cmd_ecoff = ECOFF $@
+quiet_cmd_ecoff = ECOFF $@
cmd_ecoff = $< $(VMLINUZ) $@ $(e2eflag)
vmlinuz.ecoff: $(obj)/../elf2ecoff $(VMLINUZ)
$(call cmd,ecoff)
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 9a6243676e2..37fe58c19a9 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2010 "Wu Zhangjin" <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 5cad0faefa1..2c9573098c0 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -5,8 +5,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
index 4e65a8420be..409cb483a9f 100644
--- a/arch/mips/boot/compressed/head.S
+++ b/arch/mips/boot/compressed/head.S
@@ -32,8 +32,8 @@ start:
bne a2, a0, 1b
addiu a0, a0, 4
- PTR_LA a0, (.heap) /* heap address */
- PTR_LA sp, (.stack + 8192) /* stack address */
+ PTR_LA a0, (.heap) /* heap address */
+ PTR_LA sp, (.stack + 8192) /* stack address */
PTR_LA ra, 2f
PTR_LA k0, decompress_kernel
diff --git a/arch/mips/boot/ecoff.h b/arch/mips/boot/ecoff.h
index 8c3eed2877f..83e5c3813d6 100644
--- a/arch/mips/boot/ecoff.h
+++ b/arch/mips/boot/ecoff.h
@@ -2,48 +2,48 @@
* Some ECOFF definitions.
*/
typedef struct filehdr {
- unsigned short f_magic; /* magic number */
- unsigned short f_nscns; /* number of sections */
- long f_timdat; /* time & date stamp */
- long f_symptr; /* file pointer to symbolic header */
- long f_nsyms; /* sizeof(symbolic hdr) */
- unsigned short f_opthdr; /* sizeof(optional hdr) */
- unsigned short f_flags; /* flags */
+ unsigned short f_magic; /* magic number */
+ unsigned short f_nscns; /* number of sections */
+ long f_timdat; /* time & date stamp */
+ long f_symptr; /* file pointer to symbolic header */
+ long f_nsyms; /* sizeof(symbolic hdr) */
+ unsigned short f_opthdr; /* sizeof(optional hdr) */
+ unsigned short f_flags; /* flags */
} FILHDR;
-#define FILHSZ sizeof(FILHDR)
+#define FILHSZ sizeof(FILHDR)
#define OMAGIC 0407
#define MIPSEBMAGIC 0x160
#define MIPSELMAGIC 0x162
typedef struct scnhdr {
- char s_name[8]; /* section name */
- long s_paddr; /* physical address, aliased s_nlib */
- long s_vaddr; /* virtual address */
- long s_size; /* section size */
- long s_scnptr; /* file ptr to raw data for section */
- long s_relptr; /* file ptr to relocation */
- long s_lnnoptr; /* file ptr to gp histogram */
- unsigned short s_nreloc; /* number of relocation entries */
- unsigned short s_nlnno; /* number of gp histogram entries */
- long s_flags; /* flags */
+ char s_name[8]; /* section name */
+ long s_paddr; /* physical address, aliased s_nlib */
+ long s_vaddr; /* virtual address */
+ long s_size; /* section size */
+ long s_scnptr; /* file ptr to raw data for section */
+ long s_relptr; /* file ptr to relocation */
+ long s_lnnoptr; /* file ptr to gp histogram */
+ unsigned short s_nreloc; /* number of relocation entries */
+ unsigned short s_nlnno; /* number of gp histogram entries */
+ long s_flags; /* flags */
} SCNHDR;
#define SCNHSZ sizeof(SCNHDR)
#define SCNROUND ((long)16)
typedef struct aouthdr {
- short magic; /* see above */
- short vstamp; /* version stamp */
- long tsize; /* text size in bytes, padded to DW bdry*/
- long dsize; /* initialized data " " */
- long bsize; /* uninitialized data " " */
- long entry; /* entry pt. */
- long text_start; /* base of text used for this file */
- long data_start; /* base of data used for this file */
- long bss_start; /* base of bss used for this file */
- long gprmask; /* general purpose register mask */
- long cprmask[4]; /* co-processor register masks */
- long gp_value; /* the gp value used for this object */
+ short magic; /* see above */
+ short vstamp; /* version stamp */
+ long tsize; /* text size in bytes, padded to DW bdry*/
+ long dsize; /* initialized data " " */
+ long bsize; /* uninitialized data " " */
+ long entry; /* entry pt. */
+ long text_start; /* base of text used for this file */
+ long data_start; /* base of data used for this file */
+ long bss_start; /* base of bss used for this file */
+ long gprmask; /* general purpose register mask */
+ long cprmask[4]; /* co-processor register masks */
+ long gp_value; /* the gp value used for this object */
} AOUTHDR;
#define AOUTHSZ sizeof(AOUTHDR)
@@ -51,7 +51,7 @@ typedef struct aouthdr {
#define NMAGIC 0410
#define ZMAGIC 0413
#define SMAGIC 0411
-#define LIBMAGIC 0443
+#define LIBMAGIC 0443
#define N_TXTOFF(f, a) \
((a).magic == ZMAGIC || (a).magic == LIBMAGIC ? 0 : \
diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c
index e19d906236a..8585078ae50 100644
--- a/arch/mips/boot/elf2ecoff.c
+++ b/arch/mips/boot/elf2ecoff.c
@@ -29,7 +29,7 @@
/* elf2ecoff.c
This program converts an elf executable to an ECOFF executable.
- No symbol table is retained. This is useful primarily in building
+ No symbol table is retained. This is useful primarily in building
net-bootable kernels for machines (e.g., DECstation and Alpha) which
only support the ECOFF object file format. */
@@ -341,7 +341,7 @@ int main(int argc, char *argv[])
/* Figure out if we can cram the program header into an ECOFF
header... Basically, we can't handle anything but loadable
- segments, but we can ignore some kinds of segments. We can't
+ segments, but we can ignore some kinds of segments. We can't
handle holes in the address space. Segments may be out of order,
so we sort them first. */
@@ -514,7 +514,7 @@ int main(int argc, char *argv[])
for (i = 0; i < nosecs; i++) {
printf
- ("Section %d: %s phys %lx size %lx file offset %lx\n",
+ ("Section %d: %s phys %lx size %lx file offset %lx\n",
i, esecs[i].s_name, esecs[i].s_paddr,
esecs[i].s_size, esecs[i].s_scnptr);
}
@@ -551,7 +551,7 @@ int main(int argc, char *argv[])
}
/*
- * Copy the loadable sections. Zero-fill any gaps less than 64k;
+ * Copy the loadable sections. Zero-fill any gaps less than 64k;
* complain about any zero-filling, and die if we're asked to zero-fill
* more than 64k.
*/
diff --git a/arch/mips/cavium-octeon/Kconfig b/arch/mips/cavium-octeon/Kconfig
index 2f4f6d5e05b..75a6df7fd26 100644
--- a/arch/mips/cavium-octeon/Kconfig
+++ b/arch/mips/cavium-octeon/Kconfig
@@ -94,4 +94,13 @@ config SWIOTLB
select NEED_SG_DMA_LENGTH
+config OCTEON_ILM
+ tristate "Module to measure interrupt latency using Octeon CIU Timer"
+ help
+ This driver is a module to measure interrupt latency using the
+ CIU Timers on Octeon.
+
+ To compile this driver as a module, choose M here. The module
+ will be called octeon-ilm.
+
endif # CPU_CAVIUM_OCTEON
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 6e927cf20df..3595affb977 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -17,7 +17,8 @@ obj-y += dma-octeon.o flash_setup.o
obj-y += octeon-memcpy.o
obj-y += executive/
-obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_OCTEON_ILM) += oct_ilm.o
DTS_FILES = octeon_3xxx.dts octeon_68xx.dts
DTB_FILES = $(patsubst %.dts, %.dtb, $(DTS_FILES))
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 6d5ddbc112c..504ed61a47c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -155,8 +155,8 @@ int cvmx_bootmem_init(void *mem_desc_ptr)
*
* Linux 64 bit: Set XKPHYS bit
* Linux 32 bit: use mmap to create mapping, use virtual address
- * CVMX 64 bit: use physical address directly
- * CVMX 32 bit: use physical address directly
+ * CVMX 64 bit: use physical address directly
+ * CVMX 32 bit: use physical address directly
*
* Note that the CVMX environment assumes the use of 1-1 TLB
* mappings so that the physical addresses can be used
@@ -398,7 +398,7 @@ error_out:
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
uint64_t cur_addr;
- uint64_t prev_addr = 0; /* zero is invalid */
+ uint64_t prev_addr = 0; /* zero is invalid */
int retval = 0;
#ifdef DEBUG
@@ -424,7 +424,7 @@ int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
if (cur_addr == 0 || phy_addr < cur_addr) {
/* add at front of list - special case with changing head ptr */
if (cur_addr && phy_addr + size > cur_addr)
- goto bootmem_free_done; /* error, overlapping section */
+ goto bootmem_free_done; /* error, overlapping section */
else if (phy_addr + size == cur_addr) {
/* Add to front of existing first block */
cvmx_bootmem_phy_set_next(phy_addr,
@@ -611,7 +611,7 @@ int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
}
cvmx_bootmem_unlock();
- return named_block_ptr != NULL; /* 0 on failure, 1 on success */
+ return named_block_ptr != NULL; /* 0 on failure, 1 on success */
}
int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index fd2015331a2..7c649778189 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -203,10 +203,10 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
- * status for.
+ * status for.
*
* Returns The ports link status. If the link isn't fully resolved, this must
- * return zero.
+ * return zero.
*/
cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
{
@@ -357,16 +357,16 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
result.s.link_up = 1;
result.s.full_duplex = ((phy_status >> 13) & 1);
switch ((phy_status >> 14) & 3) {
- case 0: /* 10 Mbps */
+ case 0: /* 10 Mbps */
result.s.speed = 10;
break;
- case 1: /* 100 Mbps */
+ case 1: /* 100 Mbps */
result.s.speed = 100;
break;
- case 2: /* 1 Gbps */
+ case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
- case 3: /* Illegal */
+ case 3: /* Illegal */
result.u64 = 0;
break;
}
@@ -391,16 +391,16 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
result.s.link_up = inband_status.s.status;
result.s.full_duplex = inband_status.s.duplex;
switch (inband_status.s.speed) {
- case 0: /* 10 Mbps */
+ case 0: /* 10 Mbps */
result.s.speed = 10;
break;
- case 1: /* 100 Mbps */
+ case 1: /* 100 Mbps */
result.s.speed = 100;
break;
- case 2: /* 1 Gbps */
+ case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
- case 3: /* Illegal */
+ case 3: /* Illegal */
result.u64 = 0;
break;
}
@@ -429,9 +429,9 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
*
* @phy_addr: The address of the PHY to program
* @enable_autoneg:
- * Non zero if you want to enable auto-negotiation.
+ * Non zero if you want to enable auto-negotiation.
* @link_info: Link speed to program. If the speed is zero and auto-negotiation
- * is enabled, all possible negotiation speeds are advertised.
+ * is enabled, all possible negotiation speeds are advertised.
*
* Returns Zero on success, negative on failure
*/
@@ -607,10 +607,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
*
* @interface: Interface to probe
* @supported_ports:
- * Number of ports Octeon supports.
+ * Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
- * simple be "support_ports".
+ * simply be "supported_ports".
*/
int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
{
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
index c1c54890bae..607b4e65957 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-jtag.c
@@ -79,10 +79,10 @@ void cvmx_helper_qlm_jtag_init(void)
* @qlm: QLM to shift value into
* @bits: Number of bits to shift in (1-32).
* @data: Data to shift in. Bit 0 enters the chain first, followed by
- * bit 1, etc.
+ * bit 1, etc.
*
* Returns The low order bits of the JTAG chain that shifted out of the
- * circle.
+ * circle.
*/
uint32_t cvmx_helper_qlm_jtag_shift(int qlm, int bits, uint32_t data)
{
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
index 82b21843421..f59c88ee9b3 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-rgmii.c
@@ -131,7 +131,7 @@ void cvmx_helper_rgmii_internal_loopback(int port)
* @interface: Interface to setup
* @port: Port to setup (0..3)
* @cpu_clock_hz:
- * Chip frequency in Hertz
+ * Chip frequency in Hertz
*
* Returns Zero on success, negative on failure
*/
@@ -409,14 +409,14 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
/*
- * Port .en .type .p0mii Configuration
- * ---- --- ----- ------ -----------------------------------------
- * X 0 X X All links are disabled.
- * 0 1 X 0 Port 0 is RGMII
- * 0 1 X 1 Port 0 is MII
- * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
- * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
- * MII port is selected by GMX_PRT1_CFG[SPEED].
+ * Port .en .type .p0mii Configuration
+ * ---- --- ----- ------ -----------------------------------------
+ * X 0 X X All links are disabled.
+ * 0 1 X 0 Port 0 is RGMII
+ * 0 1 X 1 Port 0 is MII
+ * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
+ * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
+ * MII port is selected by GMX_PRT1_CFG[SPEED].
*/
/* In MII mode, CLK_CNT = 1. */
@@ -464,9 +464,9 @@ int __cvmx_helper_rgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
index 0c0bf5d30e7..45f18cce31a 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-sgmii.c
@@ -523,9 +523,9 @@ int __cvmx_helper_sgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
index 2830e4bdf7f..1f3030c72d8 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-spi.c
@@ -160,16 +160,16 @@ cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
result.s.link_up = inband.s.status;
result.s.full_duplex = inband.s.duplex;
switch (inband.s.speed) {
- case 0: /* 10 Mbps */
+ case 0: /* 10 Mbps */
result.s.speed = 10;
break;
- case 1: /* 100 Mbps */
+ case 1: /* 100 Mbps */
result.s.speed = 100;
break;
- case 2: /* 1 Gbps */
+ case 2: /* 1 Gbps */
result.s.speed = 1000;
break;
- case 3: /* Illegal */
+ case 3: /* Illegal */
result.s.speed = 0;
result.s.link_up = 0;
break;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
index dfdfe8bdc9c..65d2bc9a0bd 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-util.c
@@ -96,9 +96,9 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
uint8_t *end_of_data;
cvmx_dprintf("Packet Length: %u\n", work->len);
- cvmx_dprintf(" Input Port: %u\n", work->ipprt);
- cvmx_dprintf(" QoS: %u\n", work->qos);
- cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
+ cvmx_dprintf(" Input Port: %u\n", work->ipprt);
+ cvmx_dprintf(" QoS: %u\n", work->qos);
+ cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
if (work->word2.s.bufs == 0) {
union cvmx_ipd_wqe_fpa_queue wqe_pool;
@@ -132,14 +132,14 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
while (remaining_bytes) {
start_of_buffer =
((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- cvmx_dprintf(" Buffer Start:%llx\n",
+ cvmx_dprintf(" Buffer Start:%llx\n",
(unsigned long long)start_of_buffer);
- cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
- cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
- cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
- cvmx_dprintf(" Buffer Data: %llx\n",
+ cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
+ cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
+ cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
+ cvmx_dprintf(" Buffer Data: %llx\n",
(unsigned long long)buffer_ptr.s.addr);
- cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
+ cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
cvmx_dprintf("\t\t");
data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
@@ -172,11 +172,11 @@ int cvmx_helper_dump_packet(cvmx_wqe_t *work)
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
@@ -207,11 +207,11 @@ int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
index 1723248e987..7653b7e9219 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-xaui.c
@@ -321,9 +321,9 @@ int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index fa496385635..d63d20dfbfb 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -111,7 +111,7 @@ int cvmx_helper_ports_on_interface(int interface)
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
+ * DISABLED.
*/
cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
{
@@ -187,7 +187,7 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
* the defines in executive-config.h.
*
* @ipd_port: Port to configure. This follows the IPD numbering, not the
- * per interface numbering
+ * per interface numbering
*
* Returns Zero on success, negative on failure
*/
@@ -591,7 +591,7 @@ static int __cvmx_helper_packet_hardware_enable(int interface)
* Function to adjust internal IPD pointer alignments
*
* Returns 0 on success
- * !0 on failure
+ * !0 on failure
*/
int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
{
@@ -1068,9 +1068,9 @@ int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
index 560e034aa02..fa327ec891c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-rsl.c
@@ -85,11 +85,11 @@ void __cvmx_interrupt_gmxx_enable(int interface)
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
if (mode.s.en) {
switch (mode.cn56xx.mode) {
- case 1: /* XAUI */
+ case 1: /* XAUI */
num_ports = 1;
break;
- case 2: /* SGMII */
- case 3: /* PICMG */
+ case 2: /* SGMII */
+ case 3: /* PICMG */
num_ports = 4;
break;
default: /* Disabled */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
index 33b72144db3..42e38c30b54 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-l2c.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-l2c.c
@@ -147,7 +147,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask)
mask &= valid_mask;
/* A UMSK setting which blocks all L2C Ways is an error on some chips */
- if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
+ if (mask == valid_mask && !OCTEON_IS_MODEL(OCTEON_CN63XX))
return -1;
if (OCTEON_IS_MODEL(OCTEON_CN63XX))
@@ -438,7 +438,7 @@ void cvmx_l2c_flush(void)
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
- (assoc << assoc_shift) | (set << set_shift));
+ (assoc << assoc_shift) | (set << set_shift));
CVMX_CACHE_WBIL2I(address, 0);
}
}
@@ -573,8 +573,8 @@ union __cvmx_l2c_tag {
* @index: Index of the cacheline
*
* Returns The Octeon model specific tag structure. This is
- * translated by a wrapper function to a generic form that is
- * easier for applications to use.
+ * translated by a wrapper function to a generic form that is
+ * easier for applications to use.
*/
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
@@ -618,12 +618,12 @@ static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
".set push\n\t"
".set mips64\n\t"
".set noreorder\n\t"
- "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
+ "sd %[dbg_val], 0(%[dbg_addr])\n\t" /* Enter debug mode, wait for store */
"ld $0, 0(%[dbg_addr])\n\t"
- "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
- "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
+ "ld %[tag_val], 0(%[tag_addr])\n\t" /* Read L2C tag data */
+ "sd $0, 0(%[dbg_addr])\n\t" /* Exit debug mode, wait for store */
"ld $0, 0(%[dbg_addr])\n\t"
- "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
+ "cache 9, 0($0)\n\t" /* Invalidate dcache to discard debug data */
".set pop"
: [tag_val] "=r" (tag_val)
: [dbg_addr] "r" (dbg_addr), [dbg_val] "r" (debug_val), [tag_addr] "r" (debug_tag_addr)
@@ -664,10 +664,10 @@ union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
CVMX_SYNC; /* make sure CVMX_L2C_TADX_TAG is updated */
l2c_tadx_tag.u64 = cvmx_read_csr(CVMX_L2C_TADX_TAG(0));
- tag.s.V = l2c_tadx_tag.s.valid;
- tag.s.D = l2c_tadx_tag.s.dirty;
- tag.s.L = l2c_tadx_tag.s.lock;
- tag.s.U = l2c_tadx_tag.s.use;
+ tag.s.V = l2c_tadx_tag.s.valid;
+ tag.s.D = l2c_tadx_tag.s.dirty;
+ tag.s.L = l2c_tadx_tag.s.lock;
+ tag.s.U = l2c_tadx_tag.s.use;
tag.s.addr = l2c_tadx_tag.s.tag;
} else {
union __cvmx_l2c_tag tmp_tag;
@@ -679,34 +679,34 @@ union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
* as it can represent all models.
*/
if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- tag.s.V = tmp_tag.cn58xx.V;
- tag.s.D = tmp_tag.cn58xx.D;
- tag.s.L = tmp_tag.cn58xx.L;
- tag.s.U = tmp_tag.cn58xx.U;
+ tag.s.V = tmp_tag.cn58xx.V;
+ tag.s.D = tmp_tag.cn58xx.D;
+ tag.s.L = tmp_tag.cn58xx.L;
+ tag.s.U = tmp_tag.cn58xx.U;
tag.s.addr = tmp_tag.cn58xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- tag.s.V = tmp_tag.cn38xx.V;
- tag.s.D = tmp_tag.cn38xx.D;
- tag.s.L = tmp_tag.cn38xx.L;
- tag.s.U = tmp_tag.cn38xx.U;
+ tag.s.V = tmp_tag.cn38xx.V;
+ tag.s.D = tmp_tag.cn38xx.D;
+ tag.s.L = tmp_tag.cn38xx.L;
+ tag.s.U = tmp_tag.cn38xx.U;
tag.s.addr = tmp_tag.cn38xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- tag.s.V = tmp_tag.cn31xx.V;
- tag.s.D = tmp_tag.cn31xx.D;
- tag.s.L = tmp_tag.cn31xx.L;
- tag.s.U = tmp_tag.cn31xx.U;
+ tag.s.V = tmp_tag.cn31xx.V;
+ tag.s.D = tmp_tag.cn31xx.D;
+ tag.s.L = tmp_tag.cn31xx.L;
+ tag.s.U = tmp_tag.cn31xx.U;
tag.s.addr = tmp_tag.cn31xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
- tag.s.V = tmp_tag.cn30xx.V;
- tag.s.D = tmp_tag.cn30xx.D;
- tag.s.L = tmp_tag.cn30xx.L;
- tag.s.U = tmp_tag.cn30xx.U;
+ tag.s.V = tmp_tag.cn30xx.V;
+ tag.s.D = tmp_tag.cn30xx.D;
+ tag.s.L = tmp_tag.cn30xx.L;
+ tag.s.U = tmp_tag.cn30xx.U;
tag.s.addr = tmp_tag.cn30xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- tag.s.V = tmp_tag.cn50xx.V;
- tag.s.D = tmp_tag.cn50xx.D;
- tag.s.L = tmp_tag.cn50xx.L;
- tag.s.U = tmp_tag.cn50xx.U;
+ tag.s.V = tmp_tag.cn50xx.V;
+ tag.s.D = tmp_tag.cn50xx.D;
+ tag.s.L = tmp_tag.cn50xx.L;
+ tag.s.U = tmp_tag.cn50xx.U;
tag.s.addr = tmp_tag.cn50xx.addr;
} else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
@@ -865,7 +865,7 @@ void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
uint64_t address;
/* Create the address based on index and association.
* Bits<20:17> select the way of the cache block involved in
- * the operation
+ * the operation
* Bits<16:7> of the effect address select the index
*/
address = CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index f557084b109..f2c87754159 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -99,7 +99,7 @@ void cvmx_pko_initialize_global(void)
* be called after the FPA has been initialized and filled with pages.
*
* Returns 0 on success
- * !0 on failure
+ * !0 on failure
*/
int cvmx_pko_initialize_local(void)
{
@@ -186,19 +186,19 @@ void cvmx_pko_shutdown(void)
/**
* Configure a output port and the associated queues for use.
*
- * @port: Port to configure.
+ * @port: Port to configure.
* @base_queue: First queue number to associate with this port.
* @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 0-8. A value of 8 get 8 times the traffic
- * of a value of 1. A value of 0 indicates that no rounds
- * will be participated in. These priorities can be changed
- * on the fly while the pko is enabled. A priority of 9
- * indicates that static priority should be used. If static
- * priority is used all queues with static priority must be
- * contiguous starting at the base_queue, and lower numbered
- * queues have higher priority than higher numbered queues.
- * There must be num_queues elements in the array.
+ * @priority: Array of priority levels for each queue. Values are
+ * allowed to be 0-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. A value of 0 indicates that no rounds
+ * will be participated in. These priorities can be changed
+ * on the fly while the pko is enabled. A priority of 9
+ * indicates that static priority should be used. If static
+ * priority is used all queues with static priority must be
+ * contiguous starting at the base_queue, and lower numbered
+ * queues have higher priority than higher numbered queues.
+ * There must be num_queues elements in the array.
*/
cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
uint64_t num_queues,
@@ -440,7 +440,7 @@ void cvmx_pko_show_queue_map()
* @port: Port to rate limit
* @packets_s: Maximum packet/sec
* @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
@@ -473,7 +473,7 @@ int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
diff --git a/arch/mips/cavium-octeon/executive/cvmx-spi.c b/arch/mips/cavium-octeon/executive/cvmx-spi.c
index 74afb1710cd..ef5198d13a0 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-spi.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-spi.c
@@ -69,7 +69,7 @@ static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
/**
* Get current SPI4 initialization callbacks
*
- * @callbacks: Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure to fill
*
* Returns Pointer to cvmx_spi_callbacks_t structure.
*/
@@ -92,11 +92,11 @@ void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
@@ -138,11 +138,11 @@ int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
* with its correspondent system.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, negative of failure.
@@ -160,7 +160,7 @@ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
/* NOTE: Calendar setup is not performed during restart */
- /* Refer to cvmx_spi_start_interface() for the full sequence */
+ /* Refer to cvmx_spi_start_interface() for the full sequence */
/* Callback to perform clock detection */
INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
@@ -182,11 +182,11 @@ int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
@@ -297,11 +297,11 @@ int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
* Callback to setup calendar and miscellaneous settings before clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -382,7 +382,7 @@ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
stxx_spi4_dat.u64 = 0;
/*Minimum needed by dynamic alignment */
stxx_spi4_dat.s.alpha = 32;
- stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
+ stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
stxx_spi4_dat.u64);
@@ -416,11 +416,11 @@ int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -494,11 +494,11 @@ int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -563,11 +563,11 @@ int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -620,11 +620,11 @@ int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
index 8b18a20cc7b..3d17fac2935 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -74,26 +74,26 @@ EXPORT_SYMBOL(cvmx_sysinfo_get);
/**
* This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr:
- * Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
+ * Pointer to global physical memory descriptor
+ * (bootmem descriptor)
+ * @board_type: Octeon board type enumeration
*
* @board_rev_major:
- * Board major revision
+ * Board major revision
* @board_rev_minor:
- * Board minor revision
+ * Board minor revision
* @cpu_clock_hz:
- * CPU clock freqency in hertz
+ * CPU clock frequency in hertz
*
* Returns 0: Failure
- * 1: success
+ * 1: success
*/
int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
diff --git a/arch/mips/cavium-octeon/oct_ilm.c b/arch/mips/cavium-octeon/oct_ilm.c
new file mode 100644
index 00000000000..71b213dbb62
--- /dev/null
+++ b/arch/mips/cavium-octeon/oct_ilm.c
@@ -0,0 +1,206 @@
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+
+#define TIMER_NUM 3
+
+static bool reset_stats;
+
+struct latency_info {
+ u64 io_interval;
+ u64 cpu_interval;
+ u64 timer_start1;
+ u64 timer_start2;
+ u64 max_latency;
+ u64 min_latency;
+ u64 latency_sum;
+ u64 average_latency;
+ u64 interrupt_cnt;
+};
+
+static struct latency_info li;
+static struct dentry *dir;
+
+static int show_latency(struct seq_file *m, void *v)
+{
+ u64 cpuclk, avg, max, min;
+ struct latency_info curr_li = li;
+
+ cpuclk = octeon_get_clock_rate();
+
+ max = (curr_li.max_latency * 1000000000) / cpuclk;
+ min = (curr_li.min_latency * 1000000000) / cpuclk;
+ avg = (curr_li.latency_sum * 1000000000) / (cpuclk * curr_li.interrupt_cnt);
+
+ seq_printf(m, "cnt: %10lld, avg: %7lld ns, max: %7lld ns, min: %7lld ns\n",
+ curr_li.interrupt_cnt, avg, max, min);
+ return 0;
+}
+
+static int oct_ilm_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, show_latency, NULL);
+}
+
+static const struct file_operations oct_ilm_ops = {
+ .open = oct_ilm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int reset_statistics(void *data, u64 value)
+{
+ reset_stats = true;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_statistics_ops, NULL, reset_statistics, "%llu\n");
+
+static int init_debufs(void)
+{
+ struct dentry *show_dentry;
+ dir = debugfs_create_dir("oct_ilm", 0);
+ if (!dir) {
+ pr_err("oct_ilm: failed to create debugfs entry oct_ilm\n");
+ return -1;
+ }
+
+ show_dentry = debugfs_create_file("statistics", 0222, dir, NULL,
+ &oct_ilm_ops);
+ if (!show_dentry) {
+ pr_err("oct_ilm: failed to create debugfs entry oct_ilm/statistics\n");
+ return -1;
+ }
+
+ show_dentry = debugfs_create_file("reset", 0222, dir, NULL,
+ &reset_statistics_ops);
+ if (!show_dentry) {
+ pr_err("oct_ilm: failed to create debugfs entry oct_ilm/reset\n");
+ return -1;
+ }
+
+ return 0;
+
+}
+
+static void init_latency_info(struct latency_info *li, int startup)
+{
+ /* interval in milliseconds after which the interrupt will
+ * be triggered
+ */
+ int interval = 1;
+
+ if (startup) {
+ /* Calculate the amounts the IO clock and CPU clock would
+ * increment in 'interval' ms
+ */
+ li->io_interval = (octeon_get_io_clock_rate() * interval) / 1000;
+ li->cpu_interval = (octeon_get_clock_rate() * interval) / 1000;
+ }
+ li->timer_start1 = 0;
+ li->timer_start2 = 0;
+ li->max_latency = 0;
+ li->min_latency = (u64)-1;
+ li->latency_sum = 0;
+ li->interrupt_cnt = 0;
+}
+
+
+static void start_timer(int timer, u64 interval)
+{
+ union cvmx_ciu_timx timx;
+ unsigned long flags;
+
+ timx.u64 = 0;
+ timx.s.one_shot = 1;
+ timx.s.len = interval;
+ raw_local_irq_save(flags);
+ li.timer_start1 = read_c0_cvmcount();
+ cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+ /* Read it back to force wait until register is written. */
+ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+ li.timer_start2 = read_c0_cvmcount();
+ raw_local_irq_restore(flags);
+}
+
+
+static irqreturn_t cvm_oct_ciu_timer_interrupt(int cpl, void *dev_id)
+{
+ u64 last_latency;
+ u64 last_int_cnt;
+
+ if (reset_stats) {
+ init_latency_info(&li, 0);
+ reset_stats = false;
+ } else {
+ last_int_cnt = read_c0_cvmcount();
+ last_latency = last_int_cnt - (li.timer_start1 + li.cpu_interval);
+ li.interrupt_cnt++;
+ li.latency_sum += last_latency;
+ if (last_latency > li.max_latency)
+ li.max_latency = last_latency;
+ if (last_latency < li.min_latency)
+ li.min_latency = last_latency;
+ }
+ start_timer(TIMER_NUM, li.io_interval);
+ return IRQ_HANDLED;
+}
+
+static void disable_timer(int timer)
+{
+ union cvmx_ciu_timx timx;
+
+ timx.s.one_shot = 0;
+ timx.s.len = 0;
+ cvmx_write_csr(CVMX_CIU_TIMX(timer), timx.u64);
+ /* Read it back to force immediate write of timer register */
+ timx.u64 = cvmx_read_csr(CVMX_CIU_TIMX(timer));
+}
+
+static __init int oct_ilm_module_init(void)
+{
+ int rc;
+ int irq = OCTEON_IRQ_TIMER0 + TIMER_NUM;
+
+ rc = init_debufs();
+ if (rc) {
+ WARN(1, "Could not create debugfs entries");
+ return rc;
+ }
+
+ rc = request_irq(irq, cvm_oct_ciu_timer_interrupt, IRQF_NO_THREAD,
+ "oct_ilm", 0);
+ if (rc) {
+ WARN(1, "Could not acquire IRQ %d", irq);
+ goto err_irq;
+ }
+
+ init_latency_info(&li, 1);
+ start_timer(TIMER_NUM, li.io_interval);
+
+ return 0;
+err_irq:
+ debugfs_remove_recursive(dir);
+ return rc;
+}
+
+static __exit void oct_ilm_module_exit(void)
+{
+ disable_timer(TIMER_NUM);
+ if (dir)
+ debugfs_remove_recursive(dir);
+ free_irq(OCTEON_IRQ_TIMER0 + TIMER_NUM, 0);
+}
+
+module_exit(oct_ilm_module_exit);
+module_init(oct_ilm_module_init);
+MODULE_AUTHOR("Venkat Subbiah, Cavium");
+MODULE_DESCRIPTION("Measures interrupt latency on Octeon chips.");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 46f5dbceeec..156aa6143e1 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -1542,7 +1542,7 @@ static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
if (line == 3) /* MIO */
switch (bit) {
- case 2: /* IPD_DRP */
+ case 2: /* IPD_DRP */
case 8 ... 11: /* Timers */
case 48: /* PTP */
edge = true;
@@ -1553,7 +1553,7 @@ static bool octeon_irq_ciu2_is_edge(unsigned int line, unsigned int bit)
else if (line == 6) /* PKT */
switch (bit) {
case 52 ... 53: /* ILK_DRP */
- case 8 ... 12: /* GMX_DRP */
+ case 8 ... 12: /* GMX_DRP */
edge = true;
break;
default:
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 0ba0eb96d9a..64e08df51d6 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -116,15 +116,15 @@
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
@@ -316,9 +316,9 @@ EXC( STORE t0, -8(dst), s_exc_p1u)
src_unaligned:
#define rem t8
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, cleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -326,13 +326,13 @@ src_unaligned:
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
-EXC( LDFIRST t0, FIRST(0)(src), l_exc)
-EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), l_exc)
+EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), l_exc_copy)
EXC( LDREST t1, REST(1)(src), l_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
EXC( LDREST t2, REST(2)(src), l_exc_copy)
EXC( LDREST t3, REST(3)(src), l_exc_copy)
ADD src, src, 4*NBYTES
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index 3c1b625a585..389512e2abd 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -410,7 +410,7 @@ int __init octeon_prune_device_tree(void)
pip_path = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
if (pip_path) {
int pip = fdt_path_offset(initial_boot_params, pip_path);
- if (pip >= 0)
+ if (pip >= 0)
for (i = 0; i <= 4; i++)
octeon_fdt_pip_iface(pip, i, &mac_addr_base);
}
diff --git a/arch/mips/cavium-octeon/octeon_3xxx.dts b/arch/mips/cavium-octeon/octeon_3xxx.dts
index f28b2d0fde2..88cb42d4cc4 100644
--- a/arch/mips/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/cavium-octeon/octeon_3xxx.dts
@@ -3,7 +3,7 @@
* OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
*
* This device tree is pruned and patched by early boot code before
- * use. Because of this, it contains a super-set of the available
+ * use. Because of this, it contains a super-set of the available
* devices and properties.
*/
/ {
@@ -433,12 +433,12 @@
cavium,t-we = <45>;
cavium,t-rd-hld = <35>;
cavium,t-wr-hld = <45>;
- cavium,t-pause = <0>;
- cavium,t-wait = <0>;
- cavium,t-page = <35>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <35>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@4 {
@@ -450,12 +450,12 @@
cavium,t-we = <320>;
cavium,t-rd-hld = <320>;
cavium,t-wr-hld = <320>;
- cavium,t-pause = <320>;
- cavium,t-wait = <320>;
- cavium,t-page = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@5 {
@@ -467,12 +467,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <30>;
- cavium,t-pause = <0>;
- cavium,t-wait = <30>;
- cavium,t-page = <320>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <16>;
};
cavium,cs-config@6 {
@@ -484,12 +484,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <70>;
- cavium,t-pause = <0>;
- cavium,t-wait = <0>;
- cavium,t-page = <320>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,wait-mode;
cavium,bus-width = <16>;
};
diff --git a/arch/mips/cavium-octeon/octeon_68xx.dts b/arch/mips/cavium-octeon/octeon_68xx.dts
index 1839468932b..79b46fcb0a1 100644
--- a/arch/mips/cavium-octeon/octeon_68xx.dts
+++ b/arch/mips/cavium-octeon/octeon_68xx.dts
@@ -3,7 +3,7 @@
* OCTEON 68XX device tree skeleton.
*
* This device tree is pruned and patched by early boot code before
- * use. Because of this, it contains a super-set of the available
+ * use. Because of this, it contains a super-set of the available
* devices and properties.
*/
/ {
@@ -469,12 +469,12 @@
cavium,t-we = <35>;
cavium,t-rd-hld = <25>;
cavium,t-wr-hld = <35>;
- cavium,t-pause = <0>;
- cavium,t-wait = <300>;
- cavium,t-page = <25>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <25>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@4 {
@@ -486,12 +486,12 @@
cavium,t-we = <320>;
cavium,t-rd-hld = <320>;
cavium,t-wr-hld = <320>;
- cavium,t-pause = <320>;
- cavium,t-wait = <320>;
- cavium,t-page = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <8>;
};
cavium,cs-config@5 {
@@ -503,12 +503,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <300>;
- cavium,t-pause = <0>;
- cavium,t-wait = <300>;
- cavium,t-page = <310>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <300>;
+ cavium,t-page = <310>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,bus-width = <16>;
};
cavium,cs-config@6 {
@@ -520,12 +520,12 @@
cavium,t-we = <150>;
cavium,t-rd-hld = <100>;
cavium,t-wr-hld = <30>;
- cavium,t-pause = <0>;
- cavium,t-wait = <30>;
- cavium,t-page = <310>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <310>;
cavium,t-rd-dly = <0>;
- cavium,pages = <0>;
+ cavium,pages = <0>;
cavium,wait-mode;
cavium,bus-width = <16>;
};
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
index 428864b2ba4..7b066bbca86 100644
--- a/arch/mips/cavium-octeon/octeon_boot.h
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -31,7 +31,7 @@ struct boot_init_vector {
uint32_t k0_val;
/* Address of boot info block structure */
uint64_t boot_info_addr;
- uint32_t flags; /* flags */
+ uint32_t flags; /* flags */
uint32_t pad;
};
@@ -53,20 +53,20 @@ struct linux_app_boot_info {
/* If not to copy a lot of bootloader's structures
here is only offset of requested member */
-#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
+#define AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK 0x765c
/* hardcoded in bootloader */
-#define LABI_ADDR_IN_BOOTLOADER 0x700
+#define LABI_ADDR_IN_BOOTLOADER 0x700
#define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
#define LABI_SIGNATURE 0xAABBCC01
/* from uboot-headers/octeon_mem_map.h */
-#define EXCEPTION_BASE_INCR (4 * 1024)
+#define EXCEPTION_BASE_INCR (4 * 1024)
/* Increment size for exception base addresses (4k minimum) */
-#define EXCEPTION_BASE_BASE 0
-#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
-#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
+#define EXCEPTION_BASE_BASE 0
+#define BOOTLOADER_PRIV_DATA_BASE (EXCEPTION_BASE_BASE + 0x800)
+#define BOOTLOADER_BOOT_VECTOR (BOOTLOADER_PRIV_DATA_BASE)
#endif /* __OCTEON_BOOT_H__ */
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d7e0a09f77c..c594a3d4f74 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -319,7 +319,7 @@ EXPORT_SYMBOL(octeon_get_io_clock_rate);
* exists on most Cavium evaluation boards. If it doesn't exist, then
* this function doesn't do anything.
*
- * @s: String to write
+ * @s: String to write
*/
void octeon_write_lcd(const char *s)
{
@@ -341,7 +341,7 @@ void octeon_write_lcd(const char *s)
/**
* Return the console uart passed by the bootloader
*
- * Returns uart (0 or 1)
+ * Returns uart (0 or 1)
*/
int octeon_get_boot_uart(void)
{
@@ -805,7 +805,7 @@ void __init prom_init(void)
/*
* To do: switch parsing to new style, something like:
* parse_crashkernel(arg, sysinfo->system_dram_size,
- * &crashk_size, &crashk_base);
+ * &crashk_size, &crashk_base);
*/
#endif
} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
@@ -1013,7 +1013,7 @@ void __init plat_mem_setup(void)
}
/*
- * Emit one character to the boot UART. Exported for use by the
+ * Emit one character to the boot UART. Exported for use by the
* watchdog timer.
*/
int prom_putchar(char c)
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index ee1fb9f7f51..295137dfdc3 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -55,7 +55,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
/**
* Cause the function described by call_data to be executed on the passed
- * cpu. When the function has finished, increment the finished field of
+ * cpu. When the function has finished, increment the finished field of
* call_data.
*/
void octeon_send_ipi_single(int cpu, unsigned int action)
@@ -126,8 +126,8 @@ static void octeon_smp_setup(void)
#ifdef CONFIG_HOTPLUG_CPU
/*
- * The possible CPUs are all those present on the chip. We
- * will assign CPU numbers for possible cores as well. Cores
+ * The possible CPUs are all those present on the chip. We
+ * will assign CPU numbers for possible cores as well. Cores
* are always consecutively numberd from 0.
*/
for (id = 0; id < num_cores && id < NR_CPUS; id++) {
@@ -332,7 +332,7 @@ extern void kernel_entry(unsigned long arg1, ...);
static void start_after_reset(void)
{
- kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
+ kernel_entry(0, 0, 0); /* set a2 = 0 for secondary core */
}
static int octeon_update_boot_vector(unsigned int cpu)
@@ -401,7 +401,7 @@ static int __cpuinit register_cavium_notifier(void)
}
late_initcall(register_cavium_notifier);
-#endif /* CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU */
struct plat_smp_ops octeon_smp_ops = {
.send_ipi_single = octeon_send_ipi_single,
diff --git a/arch/mips/cobalt/led.c b/arch/mips/cobalt/led.c
index d3ce6fa1dc7..32265f514e3 100644
--- a/arch/mips/cobalt/led.c
+++ b/arch/mips/cobalt/led.c
@@ -1,7 +1,7 @@
/*
* Registration of Cobalt LED platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/cobalt/mtd.c b/arch/mips/cobalt/mtd.c
index 691d620b676..8db7b5d8156 100644
--- a/arch/mips/cobalt/mtd.c
+++ b/arch/mips/cobalt/mtd.c
@@ -25,7 +25,7 @@
static struct mtd_partition cobalt_mtd_partitions[] = {
{
.name = "firmware",
- .offset = 0x0,
+ .offset = 0x0,
.size = 0x80000,
},
};
diff --git a/arch/mips/cobalt/rtc.c b/arch/mips/cobalt/rtc.c
index 3ab39898b4e..a6bc75ada9d 100644
--- a/arch/mips/cobalt/rtc.c
+++ b/arch/mips/cobalt/rtc.c
@@ -46,7 +46,7 @@ static __init int cobalt_rtc_add(void)
return -ENOMEM;
retval = platform_device_add_resources(pdev, cobalt_rtc_resource,
- ARRAY_SIZE(cobalt_rtc_resource));
+ ARRAY_SIZE(cobalt_rtc_resource));
if (retval)
goto err_free_device;
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index ea87d43ba60..e3a3836508e 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -1,5 +1,6 @@
CONFIG_ATH79=y
CONFIG_ATH79_MACH_AP121=y
+CONFIG_ATH79_MACH_AP136=y
CONFIG_ATH79_MACH_AP81=y
CONFIG_ATH79_MACH_DB120=y
CONFIG_ATH79_MACH_PB44=y
diff --git a/arch/mips/configs/pnx8550_jbs_defconfig b/arch/mips/configs/pnx8550_jbs_defconfig
deleted file mode 100644
index 1d1f2067f3e..00000000000
--- a/arch/mips/configs/pnx8550_jbs_defconfig
+++ /dev/null
@@ -1,98 +0,0 @@
-CONFIG_PNX8550_JBS=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_TCP_MD5SIG=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SGI_IOC4=m
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_FC_ATTRS=y
-CONFIG_ISCSI_TCP=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_NET_PCI=y
-CONFIG_8139TOO=y
-# CONFIG_8139TOO_PIO is not set
-CONFIG_8139TOO_TUNE_TWISTER=y
-CONFIG_8139TOO_8129=y
-CONFIG_CHELSIO_T3=m
-CONFIG_NETXEN_NIC=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_SERIAL_PNX8XXX=y
-CONFIG_SERIAL_PNX8XXX_CONSOLE=y
-CONFIG_HW_RANDOM=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRC_CCITT=m
diff --git a/arch/mips/configs/pnx8550_stb810_defconfig b/arch/mips/configs/pnx8550_stb810_defconfig
deleted file mode 100644
index 15c66a571f9..00000000000
--- a/arch/mips/configs/pnx8550_stb810_defconfig
+++ /dev/null
@@ -1,92 +0,0 @@
-CONFIG_PNX8550_STB810=y
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_EXPERT=y
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_PCI=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_MIGRATE=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_IDE_GENERIC=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_HPT366=y
-CONFIG_BLK_DEV_IT8213=m
-CONFIG_BLK_DEV_TC86C001=m
-CONFIG_SCSI=y
-CONFIG_SCSI_TGT=m
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_ISCSI_TCP=m
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_MII=y
-CONFIG_NET_PCI=y
-CONFIG_NATSEMI=y
-CONFIG_CHELSIO_T3=m
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
-CONFIG_HW_RANDOM=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_STORAGE_DATAFAB=y
-CONFIG_USB_STORAGE_FREECOM=y
-CONFIG_USB_STORAGE_ISD200=y
-CONFIG_USB_STORAGE_USBAT=y
-CONFIG_USB_STORAGE_SDDR09=y
-CONFIG_USB_STORAGE_SDDR55=y
-CONFIG_USB_STORAGE_JUMPSHOT=y
-CONFIG_EXT2_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=m
-CONFIG_DLM=m
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_HEADERS_CHECK=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_SLAB=y
-CONFIG_CMDLINE_BOOL=y
-CONFIG_CMDLINE="console=ttyS1,38400n8 root=/dev/nfs ip=bootp"
-CONFIG_CRYPTO_CBC=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRC_CCITT=m
diff --git a/arch/mips/configs/rt305x_defconfig b/arch/mips/configs/rt305x_defconfig
new file mode 100644
index 00000000000..d1741bcf894
--- /dev/null
+++ b/arch/mips/configs/rt305x_defconfig
@@ -0,0 +1,167 @@
+CONFIG_RALINK=y
+CONFIG_DTB_RT305X_EVAL=y
+CONFIG_CPU_MIPS32_R2=y
+# CONFIG_COMPACTION is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_INITRAMFS_ROOT_UID=1000
+CONFIG_INITRAMFS_ROOT_GID=1000
+# CONFIG_RD_GZIP is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_COREDUMP is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_TCP_CONG_ADVANCED=y
+# CONFIG_TCP_CONG_BIC is not set
+# CONFIG_TCP_CONG_WESTWOOD is not set
+# CONFIG_TCP_CONG_HTCP is not set
+# CONFIG_IPV6 is not set
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NF_CONNTRACK_IPV4=m
+# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_RAW=m
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_HAMRADIO=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_OF=y
+CONFIG_MTD_M25P80=y
+CONFIG_EEPROM_93CX6=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_PHYLIB=y
+CONFIG_PPP=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_ISDN=y
+CONFIG_INPUT=m
+CONFIG_INPUT_POLLDEV=m
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_RUNTIME_UARTS=2
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SPI=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+# CONFIG_HID is not set
+# CONFIG_USB_HID is not set
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_STORAGE_DEBUG=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_STAGING=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CRYPTO_MANAGER=m
+CONFIG_CRYPTO_ARC4=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32_SARWATE=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
+CONFIG_AVERAGE=y
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 82c85281878..22afed16ccd 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -55,70 +55,70 @@
* DS2100/3100's, aka kn01, aka Pmax:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 SCSI
- * 3 Lance Ethernet
- * 4 DZ11 serial
- * 5 RTC
- * 6 Memory Controller & Video
- * 7 FPU
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 SCSI
+ * 3 Lance Ethernet
+ * 4 DZ11 serial
+ * 5 RTC
+ * 6 Memory Controller & Video
+ * 7 FPU
*
* DS5000/200, aka kn02, aka 3max:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 TurboChannel
- * 3 RTC
- * 4 Reserved
- * 5 Memory Controller
- * 6 Reserved
- * 7 FPU
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 TurboChannel
+ * 3 RTC
+ * 4 Reserved
+ * 5 Memory Controller
+ * 6 Reserved
+ * 7 FPU
*
* DS5000/1xx's, aka kn02ba, aka 3min:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 TurboChannel Slot 0
- * 3 TurboChannel Slot 1
- * 4 TurboChannel Slot 2
- * 5 TurboChannel Slot 3 (ASIC)
- * 6 Halt button
- * 7 FPU/R4k timer
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 TurboChannel Slot 0
+ * 3 TurboChannel Slot 1
+ * 4 TurboChannel Slot 2
+ * 5 TurboChannel Slot 3 (ASIC)
+ * 6 Halt button
+ * 7 FPU/R4k timer
*
* DS5000/2x's, aka kn02ca, aka maxine:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Periodic Interrupt (100usec)
- * 3 RTC
- * 4 I/O write timeout
- * 5 TurboChannel (ASIC)
- * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
- * 7 FPU/R4k timer
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Periodic Interrupt (100usec)
+ * 3 RTC
+ * 4 I/O write timeout
+ * 5 TurboChannel (ASIC)
+ * 6 Halt Keycode from Access.Bus keyboard (CTRL-ALT-ENTER)
+ * 7 FPU/R4k timer
*
* DS5000/2xx's, aka kn03, aka 3maxplus:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 System Board (ASIC)
- * 3 RTC
- * 4 Reserved
- * 5 Memory
- * 6 Halt Button
- * 7 FPU/R4k timer
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 System Board (ASIC)
+ * 3 RTC
+ * 4 Reserved
+ * 5 Memory
+ * 6 Halt Button
+ * 7 FPU/R4k timer
*
* We handle the IRQ according to _our_ priority (see setup.c),
- * then we just return. If multiple IRQs are pending then we will
+ * then we just return. If multiple IRQs are pending then we will
* just take another exception, big deal.
*/
.align 5
@@ -146,7 +146,7 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,cpu_mask_nr_tbl
+ PTR_LA t1,cpu_mask_nr_tbl
1: lw t2,(t1)
nop
and t2,t0
@@ -195,7 +195,7 @@
/*
* Find irq with highest priority
*/
- PTR_LA t1,asic_mask_nr_tbl
+ PTR_LA t1,asic_mask_nr_tbl
2: lw t2,(t1)
nop
and t2,t0
@@ -221,7 +221,7 @@
FEXPORT(cpu_all_int) # HALT, timers, software junk
li a0,DEC_CPU_IRQ_BASE
srl t0,CAUSEB_IP
- li t1,CAUSEF_IP>>CAUSEB_IP # mask
+ li t1,CAUSEF_IP>>CAUSEB_IP # mask
b 1f
li t2,4 # nr of bits / 2
diff --git a/arch/mips/dec/kn02xa-berr.c b/arch/mips/dec/kn02xa-berr.c
index ebb73c51d82..f434b759e3b 100644
--- a/arch/mips/dec/kn02xa-berr.c
+++ b/arch/mips/dec/kn02xa-berr.c
@@ -128,8 +128,8 @@ void __init dec_kn02xa_be_init(void)
{
volatile u32 *mbcs = (void *)CKSEG1ADDR(KN4K_SLOT_BASE + KN4K_MB_CSR);
- /* For KN04 we need to make sure EE (?) is enabled in the MB. */
- if (current_cpu_type() == CPU_R4000SC)
+ /* For KN04 we need to make sure EE (?) is enabled in the MB. */
+ if (current_cpu_type() == CPU_R4000SC)
*mbcs |= KN4K_MB_CSR_EE;
fast_iob();
diff --git a/arch/mips/dec/prom/call_o32.S b/arch/mips/dec/prom/call_o32.S
index 8c8498159e4..c0d1522d448 100644
--- a/arch/mips/dec/prom/call_o32.S
+++ b/arch/mips/dec/prom/call_o32.S
@@ -14,7 +14,7 @@
/* Maximum number of arguments supported. Must be even! */
#define O32_ARGC 32
-/* Number of static registers we save. */
+/* Number of static registers we save. */
#define O32_STATC 11
/* Frame size for both of the above. */
#define O32_FRAMESZ (4 * O32_ARGC + SZREG * O32_STATC)
diff --git a/arch/mips/dec/prom/dectypes.h b/arch/mips/dec/prom/dectypes.h
index 707b6f1f5a9..69ea5b9c819 100644
--- a/arch/mips/dec/prom/dectypes.h
+++ b/arch/mips/dec/prom/dectypes.h
@@ -1,5 +1,5 @@
#ifndef DECTYPES
-#define DECTYPES
+#define DECTYPES
#define DS2100_3100 1 /* DS2100/3100 Pmax */
#define DS5000_200 2 /* DS5000/200 3max */
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index 93f1239af52..ab169046e44 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -103,7 +103,7 @@ void __init prom_init(void)
if (prom_is_rex(magic))
rex_clear_cache();
- /* Register the early console. */
+ /* Register the early console. */
register_prom_console();
/* Were we compiled with the right CPU option? */
diff --git a/arch/mips/dec/prom/memory.c b/arch/mips/dec/prom/memory.c
index 8c62316f22f..0aadac74290 100644
--- a/arch/mips/dec/prom/memory.c
+++ b/arch/mips/dec/prom/memory.c
@@ -22,7 +22,7 @@ volatile unsigned long mem_err; /* So we know an error occurred */
/*
* Probe memory in 4MB chunks, waiting for an error to tell us we've fallen
- * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
+ * off the end of real memory. Only suitable for the 2100/3100's (PMAX).
*/
#define CHUNK_SIZE 0x400000
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index b874accd878..741cb4235bd 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(ioasic_base);
/*
* IRQ routing and priority tables. Priorites are set as follows:
*
- * KN01 KN230 KN02 KN02-BA KN02-CA KN03
+ * KN01 KN230 KN02 KN02-BA KN02-CA KN03
*
* MEMORY CPU CPU CPU ASIC CPU CPU
* RTC CPU CPU CPU ASIC CPU CPU
@@ -413,7 +413,7 @@ static void __init dec_init_kn02(void)
/*
* Machine-specific initialisation for KN02-BA, aka DS5000/1xx
- * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
+ * (xx = 20, 25, 33), aka 3min. Also applies to KN04(-BA), aka
* DS5000/150, aka 4min.
*/
static int kn02ba_interrupt[DEC_NR_INTS] __initdata = {
diff --git a/arch/mips/dec/wbflush.c b/arch/mips/dec/wbflush.c
index 43feddd5e19..56bda4a396b 100644
--- a/arch/mips/dec/wbflush.c
+++ b/arch/mips/dec/wbflush.c
@@ -2,9 +2,9 @@
* Setup the right wbflush routine for the different DECstations.
*
* Created with information from:
- * DECstation 3100 Desktop Workstation Functional Specification
- * DECstation 5000/200 KN02 System Module Functional Specification
- * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
+ * DECstation 3100 Desktop Workstation Functional Specification
+ * DECstation 5000/200 KN02 System Module Functional Specification
+ * mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index b5f08255d9c..b880a83e4d4 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -292,7 +292,7 @@ void __init arch_init_irq(void)
asmlinkage void plat_irq_dispatch(void)
{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
+ unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
if (pending & STATUSF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
diff --git a/arch/mips/emma/markeins/platform.c b/arch/mips/emma/markeins/platform.c
index b05b08b92a3..99ea004730a 100644
--- a/arch/mips/emma/markeins/platform.c
+++ b/arch/mips/emma/markeins/platform.c
@@ -190,7 +190,7 @@ static struct platform_device markeins_flash_device = {
.name = "physmap-flash",
.id = 0,
.dev = {
- .platform_data = &markeins_flash_data,
+ .platform_data = &markeins_flash_data,
},
.num_resources = 1,
.resource = &markeins_flash_resource,
diff --git a/arch/mips/emma/markeins/setup.c b/arch/mips/emma/markeins/setup.c
index feceebcfff4..d71005835c0 100644
--- a/arch/mips/emma/markeins/setup.c
+++ b/arch/mips/emma/markeins/setup.c
@@ -28,7 +28,7 @@
#include <asm/emma/emma2rh.h>
-#define USE_CPU_COUNTER_TIMER /* whether we use cpu counter */
+#define USE_CPU_COUNTER_TIMER /* whether we use cpu counter */
extern void markeins_led(const char *);
diff --git a/arch/mips/fw/arc/file.c b/arch/mips/fw/arc/file.c
index 30335341b44..a8b08032348 100644
--- a/arch/mips/fw/arc/file.c
+++ b/arch/mips/fw/arc/file.c
@@ -15,7 +15,7 @@
LONG
ArcGetDirectoryEntry(ULONG FileID, struct linux_vdirent *Buffer,
- ULONG N, ULONG *Count)
+ ULONG N, ULONG *Count)
{
return ARC_CALL4(get_vdirent, FileID, Buffer, N, Count);
}
@@ -69,7 +69,7 @@ ArcGetFileInformation(ULONG FileID, struct linux_finfo *Information)
}
LONG ArcSetFileInformation(ULONG FileID, ULONG AttributeFlags,
- ULONG AttributeMask)
+ ULONG AttributeMask)
{
return ARC_CALL3(set_finfo, FileID, AttributeFlags, AttributeMask);
}
diff --git a/arch/mips/fw/arc/identify.c b/arch/mips/fw/arc/identify.c
index 54a33c756f6..f90266c02c9 100644
--- a/arch/mips/fw/arc/identify.c
+++ b/arch/mips/fw/arc/identify.c
@@ -100,7 +100,7 @@ void __init prom_identify_arch(void)
if (p == NULL) {
#ifdef CONFIG_SGI_IP27
/* IP27 PROM misbehaves, seems to not implement ARC
- GetChild(). So we just assume it's an IP27. */
+ GetChild(). So we just assume it's an IP27. */
iname = "SGI-IP27";
#else
iname = "Unknown";
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 8b8eea2b6cf..5537b94572b 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -1,6 +1,6 @@
/*
* memory.c: PROM library functions for acquiring/using memory descriptors
- * given to us from the ARCS firmware.
+ * given to us from the ARCS firmware.
*
* Copyright (C) 1996 by David S. Miller
* Copyright (C) 1999, 2000, 2001 by Ralf Baechle
diff --git a/arch/mips/fw/arc/promlib.c b/arch/mips/fw/arc/promlib.c
index b7f9dd3c93c..7e8ba5ce95b 100644
--- a/arch/mips/fw/arc/promlib.c
+++ b/arch/mips/fw/arc/promlib.c
@@ -11,7 +11,7 @@
#include <asm/bcache.h>
/*
- * IP22 boardcache is not compatible with board caches. Thus we disable it
+ * IP22 boardcache is not compatible with board caches. Thus we disable it
* during romvec action. Since r4xx0.c is always compiled and linked with your
* kernel, this shouldn't cause any harm regardless what MIPS processor you
* have.
diff --git a/arch/mips/fw/lib/call_o32.S b/arch/mips/fw/lib/call_o32.S
index e0a68713b3c..b308b2a0613 100644
--- a/arch/mips/fw/lib/call_o32.S
+++ b/arch/mips/fw/lib/call_o32.S
@@ -14,7 +14,7 @@
/* Maximum number of arguments supported. Must be even! */
#define O32_ARGC 32
-/* Number of static registers we save. */
+/* Number of static registers we save. */
#define O32_STATC 11
/* Frame size for static register */
#define O32_FRAMESZ (SZREG * O32_STATC)
diff --git a/arch/mips/fw/sni/sniprom.c b/arch/mips/fw/sni/sniprom.c
index 96ba9920275..2c2cb182af4 100644
--- a/arch/mips/fw/sni/sniprom.c
+++ b/arch/mips/fw/sni/sniprom.c
@@ -28,20 +28,20 @@
* registers
*/
#define PROM_GET_MEMCONF 58
-#define PROM_GET_HWCONF 61
+#define PROM_GET_HWCONF 61
#define PROM_VEC (u64 *)CKSEG1ADDR(0x1fc00000)
#define PROM_ENTRY(x) (PROM_VEC + (x))
-#define ___prom_putchar ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
-#define ___prom_getenv ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
-#define ___prom_get_memconf ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
-#define ___prom_get_hwconf ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
+#define ___prom_putchar ((int *(*)(int))PROM_ENTRY(PROM_PUTCHAR))
+#define ___prom_getenv ((char *(*)(char *))PROM_ENTRY(PROM_GETENV))
+#define ___prom_get_memconf ((void (*)(void *))PROM_ENTRY(PROM_GET_MEMCONF))
+#define ___prom_get_hwconf ((u32 (*)(void))PROM_ENTRY(PROM_GET_HWCONF))
#ifdef CONFIG_64BIT
static u8 o32_stk[16384];
-#define O32_STK &o32_stk[sizeof(o32_stk)]
+#define O32_STK &o32_stk[sizeof(o32_stk)]
#define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
__asm__(#fun " = call_o32")
@@ -52,13 +52,13 @@ void __PROM_O32(__prom_get_memconf, (void (*)(void *), void *, void *));
u32 __PROM_O32(__prom_get_hwconf, (u32 (*)(void), void *));
#define _prom_putchar(x) __prom_putchar(___prom_putchar, O32_STK, x)
-#define _prom_getenv(x) __prom_getenv(___prom_getenv, O32_STK, x)
+#define _prom_getenv(x) __prom_getenv(___prom_getenv, O32_STK, x)
#define _prom_get_memconf(x) __prom_get_memconf(___prom_get_memconf, O32_STK, x)
#define _prom_get_hwconf() __prom_get_hwconf(___prom_get_hwconf, O32_STK)
#else
#define _prom_putchar(x) ___prom_putchar(x)
-#define _prom_getenv(x) ___prom_getenv(x)
+#define _prom_getenv(x) ___prom_getenv(x)
#define _prom_get_memconf(x) ___prom_get_memconf(x)
#define _prom_get_hwconf(x) ___prom_get_hwconf(x)
#endif
diff --git a/arch/mips/include/asm/abi.h b/arch/mips/include/asm/abi.h
index 9252d9b50e5..909bb698486 100644
--- a/arch/mips/include/asm/abi.h
+++ b/arch/mips/include/asm/abi.h
@@ -14,12 +14,12 @@
struct mips_abi {
int (* const setup_frame)(void *sig_return, struct k_sigaction *ka,
- struct pt_regs *regs, int signr,
- sigset_t *set);
+ struct pt_regs *regs, int signr,
+ sigset_t *set);
const unsigned long signal_return_offset;
int (* const setup_rt_frame)(void *sig_return, struct k_sigaction *ka,
- struct pt_regs *regs, int signr,
- sigset_t *set, siginfo_t *info);
+ struct pt_regs *regs, int signr,
+ sigset_t *set, siginfo_t *info);
const unsigned long rt_signal_return_offset;
const unsigned long restart;
};
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 569f80aacbd..13d61c002e4 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -51,14 +51,14 @@
* Returns the physical address of a CKSEGx / XKPHYS address
*/
#define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
-#define XPHYSADDR(a) ((_ACAST64_(a)) & \
+#define XPHYSADDR(a) ((_ACAST64_(a)) & \
_CONST64_(0x000000ffffffffff))
#ifdef CONFIG_64BIT
/*
* Memory segments (64bit kernel mode addresses)
- * The compatibility segments use the full 64-bit sign extended value. Note
+ * The compatibility segments use the full 64-bit sign extended value. Note
* the R8000 doesn't have them so don't reference these in generic MIPS code.
*/
#define XKUSEG _CONST64_(0x0000000000000000)
@@ -131,7 +131,7 @@
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
- * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
+ * the region, 3 bits for the CCA mode. This leaves 59 bits of which the
* R8000 implements most with its 48-bit physical address space.
*/
#define TO_PHYS_MASK _CONST64_(0x07ffffffffffffff) /* 2^^59 - 1 */
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 608cfcfbb3e..164a21e65b4 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -33,12 +33,12 @@
* Not used for the kernel but here seems to be the right place.
*/
#ifdef __PIC__
-#define CPRESTORE(register) \
+#define CPRESTORE(register) \
.cprestore register
-#define CPADD(register) \
+#define CPADD(register) \
.cpadd register
-#define CPLOAD(register) \
- .cpload register
+#define CPLOAD(register) \
+ .cpload register
#else
#define CPRESTORE(register)
#define CPADD(register)
@@ -48,35 +48,35 @@
/*
* LEAF - declare leaf routine
*/
-#define LEAF(symbol) \
- .globl symbol; \
- .align 2; \
- .type symbol, @function; \
- .ent symbol, 0; \
+#define LEAF(symbol) \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
symbol: .frame sp, 0, ra
/*
* NESTED - declare nested routine entry point
*/
-#define NESTED(symbol, framesize, rpc) \
- .globl symbol; \
- .align 2; \
- .type symbol, @function; \
- .ent symbol, 0; \
+#define NESTED(symbol, framesize, rpc) \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+ .ent symbol, 0; \
symbol: .frame sp, framesize, rpc
/*
* END - mark end of function
*/
-#define END(function) \
- .end function; \
+#define END(function) \
+ .end function; \
.size function, .-function
/*
* EXPORT - export definition of symbol
*/
#define EXPORT(symbol) \
- .globl symbol; \
+ .globl symbol; \
symbol:
/*
@@ -90,16 +90,16 @@ symbol:
/*
* ABS - export absolute symbol
*/
-#define ABS(symbol,value) \
- .globl symbol; \
+#define ABS(symbol,value) \
+ .globl symbol; \
symbol = value
-#define PANIC(msg) \
+#define PANIC(msg) \
.set push; \
- .set reorder; \
- PTR_LA a0, 8f; \
- jal panic; \
-9: b 9b; \
+ .set reorder; \
+ PTR_LA a0, 8f; \
+ jal panic; \
+9: b 9b; \
.set pop; \
TEXT(msg)
@@ -107,31 +107,31 @@ symbol = value
* Print formatted string
*/
#ifdef CONFIG_PRINTK
-#define PRINT(string) \
+#define PRINT(string) \
.set push; \
- .set reorder; \
- PTR_LA a0, 8f; \
- jal printk; \
+ .set reorder; \
+ PTR_LA a0, 8f; \
+ jal printk; \
.set pop; \
TEXT(string)
#else
#define PRINT(string)
#endif
-#define TEXT(msg) \
+#define TEXT(msg) \
.pushsection .data; \
-8: .asciiz msg; \
+8: .asciiz msg; \
.popsection;
/*
* Build text tables
*/
-#define TTABLE(string) \
+#define TTABLE(string) \
.pushsection .text; \
- .word 1f; \
+ .word 1f; \
.popsection \
.pushsection .data; \
-1: .asciiz string; \
+1: .asciiz string; \
.popsection
/*
@@ -143,13 +143,13 @@ symbol = value
*/
#ifdef CONFIG_CPU_HAS_PREFETCH
-#define PREF(hint,addr) \
+#define PREF(hint,addr) \
.set push; \
.set mips4; \
pref hint, addr; \
.set pop
-#define PREFX(hint,addr) \
+#define PREFX(hint,addr) \
.set push; \
.set mips4; \
prefx hint, addr; \
@@ -166,42 +166,42 @@ symbol = value
* MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
*/
#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt) \
+#define MOVN(rd, rs, rt) \
.set push; \
.set reorder; \
- beqz rt, 9f; \
- move rd, rs; \
+ beqz rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
-#define MOVZ(rd, rs, rt) \
+#define MOVZ(rd, rs, rt) \
.set push; \
.set reorder; \
- bnez rt, 9f; \
- move rd, rs; \
+ bnez rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt) \
+#define MOVN(rd, rs, rt) \
.set push; \
.set noreorder; \
- bnezl rt, 9f; \
- move rd, rs; \
+ bnezl rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
-#define MOVZ(rd, rs, rt) \
+#define MOVZ(rd, rs, rt) \
.set push; \
.set noreorder; \
- beqzl rt, 9f; \
- move rd, rs; \
+ beqzl rt, 9f; \
+ move rd, rs; \
.set pop; \
9:
#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
(_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt) \
+#define MOVN(rd, rs, rt) \
movn rd, rs, rt
-#define MOVZ(rd, rs, rt) \
+#define MOVZ(rd, rs, rt) \
movz rd, rs, rt
#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 01cc6ba6483..08b607969a1 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -1,5 +1,5 @@
/*
- * Atomic operations that C can't guarantee us. Useful for
+ * Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* But use these as seldom as possible since they are much more slower
@@ -21,7 +21,7 @@
#include <asm/cmpxchg.h>
#include <asm/war.h>
-#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC_INIT(i) { (i) }
/*
* atomic_read - read atomic variable
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
index f7fdc24e972..314ab553201 100644
--- a/arch/mips/include/asm/barrier.h
+++ b/arch/mips/include/asm/barrier.h
@@ -18,7 +18,7 @@
* over this barrier. All reads preceding this primitive are guaranteed
* to access memory (but not necessarily other CPUs' caches) before any
* reads following this primitive that depend on the data return by
- * any of the preceding reads. This primitive is much lighter weight than
+ * any of the preceding reads. This primitive is much lighter weight than
* rmb() on most CPUs, and is never heavier weight than is
* rmb().
*
@@ -43,7 +43,7 @@
* </programlisting>
*
* because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends(). However,
+ * two reads are separated by a read_barrier_depends(). However,
* the following code, with the same initial values for "a" and "b":
*
* <programlisting>
@@ -57,7 +57,7 @@
* </programlisting>
*
* does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b". Therefore, on some CPUs, such
+ * the read of "a" and the read of "b". Therefore, on some CPUs, such
* as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
* in cases like this where there are no data dependencies.
*/
@@ -92,7 +92,7 @@
: "memory")
#ifdef CONFIG_CPU_CAVIUM_OCTEON
# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n"
-# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
+# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory")
# define fast_wmb() __syncw()
# define fast_rmb() barrier()
@@ -158,7 +158,7 @@
#endif
#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
-#define __WEAK_LLSC_MB " sync \n"
+#define __WEAK_LLSC_MB " sync \n"
#else
#define __WEAK_LLSC_MB " \n"
#endif
diff --git a/arch/mips/include/asm/bcache.h b/arch/mips/include/asm/bcache.h
index 0ba9d6ef76a..8c34484cea8 100644
--- a/arch/mips/include/asm/bcache.h
+++ b/arch/mips/include/asm/bcache.h
@@ -11,7 +11,7 @@
/* Some R4000 / R4400 / R4600 / R5000 machines may have a non-dma-coherent,
- chipset implemented caches. On machines with other CPUs the CPU does the
+ chipset implemented caches. On machines with other CPUs the CPU does the
cache thing itself. */
struct bcache_ops {
void (*bc_enable)(void);
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 46ac73abd5e..71305a8b3d7 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -26,15 +26,15 @@
#define SZLONG_MASK 31UL
#define __LL "ll "
#define __SC "sc "
-#define __INS "ins "
-#define __EXT "ext "
+#define __INS "ins "
+#define __EXT "ext "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL "lld "
#define __SC "scd "
-#define __INS "dins "
-#define __EXT "dext "
+#define __INS "dins "
+#define __EXT "dext "
#endif
/*
@@ -357,7 +357,7 @@ static inline int test_and_clear_bit(unsigned long nr,
"1: " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
- " " __SC "%2, %1 \n"
+ " " __SC "%2, %1 \n"
" beqzl %2, 1b \n"
" and %2, %0, %3 \n"
" .set mips0 \n"
@@ -371,10 +371,10 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
- " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
" " __EXT "%2, %0, %3, 1 \n"
- " " __INS "%0, $0, %3, 1 \n"
- " " __SC "%0, %1 \n"
+ " " __INS "%0, $0, %3, 1 \n"
+ " " __SC "%0, %1 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "ir" (bit)
: "memory");
@@ -387,10 +387,10 @@ static inline int test_and_clear_bit(unsigned long nr,
do {
__asm__ __volatile__(
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
- " " __SC "%2, %1 \n"
+ " " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
@@ -444,7 +444,7 @@ static inline int test_and_change_bit(unsigned long nr,
do {
__asm__ __volatile__(
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_change_bit \n"
+ " " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 7a51d879e6c..b71dd5b1608 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -44,19 +44,19 @@
/*
* Valid machtype for group PMC-MSP
*/
-#define MACH_MSP4200_EVAL 0 /* PMC-Sierra MSP4200 Evaluation */
-#define MACH_MSP4200_GW 1 /* PMC-Sierra MSP4200 Gateway demo */
-#define MACH_MSP4200_FPGA 2 /* PMC-Sierra MSP4200 Emulation */
-#define MACH_MSP7120_EVAL 3 /* PMC-Sierra MSP7120 Evaluation */
-#define MACH_MSP7120_GW 4 /* PMC-Sierra MSP7120 Residential GW */
-#define MACH_MSP7120_FPGA 5 /* PMC-Sierra MSP7120 Emulation */
-#define MACH_MSP_OTHER 255 /* PMC-Sierra unknown board type */
+#define MACH_MSP4200_EVAL 0 /* PMC-Sierra MSP4200 Evaluation */
+#define MACH_MSP4200_GW 1 /* PMC-Sierra MSP4200 Gateway demo */
+#define MACH_MSP4200_FPGA 2 /* PMC-Sierra MSP4200 Emulation */
+#define MACH_MSP7120_EVAL 3 /* PMC-Sierra MSP7120 Evaluation */
+#define MACH_MSP7120_GW 4 /* PMC-Sierra MSP7120 Residential GW */
+#define MACH_MSP7120_FPGA 5 /* PMC-Sierra MSP7120 Emulation */
+#define MACH_MSP_OTHER 255 /* PMC-Sierra unknown board type */
/*
* Valid machtype for group Mikrotik
*/
-#define MACH_MIKROTIK_RB532 0 /* Mikrotik RouterBoard 532 */
-#define MACH_MIKROTIK_RB532A 1 /* Mikrotik RouterBoard 532A */
+#define MACH_MIKROTIK_RB532 0 /* Mikrotik RouterBoard 532 */
+#define MACH_MIKROTIK_RB532A 1 /* Mikrotik RouterBoard 532A */
/*
* Valid machtype for Loongson family
@@ -67,7 +67,7 @@
#define MACH_LEMOTE_ML2F7 3
#define MACH_LEMOTE_YL2F89 4
#define MACH_DEXXON_GDIUM2F10 5
-#define MACH_LEMOTE_NAS 6
+#define MACH_LEMOTE_NAS 6
#define MACH_LEMOTE_LL2F 7
#define MACH_LOONGSON_END 8
diff --git a/arch/mips/include/asm/break.h b/arch/mips/include/asm/break.h
new file mode 100644
index 00000000000..0ef11429a70
--- /dev/null
+++ b/arch/mips/include/asm/break.h
@@ -0,0 +1,26 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 2003 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ */
+#ifndef __ASM_BREAK_H
+#define __ASM_BREAK_H
+
+#ifdef __UAPI_ASM_BREAK_H
+#error "Error: Do not directly include <uapi/asm/break.h>"
+#endif
+#include <uapi/asm/break.h>
+
+/*
+ * Break codes used internally to the kernel.
+ */
+#define BRK_KDB 513 /* Used in KDB_ENTER() */
+#define BRK_MEMU 514 /* Used by FPU emulator */
+#define BRK_KPROBE_BP 515 /* Kprobe break */
+#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
+#define BRK_MULOVF 1023 /* Multiply overflow */
+
+#endif /* __ASM_BREAK_H */
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index 8f99c11ab66..68f37e3eccc 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -8,20 +8,20 @@
* (C) Copyright 1996, 97, 99, 2002, 03 Ralf Baechle
* (C) Copyright 1999 Silicon Graphics, Inc.
*/
-#ifndef __ASM_CACHEOPS_H
-#define __ASM_CACHEOPS_H
+#ifndef __ASM_CACHEOPS_H
+#define __ASM_CACHEOPS_H
/*
* Cache Operations available on all MIPS processors with R4000-style caches
*/
-#define Index_Invalidate_I 0x00
-#define Index_Writeback_Inv_D 0x01
+#define Index_Invalidate_I 0x00
+#define Index_Writeback_Inv_D 0x01
#define Index_Load_Tag_I 0x04
#define Index_Load_Tag_D 0x05
#define Index_Store_Tag_I 0x08
#define Index_Store_Tag_D 0x09
#if defined(CONFIG_CPU_LOONGSON2)
-#define Hit_Invalidate_I 0x00
+#define Hit_Invalidate_I 0x00
#else
#define Hit_Invalidate_I 0x10
#endif
@@ -39,8 +39,8 @@
/*
* R4000SC and R4400SC-specific cacheops
*/
-#define Index_Invalidate_SI 0x02
-#define Index_Writeback_Inv_SD 0x03
+#define Index_Invalidate_SI 0x02
+#define Index_Writeback_Inv_SD 0x03
#define Index_Load_Tag_SI 0x06
#define Index_Load_Tag_SD 0x07
#define Index_Store_Tag_SI 0x0A
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index f2f7c6c264d..ac3d2b8a20d 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -194,7 +194,7 @@ static inline __sum16 ip_compute_csum(const void *buff, int len)
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
- const struct in6_addr *daddr,
+ const struct in6_addr *daddr,
__u32 len, unsigned short proto,
__wsum sum)
{
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index eee10dc07ac..466069bd846 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -146,7 +146,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
@@ -163,7 +163,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
@@ -205,7 +205,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
\
switch (sizeof(*(__ptr))) { \
case 4: \
- __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
+ __res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
break; \
case 8: \
if (sizeof(long) == 8) { \
diff --git a/arch/mips/include/asm/compat-signal.h b/arch/mips/include/asm/compat-signal.h
index 6599a901b63..64e0b9343b8 100644
--- a/arch/mips/include/asm/compat-signal.h
+++ b/arch/mips/include/asm/compat-signal.h
@@ -18,9 +18,9 @@ static inline int __copy_conv_sigset_to_user(compat_sigset_t __user *d,
BUG_ON(sizeof(*d) != sizeof(*s));
BUG_ON(_NSIG_WORDS != 2);
- err = __put_user(s->sig[0], &d->sig[0]);
+ err = __put_user(s->sig[0], &d->sig[0]);
err |= __put_user(s->sig[0] >> 32, &d->sig[1]);
- err |= __put_user(s->sig[1], &d->sig[2]);
+ err |= __put_user(s->sig[1], &d->sig[2]);
err |= __put_user(s->sig[1] >> 32, &d->sig[3]);
return err;
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index ebaae9649f8..c4bd54a7f5c 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -120,7 +120,7 @@ struct compat_statfs {
typedef u32 compat_old_sigset_t; /* at least 32 bits */
-#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
+#define _COMPAT_NSIG 128 /* Don't ask !$@#% ... */
#define _COMPAT_NSIG_BPW 32
typedef u32 compat_sigset_word;
@@ -168,7 +168,7 @@ typedef struct compat_siginfo {
s32 _addr; /* faulting insn/memory ref. */
} _sigfault;
- /* SIGPOLL, SIGXFSZ (To do ...) */
+ /* SIGPOLL, SIGXFSZ (To do ...) */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
@@ -179,7 +179,7 @@ typedef struct compat_siginfo {
timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval;/* same as below */
- int _sys_private; /* not to be passed to user */
+ int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index c507b931b48..1a57e8b4d09 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -14,7 +14,7 @@
#include <cpu-feature-overrides.h>
#ifndef current_cpu_type
-#define current_cpu_type() current_cpu_data.cputype
+#define current_cpu_type() current_cpu_data.cputype
#endif
/*
@@ -87,10 +87,10 @@
#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mdmx
-#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
+#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
#endif
#ifndef cpu_has_mips3d
-#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
+#define cpu_has_mips3d (cpu_data[0].ases & MIPS_ASE_MIPS3D)
#endif
#ifndef cpu_has_smartmips
#define cpu_has_smartmips (cpu_data[0].ases & MIPS_ASE_SMARTMIPS)
@@ -98,6 +98,9 @@
#ifndef cpu_has_rixi
#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
#endif
+#ifndef cpu_has_mmips
+#define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
@@ -108,11 +111,11 @@
#define cpu_has_ic_fills_f_dc (cpu_data[0].icache.flags & MIPS_CACHE_IC_F_DC)
#endif
#ifndef cpu_has_pindexed_dcache
-#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
+#define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX)
#endif
/*
- * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
+ * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
* such as the R10000 have I-Caches that snoop local stores; the embedded ones
* don't. For maintaining I-cache coherency this means we need to flush the
* D-cache all the way back to whever the I-cache does refills from, so the
@@ -130,6 +133,19 @@
#endif
#endif
+# define cpu_has_mips_1 (cpu_data[0].isa_level & MIPS_CPU_ISA_I)
+#ifndef cpu_has_mips_2
+# define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
+#endif
+#ifndef cpu_has_mips_3
+# define cpu_has_mips_3 (cpu_data[0].isa_level & MIPS_CPU_ISA_III)
+#endif
+#ifndef cpu_has_mips_4
+# define cpu_has_mips_4 (cpu_data[0].isa_level & MIPS_CPU_ISA_IV)
+#endif
+#ifndef cpu_has_mips_5
+# define cpu_has_mips_5 (cpu_data[0].isa_level & MIPS_CPU_ISA_V)
+#endif
# ifndef cpu_has_mips32r1
# define cpu_has_mips32r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R1)
# endif
@@ -148,8 +164,8 @@
*/
#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
-#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
-#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
+#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
cpu_has_mips64r1 | cpu_has_mips64r2)
@@ -159,7 +175,7 @@
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
@@ -191,7 +207,7 @@
# define cpu_has_64bits (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
# endif
# ifndef cpu_has_64bit_zero_reg
-# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
+# define cpu_has_64bit_zero_reg (cpu_data[0].isa_level & MIPS_CPU_ISA_64BIT)
# endif
# ifndef cpu_has_64bit_gp_regs
# define cpu_has_64bit_gp_regs 0
@@ -260,4 +276,8 @@
#define cpu_has_perf_cntr_intr_bit (cpu_data[0].options & MIPS_CPU_PCI)
#endif
+#ifndef cpu_has_vz
+#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index c454550eb0c..41401d8eb7d 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -52,14 +52,14 @@ struct cpuinfo_mips {
unsigned int cputype;
int isa_level;
int tlbsize;
- struct cache_desc icache; /* Primary I-cache */
- struct cache_desc dcache; /* Primary D or combined I/D cache */
- struct cache_desc scache; /* Secondary cache */
- struct cache_desc tcache; /* Tertiary/split secondary cache */
- int srsets; /* Shadow register sets */
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+ int srsets; /* Shadow register sets */
int core; /* physical core number */
#ifdef CONFIG_64BIT
- int vmbits; /* Virtual memory size in bits */
+ int vmbits; /* Virtual memory size in bits */
#endif
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
/*
@@ -68,12 +68,12 @@ struct cpuinfo_mips {
* exception resources, ASID spaces, etc, are common
* to all TCs within the same VPE.
*/
- int vpe_id; /* Virtual Processor number */
+ int vpe_id; /* Virtual Processor number */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
- int tc_id; /* Thread Context number */
+ int tc_id; /* Thread Context number */
#endif
- void *data; /* Additional data */
+ void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
unsigned int watch_reg_use_cnt; /* Usable by ptrace */
#define NUM_WATCH_REGS 4
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index 90112adb194..dd86ab20548 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -1,6 +1,6 @@
/*
* cpu.h: Values of the PRId register used to match up
- * various MIPS cpu types.
+ * various MIPS cpu types.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 2004 Maciej W. Rozycki
@@ -9,14 +9,14 @@
#define _ASM_CPU_H
/* Assigned Company values for bits 23:16 of the PRId Register
- (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from
+ (CP0 register 15, select 0). As of the MIPS32 and MIPS64 specs from
MTI, the PRId register is defined in this (backwards compatible)
way:
+----------------+----------------+----------------+----------------+
- | Company Options| Company ID | Processor ID | Revision |
+ | Company Options| Company ID | Processor ID | Revision |
+----------------+----------------+----------------+----------------+
- 31 24 23 16 15 8 7
+ 31 24 23 16 15 8 7
I don't have docs for all the previous processors, but my impression is
that bits 16-23 have been 0 for all MIPS processors before the MIPS32/64
@@ -29,7 +29,7 @@
#define PRID_COMP_ALCHEMY 0x030000
#define PRID_COMP_SIBYTE 0x040000
#define PRID_COMP_SANDCRAFT 0x050000
-#define PRID_COMP_NXP 0x060000
+#define PRID_COMP_NXP 0x060000
#define PRID_COMP_TOSHIBA 0x070000
#define PRID_COMP_LSI 0x080000
#define PRID_COMP_LEXRA 0x0b0000
@@ -38,9 +38,9 @@
#define PRID_COMP_INGENIC 0xd00000
/*
- * Assigned values for the product ID register. In order to detect a
+ * Assigned values for the product ID register. In order to detect a
* certain CPU type exactly eventually additional registers may need to
- * be examined. These are valid when 23:16 == PRID_COMP_LEGACY
+ * be examined. These are valid when 23:16 == PRID_COMP_LEGACY
*/
#define PRID_IMP_R2000 0x0100
#define PRID_IMP_AU1_REV1 0x0100
@@ -96,19 +96,20 @@
#define PRID_IMP_1004K 0x9900
#define PRID_IMP_1074K 0x9a00
#define PRID_IMP_M14KC 0x9c00
+#define PRID_IMP_M14KEC 0x9e00
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
*/
-#define PRID_IMP_SB1 0x0100
-#define PRID_IMP_SB1A 0x1100
+#define PRID_IMP_SB1 0x0100
+#define PRID_IMP_SB1A 0x1100
/*
* These are the PRID's for when 23:16 == PRID_COMP_SANDCRAFT
*/
-#define PRID_IMP_SR71000 0x0400
+#define PRID_IMP_SR71000 0x0400
/*
* These are the PRID's for when 23:16 == PRID_COMP_BROADCOM
@@ -145,7 +146,7 @@
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC
*/
-#define PRID_IMP_JZRISC 0x0200
+#define PRID_IMP_JZRISC 0x0200
/*
* These are the PRID's for when 23:16 == PRID_COMP_NETLOGIC
@@ -188,9 +189,9 @@
#define PRID_REV_R3000A 0x0030
#define PRID_REV_R3000 0x0020
#define PRID_REV_R2000A 0x0010
-#define PRID_REV_TX3912 0x0010
-#define PRID_REV_TX3922 0x0030
-#define PRID_REV_TX3927 0x0040
+#define PRID_REV_TX3912 0x0010
+#define PRID_REV_TX3922 0x0030
+#define PRID_REV_TX3927 0x0040
#define PRID_REV_VR4111 0x0050
#define PRID_REV_VR4181 0x0050 /* Same as VR4111 */
#define PRID_REV_VR4121 0x0060
@@ -217,9 +218,9 @@
* FPU implementation/revision register (CP1 control register 0).
*
* +---------------------------------+----------------+----------------+
- * | 0 | Implementation | Revision |
+ * | 0 | Implementation | Revision |
* +---------------------------------+----------------+----------------+
- * 31 16 15 8 7 0
+ * 31 16 15 8 7 0
*/
#define FPIR_IMP_NONE 0x0000
@@ -264,6 +265,7 @@ enum cpu_type_enum {
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
+ CPU_M14KEC,
/*
* MIPS64 class processors
@@ -322,6 +324,7 @@ enum cpu_type_enum {
#define MIPS_CPU_ULRI 0x00200000 /* CPU has ULRI feature */
#define MIPS_CPU_PCI 0x00400000 /* CPU has Perf Ctr Int indicator */
#define MIPS_CPU_RIXI 0x00800000 /* CPU has TLB Read/eXec Inhibit */
+#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */
/*
* CPU ASE encodings
@@ -333,6 +336,6 @@ enum cpu_type_enum {
#define MIPS_ASE_DSP 0x00000010 /* Signal Processing ASE */
#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
-
+#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/dec/ioasic_addrs.h b/arch/mips/include/asm/dec/ioasic_addrs.h
index 4cbc1f8a112..a8665a7611c 100644
--- a/arch/mips/include/asm/dec/ioasic_addrs.h
+++ b/arch/mips/include/asm/dec/ioasic_addrs.h
@@ -25,22 +25,22 @@
*/
#define IOASIC_SYS_ROM (0*IOASIC_SLOT_SIZE) /* system board ROM */
#define IOASIC_IOCTL (1*IOASIC_SLOT_SIZE) /* I/O ASIC */
-#define IOASIC_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
-#define IOASIC_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
-#define IOASIC_SCC0 (4*IOASIC_SLOT_SIZE) /* SCC #0 */
+#define IOASIC_ESAR (2*IOASIC_SLOT_SIZE) /* LANCE MAC address chip */
+#define IOASIC_LANCE (3*IOASIC_SLOT_SIZE) /* LANCE Ethernet */
+#define IOASIC_SCC0 (4*IOASIC_SLOT_SIZE) /* SCC #0 */
#define IOASIC_VDAC_HI (5*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
-#define IOASIC_SCC1 (6*IOASIC_SLOT_SIZE) /* SCC #1 (3min, 3max+) */
+#define IOASIC_SCC1 (6*IOASIC_SLOT_SIZE) /* SCC #1 (3min, 3max+) */
#define IOASIC_VDAC_LO (7*IOASIC_SLOT_SIZE) /* VDAC (maxine) */
-#define IOASIC_TOY (8*IOASIC_SLOT_SIZE) /* RTC */
-#define IOASIC_ISDN (9*IOASIC_SLOT_SIZE) /* ISDN (maxine) */
+#define IOASIC_TOY (8*IOASIC_SLOT_SIZE) /* RTC */
+#define IOASIC_ISDN (9*IOASIC_SLOT_SIZE) /* ISDN (maxine) */
#define IOASIC_ERRADDR (9*IOASIC_SLOT_SIZE) /* bus error address (3max+) */
-#define IOASIC_CHKSYN (10*IOASIC_SLOT_SIZE) /* ECC syndrome (3max+) */
+#define IOASIC_CHKSYN (10*IOASIC_SLOT_SIZE) /* ECC syndrome (3max+) */
#define IOASIC_ACC_BUS (10*IOASIC_SLOT_SIZE) /* ACCESS.bus (maxine) */
-#define IOASIC_MCR (11*IOASIC_SLOT_SIZE) /* memory control (3max+) */
-#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
-#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
+#define IOASIC_MCR (11*IOASIC_SLOT_SIZE) /* memory control (3max+) */
+#define IOASIC_FLOPPY (11*IOASIC_SLOT_SIZE) /* FDC (maxine) */
+#define IOASIC_SCSI (12*IOASIC_SLOT_SIZE) /* ASC SCSI */
#define IOASIC_FDC_DMA (13*IOASIC_SLOT_SIZE) /* FDC DMA (maxine) */
-#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
+#define IOASIC_SCSI_DMA (14*IOASIC_SLOT_SIZE) /* ??? */
#define IOASIC_RES_15 (15*IOASIC_SLOT_SIZE) /* unused? */
diff --git a/arch/mips/include/asm/dec/kn01.h b/arch/mips/include/asm/dec/kn01.h
index 88d9ffd7425..0eb3241de70 100644
--- a/arch/mips/include/asm/dec/kn01.h
+++ b/arch/mips/include/asm/dec/kn01.h
@@ -57,12 +57,12 @@
/*
* System Control & Status Register bits.
*/
-#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
-#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
-#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
-#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
-#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
-#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
+#define KN01_CSR_MNFMOD (1<<15) /* MNFMOD manufacturing jumper */
+#define KN01_CSR_STATUS (1<<14) /* self-test result status output */
+#define KN01_CSR_PARDIS (1<<13) /* parity error disable */
+#define KN01_CSR_CRSRTST (1<<12) /* PCC test output */
+#define KN01_CSR_MONO (1<<11) /* mono/color fb SIMM installed */
+#define KN01_CSR_MEMERR (1<<10) /* write timeout error status & ack*/
#define KN01_CSR_VINT (1<<9) /* PCC area detect #2 status & ack */
#define KN01_CSR_TXDIS (1<<8) /* DZ11 transmit disable */
#define KN01_CSR_VBGTRG (1<<2) /* blue DAC voltage over green (r/o) */
diff --git a/arch/mips/include/asm/dec/kn02ca.h b/arch/mips/include/asm/dec/kn02ca.h
index 92c0fe25609..69dc2a9a2d0 100644
--- a/arch/mips/include/asm/dec/kn02ca.h
+++ b/arch/mips/include/asm/dec/kn02ca.h
@@ -68,7 +68,7 @@
#define KN03CA_IO_SSR_ISDN_RST (1<<12) /* ~ISDN (Am79C30A) reset */
#define KN03CA_IO_SSR_FLOPPY_RST (1<<7) /* ~FDC (82077) reset */
-#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
+#define KN03CA_IO_SSR_VIDEO_RST (1<<6) /* ~framebuffer reset */
#define KN03CA_IO_SSR_AB_RST (1<<5) /* ACCESS.bus reset */
#define KN03CA_IO_SSR_RES_4 (1<<4) /* unused */
#define KN03CA_IO_SSR_RES_3 (1<<4) /* unused */
diff --git a/arch/mips/include/asm/dec/prom.h b/arch/mips/include/asm/dec/prom.h
index c0ead631384..446577712be 100644
--- a/arch/mips/include/asm/dec/prom.h
+++ b/arch/mips/include/asm/dec/prom.h
@@ -49,7 +49,7 @@
#ifdef CONFIG_64BIT
-#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
+#define prom_is_rex(magic) 1 /* KN04 and KN05 are REX PROMs. */
#else /* !CONFIG_64BIT */
diff --git a/arch/mips/include/asm/dma-mapping.h b/arch/mips/include/asm/dma-mapping.h
index 006b43e38a9..f8fc74b6cb4 100644
--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -5,7 +5,7 @@
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>
-#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
+#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
#include <dma-coherence.h>
#endif
diff --git a/arch/mips/include/asm/dma.h b/arch/mips/include/asm/dma.h
index f5097f65a8a..5b9ed1bffdb 100644
--- a/arch/mips/include/asm/dma.h
+++ b/arch/mips/include/asm/dma.h
@@ -47,21 +47,21 @@
*
* Address mapping for channels 0-3:
*
- * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * | ... | | ... | | ... |
- * P7 ... P0 A7 ... A0 A7 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
+ * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses)
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * | ... | | ... | | ... |
+ * P7 ... P0 A7 ... A0 A7 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Address mapping for channels 5-7:
*
- * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
- * | ... | \ \ ... \ \ \ ... \ \
- * | ... | \ \ ... \ \ \ ... \ (not used)
- * | ... | \ \ ... \ \ \ ... \
- * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
- * | Page | Addr MSB | Addr LSB | (DMA registers)
+ * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses)
+ * | ... | \ \ ... \ \ \ ... \ \
+ * | ... | \ \ ... \ \ \ ... \ (not used)
+ * | ... | \ \ ... \ \ \ ... \
+ * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0
+ * | Page | Addr MSB | Addr LSB | (DMA registers)
*
* Again, channels 5-7 transfer _physical_ words (16 bits), so addresses
* and counts _must_ be word-aligned (the lowest address bit is _ignored_ at
@@ -102,55 +102,55 @@
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
-#define DMA1_REQ_REG 0x09 /* request register (w) */
+#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
-#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
+#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
-#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
-#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
+#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
+#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
-#define DMA2_REQ_REG 0xD2 /* request register (w) */
+#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
-#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
+#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
-#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
-#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
-
-#define DMA_ADDR_0 0x00 /* DMA address registers */
-#define DMA_ADDR_1 0x02
-#define DMA_ADDR_2 0x04
-#define DMA_ADDR_3 0x06
-#define DMA_ADDR_4 0xC0
-#define DMA_ADDR_5 0xC4
-#define DMA_ADDR_6 0xC8
-#define DMA_ADDR_7 0xCC
-
-#define DMA_CNT_0 0x01 /* DMA count registers */
-#define DMA_CNT_1 0x03
-#define DMA_CNT_2 0x05
-#define DMA_CNT_3 0x07
-#define DMA_CNT_4 0xC2
-#define DMA_CNT_5 0xC6
-#define DMA_CNT_6 0xCA
-#define DMA_CNT_7 0xCE
-
-#define DMA_PAGE_0 0x87 /* DMA page registers */
-#define DMA_PAGE_1 0x83
-#define DMA_PAGE_2 0x81
-#define DMA_PAGE_3 0x82
-#define DMA_PAGE_5 0x8B
-#define DMA_PAGE_6 0x89
-#define DMA_PAGE_7 0x8A
+#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
+#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
+
+#define DMA_ADDR_0 0x00 /* DMA address registers */
+#define DMA_ADDR_1 0x02
+#define DMA_ADDR_2 0x04
+#define DMA_ADDR_3 0x06
+#define DMA_ADDR_4 0xC0
+#define DMA_ADDR_5 0xC4
+#define DMA_ADDR_6 0xC8
+#define DMA_ADDR_7 0xCC
+
+#define DMA_CNT_0 0x01 /* DMA count registers */
+#define DMA_CNT_1 0x03
+#define DMA_CNT_2 0x05
+#define DMA_CNT_3 0x07
+#define DMA_CNT_4 0xC2
+#define DMA_CNT_5 0xC6
+#define DMA_CNT_6 0xCA
+#define DMA_CNT_7 0xCE
+
+#define DMA_PAGE_0 0x87 /* DMA page registers */
+#define DMA_PAGE_1 0x83
+#define DMA_PAGE_2 0x81
+#define DMA_PAGE_3 0x82
+#define DMA_PAGE_5 0x8B
+#define DMA_PAGE_6 0x89
+#define DMA_PAGE_7 0x8A
#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */
#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */
-#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
+#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */
#define DMA_AUTOINIT 0x10
@@ -172,7 +172,7 @@ static __inline__ void release_dma_lock(unsigned long flags)
static __inline__ void enable_dma(unsigned int dmanr)
{
if (dmanr<=3)
- dma_outb(dmanr, DMA1_MASK_REG);
+ dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
}
@@ -204,7 +204,7 @@ static __inline__ void clear_dma_ff(unsigned int dmanr)
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
if (dmanr<=3)
- dma_outb(mode | dmanr, DMA1_MODE_REG);
+ dma_outb(mode | dmanr, DMA1_MODE_REG);
else
dma_outb(mode | (dmanr&3), DMA2_MODE_REG);
}
@@ -248,10 +248,10 @@ static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
set_dma_page(dmanr, a>>16);
- if (dmanr <= 3) {
+ if (dmanr <= 3) {
dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
- } else {
+ dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE );
+ } else {
dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE );
}
@@ -268,14 +268,14 @@ static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
- count--;
- if (dmanr <= 3) {
+ count--;
+ if (dmanr <= 3) {
dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE );
- } else {
+ } else {
dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE );
- }
+ }
}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 455c0ac7d4e..cf3ae2480b1 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -11,13 +11,13 @@
/* ELF header e_flags defines. */
/* MIPS architecture level. */
-#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
-#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
-#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
-#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
-#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
-#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
-#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
+#define EF_MIPS_ARCH_1 0x00000000 /* -mips1 code. */
+#define EF_MIPS_ARCH_2 0x10000000 /* -mips2 code. */
+#define EF_MIPS_ARCH_3 0x20000000 /* -mips3 code. */
+#define EF_MIPS_ARCH_4 0x30000000 /* -mips4 code. */
+#define EF_MIPS_ARCH_5 0x40000000 /* -mips5 code. */
+#define EF_MIPS_ARCH_32 0x50000000 /* MIPS32 code. */
+#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32 R2 code. */
#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64 R2 code. */
@@ -74,7 +74,7 @@
#define R_MIPS_CALL16 11
#define R_MIPS_GPREL32 12
/* The remaining relocs are defined on Irix, although they are not
- in the MIPS ELF ABI. */
+ in the MIPS ELF ABI. */
#define R_MIPS_UNUSED1 13
#define R_MIPS_UNUSED2 14
#define R_MIPS_UNUSED3 15
@@ -214,7 +214,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
\
if (__h->e_machine != EM_MIPS) \
__res = 0; \
- if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
__res = 0; \
\
__res; \
@@ -292,7 +292,7 @@ do { \
__SET_PERSONALITY32_O32(); \
} while (0)
#else
-#define __SET_PERSONALITY32(ex) do { } while (0)
+#define __SET_PERSONALITY32(ex) do { } while (0)
#endif
#define SET_PERSONALITY(ex) \
@@ -337,11 +337,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
-#define ELF_HWCAP (0)
+#define ELF_HWCAP (0)
/*
* This yields a string that ld.so will use to load implementation
- * specific libraries for optimization. This is more specific in
+ * specific libraries for optimization. This is more specific in
* intent than poking at uname or /proc/cpuinfo.
*/
@@ -365,11 +365,11 @@ extern const char *__elf_platform;
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
#ifndef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
diff --git a/arch/mips/include/asm/emma/emma2rh.h b/arch/mips/include/asm/emma/emma2rh.h
index c1449d20ef0..ecf059608bd 100644
--- a/arch/mips/include/asm/emma/emma2rh.h
+++ b/arch/mips/include/asm/emma/emma2rh.h
@@ -2,7 +2,7 @@
* Copyright (C) NEC Electronics Corporation 2005-2006
*
* This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,7 +40,7 @@
#define EMMA2RH_BHIF_INT1_EN_2 (0x000058+REGBASE)
#define EMMA2RH_BHIF_SW_INT (0x000070+REGBASE)
#define EMMA2RH_BHIF_SW_INT_EN (0x000080+REGBASE)
-#define EMMA2RH_BHIF_SW_INT_CLR (0x000090+REGBASE)
+#define EMMA2RH_BHIF_SW_INT_CLR (0x000090+REGBASE)
#define EMMA2RH_BHIF_MAIN_CTRL (0x0000b4+REGBASE)
#define EMMA2RH_BHIF_EXCEPT_VECT_BASE_ADDRESS (0x0000c0+REGBASE)
#define EMMA2RH_GPIO_DIR (0x110d20+REGBASE)
@@ -73,7 +73,7 @@
* Memory map (physical address)
*
* Note most of the following address must be properly aligned by the
- * corresponding size. For example, if PCI_IO_SIZE is 16MB, then
+ * corresponding size. For example, if PCI_IO_SIZE is 16MB, then
* PCI_IO_BASE must be aligned along 16MB boundary.
*/
@@ -96,8 +96,8 @@
#define EMMA2RH_ROM_BASE 0x1c000000
#define EMMA2RH_ROM_SIZE 0x04000000 /* 64 MB */
-#define EMMA2RH_PCI_CONFIG_BASE EMMA2RH_PCI_IO_BASE
-#define EMMA2RH_PCI_CONFIG_SIZE EMMA2RH_PCI_IO_SIZE
+#define EMMA2RH_PCI_CONFIG_BASE EMMA2RH_PCI_IO_BASE
+#define EMMA2RH_PCI_CONFIG_SIZE EMMA2RH_PCI_IO_SIZE
#define NUM_EMMA2RH_IRQ 96
@@ -169,51 +169,51 @@ static inline u8 emma2rh_in8(u32 offset)
**/
/*---------------------------------------------------------------------------*/
-/* CNT - Control register (00H R/W) */
+/* CNT - Control register (00H R/W) */
/*---------------------------------------------------------------------------*/
-#define SPT 0x00000001
-#define STT 0x00000002
-#define ACKE 0x00000004
-#define WTIM 0x00000008
-#define SPIE 0x00000010
-#define WREL 0x00000020
-#define LREL 0x00000040
-#define IICE 0x00000080
-#define CNT_RESERVED 0x000000ff /* reserved bit 0 */
-
-#define I2C_EMMA_START (IICE | STT)
-#define I2C_EMMA_STOP (IICE | SPT)
+#define SPT 0x00000001
+#define STT 0x00000002
+#define ACKE 0x00000004
+#define WTIM 0x00000008
+#define SPIE 0x00000010
+#define WREL 0x00000020
+#define LREL 0x00000040
+#define IICE 0x00000080
+#define CNT_RESERVED 0x000000ff /* reserved bit 0 */
+
+#define I2C_EMMA_START (IICE | STT)
+#define I2C_EMMA_STOP (IICE | SPT)
#define I2C_EMMA_REPSTART I2C_EMMA_START
/*---------------------------------------------------------------------------*/
-/* STA - Status register (10H Read) */
+/* STA - Status register (10H Read) */
/*---------------------------------------------------------------------------*/
-#define MSTS 0x00000080
-#define ALD 0x00000040
-#define EXC 0x00000020
-#define COI 0x00000010
-#define TRC 0x00000008
-#define ACKD 0x00000004
-#define STD 0x00000002
-#define SPD 0x00000001
+#define MSTS 0x00000080
+#define ALD 0x00000040
+#define EXC 0x00000020
+#define COI 0x00000010
+#define TRC 0x00000008
+#define ACKD 0x00000004
+#define STD 0x00000002
+#define SPD 0x00000001
/*---------------------------------------------------------------------------*/
-/* CSEL - Clock select register (20H R/W) */
+/* CSEL - Clock select register (20H R/W) */
/*---------------------------------------------------------------------------*/
-#define FCL 0x00000080
-#define ND50 0x00000040
-#define CLD 0x00000020
-#define DAD 0x00000010
-#define SMC 0x00000008
-#define DFC 0x00000004
-#define CL 0x00000003
-#define CSEL_RESERVED 0x000000ff /* reserved bit 0 */
-
-#define FAST397 0x0000008b
-#define FAST297 0x0000008a
-#define FAST347 0x0000000b
-#define FAST260 0x0000000a
-#define FAST130 0x00000008
+#define FCL 0x00000080
+#define ND50 0x00000040
+#define CLD 0x00000020
+#define DAD 0x00000010
+#define SMC 0x00000008
+#define DFC 0x00000004
+#define CL 0x00000003
+#define CSEL_RESERVED 0x000000ff /* reserved bit 0 */
+
+#define FAST397 0x0000008b
+#define FAST297 0x0000008a
+#define FAST347 0x0000000b
+#define FAST260 0x0000000a
+#define FAST130 0x00000008
#define STANDARD108 0x00000083
#define STANDARD83 0x00000082
#define STANDARD95 0x00000003
@@ -222,32 +222,32 @@ static inline u8 emma2rh_in8(u32 offset)
#define STANDARD71 0x00000000
/*---------------------------------------------------------------------------*/
-/* SVA - Slave address register (30H R/W) */
+/* SVA - Slave address register (30H R/W) */
/*---------------------------------------------------------------------------*/
-#define SVA 0x000000fe
+#define SVA 0x000000fe
/*---------------------------------------------------------------------------*/
-/* SHR - Shift register (40H R/W) */
+/* SHR - Shift register (40H R/W) */
/*---------------------------------------------------------------------------*/
-#define SR 0x000000ff
+#define SR 0x000000ff
/*---------------------------------------------------------------------------*/
-/* INT - Interrupt register (50H R/W) */
-/* INTM - Interrupt mask register (60H R/W) */
+/* INT - Interrupt register (50H R/W) */
+/* INTM - Interrupt mask register (60H R/W) */
/*---------------------------------------------------------------------------*/
-#define INTE0 0x00000001
+#define INTE0 0x00000001
/***********************************************************************
* I2C registers
***********************************************************************
*/
-#define I2C_EMMA_CNT 0x00
-#define I2C_EMMA_STA 0x10
-#define I2C_EMMA_CSEL 0x20
-#define I2C_EMMA_SVA 0x30
-#define I2C_EMMA_SHR 0x40
-#define I2C_EMMA_INT 0x50
-#define I2C_EMMA_INTM 0x60
+#define I2C_EMMA_CNT 0x00
+#define I2C_EMMA_STA 0x10
+#define I2C_EMMA_CSEL 0x20
+#define I2C_EMMA_SVA 0x30
+#define I2C_EMMA_SHR 0x40
+#define I2C_EMMA_INT 0x50
+#define I2C_EMMA_INTM 0x60
/*
* include the board dependent part
diff --git a/arch/mips/include/asm/emma/markeins.h b/arch/mips/include/asm/emma/markeins.h
index bf2d229c2da..e55a6747782 100644
--- a/arch/mips/include/asm/emma/markeins.h
+++ b/arch/mips/include/asm/emma/markeins.h
@@ -2,7 +2,7 @@
* Copyright (C) NEC Electronics Corporation 2005-2006
*
* This file based on include/asm-mips/ddb5xxx/ddb5xxx.h
- * Copyright 2001 MontaVista Software Inc.
+ * Copyright 2001 MontaVista Software Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index 98bcc98cf29..dfaaf493e9d 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -95,7 +95,7 @@ static inline unsigned long fix_to_virt(const unsigned int idx)
if (idx >= __end_of_fixed_addresses)
__this_fixmap_does_not_exist();
- return __fix_to_virt(idx);
+ return __fix_to_virt(idx);
}
static inline unsigned long virt_to_fix(const unsigned long vaddr)
@@ -111,7 +111,7 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
* Called from pgtable_init()
*/
extern void fixrange_init(unsigned long start, unsigned long end,
- pgd_t *pgd_base);
+ pgd_t *pgd_base);
#endif
diff --git a/arch/mips/include/asm/floppy.h b/arch/mips/include/asm/floppy.h
index 4456c9c47e2..d75aed36480 100644
--- a/arch/mips/include/asm/floppy.h
+++ b/arch/mips/include/asm/floppy.h
@@ -24,9 +24,9 @@ static inline void fd_cacheflush(char * addr, long size)
* And on Mips's the CMOS info fails also ...
*
* FIXME: This information should come from the ARC configuration tree
- * or wherever a particular machine has stored this ...
+ * or wherever a particular machine has stored this ...
*/
-#define FLOPPY0_TYPE fd_drive_type(0)
+#define FLOPPY0_TYPE fd_drive_type(0)
#define FLOPPY1_TYPE fd_drive_type(1)
#define FDC1 fd_getfdaddr1()
diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h
index 2b5fddc8f48..429481f9028 100644
--- a/arch/mips/include/asm/fpregdef.h
+++ b/arch/mips/include/asm/fpregdef.h
@@ -20,15 +20,15 @@
* These definitions only cover the R3000-ish 16/32 register model.
* But we're trying to be R3000 friendly anyway ...
*/
-#define fv0 $f0 /* return value */
+#define fv0 $f0 /* return value */
#define fv0f $f1
#define fv1 $f2
#define fv1f $f3
-#define fa0 $f12 /* argument registers */
+#define fa0 $f12 /* argument registers */
#define fa0f $f13
#define fa1 $f14
#define fa1f $f15
-#define ft0 $f4 /* caller saved */
+#define ft0 $f4 /* caller saved */
#define ft0f $f5
#define ft1 $f6
#define ft1f $f7
@@ -40,7 +40,7 @@
#define ft4f $f17
#define ft5 $f18
#define ft5f $f19
-#define fs0 $f20 /* callee saved */
+#define fs0 $f20 /* callee saved */
#define fs0f $f21
#define fs1 $f22
#define fs1f $f23
@@ -53,7 +53,7 @@
#define fs5 $f30
#define fs5f $f31
-#define fcr31 $31 /* FPU status register */
+#define fcr31 $31 /* FPU status register */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 7fcef8ef3fa..d088e5db490 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -35,14 +35,14 @@ extern void _restore_fp(struct task_struct *);
#define __enable_fpu() \
do { \
- set_c0_status(ST0_CU1); \
- enable_fpu_hazard(); \
+ set_c0_status(ST0_CU1); \
+ enable_fpu_hazard(); \
} while (0)
#define __disable_fpu() \
do { \
clear_c0_status(ST0_CU1); \
- disable_fpu_hazard(); \
+ disable_fpu_hazard(); \
} while (0)
#define enable_fpu() \
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 6ebf1734b41..6ea15815d3e 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -92,24 +92,24 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
switch (op) {
case FUTEX_OP_SET:
- __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ADD:
- __futex_atomic_op("addu $1, %1, %z5",
- ret, oldval, uaddr, oparg);
+ __futex_atomic_op("addu $1, %1, %z5",
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_OR:
__futex_atomic_op("or $1, %1, %z5",
- ret, oldval, uaddr, oparg);
+ ret, oldval, uaddr, oparg);
break;
case FUTEX_OP_ANDN:
__futex_atomic_op("and $1, %1, %z5",
- ret, oldval, uaddr, ~oparg);
+ ret, oldval, uaddr, ~oparg);
break;
case FUTEX_OP_XOR:
__futex_atomic_op("xor $1, %1, %z5",
- ret, oldval, uaddr, oparg);
+ ret, oldval, uaddr, oparg);
break;
default:
ret = -ENOSYS;
diff --git a/arch/mips/include/asm/fw/arc/hinv.h b/arch/mips/include/asm/fw/arc/hinv.h
index e6ff4add04e..f8d37d1df5d 100644
--- a/arch/mips/include/asm/fw/arc/hinv.h
+++ b/arch/mips/include/asm/fw/arc/hinv.h
@@ -12,7 +12,7 @@ typedef enum configclass {
SystemClass,
ProcessorClass,
CacheClass,
-#ifndef _NT_PROM
+#ifndef _NT_PROM
MemoryClass,
AdapterClass,
ControllerClass,
@@ -34,7 +34,7 @@ typedef enum configtype {
SecondaryICache,
SecondaryDCache,
SecondaryCache,
-#ifndef _NT_PROM
+#ifndef _NT_PROM
Memory,
#endif
EISAAdapter,
@@ -93,7 +93,7 @@ typedef enum {
} IDENTIFIERFLAG;
#ifndef NULL /* for GetChild(NULL); */
-#define NULL 0
+#define NULL 0
#endif
union key_u {
@@ -125,7 +125,7 @@ typedef struct component {
IDENTIFIERFLAG Flags;
USHORT Version;
USHORT Revision;
- ULONG Key;
+ ULONG Key;
ULONG AffinityMask;
ULONG ConfigurationDataSize;
ULONG IdentifierLength;
@@ -149,7 +149,7 @@ typedef struct systemid {
typedef enum memorytype {
ExceptionBlock,
SPBPage, /* ARCS == SystemParameterBlock */
-#ifndef _NT_PROM
+#ifndef _NT_PROM
FreeContiguous,
FreeMemory,
BadMemory,
diff --git a/arch/mips/include/asm/fw/arc/types.h b/arch/mips/include/asm/fw/arc/types.h
index 2b11f87d6fb..ad163806148 100644
--- a/arch/mips/include/asm/fw/arc/types.h
+++ b/arch/mips/include/asm/fw/arc/types.h
@@ -15,7 +15,7 @@
typedef char CHAR;
typedef short SHORT;
typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
-typedef long LONG __attribute__ ((__mode__ (__SI__)));
+typedef long LONG __attribute__ ((__mode__ (__SI__)));
typedef unsigned char UCHAR;
typedef unsigned short USHORT;
typedef unsigned long ULONG __attribute__ ((__mode__ (__SI__)));
@@ -23,11 +23,11 @@ typedef void VOID;
/* The pointer types. Note that we're using a 64-bit compiler but all
pointer in the ARC structures are only 32-bit, so we need some disgusting
- workarounds. Keep your vomit bag handy. */
+ workarounds. Keep your vomit bag handy. */
typedef LONG _PCHAR;
typedef LONG _PSHORT;
typedef LONG _PLARGE_INTEGER;
-typedef LONG _PLONG;
+typedef LONG _PLONG;
typedef LONG _PUCHAR;
typedef LONG _PUSHORT;
typedef LONG _PULONG;
@@ -40,7 +40,7 @@ typedef LONG _PVOID;
typedef char CHAR;
typedef short SHORT;
typedef long LARGE_INTEGER __attribute__ ((__mode__ (__DI__)));
-typedef long LONG __attribute__ ((__mode__ (__DI__)));
+typedef long LONG __attribute__ ((__mode__ (__DI__)));
typedef unsigned char UCHAR;
typedef unsigned short USHORT;
typedef unsigned long ULONG __attribute__ ((__mode__ (__DI__)));
@@ -51,7 +51,7 @@ typedef void VOID;
typedef CHAR *_PCHAR;
typedef SHORT *_PSHORT;
typedef LARGE_INTEGER *_PLARGE_INTEGER;
-typedef LONG *_PLONG;
+typedef LONG *_PLONG;
typedef UCHAR *_PUCHAR;
typedef USHORT *_PUSHORT;
typedef ULONG *_PULONG;
@@ -62,7 +62,7 @@ typedef VOID *_PVOID;
typedef CHAR *PCHAR;
typedef SHORT *PSHORT;
typedef LARGE_INTEGER *PLARGE_INTEGER;
-typedef LONG *PLONG;
+typedef LONG *PLONG;
typedef UCHAR *PUCHAR;
typedef USHORT *PUSHORT;
typedef ULONG *PULONG;
diff --git a/arch/mips/include/asm/fw/cfe/cfe_api.h b/arch/mips/include/asm/fw/cfe/cfe_api.h
index 0995575db32..17347551a1b 100644
--- a/arch/mips/include/asm/fw/cfe/cfe_api.h
+++ b/arch/mips/include/asm/fw/cfe/cfe_api.h
@@ -40,7 +40,7 @@ typedef long intptr_t;
/* Seal indicating CFE's presence, passed to user program. */
#define CFE_EPTSEAL 0x43464531
-#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */
+#define CFE_MI_RESERVED 0 /* memory is reserved, do not use */
#define CFE_MI_AVAILABLE 1 /* memory is available */
#define CFE_FLG_WARMSTART 0x00000001
@@ -52,13 +52,13 @@ typedef long intptr_t;
#define CFE_STDHANDLE_CONSOLE 0
-#define CFE_DEV_NETWORK 1
+#define CFE_DEV_NETWORK 1
#define CFE_DEV_DISK 2
#define CFE_DEV_FLASH 3
#define CFE_DEV_SERIAL 4
#define CFE_DEV_CPU 5
#define CFE_DEV_NVRAM 6
-#define CFE_DEV_CLOCK 7
+#define CFE_DEV_CLOCK 7
#define CFE_DEV_OTHER 8
#define CFE_DEV_MASK 0x0F
diff --git a/arch/mips/include/asm/fw/cfe/cfe_error.h b/arch/mips/include/asm/fw/cfe/cfe_error.h
index b8037463627..fc0e91f07e2 100644
--- a/arch/mips/include/asm/fw/cfe/cfe_error.h
+++ b/arch/mips/include/asm/fw/cfe/cfe_error.h
@@ -25,7 +25,7 @@
*/
#define CFE_OK 0
-#define CFE_ERR -1 /* generic error */
+#define CFE_ERR -1 /* generic error */
#define CFE_ERR_INV_COMMAND -2
#define CFE_ERR_EOF -3
#define CFE_ERR_IOERR -4
@@ -37,12 +37,12 @@
#define CFE_ERR_ENVREADONLY -10
#define CFE_ERR_NOTELF -11
-#define CFE_ERR_NOT32BIT -12
-#define CFE_ERR_WRONGENDIAN -13
-#define CFE_ERR_BADELFVERS -14
-#define CFE_ERR_NOTMIPS -15
-#define CFE_ERR_BADELFFMT -16
-#define CFE_ERR_BADADDR -17
+#define CFE_ERR_NOT32BIT -12
+#define CFE_ERR_WRONGENDIAN -13
+#define CFE_ERR_BADELFVERS -14
+#define CFE_ERR_NOTMIPS -15
+#define CFE_ERR_BADELFFMT -16
+#define CFE_ERR_BADADDR -17
#define CFE_ERR_FILENOTFOUND -18
#define CFE_ERR_UNSUPPORTED -19
@@ -73,8 +73,8 @@
#define CFE_ERR_NOTREADY -36
-#define CFE_ERR_GETMEM -37
-#define CFE_ERR_SETMEM -38
+#define CFE_ERR_GETMEM -37
+#define CFE_ERR_SETMEM -38
#define CFE_ERR_NOTCONN -39
#define CFE_ERR_ADDRINUSE -40
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
index c0cf76a2ca8..a7359f77a48 100644
--- a/arch/mips/include/asm/gcmpregs.h
+++ b/arch/mips/include/asm/gcmpregs.h
@@ -32,7 +32,7 @@
/* GCMP register access */
#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
-#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
+#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
@@ -45,76 +45,76 @@
/* GCB registers */
#define GCMP_GCB_GC_OFS 0x0000 /* Global Config Register */
-#define GCMP_GCB_GC_NUMIOCU_SHF 8
-#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
-#define GCMP_GCB_GC_NUMCORES_SHF 0
-#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
+#define GCMP_GCB_GC_NUMIOCU_SHF 8
+#define GCMP_GCB_GC_NUMIOCU_MSK GCMPGCBMSK(GC_NUMIOCU, 4)
+#define GCMP_GCB_GC_NUMCORES_SHF 0
+#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
-#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
-#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
-#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
-#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
-#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
-#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
-#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
-#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
+#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
+#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
+#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
+#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
+#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
+#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
+#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
-#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
-#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
+#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
+#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
-#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
-#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
-#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
-#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
+#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
+#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
+#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
+#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
-#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
-#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
+#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
+#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
-#define GCMP_GCB_GICBA_BASE_SHF 17
-#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
-#define GCMP_GCB_GICBA_EN_SHF 0
-#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
+#define GCMP_GCB_GICBA_BASE_SHF 17
+#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
+#define GCMP_GCB_GICBA_EN_SHF 0
+#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
/* GCB Regions */
#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
-#define GCMP_GCB_CMxBASE_BASE_SHF 16
-#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
+#define GCMP_GCB_CMxBASE_BASE_SHF 16
+#define GCMP_GCB_CMxBASE_BASE_MSK GCMPGCBMSK(CMxBASE_BASE, 16)
#define GCMP_GCB_CMxMASK_OFS(n) (0x0098+16*(n)) /* Global Region[0-3] Address Mask */
-#define GCMP_GCB_CMxMASK_MASK_SHF 16
-#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
-#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
-#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
-#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
-#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
-#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
-#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
+#define GCMP_GCB_CMxMASK_MASK_SHF 16
+#define GCMP_GCB_CMxMASK_MASK_MSK GCMPGCBMSK(CMxMASK_MASK, 16)
+#define GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
+#define GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
+#define GCMP_GCB_CMxMASK_CMREGTGT_MEM 0
+#define GCMP_GCB_CMxMASK_CMREGTGT_MEM1 1
+#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
+#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
/* Core local/Core other control block registers */
#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
-#define GCMP_CCB_RESETR_INRESET_SHF 0
-#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
+#define GCMP_CCB_RESETR_INRESET_SHF 0
+#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
-#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
-#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
+#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
+#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
-#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
-#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
-#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
-#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
-#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
-#define GCMP_CCB_CFG_NUMVPE_SHF 0
-#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
+#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
+#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
+#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
+#define GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
+#define GCMP_CCB_CFG_IOCUTYPE_CIOCU 2
+#define GCMP_CCB_CFG_NUMVPE_SHF 0
+#define GCMP_CCB_CFG_NUMVPE_MSK GCMPCCBMSK(CFG_NUMVPE, 10)
#define GCMP_CCB_OTHER_OFS 0x0018 /* Other Address */
-#define GCMP_CCB_OTHER_CORENUM_SHF 16
-#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
+#define GCMP_CCB_OTHER_CORENUM_SHF 16
+#define GCMP_CCB_OTHER_CORENUM_MSK GCMPCCBMSK(OTHER_CORENUM, 16)
#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
-#define GCMP_CCB_RESETBASE_BEV_SHF 12
-#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
+#define GCMP_CCB_RESETBASE_BEV_SHF 12
+#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 37620db588b..bdc9786ab5a 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -66,7 +66,7 @@
/* Register Map for Shared Section */
-#define GIC_SH_CONFIG_OFS 0x0000
+#define GIC_SH_CONFIG_OFS 0x0000
/* Shared Global Counter */
#define GIC_SH_COUNTER_31_00_OFS 0x0010
@@ -146,13 +146,13 @@
#define GIC_SH_PEND_223_192_OFS 0x0498
#define GIC_SH_PEND_255_224_OFS 0x049c
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS 0x0500
/* Maps Interrupt X to a Pin */
#define GIC_SH_MAP_TO_PIN(intr) \
(GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS 0x2000
/* Maps Interrupt X to a VPE */
#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
@@ -326,7 +326,7 @@ struct gic_intr_map {
unsigned int polarity; /* Polarity : +/- */
unsigned int trigtype; /* Trigger : Edge/Levl */
unsigned int flags; /* Misc flags */
-#define GIC_FLAG_IPI 0x01
+#define GIC_FLAG_IPI 0x01
#define GIC_FLAG_TRANSPARENT 0x02
};
@@ -343,10 +343,10 @@ struct gic_shared_intr_map {
/* GIC nomenclature for Core Interrupt Pins. */
#define GIC_CPU_INT0 0 /* Core Interrupt 2 */
-#define GIC_CPU_INT1 1 /* . */
-#define GIC_CPU_INT2 2 /* . */
-#define GIC_CPU_INT3 3 /* . */
-#define GIC_CPU_INT4 4 /* . */
+#define GIC_CPU_INT1 1 /* . */
+#define GIC_CPU_INT2 2 /* . */
+#define GIC_CPU_INT3 3 /* . */
+#define GIC_CPU_INT4 4 /* . */
#define GIC_CPU_INT5 5 /* Core Interrupt 5 */
/* Local GIC interrupts. */
@@ -359,6 +359,7 @@ struct gic_shared_intr_map {
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET (1)
+extern int gic_present;
extern unsigned long _gic_base;
extern unsigned int gic_irq_base;
extern unsigned int gic_irq_flags[];
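
[Editor's note, not part of the patch: the map-to-pin macro re-aligned in the hunk above is plain offset arithmetic; for example, GIC_SH_MAP_TO_PIN(5) expands to 0x0500 + 4 * 5 = 0x0514, i.e. the routing register for shared interrupt 5 sits 0x14 bytes above the map-to-pin base in the GIC shared section.]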
diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h
index 5437c84664b..0878701712f 100644
--- a/arch/mips/include/asm/gio_device.h
+++ b/arch/mips/include/asm/gio_device.h
@@ -6,15 +6,15 @@ struct gio_device_id {
};
struct gio_device {
- struct device dev;
+ struct device dev;
struct resource resource;
- unsigned int irq;
- unsigned int slotno;
+ unsigned int irq;
+ unsigned int slotno;
- const char *name;
+ const char *name;
struct gio_device_id id;
- unsigned id32:1;
- unsigned gio64:1;
+ unsigned id32:1;
+ unsigned gio64:1;
};
#define to_gio_device(d) container_of(d, struct gio_device, dev)
@@ -50,7 +50,7 @@ static inline void gio_device_free(struct gio_device *dev)
extern int gio_register_driver(struct gio_driver *);
extern void gio_unregister_driver(struct gio_driver *);
-#define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev)
+#define gio_get_drvdata(_dev) drv_get_drvdata(&(_dev)->dev)
#define gio_set_drvdata(_dev, data) drv_set_drvdata(&(_dev)->dev, (data))
extern void gio_set_master(struct gio_device *);
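
[Editor's note, not part of the patch: to_gio_device() above is the usual container_of() pattern, recovering the enclosing gio_device from its embedded struct device. A hedged sketch of its use; the helper name is made up for illustration and only touches fields shown in the hunk:]

	/* Hypothetical helper: recover the GIO slot number from a generic device. */
	static unsigned int gio_slot_of(struct device *dev)
	{
		struct gio_device *gdev = to_gio_device(dev);

		return gdev->slotno;
	}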
diff --git a/arch/mips/include/asm/gt64120.h b/arch/mips/include/asm/gt64120.h
index 0aa44abc77f..2e72abb9440 100644
--- a/arch/mips/include/asm/gt64120.h
+++ b/arch/mips/include/asm/gt64120.h
@@ -34,7 +34,7 @@
#define GT_MULTI_OFS 0x120
-/* CPU Address Decode. */
+/* CPU Address Decode. */
#define GT_SCS10LD_OFS 0x008
#define GT_SCS10HD_OFS 0x010
#define GT_SCS32LD_OFS 0x018
@@ -106,12 +106,12 @@
#define GT_ADERR_OFS 0x470
-/* SDRAM Configuration. */
+/* SDRAM Configuration. */
#define GT_SDRAM_CFG_OFS 0x448
#define GT_SDRAM_OPMODE_OFS 0x474
#define GT_SDRAM_BM_OFS 0x478
-#define GT_SDRAM_ADDRDECODE_OFS 0x47c
+#define GT_SDRAM_ADDRDECODE_OFS 0x47c
/* SDRAM Parameters. */
#define GT_SDRAM_B0_OFS 0x44c
@@ -126,14 +126,14 @@
#define GT_DEV_B3_OFS 0x468
#define GT_DEV_BOOT_OFS 0x46c
-/* ECC. */
+/* ECC. */
#define GT_ECC_ERRDATALO 0x480 /* GT-64120A only */
#define GT_ECC_ERRDATAHI 0x484 /* GT-64120A only */
#define GT_ECC_MEM 0x488 /* GT-64120A only */
#define GT_ECC_CALC 0x48c /* GT-64120A only */
#define GT_ECC_ERRADDR 0x490 /* GT-64120A only */
-/* DMA Record. */
+/* DMA Record. */
#define GT_DMA0_CNT_OFS 0x800
#define GT_DMA1_CNT_OFS 0x804
#define GT_DMA2_CNT_OFS 0x808
@@ -156,13 +156,13 @@
#define GT_DMA2_CUR_OFS 0x878
#define GT_DMA3_CUR_OFS 0x87c
-/* DMA Channel Control. */
+/* DMA Channel Control. */
#define GT_DMA0_CTRL_OFS 0x840
#define GT_DMA1_CTRL_OFS 0x844
#define GT_DMA2_CTRL_OFS 0x848
#define GT_DMA3_CTRL_OFS 0x84c
-/* DMA Arbiter. */
+/* DMA Arbiter. */
#define GT_DMA_ARB_OFS 0x860
/* Timer/Counter. */
@@ -220,7 +220,7 @@
#define GT_PCI0_CFGADDR_OFS 0xcf8
#define GT_PCI0_CFGDATA_OFS 0xcfc
-/* Interrupts. */
+/* Interrupts. */
#define GT_INTRCAUSE_OFS 0xc18
#define GT_INTRMASK_OFS 0xc1c
@@ -547,15 +547,15 @@
#define GT_DEF_BASE 0x14000000UL
#define GT_MAX_BANKSIZE (256 * 1024 * 1024) /* Max 256MB bank */
-#define GT_LATTIM_MIN 6 /* Minimum lat */
+#define GT_LATTIM_MIN 6 /* Minimum lat */
/*
* The gt64120_dep.h file must define the following macros
*
* GT_READ(ofs, data_pointer)
- * GT_WRITE(ofs, data) - read/write GT64120 registers in 32bit
+ * GT_WRITE(ofs, data) - read/write GT64120 registers in 32bit
*
- * TIMER - gt64120 timer irq, temporary solution until
+ * TIMER - gt64120 timer irq, temporary solution until
* full gt64120 cascade interrupt support is in place
*/
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index f0324e92d08..44d6a5bde4a 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -25,7 +25,7 @@ static inline void name(void) \
}
/*
- * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
+ * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
*/
extern void mips_ihb(void);
@@ -68,7 +68,7 @@ ASMMACRO(back_to_back_c0_hazard,
)
/*
* gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler. Gas otoh has the
+ * address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
* The alterantive is switching the assembler to 64-bit code which happens
@@ -114,7 +114,7 @@ ASMMACRO(back_to_back_c0_hazard,
)
/*
* gcc has a tradition of misscompiling the previous construct using the
- * address of a label as argument to inline assembler. Gas otoh has the
+ * address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
* The alterantive is switching the assembler to 64-bit code which happens
@@ -141,7 +141,7 @@ do { \
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
- defined(CONFIG_CPU_R5500)
+ defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 2d91888c9b7..b0dd0c84df7 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -39,8 +39,8 @@ extern pte_t *pkmap_page_table;
*/
#define LAST_PKMAP 1024
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
-#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
-#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
diff --git a/arch/mips/include/asm/inst.h b/arch/mips/include/asm/inst.h
index 33c34adbecf..f1eadf76407 100644
--- a/arch/mips/include/asm/inst.h
+++ b/arch/mips/include/asm/inst.h
@@ -11,353 +11,7 @@
#ifndef _ASM_INST_H
#define _ASM_INST_H
-/*
- * Major opcodes; before MIPS IV cop1x was called cop3.
- */
-enum major_op {
- spec_op, bcond_op, j_op, jal_op,
- beq_op, bne_op, blez_op, bgtz_op,
- addi_op, addiu_op, slti_op, sltiu_op,
- andi_op, ori_op, xori_op, lui_op,
- cop0_op, cop1_op, cop2_op, cop1x_op,
- beql_op, bnel_op, blezl_op, bgtzl_op,
- daddi_op, daddiu_op, ldl_op, ldr_op,
- spec2_op, jalx_op, mdmx_op, spec3_op,
- lb_op, lh_op, lwl_op, lw_op,
- lbu_op, lhu_op, lwr_op, lwu_op,
- sb_op, sh_op, swl_op, sw_op,
- sdl_op, sdr_op, swr_op, cache_op,
- ll_op, lwc1_op, lwc2_op, pref_op,
- lld_op, ldc1_op, ldc2_op, ld_op,
- sc_op, swc1_op, swc2_op, major_3b_op,
- scd_op, sdc1_op, sdc2_op, sd_op
-};
-
-/*
- * func field of spec opcode.
- */
-enum spec_op {
- sll_op, movc_op, srl_op, sra_op,
- sllv_op, pmon_op, srlv_op, srav_op,
- jr_op, jalr_op, movz_op, movn_op,
- syscall_op, break_op, spim_op, sync_op,
- mfhi_op, mthi_op, mflo_op, mtlo_op,
- dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
- mult_op, multu_op, div_op, divu_op,
- dmult_op, dmultu_op, ddiv_op, ddivu_op,
- add_op, addu_op, sub_op, subu_op,
- and_op, or_op, xor_op, nor_op,
- spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
- dadd_op, daddu_op, dsub_op, dsubu_op,
- tge_op, tgeu_op, tlt_op, tltu_op,
- teq_op, spec5_unused_op, tne_op, spec6_unused_op,
- dsll_op, spec7_unused_op, dsrl_op, dsra_op,
- dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
-};
-
-/*
- * func field of spec2 opcode.
- */
-enum spec2_op {
- madd_op, maddu_op, mul_op, spec2_3_unused_op,
- msub_op, msubu_op, /* more unused ops */
- clz_op = 0x20, clo_op,
- dclz_op = 0x24, dclo_op,
- sdbpp_op = 0x3f
-};
-
-/*
- * func field of spec3 opcode.
- */
-enum spec3_op {
- ext_op, dextm_op, dextu_op, dext_op,
- ins_op, dinsm_op, dinsu_op, dins_op,
- lx_op = 0x0a,
- bshfl_op = 0x20,
- dbshfl_op = 0x24,
- rdhwr_op = 0x3b
-};
-
-/*
- * rt field of bcond opcodes.
- */
-enum rt_op {
- bltz_op, bgez_op, bltzl_op, bgezl_op,
- spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
- tgei_op, tgeiu_op, tlti_op, tltiu_op,
- teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
- bltzal_op, bgezal_op, bltzall_op, bgezall_op,
- rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
- rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
- bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
-};
-
-/*
- * rs field of cop opcodes.
- */
-enum cop_op {
- mfc_op = 0x00, dmfc_op = 0x01,
- cfc_op = 0x02, mtc_op = 0x04,
- dmtc_op = 0x05, ctc_op = 0x06,
- bc_op = 0x08, cop_op = 0x10,
- copm_op = 0x18
-};
-
-/*
- * rt field of cop.bc_op opcodes
- */
-enum bcop_op {
- bcf_op, bct_op, bcfl_op, bctl_op
-};
-
-/*
- * func field of cop0 coi opcodes.
- */
-enum cop0_coi_func {
- tlbr_op = 0x01, tlbwi_op = 0x02,
- tlbwr_op = 0x06, tlbp_op = 0x08,
- rfe_op = 0x10, eret_op = 0x18
-};
-
-/*
- * func field of cop0 com opcodes.
- */
-enum cop0_com_func {
- tlbr1_op = 0x01, tlbw_op = 0x02,
- tlbp1_op = 0x08, dctr_op = 0x09,
- dctw_op = 0x0a
-};
-
-/*
- * fmt field of cop1 opcodes.
- */
-enum cop1_fmt {
- s_fmt, d_fmt, e_fmt, q_fmt,
- w_fmt, l_fmt
-};
-
-/*
- * func field of cop1 instructions using d, s or w format.
- */
-enum cop1_sdw_func {
- fadd_op = 0x00, fsub_op = 0x01,
- fmul_op = 0x02, fdiv_op = 0x03,
- fsqrt_op = 0x04, fabs_op = 0x05,
- fmov_op = 0x06, fneg_op = 0x07,
- froundl_op = 0x08, ftruncl_op = 0x09,
- fceill_op = 0x0a, ffloorl_op = 0x0b,
- fround_op = 0x0c, ftrunc_op = 0x0d,
- fceil_op = 0x0e, ffloor_op = 0x0f,
- fmovc_op = 0x11, fmovz_op = 0x12,
- fmovn_op = 0x13, frecip_op = 0x15,
- frsqrt_op = 0x16, fcvts_op = 0x20,
- fcvtd_op = 0x21, fcvte_op = 0x22,
- fcvtw_op = 0x24, fcvtl_op = 0x25,
- fcmp_op = 0x30
-};
-
-/*
- * func field of cop1x opcodes (MIPS IV).
- */
-enum cop1x_func {
- lwxc1_op = 0x00, ldxc1_op = 0x01,
- pfetch_op = 0x07, swxc1_op = 0x08,
- sdxc1_op = 0x09, madd_s_op = 0x20,
- madd_d_op = 0x21, madd_e_op = 0x22,
- msub_s_op = 0x28, msub_d_op = 0x29,
- msub_e_op = 0x2a, nmadd_s_op = 0x30,
- nmadd_d_op = 0x31, nmadd_e_op = 0x32,
- nmsub_s_op = 0x38, nmsub_d_op = 0x39,
- nmsub_e_op = 0x3a
-};
-
-/*
- * func field for mad opcodes (MIPS IV).
- */
-enum mad_func {
- madd_fp_op = 0x08, msub_fp_op = 0x0a,
- nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
-};
-
-/*
- * func field for special3 lx opcodes (Cavium Octeon).
- */
-enum lx_func {
- lwx_op = 0x00,
- lhx_op = 0x04,
- lbux_op = 0x06,
- ldx_op = 0x08,
- lwux_op = 0x10,
- lhux_op = 0x14,
- lbx_op = 0x16,
-};
-
-/*
- * Damn ... bitfields depend from byteorder :-(
- */
-#ifdef __MIPSEB__
-struct j_format { /* Jump format */
- unsigned int opcode : 6;
- unsigned int target : 26;
-};
-
-struct i_format { /* Immediate format (addi, lw, ...) */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- signed int simmediate : 16;
-};
-
-struct u_format { /* Unsigned immediate format (ori, xori, ...) */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- unsigned int uimmediate : 16;
-};
-
-struct c_format { /* Cache (>= R6000) format */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int c_op : 3;
- unsigned int cache : 2;
- unsigned int simmediate : 16;
-};
-
-struct r_format { /* Register format */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- unsigned int rd : 5;
- unsigned int re : 5;
- unsigned int func : 6;
-};
-
-struct p_format { /* Performance counter format (R10000) */
- unsigned int opcode : 6;
- unsigned int rs : 5;
- unsigned int rt : 5;
- unsigned int rd : 5;
- unsigned int re : 5;
- unsigned int func : 6;
-};
-
-struct f_format { /* FPU register format */
- unsigned int opcode : 6;
- unsigned int : 1;
- unsigned int fmt : 4;
- unsigned int rt : 5;
- unsigned int rd : 5;
- unsigned int re : 5;
- unsigned int func : 6;
-};
-
-struct ma_format { /* FPU multiply and add format (MIPS IV) */
- unsigned int opcode : 6;
- unsigned int fr : 5;
- unsigned int ft : 5;
- unsigned int fs : 5;
- unsigned int fd : 5;
- unsigned int func : 4;
- unsigned int fmt : 2;
-};
-
-struct b_format { /* BREAK and SYSCALL */
- unsigned int opcode:6;
- unsigned int code:20;
- unsigned int func:6;
-};
-
-#elif defined(__MIPSEL__)
-
-struct j_format { /* Jump format */
- unsigned int target : 26;
- unsigned int opcode : 6;
-};
-
-struct i_format { /* Immediate format */
- signed int simmediate : 16;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct u_format { /* Unsigned immediate format */
- unsigned int uimmediate : 16;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct c_format { /* Cache (>= R6000) format */
- unsigned int simmediate : 16;
- unsigned int cache : 2;
- unsigned int c_op : 3;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct r_format { /* Register format */
- unsigned int func : 6;
- unsigned int re : 5;
- unsigned int rd : 5;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct p_format { /* Performance counter format (R10000) */
- unsigned int func : 6;
- unsigned int re : 5;
- unsigned int rd : 5;
- unsigned int rt : 5;
- unsigned int rs : 5;
- unsigned int opcode : 6;
-};
-
-struct f_format { /* FPU register format */
- unsigned int func : 6;
- unsigned int re : 5;
- unsigned int rd : 5;
- unsigned int rt : 5;
- unsigned int fmt : 4;
- unsigned int : 1;
- unsigned int opcode : 6;
-};
-
-struct ma_format { /* FPU multiply and add format (MIPS IV) */
- unsigned int fmt : 2;
- unsigned int func : 4;
- unsigned int fd : 5;
- unsigned int fs : 5;
- unsigned int ft : 5;
- unsigned int fr : 5;
- unsigned int opcode : 6;
-};
-
-struct b_format { /* BREAK and SYSCALL */
- unsigned int func:6;
- unsigned int code:20;
- unsigned int opcode:6;
-};
-
-#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
-#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
-#endif
-
-union mips_instruction {
- unsigned int word;
- unsigned short halfword[2];
- unsigned char byte[4];
- struct j_format j_format;
- struct i_format i_format;
- struct u_format u_format;
- struct c_format c_format;
- struct r_format r_format;
- struct p_format p_format;
- struct f_format f_format;
- struct ma_format ma_format;
- struct b_format b_format;
-};
+#include <uapi/asm/inst.h>
/* HACHACHAHCAHC ... */
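
[Editor's note, not part of the patch: the enums and bitfield structs deleted above are moved, not dropped; they are now provided through <uapi/asm/inst.h>. A minimal sketch of how they are typically used to decode an instruction word, illustrative only:]

	#include <asm/inst.h>   /* pulls in uapi/asm/inst.h after this change */

	/* Return non-zero if the 32-bit word encodes a JAL instruction. */
	static int insn_is_jal(unsigned int word)
	{
		union mips_instruction insn;

		insn.word = word;
		return insn.j_format.opcode == jal_op;
	}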
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index ff2e0345e01..1be13727323 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -7,7 +7,7 @@
* Copyright (C) 1994 - 2000, 06 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
+ * Author: Maciej W. Rozycki <macro@mips.com>
*/
#ifndef _ASM_IO_H
#define _ASM_IO_H
@@ -253,9 +253,9 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
__ioremap_mode((offset), (size), _CACHE_UNCACHED)
/*
- * ioremap_cachable - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
+ * ioremap_cachable - map bus memory into CPU space
+ * @offset: bus address of the memory
+ * @size: size of the resource to map
*
* ioremap_nocache performs a platform specific sequence of operations to
* make bus memory CPU accessible via the readb/readw/readl/writeb/
@@ -264,14 +264,14 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
* address.
*
* This version of ioremap ensures that the memory is marked cachable by
- * the CPU. Also enables full write-combining. Useful for some
+ * the CPU. Also enables full write-combining. Useful for some
* memory-like regions on I/O busses.
*/
#define ioremap_cachable(offset, size) \
__ioremap_mode((offset), (size), _page_cachable_default)
/*
- * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
+ * These two are MIPS specific ioremap variant. ioremap_cacheable_cow
* requests a cachable mapping, ioremap_uncached_accelerated requests a
* mapping using the uncached accelerated mode which isn't supported on
* all processors.
@@ -298,7 +298,7 @@ static inline void iounmap(const volatile void __iomem *addr)
}
#ifdef CONFIG_CPU_CAVIUM_OCTEON
-#define war_octeon_io_reorder_wmb() wmb()
+#define war_octeon_io_reorder_wmb() wmb()
#else
#define war_octeon_io_reorder_wmb() do { } while (0)
#endif
@@ -317,7 +317,7 @@ static inline void pfx##write##bwlq(type val, \
\
__val = pfx##ioswab##bwlq(__mem, val); \
\
- if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
*__mem = __val; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
@@ -327,9 +327,9 @@ static inline void pfx##write##bwlq(type val, \
local_irq_save(__flags); \
__asm__ __volatile__( \
".set mips3" "\t\t# __writeq""\n\t" \
- "dsll32 %L0, %L0, 0" "\n\t" \
- "dsrl32 %L0, %L0, 0" "\n\t" \
- "dsll32 %M0, %M0, 0" "\n\t" \
+ "dsll32 %L0, %L0, 0" "\n\t" \
+ "dsrl32 %L0, %L0, 0" "\n\t" \
+ "dsll32 %M0, %M0, 0" "\n\t" \
"or %L0, %L0, %M0" "\n\t" \
"sd %L0, %2" "\n\t" \
".set mips0" "\n" \
@@ -348,7 +348,7 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
- if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
+ if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
__val = *__mem; \
else if (cpu_has_64bits) { \
unsigned long __flags; \
@@ -356,9 +356,9 @@ static inline type pfx##read##bwlq(const volatile void __iomem *mem) \
if (irq) \
local_irq_save(__flags); \
__asm__ __volatile__( \
- ".set mips3" "\t\t# __readq" "\n\t" \
+ ".set mips3" "\t\t# __readq" "\n\t" \
"ld %L0, %1" "\n\t" \
- "dsra32 %M0, %L0, 0" "\n\t" \
+ "dsra32 %M0, %L0, 0" "\n\t" \
"sll %L0, %L0, 0" "\n\t" \
".set mips0" "\n" \
: "=r" (__val) \
@@ -586,7 +586,7 @@ extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);
#else /* Sane hardware */
-#define dma_cache_wback_inv(start,size) \
+#define dma_cache_wback_inv(start,size) \
do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size) \
do { (void) (start); (void) (size); } while (0)
diff --git a/arch/mips/include/asm/ip32/crime.h b/arch/mips/include/asm/ip32/crime.h
index 7c36b0e5b1c..16c94a27beb 100644
--- a/arch/mips/include/asm/ip32/crime.h
+++ b/arch/mips/include/asm/ip32/crime.h
@@ -74,7 +74,7 @@ struct sgi_crime {
#define CRIME_RE_IDLE_E_INT BIT(24)
#define CRIME_RE_EMPTY_L_INT BIT(25)
#define CRIME_RE_FULL_L_INT BIT(26)
-#define CRIME_RE_IDLE_L_INT BIT(27)
+#define CRIME_RE_IDLE_L_INT BIT(27)
#define CRIME_SOFT0_INT BIT(28)
#define CRIME_SOFT1_INT BIT(29)
#define CRIME_SOFT2_INT BIT(30)
@@ -118,7 +118,7 @@ struct sgi_crime {
#define CRIME_MEM_REF_COUNTER_MASK 0x3ff /* 10bit */
volatile unsigned long mem_error_stat;
-#define CRIME_MEM_ERROR_STAT_MASK 0x0ff7ffff /* 28-bit register */
+#define CRIME_MEM_ERROR_STAT_MASK 0x0ff7ffff /* 28-bit register */
#define CRIME_MEM_ERROR_MACE_ID 0x0000007f
#define CRIME_MEM_ERROR_MACE_ACCESS 0x00000080
#define CRIME_MEM_ERROR_RE_ID 0x00007f00
@@ -134,8 +134,8 @@ struct sgi_crime {
#define CRIME_MEM_ERROR_MEM_ECC_RD 0x00800000
#define CRIME_MEM_ERROR_MEM_ECC_RMW 0x01000000
#define CRIME_MEM_ERROR_INV 0x0e000000
-#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
-#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_RD 0x02000000
+#define CRIME_MEM_ERROR_INV_MEM_ADDR_WR 0x04000000
#define CRIME_MEM_ERROR_INV_MEM_ADDR_RMW 0x08000000
volatile unsigned long mem_error_addr;
diff --git a/arch/mips/include/asm/ip32/ip32_ints.h b/arch/mips/include/asm/ip32/ip32_ints.h
index 85bc5302bce..72e3368de11 100644
--- a/arch/mips/include/asm/ip32/ip32_ints.h
+++ b/arch/mips/include/asm/ip32/ip32_ints.h
@@ -13,7 +13,7 @@
/*
* This list reflects the assignment of interrupt numbers to
- * interrupting events. Order is fairly irrelevant to handling
+ * interrupting events. Order is fairly irrelevant to handling
* priority. This differs from irix.
*/
diff --git a/arch/mips/include/asm/ip32/mace.h b/arch/mips/include/asm/ip32/mace.h
index c523123df38..253ed7ea80b 100644
--- a/arch/mips/include/asm/ip32/mace.h
+++ b/arch/mips/include/asm/ip32/mace.h
@@ -250,12 +250,12 @@ struct mace_ps2 {
* -> drivers/i2c/algos/i2c-algo-sgi.c */
struct mace_i2c {
volatile unsigned long config;
-#define MACEI2C_RESET BIT(0)
-#define MACEI2C_FAST BIT(1)
-#define MACEI2C_DATA_OVERRIDE BIT(2)
-#define MACEI2C_CLOCK_OVERRIDE BIT(3)
-#define MACEI2C_DATA_STATUS BIT(4)
-#define MACEI2C_CLOCK_STATUS BIT(5)
+#define MACEI2C_RESET BIT(0)
+#define MACEI2C_FAST BIT(1)
+#define MACEI2C_DATA_OVERRIDE BIT(2)
+#define MACEI2C_CLOCK_OVERRIDE BIT(3)
+#define MACEI2C_DATA_STATUS BIT(4)
+#define MACEI2C_CLOCK_STATUS BIT(5)
volatile unsigned long control;
volatile unsigned long data;
};
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 78dbb8a86da..7bc2cdb3505 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -32,7 +32,7 @@ struct irqaction;
extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
- unsigned long hwmask);
+ unsigned long hwmask);
static inline void smtc_im_ack_irq(unsigned int irq)
{
@@ -60,7 +60,7 @@ extern void smtc_forward_irq(struct irq_data *d);
* if option is enabled.
*
* Up through Linux 2.6.22 (at least) cpumask operations are very
- * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
+ * inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
* used a "fast path" per-IRQ-descriptor cache of affinity information
* to reduce latency. As there is a project afoot to optimize the
* cpumask implementations, this version is optimistically assuming
@@ -133,7 +133,7 @@ extern void free_irqno(unsigned int irq);
/*
* Before R2 the timer and performance counter interrupts were both fixed to
- * IE7. Since R2 their number has to be read from the c0_intctl register.
+ * IE7. Since R2 their number has to be read from the c0_intctl register.
*/
#define CP0_LEGACY_COMPARE_IRQ 7
#define CP0_LEGACY_PERFCNT_IRQ 7
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
index ef6a07cddb2..3f11fdb3ed8 100644
--- a/arch/mips/include/asm/irq_cpu.h
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -17,4 +17,10 @@ extern void mips_cpu_irq_init(void);
extern void rm7k_cpu_irq_init(void);
extern void rm9k_cpu_irq_init(void);
+#ifdef CONFIG_IRQ_DOMAIN
+struct device_node;
+extern int mips_cpu_intc_init(struct device_node *of_node,
+ struct device_node *parent);
+#endif
+
#endif /* _ASM_IRQ_CPU_H */
diff --git a/arch/mips/include/asm/isadep.h b/arch/mips/include/asm/isadep.h
index 24c6cda7937..b4af6eb24ab 100644
--- a/arch/mips/include/asm/isadep.h
+++ b/arch/mips/include/asm/isadep.h
@@ -18,7 +18,7 @@
* kernel or user mode? (CP0_STATUS)
*/
#define KU_MASK 0x08
-#define KU_USER 0x08
+#define KU_USER 0x08
#define KU_KERN 0x00
#else
@@ -26,7 +26,7 @@
* kernel or user mode?
*/
#define KU_MASK 0x18
-#define KU_USER 0x10
+#define KU_USER 0x10
#define KU_KERN 0x00
#endif
diff --git a/arch/mips/include/asm/jazz.h b/arch/mips/include/asm/jazz.h
index 83f449dec95..a61970d01a8 100644
--- a/arch/mips/include/asm/jazz.h
+++ b/arch/mips/include/asm/jazz.h
@@ -16,7 +16,7 @@
* instead of 0xe0000000.
*/
-#define JAZZ_LOCAL_IO_SPACE 0xe0000000
+#define JAZZ_LOCAL_IO_SPACE 0xe0000000
/*
* Revision numbers in PICA_ASIC_REVISION
@@ -25,24 +25,24 @@
* 0xf0000001 - Rev2
* 0xf0000002 - Rev3
*/
-#define PICA_ASIC_REVISION 0xe0000008
+#define PICA_ASIC_REVISION 0xe0000008
/*
* The segments of the seven segment LED are mapped
* to the control bits as follows:
*
- * (7)
- * ---------
- * | |
- * (2) | | (6)
- * | (1) |
- * ---------
- * | |
- * (3) | | (5)
- * | (4) |
- * --------- . (0)
+ * (7)
+ * ---------
+ * | |
+ * (2) | | (6)
+ * | (1) |
+ * ---------
+ * | |
+ * (3) | | (5)
+ * | (4) |
+ * --------- . (0)
*/
-#define PICA_LED 0xe000f000
+#define PICA_LED 0xe000f000
/*
* Some characters for the LED control registers
@@ -51,24 +51,24 @@
* control each of the seven segments and the dot independently.
* It's only a toy, anyway...
*/
-#define LED_DOT 0x01
-#define LED_SPACE 0x00
-#define LED_0 0xfc
-#define LED_1 0x60
-#define LED_2 0xda
-#define LED_3 0xf2
-#define LED_4 0x66
-#define LED_5 0xb6
-#define LED_6 0xbe
-#define LED_7 0xe0
-#define LED_8 0xfe
-#define LED_9 0xf6
-#define LED_A 0xee
-#define LED_b 0x3e
-#define LED_C 0x9c
-#define LED_d 0x7a
-#define LED_E 0x9e
-#define LED_F 0x8e
+#define LED_DOT 0x01
+#define LED_SPACE 0x00
+#define LED_0 0xfc
+#define LED_1 0x60
+#define LED_2 0xda
+#define LED_3 0xf2
+#define LED_4 0x66
+#define LED_5 0xb6
+#define LED_6 0xbe
+#define LED_7 0xe0
+#define LED_8 0xfe
+#define LED_9 0xf6
+#define LED_A 0xee
+#define LED_b 0x3e
+#define LED_C 0x9c
+#define LED_d 0x7a
+#define LED_E 0x9e
+#define LED_F 0x8e
#ifndef __ASSEMBLY__
@@ -96,9 +96,9 @@ static __inline__ void pica_set_led(unsigned int bits)
* This address is just a guess and seems to differ from
* other mips machines such as RC3xxx...
*/
-#define JAZZ_KEYBOARD_ADDRESS 0xe0005000
-#define JAZZ_KEYBOARD_DATA 0xe0005000
-#define JAZZ_KEYBOARD_COMMAND 0xe0005001
+#define JAZZ_KEYBOARD_ADDRESS 0xe0005000
+#define JAZZ_KEYBOARD_DATA 0xe0005000
+#define JAZZ_KEYBOARD_COMMAND 0xe0005001
#ifndef __ASSEMBLY__
@@ -119,28 +119,28 @@ typedef struct {
/*
* For now. Needs to be changed for RC3xxx support. See below.
*/
-#define keyboard_hardware jazz_keyboard_hardware
+#define keyboard_hardware jazz_keyboard_hardware
#endif /* !__ASSEMBLY__ */
/*
* i8042 keyboard controller for most other Mips machines.
*/
-#define MIPS_KEYBOARD_ADDRESS 0xb9005000
-#define MIPS_KEYBOARD_DATA 0xb9005003
-#define MIPS_KEYBOARD_COMMAND 0xb9005007
+#define MIPS_KEYBOARD_ADDRESS 0xb9005000
+#define MIPS_KEYBOARD_DATA 0xb9005003
+#define MIPS_KEYBOARD_COMMAND 0xb9005007
/*
* Serial and parallel ports (WD 16C552) on the Mips JAZZ
*/
-#define JAZZ_SERIAL1_BASE (unsigned int)0xe0006000
-#define JAZZ_SERIAL2_BASE (unsigned int)0xe0007000
-#define JAZZ_PARALLEL_BASE (unsigned int)0xe0008000
+#define JAZZ_SERIAL1_BASE (unsigned int)0xe0006000
+#define JAZZ_SERIAL2_BASE (unsigned int)0xe0007000
+#define JAZZ_PARALLEL_BASE (unsigned int)0xe0008000
/*
* Dummy Device Address. Used in jazzdma.c
*/
-#define JAZZ_DUMMY_DEVICE 0xe000d000
+#define JAZZ_DUMMY_DEVICE 0xe000d000
/*
* JAZZ timer registers and interrupt no.
@@ -148,8 +148,8 @@ typedef struct {
* cpu level 6, but to keep compatibility with PC stuff
* it is remapped to vector 0. See arch/mips/kernel/entry.S.
*/
-#define JAZZ_TIMER_INTERVAL 0xe0000228
-#define JAZZ_TIMER_REGISTER 0xe0000230
+#define JAZZ_TIMER_INTERVAL 0xe0000228
+#define JAZZ_TIMER_REGISTER 0xe0000230
/*
* DRAM configuration register
@@ -176,13 +176,13 @@ typedef struct {
#endif
#endif /* !__ASSEMBLY__ */
-#define PICA_DRAM_CONFIG 0xe00fffe0
+#define PICA_DRAM_CONFIG 0xe00fffe0
/*
* JAZZ interrupt control registers
*/
-#define JAZZ_IO_IRQ_SOURCE 0xe0010000
-#define JAZZ_IO_IRQ_ENABLE 0xe0010002
+#define JAZZ_IO_IRQ_SOURCE 0xe0010000
+#define JAZZ_IO_IRQ_ENABLE 0xe0010002
/*
* JAZZ Interrupt Level definitions
@@ -190,20 +190,20 @@ typedef struct {
* This is somewhat broken. For reasons which nobody can remember anymore
* we remap the Jazz interrupts to the usual ISA style interrupt numbers.
*/
-#define JAZZ_IRQ_START 24
-#define JAZZ_IRQ_END (24 + 9)
-#define JAZZ_PARALLEL_IRQ (JAZZ_IRQ_START + 0)
-#define JAZZ_FLOPPY_IRQ (JAZZ_IRQ_START + 1)
-#define JAZZ_SOUND_IRQ (JAZZ_IRQ_START + 2)
-#define JAZZ_VIDEO_IRQ (JAZZ_IRQ_START + 3)
-#define JAZZ_ETHERNET_IRQ (JAZZ_IRQ_START + 4)
-#define JAZZ_SCSI_IRQ (JAZZ_IRQ_START + 5)
-#define JAZZ_KEYBOARD_IRQ (JAZZ_IRQ_START + 6)
-#define JAZZ_MOUSE_IRQ (JAZZ_IRQ_START + 7)
-#define JAZZ_SERIAL1_IRQ (JAZZ_IRQ_START + 8)
-#define JAZZ_SERIAL2_IRQ (JAZZ_IRQ_START + 9)
-
-#define JAZZ_TIMER_IRQ (MIPS_CPU_IRQ_BASE+6)
+#define JAZZ_IRQ_START 24
+#define JAZZ_IRQ_END (24 + 9)
+#define JAZZ_PARALLEL_IRQ (JAZZ_IRQ_START + 0)
+#define JAZZ_FLOPPY_IRQ (JAZZ_IRQ_START + 1)
+#define JAZZ_SOUND_IRQ (JAZZ_IRQ_START + 2)
+#define JAZZ_VIDEO_IRQ (JAZZ_IRQ_START + 3)
+#define JAZZ_ETHERNET_IRQ (JAZZ_IRQ_START + 4)
+#define JAZZ_SCSI_IRQ (JAZZ_IRQ_START + 5)
+#define JAZZ_KEYBOARD_IRQ (JAZZ_IRQ_START + 6)
+#define JAZZ_MOUSE_IRQ (JAZZ_IRQ_START + 7)
+#define JAZZ_SERIAL1_IRQ (JAZZ_IRQ_START + 8)
+#define JAZZ_SERIAL2_IRQ (JAZZ_IRQ_START + 9)
+
+#define JAZZ_TIMER_IRQ (MIPS_CPU_IRQ_BASE+6)
/*
@@ -211,46 +211,46 @@ typedef struct {
* Note: Channels 4...7 are not used with respect to the Acer PICA-61
* chipset which does not provide these DMA channels.
*/
-#define JAZZ_SCSI_DMA 0 /* SCSI */
-#define JAZZ_FLOPPY_DMA 1 /* FLOPPY */
-#define JAZZ_AUDIOL_DMA 2 /* AUDIO L */
-#define JAZZ_AUDIOR_DMA 3 /* AUDIO R */
+#define JAZZ_SCSI_DMA 0 /* SCSI */
+#define JAZZ_FLOPPY_DMA 1 /* FLOPPY */
+#define JAZZ_AUDIOL_DMA 2 /* AUDIO L */
+#define JAZZ_AUDIOR_DMA 3 /* AUDIO R */
/*
* JAZZ R4030 MCT_ADR chip (DMA controller)
* Note: Virtual Addresses !
*/
#define JAZZ_R4030_CONFIG 0xE0000000 /* R4030 config register */
-#define JAZZ_R4030_REVISION 0xE0000008 /* same as PICA_ASIC_REVISION */
+#define JAZZ_R4030_REVISION 0xE0000008 /* same as PICA_ASIC_REVISION */
#define JAZZ_R4030_INV_ADDR 0xE0000010 /* Invalid Address register */
-#define JAZZ_R4030_TRSTBL_BASE 0xE0000018 /* Translation Table Base */
-#define JAZZ_R4030_TRSTBL_LIM 0xE0000020 /* Translation Table Limit */
-#define JAZZ_R4030_TRSTBL_INV 0xE0000028 /* Translation Table Invalidate */
+#define JAZZ_R4030_TRSTBL_BASE 0xE0000018 /* Translation Table Base */
+#define JAZZ_R4030_TRSTBL_LIM 0xE0000020 /* Translation Table Limit */
+#define JAZZ_R4030_TRSTBL_INV 0xE0000028 /* Translation Table Invalidate */
-#define JAZZ_R4030_CACHE_MTNC 0xE0000030 /* Cache Maintenance */
-#define JAZZ_R4030_R_FAIL_ADDR 0xE0000038 /* Remote Failed Address */
-#define JAZZ_R4030_M_FAIL_ADDR 0xE0000040 /* Memory Failed Address */
+#define JAZZ_R4030_CACHE_MTNC 0xE0000030 /* Cache Maintenance */
+#define JAZZ_R4030_R_FAIL_ADDR 0xE0000038 /* Remote Failed Address */
+#define JAZZ_R4030_M_FAIL_ADDR 0xE0000040 /* Memory Failed Address */
-#define JAZZ_R4030_CACHE_PTAG 0xE0000048 /* I/O Cache Physical Tag */
-#define JAZZ_R4030_CACHE_LTAG 0xE0000050 /* I/O Cache Logical Tag */
-#define JAZZ_R4030_CACHE_BMASK 0xE0000058 /* I/O Cache Byte Mask */
-#define JAZZ_R4030_CACHE_BWIN 0xE0000060 /* I/O Cache Buffer Window */
+#define JAZZ_R4030_CACHE_PTAG 0xE0000048 /* I/O Cache Physical Tag */
+#define JAZZ_R4030_CACHE_LTAG 0xE0000050 /* I/O Cache Logical Tag */
+#define JAZZ_R4030_CACHE_BMASK 0xE0000058 /* I/O Cache Byte Mask */
+#define JAZZ_R4030_CACHE_BWIN 0xE0000060 /* I/O Cache Buffer Window */
/*
* Remote Speed Registers.
*
- * 0: free, 1: Ethernet, 2: SCSI, 3: Floppy,
- * 4: RTC, 5: Kb./Mouse 6: serial 1, 7: serial 2,
- * 8: parallel, 9: NVRAM, 10: CPU, 11: PROM,
+ * 0: free, 1: Ethernet, 2: SCSI, 3: Floppy,
+ * 4: RTC, 5: Kb./Mouse 6: serial 1, 7: serial 2,
+ * 8: parallel, 9: NVRAM, 10: CPU, 11: PROM,
* 12: reserved, 13: free, 14: 7seg LED, 15: ???
*/
#define JAZZ_R4030_REM_SPEED 0xE0000070 /* 16 Remote Speed Registers */
/* 0xE0000070,78,80... 0xE00000E8 */
-#define JAZZ_R4030_IRQ_ENABLE 0xE00000E8 /* Internal Interrupt Enable */
-#define JAZZ_R4030_INVAL_ADDR 0xE0000010 /* Invalid address Register */
-#define JAZZ_R4030_IRQ_SOURCE 0xE0000200 /* Interrupt Source Register */
-#define JAZZ_R4030_I386_ERROR 0xE0000208 /* i386/EISA Bus Error */
+#define JAZZ_R4030_IRQ_ENABLE 0xE00000E8 /* Internal Interrupt Enable */
+#define JAZZ_R4030_INVAL_ADDR 0xE0000010 /* Invalid address Register */
+#define JAZZ_R4030_IRQ_SOURCE 0xE0000200 /* Interrupt Source Register */
+#define JAZZ_R4030_I386_ERROR 0xE0000208 /* i386/EISA Bus Error */
/*
* Virtual (E)ISA controller address
diff --git a/arch/mips/include/asm/jazzdma.h b/arch/mips/include/asm/jazzdma.h
index 8bb37bba68f..2cefc3c4724 100644
--- a/arch/mips/include/asm/jazzdma.h
+++ b/arch/mips/include/asm/jazzdma.h
@@ -10,7 +10,7 @@
extern unsigned long vdma_alloc(unsigned long paddr, unsigned long size);
extern int vdma_free(unsigned long laddr);
extern int vdma_remap(unsigned long laddr, unsigned long paddr,
- unsigned long size);
+ unsigned long size);
extern unsigned long vdma_phys2log(unsigned long paddr);
extern unsigned long vdma_log2phys(unsigned long laddr);
extern void vdma_stats(void); /* for debugging only */
@@ -35,14 +35,14 @@ extern int vdma_get_enable(int channel);
* Macros to get page no. and offset of a given address
* Note that VDMA_PAGE() works for physical addresses only
*/
-#define VDMA_PAGE(a) ((unsigned int)(a) >> 12)
-#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))
+#define VDMA_PAGE(a) ((unsigned int)(a) >> 12)
+#define VDMA_OFFSET(a) ((unsigned int)(a) & (VDMA_PAGESIZE-1))
/*
* error code returned by vdma_alloc()
* (See also arch/mips/kernel/jazzdma.c)
*/
-#define VDMA_ERROR 0xffffffff
+#define VDMA_ERROR 0xffffffff
/*
* VDMA pagetable entry description
@@ -59,37 +59,37 @@ typedef volatile struct VDMA_PGTBL_ENTRY {
*/
#define JAZZ_R4030_CHNL_MODE 0xE0000100 /* 8 DMA Channel Mode Registers, */
/* 0xE0000100,120,140... */
-#define JAZZ_R4030_CHNL_ENABLE 0xE0000108 /* 8 DMA Channel Enable Regs, */
+#define JAZZ_R4030_CHNL_ENABLE 0xE0000108 /* 8 DMA Channel Enable Regs, */
/* 0xE0000108,128,148... */
-#define JAZZ_R4030_CHNL_COUNT 0xE0000110 /* 8 DMA Channel Byte Cnt Regs, */
+#define JAZZ_R4030_CHNL_COUNT 0xE0000110 /* 8 DMA Channel Byte Cnt Regs, */
/* 0xE0000110,130,150... */
#define JAZZ_R4030_CHNL_ADDR 0xE0000118 /* 8 DMA Channel Address Regs, */
/* 0xE0000118,138,158... */
/* channel enable register bits */
-#define R4030_CHNL_ENABLE (1<<0)
-#define R4030_CHNL_WRITE (1<<1)
-#define R4030_TC_INTR (1<<8)
-#define R4030_MEM_INTR (1<<9)
-#define R4030_ADDR_INTR (1<<10)
+#define R4030_CHNL_ENABLE (1<<0)
+#define R4030_CHNL_WRITE (1<<1)
+#define R4030_TC_INTR (1<<8)
+#define R4030_MEM_INTR (1<<9)
+#define R4030_ADDR_INTR (1<<10)
/*
* Channel mode register bits
*/
-#define R4030_MODE_ATIME_40 (0) /* device access time on remote bus */
-#define R4030_MODE_ATIME_80 (1)
-#define R4030_MODE_ATIME_120 (2)
-#define R4030_MODE_ATIME_160 (3)
-#define R4030_MODE_ATIME_200 (4)
-#define R4030_MODE_ATIME_240 (5)
-#define R4030_MODE_ATIME_280 (6)
-#define R4030_MODE_ATIME_320 (7)
-#define R4030_MODE_WIDTH_8 (1<<3) /* device data bus width */
-#define R4030_MODE_WIDTH_16 (2<<3)
-#define R4030_MODE_WIDTH_32 (3<<3)
-#define R4030_MODE_INTR_EN (1<<5)
-#define R4030_MODE_BURST (1<<6) /* Rev. 2 only */
-#define R4030_MODE_FAST_ACK (1<<7) /* Rev. 2 only */
+#define R4030_MODE_ATIME_40 (0) /* device access time on remote bus */
+#define R4030_MODE_ATIME_80 (1)
+#define R4030_MODE_ATIME_120 (2)
+#define R4030_MODE_ATIME_160 (3)
+#define R4030_MODE_ATIME_200 (4)
+#define R4030_MODE_ATIME_240 (5)
+#define R4030_MODE_ATIME_280 (6)
+#define R4030_MODE_ATIME_320 (7)
+#define R4030_MODE_WIDTH_8 (1<<3) /* device data bus width */
+#define R4030_MODE_WIDTH_16 (2<<3)
+#define R4030_MODE_WIDTH_32 (3<<3)
+#define R4030_MODE_INTR_EN (1<<5)
+#define R4030_MODE_BURST (1<<6) /* Rev. 2 only */
+#define R4030_MODE_FAST_ACK (1<<7) /* Rev. 2 only */
#endif /* _ASM_JAZZDMA_H */
diff --git a/arch/mips/include/asm/kmap_types.h b/arch/mips/include/asm/kmap_types.h
index 58e91ed0388..c1909dcada3 100644
--- a/arch/mips/include/asm/kmap_types.h
+++ b/arch/mips/include/asm/kmap_types.h
@@ -2,7 +2,7 @@
#define _ASM_KMAP_TYPES_H
#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
+#define __WITH_KM_FENCE
#endif
#include <asm-generic/kmap_types.h>
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index 1fbbca01e68..daba1f9a4f7 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -29,7 +29,7 @@
#include <asm/kdebug.h>
#include <asm/inst.h>
-#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define __ARCH_WANT_KPROBES_INSN_SLOT
struct kprobe;
struct pt_regs;
diff --git a/arch/mips/include/asm/lasat/eeprom.h b/arch/mips/include/asm/lasat/eeprom.h
index 3dac203697f..d918b822e37 100644
--- a/arch/mips/include/asm/lasat/eeprom.h
+++ b/arch/mips/include/asm/lasat/eeprom.h
@@ -1,12 +1,12 @@
#include <asm/addrspace.h>
/* lasat 100 */
-#define AT93C_REG_100 KSEG1ADDR(0x1c810000)
-#define AT93C_RDATA_REG_100 AT93C_REG_100
-#define AT93C_RDATA_SHIFT_100 4
-#define AT93C_WDATA_SHIFT_100 4
-#define AT93C_CS_M_100 (1 << 5)
-#define AT93C_CLK_M_100 (1 << 3)
+#define AT93C_REG_100 KSEG1ADDR(0x1c810000)
+#define AT93C_RDATA_REG_100 AT93C_REG_100
+#define AT93C_RDATA_SHIFT_100 4
+#define AT93C_WDATA_SHIFT_100 4
+#define AT93C_CS_M_100 (1 << 5)
+#define AT93C_CLK_M_100 (1 << 3)
/* lasat 200 */
#define AT93C_REG_200 KSEG1ADDR(0x11000000)
diff --git a/arch/mips/include/asm/lasat/lasat.h b/arch/mips/include/asm/lasat/lasat.h
index e8ff70f80e1..9e32b4da99e 100644
--- a/arch/mips/include/asm/lasat/lasat.h
+++ b/arch/mips/include/asm/lasat/lasat.h
@@ -100,7 +100,7 @@ struct lasat_eeprom_struct_pre7 {
/* Configuration descriptor encoding - see the doc for details */
-#define LASAT_W0_DSCTYPE(v) (((v)) & 0xf)
+#define LASAT_W0_DSCTYPE(v) (((v)) & 0xf)
#define LASAT_W0_BMID(v) (((v) >> 0x04) & 0xf)
#define LASAT_W0_CPUTYPE(v) (((v) >> 0x08) & 0xf)
#define LASAT_W0_BUSSPEED(v) (((v) >> 0x0c) & 0xf)
@@ -109,7 +109,7 @@ struct lasat_eeprom_struct_pre7 {
#define LASAT_W0_SDRAMBANKS(v) (((v) >> 0x18) & 0xf)
#define LASAT_W0_L2CACHE(v) (((v) >> 0x1c) & 0xf)
-#define LASAT_W1_EDHAC(v) (((v)) & 0xf)
+#define LASAT_W1_EDHAC(v) (((v)) & 0xf)
#define LASAT_W1_HIFN(v) (((v) >> 0x04) & 0x1)
#define LASAT_W1_ISDN(v) (((v) >> 0x05) & 0x1)
#define LASAT_W1_IDE(v) (((v) >> 0x06) & 0x1)
@@ -239,7 +239,7 @@ static inline void lasat_ndelay(unsigned int ns)
__delay(ns / lasat_ndelay_divider);
}
-#define IS_LASAT_200() (current_cpu_data.cputype == CPU_R5000)
+#define IS_LASAT_200() (current_cpu_data.cputype == CPU_R5000)
#endif /* !defined (_LANGUAGE_ASSEMBLY) */
@@ -247,11 +247,11 @@ static inline void lasat_ndelay(unsigned int ns)
#define LASAT_SERVICEMODE_MAGIC_2 0xfedeabba
/* Lasat 100 boards */
-#define LASAT_GT_BASE (KSEG1ADDR(0x14000000))
+#define LASAT_GT_BASE (KSEG1ADDR(0x14000000))
/* Lasat 200 boards */
-#define Vrc5074_PHYS_BASE 0x1fa00000
-#define Vrc5074_BASE (KSEG1ADDR(Vrc5074_PHYS_BASE))
-#define PCI_WINDOW1 0x1a000000
+#define Vrc5074_PHYS_BASE 0x1fa00000
+#define Vrc5074_BASE (KSEG1ADDR(Vrc5074_PHYS_BASE))
+#define PCI_WINDOW1 0x1a000000
#endif /* _LASAT_H */
diff --git a/arch/mips/include/asm/lasat/serial.h b/arch/mips/include/asm/lasat/serial.h
index 1c37d70579b..a2f6c7a9cfe 100644
--- a/arch/mips/include/asm/lasat/serial.h
+++ b/arch/mips/include/asm/lasat/serial.h
@@ -1,7 +1,7 @@
#include <asm/lasat/lasat.h>
/* Lasat 100 boards serial configuration */
-#define LASAT_BASE_BAUD_100 (7372800 / 16)
+#define LASAT_BASE_BAUD_100 (7372800 / 16)
#define LASAT_UART_REGS_BASE_100 0x1c8b0000
#define LASAT_UART_REGS_SHIFT_100 2
#define LASATINT_UART_100 16
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 94fde8d0fac..d44622cd74b 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -15,10 +15,10 @@ typedef struct
#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
#define local_read(l) atomic_long_read(&(l)->a)
-#define local_set(l, i) atomic_long_set(&(l)->a, (i))
+#define local_set(l, i) atomic_long_set(&(l)->a, (i))
-#define local_add(i, l) atomic_long_add((i), (&(l)->a))
-#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
+#define local_add(i, l) atomic_long_add((i), (&(l)->a))
+#define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
#define local_inc(l) atomic_long_inc(&(l)->a)
#define local_dec(l) atomic_long_dec(&(l)->a)
diff --git a/arch/mips/include/asm/m48t37.h b/arch/mips/include/asm/m48t37.h
index cabf86264f3..e6eaf5339e4 100644
--- a/arch/mips/include/asm/m48t37.h
+++ b/arch/mips/include/asm/m48t37.h
@@ -9,7 +9,7 @@
extern spinlock_t rtc_lock;
struct m48t37_rtc {
- volatile u8 pad[0x7ff0]; /* NVRAM */
+ volatile u8 pad[0x7ff0]; /* NVRAM */
volatile u8 flags;
volatile u8 century;
volatile u8 alarm_sec;
diff --git a/arch/mips/include/asm/mach-ar7/ar7.h b/arch/mips/include/asm/mach-ar7/ar7.h
index 07d3fadb244..a47ea0c8524 100644
--- a/arch/mips/include/asm/mach-ar7/ar7.h
+++ b/arch/mips/include/asm/mach-ar7/ar7.h
@@ -40,9 +40,9 @@
#define AR7_REGS_USB (AR7_REGS_BASE + 0x1200)
#define AR7_REGS_RESET (AR7_REGS_BASE + 0x1600)
#define AR7_REGS_PINSEL (AR7_REGS_BASE + 0x160C)
-#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
+#define AR7_REGS_VLYNQ0 (AR7_REGS_BASE + 0x1800)
#define AR7_REGS_DCL (AR7_REGS_BASE + 0x1a00)
-#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
+#define AR7_REGS_VLYNQ1 (AR7_REGS_BASE + 0x1c00)
#define AR7_REGS_MDIO (AR7_REGS_BASE + 0x1e00)
#define AR7_REGS_IRQ (AR7_REGS_BASE + 0x2400)
#define AR7_REGS_MAC1 (AR7_REGS_BASE + 0x2800)
@@ -52,7 +52,7 @@
#define UR8_REGS_UART1 (AR7_REGS_BASE + 0x0f00)
/* Titan registers */
-#define TITAN_REGS_ESWITCH_BASE (0x08640000)
+#define TITAN_REGS_ESWITCH_BASE (0x08640000)
#define TITAN_REGS_MAC0 (TITAN_REGS_ESWITCH_BASE)
#define TITAN_REGS_MAC1 (TITAN_REGS_ESWITCH_BASE + 0x0800)
#define TITAN_REGS_MDIO (TITAN_REGS_ESWITCH_BASE + 0x02000)
@@ -72,9 +72,9 @@
/* GPIO control registers */
#define AR7_GPIO_INPUT 0x0
-#define AR7_GPIO_OUTPUT 0x4
+#define AR7_GPIO_OUTPUT 0x4
#define AR7_GPIO_DIR 0x8
-#define AR7_GPIO_ENABLE 0xc
+#define AR7_GPIO_ENABLE 0xc
#define TITAN_GPIO_INPUT_0 0x0
#define TITAN_GPIO_INPUT_1 0x4
#define TITAN_GPIO_OUTPUT_0 0x8
@@ -88,10 +88,10 @@
#define AR7_CHIP_7200 0x2b
#define AR7_CHIP_7300 0x05
#define AR7_CHIP_TITAN 0x07
-#define TITAN_CHIP_1050 0x0f
-#define TITAN_CHIP_1055 0x0e
-#define TITAN_CHIP_1056 0x0d
-#define TITAN_CHIP_1060 0x07
+#define TITAN_CHIP_1050 0x0f
+#define TITAN_CHIP_1055 0x0e
+#define TITAN_CHIP_1056 0x0d
+#define TITAN_CHIP_1060 0x07
/* Interrupts */
#define AR7_IRQ_UART0 15
diff --git a/arch/mips/include/asm/mach-ar7/irq.h b/arch/mips/include/asm/mach-ar7/irq.h
index 39e9757e3d9..7ad10e379e2 100644
--- a/arch/mips/include/asm/mach-ar7/irq.h
+++ b/arch/mips/include/asm/mach-ar7/irq.h
@@ -9,7 +9,7 @@
#ifndef __ASM_AR7_IRQ_H
#define __ASM_AR7_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index a5e0f17ea77..b86a1253a5b 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -34,18 +34,44 @@
#define AR71XX_UART_SIZE 0x100
#define AR71XX_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000)
#define AR71XX_USB_CTRL_SIZE 0x100
-#define AR71XX_GPIO_BASE (AR71XX_APB_BASE + 0x00040000)
-#define AR71XX_GPIO_SIZE 0x100
+#define AR71XX_GPIO_BASE (AR71XX_APB_BASE + 0x00040000)
+#define AR71XX_GPIO_SIZE 0x100
#define AR71XX_PLL_BASE (AR71XX_APB_BASE + 0x00050000)
#define AR71XX_PLL_SIZE 0x100
#define AR71XX_RESET_BASE (AR71XX_APB_BASE + 0x00060000)
#define AR71XX_RESET_SIZE 0x100
+#define AR71XX_PCI_MEM_BASE 0x10000000
+#define AR71XX_PCI_MEM_SIZE 0x07000000
+
+#define AR71XX_PCI_WIN0_OFFS 0x10000000
+#define AR71XX_PCI_WIN1_OFFS 0x11000000
+#define AR71XX_PCI_WIN2_OFFS 0x12000000
+#define AR71XX_PCI_WIN3_OFFS 0x13000000
+#define AR71XX_PCI_WIN4_OFFS 0x14000000
+#define AR71XX_PCI_WIN5_OFFS 0x15000000
+#define AR71XX_PCI_WIN6_OFFS 0x16000000
+#define AR71XX_PCI_WIN7_OFFS 0x07000000
+
+#define AR71XX_PCI_CFG_BASE \
+ (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
+#define AR71XX_PCI_CFG_SIZE 0x100
+
#define AR7240_USB_CTRL_BASE (AR71XX_APB_BASE + 0x00030000)
#define AR7240_USB_CTRL_SIZE 0x100
#define AR7240_OHCI_BASE 0x1b000000
#define AR7240_OHCI_SIZE 0x1000
+#define AR724X_PCI_MEM_BASE 0x10000000
+#define AR724X_PCI_MEM_SIZE 0x04000000
+
+#define AR724X_PCI_CFG_BASE 0x14000000
+#define AR724X_PCI_CFG_SIZE 0x1000
+#define AR724X_PCI_CRP_BASE (AR71XX_APB_BASE + 0x000c0000)
+#define AR724X_PCI_CRP_SIZE 0x1000
+#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
+#define AR724X_PCI_CTRL_SIZE 0x100
+
#define AR724X_EHCI_BASE 0x1b000000
#define AR724X_EHCI_SIZE 0x1000
@@ -68,6 +94,25 @@
#define AR934X_SRIF_BASE (AR71XX_APB_BASE + 0x00116000)
#define AR934X_SRIF_SIZE 0x1000
+#define QCA955X_PCI_MEM_BASE0 0x10000000
+#define QCA955X_PCI_MEM_BASE1 0x12000000
+#define QCA955X_PCI_MEM_SIZE 0x02000000
+#define QCA955X_PCI_CFG_BASE0 0x14000000
+#define QCA955X_PCI_CFG_BASE1 0x16000000
+#define QCA955X_PCI_CFG_SIZE 0x1000
+#define QCA955X_PCI_CRP_BASE0 (AR71XX_APB_BASE + 0x000c0000)
+#define QCA955X_PCI_CRP_BASE1 (AR71XX_APB_BASE + 0x00250000)
+#define QCA955X_PCI_CRP_SIZE 0x1000
+#define QCA955X_PCI_CTRL_BASE0 (AR71XX_APB_BASE + 0x000f0000)
+#define QCA955X_PCI_CTRL_BASE1 (AR71XX_APB_BASE + 0x00280000)
+#define QCA955X_PCI_CTRL_SIZE 0x100
+
+#define QCA955X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
+#define QCA955X_WMAC_SIZE 0x20000
+#define QCA955X_EHCI0_BASE 0x1b000000
+#define QCA955X_EHCI1_BASE 0x1b400000
+#define QCA955X_EHCI_SIZE 0x1000
+
/*
* DDR_CTRL block
*/
@@ -199,6 +244,41 @@
#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+#define QCA955X_PLL_CPU_CONFIG_REG 0x00
+#define QCA955X_PLL_DDR_CONFIG_REG 0x04
+#define QCA955X_PLL_CLK_CTRL_REG 0x08
+
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
+#define QCA955X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
+#define QCA955X_PLL_CPU_CONFIG_NINT_SHIFT 6
+#define QCA955X_PLL_CPU_CONFIG_NINT_MASK 0x3f
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
+#define QCA955X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
+#define QCA955X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
+
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
+#define QCA955X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
+#define QCA955X_PLL_DDR_CONFIG_NINT_SHIFT 10
+#define QCA955X_PLL_DDR_CONFIG_NINT_MASK 0x3f
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
+#define QCA955X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
+#define QCA955X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
+
+#define QCA955X_PLL_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
+#define QCA955X_PLL_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
+#define QCA955X_PLL_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_SHIFT 5
+#define QCA955X_PLL_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_SHIFT 10
+#define QCA955X_PLL_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_SHIFT 15
+#define QCA955X_PLL_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
+#define QCA955X_PLL_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
+#define QCA955X_PLL_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
+#define QCA955X_PLL_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
+
/*
* USB_CONFIG block
*/
@@ -238,6 +318,10 @@
#define AR934X_RESET_REG_BOOTSTRAP 0xb0
#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
+#define QCA955X_RESET_REG_RESET_MODULE 0x1c
+#define QCA955X_RESET_REG_BOOTSTRAP 0xb0
+#define QCA955X_RESET_REG_EXT_INT_STATUS 0xac
+
#define MISC_INT_ETHSW BIT(12)
#define MISC_INT_TIMER4 BIT(10)
#define MISC_INT_TIMER3 BIT(9)
@@ -312,9 +396,11 @@
#define AR934X_BOOTSTRAP_EJTAG_MODE BIT(5)
#define AR934X_BOOTSTRAP_REF_CLK_40 BIT(4)
#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
-#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
+#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
#define AR934X_BOOTSTRAP_DDR1 BIT(0)
+#define QCA955X_BOOTSTRAP_REF_CLK_40 BIT(4)
+
#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
@@ -333,6 +419,37 @@
AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
AR934X_PCIE_WMAC_INT_PCIE_RC3)
+#define QCA955X_EXT_INT_WMAC_MISC BIT(0)
+#define QCA955X_EXT_INT_WMAC_TX BIT(1)
+#define QCA955X_EXT_INT_WMAC_RXLP BIT(2)
+#define QCA955X_EXT_INT_WMAC_RXHP BIT(3)
+#define QCA955X_EXT_INT_PCIE_RC1 BIT(4)
+#define QCA955X_EXT_INT_PCIE_RC1_INT0 BIT(5)
+#define QCA955X_EXT_INT_PCIE_RC1_INT1 BIT(6)
+#define QCA955X_EXT_INT_PCIE_RC1_INT2 BIT(7)
+#define QCA955X_EXT_INT_PCIE_RC1_INT3 BIT(8)
+#define QCA955X_EXT_INT_PCIE_RC2 BIT(12)
+#define QCA955X_EXT_INT_PCIE_RC2_INT0 BIT(13)
+#define QCA955X_EXT_INT_PCIE_RC2_INT1 BIT(14)
+#define QCA955X_EXT_INT_PCIE_RC2_INT2 BIT(15)
+#define QCA955X_EXT_INT_PCIE_RC2_INT3 BIT(16)
+#define QCA955X_EXT_INT_USB1 BIT(24)
+#define QCA955X_EXT_INT_USB2 BIT(28)
+
+#define QCA955X_EXT_INT_WMAC_ALL \
+ (QCA955X_EXT_INT_WMAC_MISC | QCA955X_EXT_INT_WMAC_TX | \
+ QCA955X_EXT_INT_WMAC_RXLP | QCA955X_EXT_INT_WMAC_RXHP)
+
+#define QCA955X_EXT_INT_PCIE_RC1_ALL \
+ (QCA955X_EXT_INT_PCIE_RC1 | QCA955X_EXT_INT_PCIE_RC1_INT0 | \
+ QCA955X_EXT_INT_PCIE_RC1_INT1 | QCA955X_EXT_INT_PCIE_RC1_INT2 | \
+ QCA955X_EXT_INT_PCIE_RC1_INT3)
+
+#define QCA955X_EXT_INT_PCIE_RC2_ALL \
+ (QCA955X_EXT_INT_PCIE_RC2 | QCA955X_EXT_INT_PCIE_RC2_INT0 | \
+ QCA955X_EXT_INT_PCIE_RC2_INT1 | QCA955X_EXT_INT_PCIE_RC2_INT2 | \
+ QCA955X_EXT_INT_PCIE_RC2_INT3)
+
#define REV_ID_MAJOR_MASK 0xfff0
#define REV_ID_MAJOR_AR71XX 0x00a0
#define REV_ID_MAJOR_AR913X 0x00b0
@@ -344,6 +461,8 @@
#define REV_ID_MAJOR_AR9341 0x0120
#define REV_ID_MAJOR_AR9342 0x1120
#define REV_ID_MAJOR_AR9344 0x2120
+#define REV_ID_MAJOR_QCA9556 0x0130
+#define REV_ID_MAJOR_QCA9558 0x1130
#define AR71XX_REV_ID_MINOR_MASK 0x3
#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -362,7 +481,9 @@
#define AR724X_REV_ID_REVISION_MASK 0x3
-#define AR934X_REV_ID_REVISION_MASK 0xf
+#define AR934X_REV_ID_REVISION_MASK 0xf
+
+#define QCA955X_REV_ID_REVISION_MASK 0xf
/*
* SPI block
@@ -401,12 +522,15 @@
#define AR71XX_GPIO_REG_INT_ENABLE 0x24
#define AR71XX_GPIO_REG_FUNC 0x28
+#define AR934X_GPIO_REG_FUNC 0x6c
+
#define AR71XX_GPIO_COUNT 16
#define AR7240_GPIO_COUNT 18
#define AR7241_GPIO_COUNT 20
#define AR913X_GPIO_COUNT 22
#define AR933X_GPIO_COUNT 30
#define AR934X_GPIO_COUNT 23
+#define QCA955X_GPIO_COUNT 24
/*
* SRIF block
diff --git a/arch/mips/include/asm/mach-ath79/ar933x_uart.h b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
index 52730555937..c2917b39966 100644
--- a/arch/mips/include/asm/mach-ath79/ar933x_uart.h
+++ b/arch/mips/include/asm/mach-ath79/ar933x_uart.h
@@ -26,14 +26,14 @@
#define AR933X_UART_CS_PARITY_S 0
#define AR933X_UART_CS_PARITY_M 0x3
-#define AR933X_UART_CS_PARITY_NONE 0
-#define AR933X_UART_CS_PARITY_ODD 1
-#define AR933X_UART_CS_PARITY_EVEN 2
+#define AR933X_UART_CS_PARITY_NONE 0
+#define AR933X_UART_CS_PARITY_ODD 1
+#define AR933X_UART_CS_PARITY_EVEN 2
#define AR933X_UART_CS_IF_MODE_S 2
#define AR933X_UART_CS_IF_MODE_M 0x3
-#define AR933X_UART_CS_IF_MODE_NONE 0
-#define AR933X_UART_CS_IF_MODE_DTE 1
-#define AR933X_UART_CS_IF_MODE_DCE 2
+#define AR933X_UART_CS_IF_MODE_NONE 0
+#define AR933X_UART_CS_IF_MODE_DTE 1
+#define AR933X_UART_CS_IF_MODE_DCE 2
#define AR933X_UART_CS_FLOW_CTRL_S 4
#define AR933X_UART_CS_FLOW_CTRL_M 0x3
#define AR933X_UART_CS_DMA_EN BIT(6)
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 4f248c3d7b2..1557934aaca 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -32,6 +32,8 @@ enum ath79_soc_type {
ATH79_SOC_AR9341,
ATH79_SOC_AR9342,
ATH79_SOC_AR9344,
+ ATH79_SOC_QCA9556,
+ ATH79_SOC_QCA9558,
};
extern enum ath79_soc_type ath79_soc;
@@ -98,6 +100,21 @@ static inline int soc_is_ar934x(void)
return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
}
+static inline int soc_is_qca9556(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9556;
+}
+
+static inline int soc_is_qca9558(void)
+{
+ return ath79_soc == ATH79_SOC_QCA9558;
+}
+
+static inline int soc_is_qca955x(void)
+{
+ return soc_is_qca9556() || soc_is_qca9558();
+}
+
extern void __iomem *ath79_ddr_base;
extern void __iomem *ath79_pll_base;
extern void __iomem *ath79_reset_base;
diff --git a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
index ea4b66dccf6..ddb947e9221 100644
--- a/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h
@@ -49,7 +49,7 @@
#define cpu_has_64bits 0
#define cpu_has_64bit_zero_reg 0
#define cpu_has_64bit_gp_regs 0
-#define cpu_has_64bit_addresses 0
+#define cpu_has_64bit_addresses 0
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
index 0968f69e201..5c9ca76a7eb 100644
--- a/arch/mips/include/asm/mach-ath79/irq.h
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -10,10 +10,13 @@
#define __ASM_MACH_ATH79_IRQ_H
#define MIPS_CPU_IRQ_BASE 0
-#define NR_IRQS 48
+#define NR_IRQS 51
+
+#define ATH79_CPU_IRQ(_x) (MIPS_CPU_IRQ_BASE + (_x))
#define ATH79_MISC_IRQ_BASE 8
#define ATH79_MISC_IRQ_COUNT 32
+#define ATH79_MISC_IRQ(_x) (ATH79_MISC_IRQ_BASE + (_x))
#define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
#define ATH79_PCI_IRQ_COUNT 6
@@ -23,25 +26,9 @@
#define ATH79_IP2_IRQ_COUNT 2
#define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x))
-#define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2)
-#define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3)
-#define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4)
-#define ATH79_CPU_IRQ_GE1 (MIPS_CPU_IRQ_BASE + 5)
-#define ATH79_CPU_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6)
-#define ATH79_CPU_IRQ_TIMER (MIPS_CPU_IRQ_BASE + 7)
-
-#define ATH79_MISC_IRQ_TIMER (ATH79_MISC_IRQ_BASE + 0)
-#define ATH79_MISC_IRQ_ERROR (ATH79_MISC_IRQ_BASE + 1)
-#define ATH79_MISC_IRQ_GPIO (ATH79_MISC_IRQ_BASE + 2)
-#define ATH79_MISC_IRQ_UART (ATH79_MISC_IRQ_BASE + 3)
-#define ATH79_MISC_IRQ_WDOG (ATH79_MISC_IRQ_BASE + 4)
-#define ATH79_MISC_IRQ_PERFC (ATH79_MISC_IRQ_BASE + 5)
-#define ATH79_MISC_IRQ_OHCI (ATH79_MISC_IRQ_BASE + 6)
-#define ATH79_MISC_IRQ_DMA (ATH79_MISC_IRQ_BASE + 7)
-#define ATH79_MISC_IRQ_TIMER2 (ATH79_MISC_IRQ_BASE + 8)
-#define ATH79_MISC_IRQ_TIMER3 (ATH79_MISC_IRQ_BASE + 9)
-#define ATH79_MISC_IRQ_TIMER4 (ATH79_MISC_IRQ_BASE + 10)
-#define ATH79_MISC_IRQ_ETHSW (ATH79_MISC_IRQ_BASE + 12)
+#define ATH79_IP3_IRQ_BASE (ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT)
+#define ATH79_IP3_IRQ_COUNT 3
+#define ATH79_IP3_IRQ(_x) (ATH79_IP3_IRQ_BASE + (_x))
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
deleted file mode 100644
index 7868f7fa028..00000000000
--- a/arch/mips/include/asm/mach-ath79/pci.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Atheros AR71XX/AR724X PCI support
- *
- * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
- * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __ASM_MACH_ATH79_PCI_H
-#define __ASM_MACH_ATH79_PCI_H
-
-#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
-int ar71xx_pcibios_init(void);
-#else
-static inline int ar71xx_pcibios_init(void) { return 0; }
-#endif
-
-#if defined(CONFIG_PCI_AR724X)
-int ar724x_pcibios_init(int irq);
-#else
-static inline int ar724x_pcibios_init(int irq) { return 0; }
-#endif
-
-#endif /* __ASM_MACH_ATH79_PCI_H */
diff --git a/arch/mips/include/asm/mach-au1x00/au1000.h b/arch/mips/include/asm/mach-au1x00/au1000.h
index 569828d3cca..3e11a468cdf 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000.h
@@ -349,7 +349,7 @@ extern void au1300_vss_block_control(int block, int enable);
#define AU1000_INTC0_INT_LAST (AU1000_INTC0_INT_BASE + 31)
#define AU1000_INTC1_INT_BASE (AU1000_INTC0_INT_LAST + 1)
#define AU1000_INTC1_INT_LAST (AU1000_INTC1_INT_BASE + 31)
-#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
+#define AU1000_MAX_INTR AU1000_INTC1_INT_LAST
/* Au1300-style (GPIC): 1 controller with up to 128 sources */
#define ALCHEMY_GPIC_INT_BASE (MIPS_CPU_IRQ_BASE + 8)
@@ -589,7 +589,7 @@ enum soc_au1550_ints {
AU1550_GPIO14_INT,
AU1550_GPIO15_INT,
AU1550_GPIO200_INT,
- AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
+ AU1550_GPIO201_205_INT, /* Logical or of GPIO201:205 */
AU1550_GPIO16_INT,
AU1550_GPIO17_INT,
AU1550_GPIO20_INT,
@@ -603,7 +603,7 @@ enum soc_au1550_ints {
AU1550_GPIO28_INT,
AU1550_GPIO206_INT,
AU1550_GPIO207_INT,
- AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
+ AU1550_GPIO208_215_INT, /* Logical or of GPIO208:215 */
};
enum soc_au1200_ints {
@@ -636,7 +636,7 @@ enum soc_au1200_ints {
AU1200_GPIO205_INT,
AU1200_GPIO206_INT,
AU1200_GPIO207_INT,
- AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
+ AU1200_GPIO208_215_INT, /* Logical OR of 208:215 */
AU1200_USB_INT,
AU1200_LCD_INT,
AU1200_MAE_BOTH_INT,
@@ -823,7 +823,7 @@ enum soc_au1200_ints {
#define GPIC_GPIO_TO_BIT(gpio) \
(1 << ((gpio) & 0x1f))
-#define GPIC_GPIO_BANKOFF(gpio) \
+#define GPIC_GPIO_BANKOFF(gpio) \
(((gpio) >> 5) * 4)
/* Pin Control bits: who owns the pin, what does it do */
@@ -958,32 +958,32 @@ enum soc_au1200_ints {
#define MEM_STSTAT 0xB4001104
#define MEM_STNAND_CMD 0x0
-#define MEM_STNAND_ADDR 0x4
-#define MEM_STNAND_DATA 0x20
+#define MEM_STNAND_ADDR 0x4
+#define MEM_STNAND_DATA 0x20
/* Programmable Counters 0 and 1 */
#define SYS_BASE 0xB1900000
#define SYS_COUNTER_CNTRL (SYS_BASE + 0x14)
-# define SYS_CNTRL_E1S (1 << 23)
-# define SYS_CNTRL_T1S (1 << 20)
-# define SYS_CNTRL_M21 (1 << 19)
-# define SYS_CNTRL_M11 (1 << 18)
-# define SYS_CNTRL_M01 (1 << 17)
-# define SYS_CNTRL_C1S (1 << 16)
+# define SYS_CNTRL_E1S (1 << 23)
+# define SYS_CNTRL_T1S (1 << 20)
+# define SYS_CNTRL_M21 (1 << 19)
+# define SYS_CNTRL_M11 (1 << 18)
+# define SYS_CNTRL_M01 (1 << 17)
+# define SYS_CNTRL_C1S (1 << 16)
# define SYS_CNTRL_BP (1 << 14)
-# define SYS_CNTRL_EN1 (1 << 13)
-# define SYS_CNTRL_BT1 (1 << 12)
-# define SYS_CNTRL_EN0 (1 << 11)
-# define SYS_CNTRL_BT0 (1 << 10)
+# define SYS_CNTRL_EN1 (1 << 13)
+# define SYS_CNTRL_BT1 (1 << 12)
+# define SYS_CNTRL_EN0 (1 << 11)
+# define SYS_CNTRL_BT0 (1 << 10)
# define SYS_CNTRL_E0 (1 << 8)
-# define SYS_CNTRL_E0S (1 << 7)
-# define SYS_CNTRL_32S (1 << 5)
-# define SYS_CNTRL_T0S (1 << 4)
-# define SYS_CNTRL_M20 (1 << 3)
-# define SYS_CNTRL_M10 (1 << 2)
-# define SYS_CNTRL_M00 (1 << 1)
-# define SYS_CNTRL_C0S (1 << 0)
+# define SYS_CNTRL_E0S (1 << 7)
+# define SYS_CNTRL_32S (1 << 5)
+# define SYS_CNTRL_T0S (1 << 4)
+# define SYS_CNTRL_M20 (1 << 3)
+# define SYS_CNTRL_M10 (1 << 2)
+# define SYS_CNTRL_M00 (1 << 1)
+# define SYS_CNTRL_C0S (1 << 0)
/* Programmable Counter 0 Registers */
#define SYS_TOYTRIM (SYS_BASE + 0)
@@ -1003,33 +1003,33 @@ enum soc_au1200_ints {
/* I2S Controller */
#define I2S_DATA 0xB1000000
-# define I2S_DATA_MASK 0xffffff
+# define I2S_DATA_MASK 0xffffff
#define I2S_CONFIG 0xB1000004
-# define I2S_CONFIG_XU (1 << 25)
-# define I2S_CONFIG_XO (1 << 24)
-# define I2S_CONFIG_RU (1 << 23)
-# define I2S_CONFIG_RO (1 << 22)
-# define I2S_CONFIG_TR (1 << 21)
-# define I2S_CONFIG_TE (1 << 20)
-# define I2S_CONFIG_TF (1 << 19)
-# define I2S_CONFIG_RR (1 << 18)
-# define I2S_CONFIG_RE (1 << 17)
-# define I2S_CONFIG_RF (1 << 16)
-# define I2S_CONFIG_PD (1 << 11)
-# define I2S_CONFIG_LB (1 << 10)
-# define I2S_CONFIG_IC (1 << 9)
+# define I2S_CONFIG_XU (1 << 25)
+# define I2S_CONFIG_XO (1 << 24)
+# define I2S_CONFIG_RU (1 << 23)
+# define I2S_CONFIG_RO (1 << 22)
+# define I2S_CONFIG_TR (1 << 21)
+# define I2S_CONFIG_TE (1 << 20)
+# define I2S_CONFIG_TF (1 << 19)
+# define I2S_CONFIG_RR (1 << 18)
+# define I2S_CONFIG_RE (1 << 17)
+# define I2S_CONFIG_RF (1 << 16)
+# define I2S_CONFIG_PD (1 << 11)
+# define I2S_CONFIG_LB (1 << 10)
+# define I2S_CONFIG_IC (1 << 9)
# define I2S_CONFIG_FM_BIT 7
# define I2S_CONFIG_FM_MASK (0x3 << I2S_CONFIG_FM_BIT)
# define I2S_CONFIG_FM_I2S (0x0 << I2S_CONFIG_FM_BIT)
# define I2S_CONFIG_FM_LJ (0x1 << I2S_CONFIG_FM_BIT)
# define I2S_CONFIG_FM_RJ (0x2 << I2S_CONFIG_FM_BIT)
-# define I2S_CONFIG_TN (1 << 6)
-# define I2S_CONFIG_RN (1 << 5)
+# define I2S_CONFIG_TN (1 << 6)
+# define I2S_CONFIG_RN (1 << 5)
# define I2S_CONFIG_SZ_BIT 0
# define I2S_CONFIG_SZ_MASK (0x1F << I2S_CONFIG_SZ_BIT)
#define I2S_CONTROL 0xB1000008
-# define I2S_CONTROL_D (1 << 1)
+# define I2S_CONTROL_D (1 << 1)
# define I2S_CONTROL_CE (1 << 0)
@@ -1037,16 +1037,16 @@ enum soc_au1200_ints {
/* 4 byte offsets from AU1000_ETH_BASE */
#define MAC_CONTROL 0x0
-# define MAC_RX_ENABLE (1 << 2)
-# define MAC_TX_ENABLE (1 << 3)
-# define MAC_DEF_CHECK (1 << 5)
-# define MAC_SET_BL(X) (((X) & 0x3) << 6)
+# define MAC_RX_ENABLE (1 << 2)
+# define MAC_TX_ENABLE (1 << 3)
+# define MAC_DEF_CHECK (1 << 5)
+# define MAC_SET_BL(X) (((X) & 0x3) << 6)
# define MAC_AUTO_PAD (1 << 8)
# define MAC_DISABLE_RETRY (1 << 10)
# define MAC_DISABLE_BCAST (1 << 11)
# define MAC_LATE_COL (1 << 12)
-# define MAC_HASH_MODE (1 << 13)
-# define MAC_HASH_ONLY (1 << 15)
+# define MAC_HASH_MODE (1 << 13)
+# define MAC_HASH_ONLY (1 << 15)
# define MAC_PASS_ALL (1 << 16)
# define MAC_INVERSE_FILTER (1 << 17)
# define MAC_PROMISCUOUS (1 << 18)
@@ -1083,9 +1083,9 @@ enum soc_au1200_ints {
# define MAC_EN_RESET0 (1 << 1)
# define MAC_EN_TOSS (0 << 2)
# define MAC_EN_CACHEABLE (1 << 3)
-# define MAC_EN_RESET1 (1 << 4)
-# define MAC_EN_RESET2 (1 << 5)
-# define MAC_DMA_RESET (1 << 6)
+# define MAC_EN_RESET1 (1 << 4)
+# define MAC_EN_RESET2 (1 << 5)
+# define MAC_DMA_RESET (1 << 6)
/* Ethernet Controller DMA Channels */
@@ -1095,7 +1095,7 @@ enum soc_au1200_ints {
#define MAC_TX_BUFF0_STATUS 0x0
# define TX_FRAME_ABORTED (1 << 0)
# define TX_JAB_TIMEOUT (1 << 1)
-# define TX_NO_CARRIER (1 << 2)
+# define TX_NO_CARRIER (1 << 2)
# define TX_LOSS_CARRIER (1 << 3)
# define TX_EXC_DEF (1 << 4)
# define TX_LATE_COLL_ABORT (1 << 5)
@@ -1106,7 +1106,7 @@ enum soc_au1200_ints {
# define TX_COLL_CNT_MASK (0xF << 10)
# define TX_PKT_RETRY (1 << 31)
#define MAC_TX_BUFF0_ADDR 0x4
-# define TX_DMA_ENABLE (1 << 0)
+# define TX_DMA_ENABLE (1 << 0)
# define TX_T_DONE (1 << 1)
# define TX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
#define MAC_TX_BUFF0_LEN 0x8
@@ -1125,7 +1125,7 @@ enum soc_au1200_ints {
/* offsets from MAC_RX_RING_ADDR */
#define MAC_RX_BUFF0_STATUS 0x0
# define RX_FRAME_LEN_MASK 0x3fff
-# define RX_WDOG_TIMER (1 << 14)
+# define RX_WDOG_TIMER (1 << 14)
# define RX_RUNT (1 << 15)
# define RX_OVERLEN (1 << 16)
# define RX_COLL (1 << 17)
@@ -1148,7 +1148,7 @@ enum soc_au1200_ints {
RX_COLL | RX_MII_ERROR | RX_CRC_ERROR | \
RX_LEN_ERROR | RX_U_CNTRL_FRAME | RX_MISSED_FRAME)
#define MAC_RX_BUFF0_ADDR 0x4
-# define RX_DMA_ENABLE (1 << 0)
+# define RX_DMA_ENABLE (1 << 0)
# define RX_T_DONE (1 << 1)
# define RX_GET_DMA_BUFFER(X) (((X) >> 2) & 0x3)
# define RX_SET_BUFF_ADDR(X) ((X) & 0xffffffc0)
@@ -1173,34 +1173,34 @@ enum soc_au1200_ints {
/* SSIO */
#define SSI0_STATUS 0xB1600000
-# define SSI_STATUS_BF (1 << 4)
-# define SSI_STATUS_OF (1 << 3)
-# define SSI_STATUS_UF (1 << 2)
+# define SSI_STATUS_BF (1 << 4)
+# define SSI_STATUS_OF (1 << 3)
+# define SSI_STATUS_UF (1 << 2)
# define SSI_STATUS_D (1 << 1)
# define SSI_STATUS_B (1 << 0)
#define SSI0_INT 0xB1600004
# define SSI_INT_OI (1 << 3)
# define SSI_INT_UI (1 << 2)
# define SSI_INT_DI (1 << 1)
-#define SSI0_INT_ENABLE 0xB1600008
+#define SSI0_INT_ENABLE 0xB1600008
# define SSI_INTE_OIE (1 << 3)
# define SSI_INTE_UIE (1 << 2)
# define SSI_INTE_DIE (1 << 1)
#define SSI0_CONFIG 0xB1600020
-# define SSI_CONFIG_AO (1 << 24)
-# define SSI_CONFIG_DO (1 << 23)
+# define SSI_CONFIG_AO (1 << 24)
+# define SSI_CONFIG_DO (1 << 23)
# define SSI_CONFIG_ALEN_BIT 20
# define SSI_CONFIG_ALEN_MASK (0x7 << 20)
# define SSI_CONFIG_DLEN_BIT 16
# define SSI_CONFIG_DLEN_MASK (0x7 << 16)
-# define SSI_CONFIG_DD (1 << 11)
-# define SSI_CONFIG_AD (1 << 10)
+# define SSI_CONFIG_DD (1 << 11)
+# define SSI_CONFIG_AD (1 << 10)
# define SSI_CONFIG_BM_BIT 8
# define SSI_CONFIG_BM_MASK (0x3 << 8)
-# define SSI_CONFIG_CE (1 << 7)
-# define SSI_CONFIG_DP (1 << 6)
-# define SSI_CONFIG_DL (1 << 5)
-# define SSI_CONFIG_EP (1 << 4)
+# define SSI_CONFIG_CE (1 << 7)
+# define SSI_CONFIG_DP (1 << 6)
+# define SSI_CONFIG_DL (1 << 5)
+# define SSI_CONFIG_EP (1 << 4)
#define SSI0_ADATA 0xB1600024
# define SSI_AD_D (1 << 24)
# define SSI_AD_ADDR_BIT 16
@@ -1210,12 +1210,12 @@ enum soc_au1200_ints {
#define SSI0_CLKDIV 0xB1600028
#define SSI0_CONTROL 0xB1600100
# define SSI_CONTROL_CD (1 << 1)
-# define SSI_CONTROL_E (1 << 0)
+# define SSI_CONTROL_E (1 << 0)
/* SSI1 */
#define SSI1_STATUS 0xB1680000
#define SSI1_INT 0xB1680004
-#define SSI1_INT_ENABLE 0xB1680008
+#define SSI1_INT_ENABLE 0xB1680008
#define SSI1_CONFIG 0xB1680020
#define SSI1_ADATA 0xB1680024
#define SSI1_CLKDIV 0xB1680028
@@ -1242,8 +1242,8 @@ enum soc_au1200_ints {
#define SSI_CONFIG_AO (1 << 24)
#define SSI_CONFIG_DO (1 << 23)
-#define SSI_CONFIG_ALEN (7 << 20)
-#define SSI_CONFIG_DLEN (15 << 16)
+#define SSI_CONFIG_ALEN (7 << 20)
+#define SSI_CONFIG_DLEN (15 << 16)
#define SSI_CONFIG_DD (1 << 11)
#define SSI_CONFIG_AD (1 << 10)
#define SSI_CONFIG_BM (3 << 8)
@@ -1305,7 +1305,7 @@ struct au1k_irda_platform_data {
# define SYS_PF_CS (1 << 16) /* EXTCLK0/32KHz to gpio2 */
# define SYS_PF_EX0 (1 << 9) /* GPIO2/clock */
-/* Au1550 only. Redefines lots of pins */
+/* Au1550 only. Redefines lots of pins */
# define SYS_PF_PSC2_MASK (7 << 17)
# define SYS_PF_PSC2_AC97 0
# define SYS_PF_PSC2_SPI 0
@@ -1322,33 +1322,33 @@ struct au1k_irda_platform_data {
# define SYS_PF_MUST_BE_SET ((1 << 5) | (1 << 2))
/* Au1200 only */
-#define SYS_PINFUNC_DMA (1 << 31)
-#define SYS_PINFUNC_S0A (1 << 30)
-#define SYS_PINFUNC_S1A (1 << 29)
-#define SYS_PINFUNC_LP0 (1 << 28)
-#define SYS_PINFUNC_LP1 (1 << 27)
-#define SYS_PINFUNC_LD16 (1 << 26)
-#define SYS_PINFUNC_LD8 (1 << 25)
-#define SYS_PINFUNC_LD1 (1 << 24)
-#define SYS_PINFUNC_LD0 (1 << 23)
-#define SYS_PINFUNC_P1A (3 << 21)
-#define SYS_PINFUNC_P1B (1 << 20)
-#define SYS_PINFUNC_FS3 (1 << 19)
-#define SYS_PINFUNC_P0A (3 << 17)
+#define SYS_PINFUNC_DMA (1 << 31)
+#define SYS_PINFUNC_S0A (1 << 30)
+#define SYS_PINFUNC_S1A (1 << 29)
+#define SYS_PINFUNC_LP0 (1 << 28)
+#define SYS_PINFUNC_LP1 (1 << 27)
+#define SYS_PINFUNC_LD16 (1 << 26)
+#define SYS_PINFUNC_LD8 (1 << 25)
+#define SYS_PINFUNC_LD1 (1 << 24)
+#define SYS_PINFUNC_LD0 (1 << 23)
+#define SYS_PINFUNC_P1A (3 << 21)
+#define SYS_PINFUNC_P1B (1 << 20)
+#define SYS_PINFUNC_FS3 (1 << 19)
+#define SYS_PINFUNC_P0A (3 << 17)
#define SYS_PINFUNC_CS (1 << 16)
-#define SYS_PINFUNC_CIM (1 << 15)
-#define SYS_PINFUNC_P1C (1 << 14)
-#define SYS_PINFUNC_U1T (1 << 12)
-#define SYS_PINFUNC_U1R (1 << 11)
-#define SYS_PINFUNC_EX1 (1 << 10)
-#define SYS_PINFUNC_EX0 (1 << 9)
-#define SYS_PINFUNC_U0R (1 << 8)
+#define SYS_PINFUNC_CIM (1 << 15)
+#define SYS_PINFUNC_P1C (1 << 14)
+#define SYS_PINFUNC_U1T (1 << 12)
+#define SYS_PINFUNC_U1R (1 << 11)
+#define SYS_PINFUNC_EX1 (1 << 10)
+#define SYS_PINFUNC_EX0 (1 << 9)
+#define SYS_PINFUNC_U0R (1 << 8)
#define SYS_PINFUNC_MC (1 << 7)
-#define SYS_PINFUNC_S0B (1 << 6)
-#define SYS_PINFUNC_S0C (1 << 5)
-#define SYS_PINFUNC_P0B (1 << 4)
-#define SYS_PINFUNC_U0T (1 << 3)
-#define SYS_PINFUNC_S1B (1 << 2)
+#define SYS_PINFUNC_S0B (1 << 6)
+#define SYS_PINFUNC_S0C (1 << 5)
+#define SYS_PINFUNC_P0B (1 << 4)
+#define SYS_PINFUNC_U0T (1 << 3)
+#define SYS_PINFUNC_S1B (1 << 2)
/* Power Management */
#define SYS_SCRATCH0 0xB1900018
@@ -1405,7 +1405,7 @@ struct au1k_irda_platform_data {
# define SYS_CS_DI2 (1 << 16)
# define SYS_CS_CI2 (1 << 15)
-# define SYS_CS_ML_BIT 7
+# define SYS_CS_ML_BIT 7
# define SYS_CS_ML_MASK (0x7 << SYS_CS_ML_BIT)
# define SYS_CS_DL (1 << 6)
# define SYS_CS_CL (1 << 5)
@@ -1554,8 +1554,8 @@ struct au1k_irda_platform_data {
#define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16)
#define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff)
#define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16)
-#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8)
-#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff)
+#define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8)
+#define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff)
#define PCI_ID_DID(x) (((x) & 0xffff) << 16)
#define PCI_ID_VID(x) ((x) & 0xffff)
#define PCI_STATCMD_STATUS(x) (((x) & 0xffff) << 16)
diff --git a/arch/mips/include/asm/mach-au1x00/au1000_dma.h b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
index ba4cf0e91c8..7cedca5a305 100644
--- a/arch/mips/include/asm/mach-au1x00/au1000_dma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1000_dma.h
@@ -34,7 +34,7 @@
#include <linux/spinlock.h> /* And spinlocks */
#include <linux/delay.h>
-#define NUM_AU1000_DMA_CHANNELS 8
+#define NUM_AU1000_DMA_CHANNELS 8
/* DMA Channel Register Offsets */
#define DMA_MODE_SET 0x00000000
@@ -47,7 +47,7 @@
#define DMA_DS (1 << 15)
#define DMA_BE (1 << 13)
#define DMA_DR (1 << 12)
-#define DMA_TS8 (1 << 11)
+#define DMA_TS8 (1 << 11)
#define DMA_DW_BIT 9
#define DMA_DW_MASK (0x03 << DMA_DW_BIT)
#define DMA_DW8 (0 << DMA_DW_BIT)
@@ -59,9 +59,9 @@
#define DMA_GO (1 << 5)
#define DMA_AB (1 << 4)
#define DMA_D1 (1 << 3)
-#define DMA_BE1 (1 << 2)
+#define DMA_BE1 (1 << 2)
#define DMA_D0 (1 << 1)
-#define DMA_BE0 (1 << 0)
+#define DMA_BE0 (1 << 0)
#define DMA_PERIPHERAL_ADDR 0x00000008
#define DMA_BUFFER0_START 0x0000000C
@@ -246,7 +246,7 @@ static inline void init_dma(unsigned int dmanr)
mode |= DMA_IE;
au_writel(~mode, chan->io + DMA_MODE_CLEAR);
- au_writel(mode, chan->io + DMA_MODE_SET);
+ au_writel(mode, chan->io + DMA_MODE_SET);
}
/*
diff --git a/arch/mips/include/asm/mach-au1x00/au1100_mmc.h b/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
index e221659f1bc..cadab91cee2 100644
--- a/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
+++ b/arch/mips/include/asm/mach-au1x00/au1100_mmc.h
@@ -148,7 +148,7 @@ struct au1xmmc_platform_data {
/*
* SD_STATUS bit definitions.
*/
-#define SD_STATUS_DCRCW (0x00000007)
+#define SD_STATUS_DCRCW (0x00000007)
#define SD_STATUS_xx1 (0x00000008)
#define SD_STATUS_CB (0x00000010)
#define SD_STATUS_DB (0x00000020)
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index 217810e1836..ca8077afac4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -103,7 +103,7 @@ typedef volatile struct au1xxx_ddma_desc {
* Lets have some SW data following -- make sure it's 32 bytes.
*/
u32 sw_status;
- u32 sw_context;
+ u32 sw_context;
u32 sw_reserved[6];
} au1x_ddma_desc_t;
@@ -123,7 +123,7 @@ typedef volatile struct au1xxx_ddma_desc {
#define DSCR_CMD0_CV (0x1 << 2) /* Clear Valid when done */
#define DSCR_CMD0_ST_MASK (0x3 << 0) /* Status instruction */
-#define SW_STATUS_INUSE (1 << 0)
+#define SW_STATUS_INUSE (1 << 0)
/* Command 0 device IDs. */
#define AU1550_DSCR_CMD0_UART0_TX 0
@@ -195,8 +195,8 @@ typedef volatile struct au1xxx_ddma_desc {
#define AU1300_DSCR_CMD0_SDMS_RX0 9
#define AU1300_DSCR_CMD0_SDMS_TX1 10
#define AU1300_DSCR_CMD0_SDMS_RX1 11
-#define AU1300_DSCR_CMD0_AES_TX 12
-#define AU1300_DSCR_CMD0_AES_RX 13
+#define AU1300_DSCR_CMD0_AES_TX 12
+#define AU1300_DSCR_CMD0_AES_RX 13
#define AU1300_DSCR_CMD0_PSC0_TX 14
#define AU1300_DSCR_CMD0_PSC0_RX 15
#define AU1300_DSCR_CMD0_PSC1_TX 16
@@ -205,12 +205,12 @@ typedef volatile struct au1xxx_ddma_desc {
#define AU1300_DSCR_CMD0_PSC2_RX 19
#define AU1300_DSCR_CMD0_PSC3_TX 20
#define AU1300_DSCR_CMD0_PSC3_RX 21
-#define AU1300_DSCR_CMD0_LCD 22
+#define AU1300_DSCR_CMD0_LCD 22
#define AU1300_DSCR_CMD0_NAND_FLASH 23
#define AU1300_DSCR_CMD0_SDMS_TX2 24
#define AU1300_DSCR_CMD0_SDMS_RX2 25
#define AU1300_DSCR_CMD0_CIM_SYNC 26
-#define AU1300_DSCR_CMD0_UDMA 27
+#define AU1300_DSCR_CMD0_UDMA 27
#define AU1300_DSCR_CMD0_DMA_REQ0 28
#define AU1300_DSCR_CMD0_DMA_REQ1 29
@@ -298,7 +298,7 @@ typedef volatile struct au1xxx_ddma_desc {
#define DSCR_NXTPTR_MS (1 << 27)
/* The number of DBDMA channels. */
-#define NUM_DBDMA_CHANS 16
+#define NUM_DBDMA_CHANS 16
/*
* DDMA API definitions
@@ -316,7 +316,7 @@ typedef struct dbdma_device_table {
typedef struct dbdma_chan_config {
- spinlock_t lock;
+ spinlock_t lock;
u32 chan_flags;
u32 chan_index;
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h b/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
index e306384b141..bb91b8923a4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
@@ -1,5 +1,5 @@
/*
- * include/asm-mips/mach-au1x00/au1xxx_ide.h version 01.30.00 Aug. 02 2005
+ * include/asm-mips/mach-au1x00/au1xxx_ide.h version 01.30.00 Aug. 02 2005
*
* BRIEF MODULE DESCRIPTION
* AMD Alchemy Au1xxx IDE interface routines over the Static Bus
@@ -27,14 +27,14 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Note: for more information, please refer "AMD Alchemy Au1200/Au1550 IDE
- * Interface and Linux Device Driver" Application Note.
+ * Interface and Linux Device Driver" Application Note.
*/
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
#define DMA_WAIT_TIMEOUT 100
-#define NUM_DESCRIPTORS PRD_ENTRIES
+#define NUM_DESCRIPTORS PRD_ENTRIES
#else /* CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA */
-#define NUM_DESCRIPTORS 2
+#define NUM_DESCRIPTORS 2
#endif
#ifndef AU1XXX_ATA_RQSIZE
@@ -84,8 +84,8 @@ typedef struct {
#define TWP_MASK (0x3F << 14)
#define TCSW_MASK (0x0F << 10)
#define TPM_MASK (0x0F << 6)
-#define TA_MASK (0x3F << 0)
-#define TS_MASK (1 << 8)
+#define TA_MASK (0x3F << 0)
+#define TS_MASK (1 << 8)
/* Timing parameters PIO mode 0 */
#define SBC_IDE_PIO0_TCSOE (0x04 << 29)
@@ -96,7 +96,7 @@ typedef struct {
#define SBC_IDE_PIO0_TWP (0x10 << 14)
#define SBC_IDE_PIO0_TCSW (0x04 << 10)
#define SBC_IDE_PIO0_TPM (0x00 << 6)
-#define SBC_IDE_PIO0_TA (0x15 << 0)
+#define SBC_IDE_PIO0_TA (0x15 << 0)
/* Timing parameters PIO mode 1 */
#define SBC_IDE_PIO1_TCSOE (0x03 << 29)
#define SBC_IDE_PIO1_TOECS (0x01 << 26)
@@ -106,7 +106,7 @@ typedef struct {
#define SBC_IDE_PIO1_TWP (0x08 << 14)
#define SBC_IDE_PIO1_TCSW (0x03 << 10)
#define SBC_IDE_PIO1_TPM (0x00 << 6)
-#define SBC_IDE_PIO1_TA (0x0B << 0)
+#define SBC_IDE_PIO1_TA (0x0B << 0)
/* Timing parameters PIO mode 2 */
#define SBC_IDE_PIO2_TCSOE (0x05 << 29)
#define SBC_IDE_PIO2_TOECS (0x01 << 26)
@@ -116,7 +116,7 @@ typedef struct {
#define SBC_IDE_PIO2_TWP (0x1F << 14)
#define SBC_IDE_PIO2_TCSW (0x05 << 10)
#define SBC_IDE_PIO2_TPM (0x00 << 6)
-#define SBC_IDE_PIO2_TA (0x22 << 0)
+#define SBC_IDE_PIO2_TA (0x22 << 0)
/* Timing parameters PIO mode 3 */
#define SBC_IDE_PIO3_TCSOE (0x05 << 29)
#define SBC_IDE_PIO3_TOECS (0x01 << 26)
@@ -126,7 +126,7 @@ typedef struct {
#define SBC_IDE_PIO3_TWP (0x15 << 14)
#define SBC_IDE_PIO3_TCSW (0x05 << 10)
#define SBC_IDE_PIO3_TPM (0x00 << 6)
-#define SBC_IDE_PIO3_TA (0x1A << 0)
+#define SBC_IDE_PIO3_TA (0x1A << 0)
/* Timing parameters PIO mode 4 */
#define SBC_IDE_PIO4_TCSOE (0x04 << 29)
#define SBC_IDE_PIO4_TOECS (0x01 << 26)
@@ -136,7 +136,7 @@ typedef struct {
#define SBC_IDE_PIO4_TWP (0x0D << 14)
#define SBC_IDE_PIO4_TCSW (0x03 << 10)
#define SBC_IDE_PIO4_TPM (0x00 << 6)
-#define SBC_IDE_PIO4_TA (0x12 << 0)
+#define SBC_IDE_PIO4_TA (0x12 << 0)
/* Timing parameters MDMA mode 0 */
#define SBC_IDE_MDMA0_TCSOE (0x03 << 29)
#define SBC_IDE_MDMA0_TOECS (0x01 << 26)
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h b/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
index 4e3f3bc26c6..8a9cd754be2 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
@@ -53,7 +53,7 @@
#define PSC_CTRL_DISABLE 0
#define PSC_CTRL_SUSPEND 2
-#define PSC_CTRL_ENABLE 3
+#define PSC_CTRL_ENABLE 3
/* AC97 Registers. */
#define PSC_AC97CFG_OFFSET 0x00000008
@@ -85,8 +85,8 @@
#define PSC_AC97CFG_SE_ENABLE (1 << 25)
#define PSC_AC97CFG_LEN_MASK (0xf << 21)
-#define PSC_AC97CFG_TXSLOT_MASK (0x3ff << 11)
-#define PSC_AC97CFG_RXSLOT_MASK (0x3ff << 1)
+#define PSC_AC97CFG_TXSLOT_MASK (0x3ff << 11)
+#define PSC_AC97CFG_RXSLOT_MASK (0x3ff << 1)
#define PSC_AC97CFG_GE_ENABLE (1)
/* Enable slots 3-12. */
@@ -95,7 +95,7 @@
/*
* The word length equation is ((x) * 2) + 2, so choose 'x' appropriately.
- * The only sensible numbers are 7, 9, or possibly 11. Nah, just do the
+ * The only sensible numbers are 7, 9, or possibly 11. Nah, just do the
* arithmetic in the macro.
*/
#define PSC_AC97CFG_SET_LEN(x) (((((x) - 2) / 2) & 0xf) << 21)
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
index 73853b5a2a3..796afd051c3 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1000.h
@@ -12,14 +12,14 @@
#include <asm/mach-au1x00/au1000.h>
/* The default GPIO numberspace as documented in the Alchemy manuals.
- * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block.
+ * GPIO0-31 from GPIO1 block, GPIO200-215 from GPIO2 block.
*/
#define ALCHEMY_GPIO1_BASE 0
#define ALCHEMY_GPIO2_BASE 200
#define ALCHEMY_GPIO1_NUM 32
#define ALCHEMY_GPIO2_NUM 16
-#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
+#define ALCHEMY_GPIO1_MAX (ALCHEMY_GPIO1_BASE + ALCHEMY_GPIO1_NUM - 1)
#define ALCHEMY_GPIO2_MAX (ALCHEMY_GPIO2_BASE + ALCHEMY_GPIO2_NUM - 1)
#define MAKE_IRQ(intc, off) (AU1000_INTC##intc##_INT_BASE + (off))
@@ -67,7 +67,7 @@ static inline int au1500_gpio1_to_irq(int gpio)
switch (gpio) {
case 0 ... 15:
case 20:
- case 23 ... 28: return MAKE_IRQ(1, gpio);
+ case 23 ... 28: return MAKE_IRQ(1, gpio);
}
return -ENXIO;
@@ -139,8 +139,8 @@ static inline int au1550_gpio1_to_irq(int gpio)
switch (gpio) {
case 0 ... 15:
- case 20 ... 28: return MAKE_IRQ(1, gpio);
- case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
+ case 20 ... 28: return MAKE_IRQ(1, gpio);
+ case 16 ... 17: return MAKE_IRQ(1, 18 + gpio - 16);
}
return -ENXIO;
@@ -152,9 +152,9 @@ static inline int au1550_gpio2_to_irq(int gpio)
switch (gpio) {
case 0: return MAKE_IRQ(1, 16);
- case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
+ case 1 ... 5: return MAKE_IRQ(1, 17); /* shared GPIO201_205 */
case 6 ... 7: return MAKE_IRQ(1, 29 + gpio - 6);
- case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
+ case 8 ... 15: return MAKE_IRQ(1, 31); /* shared GPIO208_215 */
}
return -ENXIO;
@@ -190,7 +190,7 @@ static inline int au1200_gpio2_to_irq(int gpio)
case 0 ... 2: return MAKE_IRQ(0, 5 + gpio - 0);
case 3: return MAKE_IRQ(0, 22);
case 4 ... 7: return MAKE_IRQ(0, 24 + gpio - 4);
- case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
+ case 8 ... 15: return MAKE_IRQ(0, 28); /* shared GPIO208_215 */
}
return -ENXIO;
@@ -428,7 +428,7 @@ static inline void alchemy_gpio2_disable_int(int gpio2)
/**
* alchemy_gpio2_enable - Activate GPIO2 block.
*
- * The GPIO2 block must be enabled excplicitly to work. On systems
+ * The GPIO2 block must be enabled excplicitly to work. On systems
* where this isn't done by the bootloader, this macro can be used.
*/
static inline void alchemy_gpio2_enable(void)
@@ -533,7 +533,7 @@ static inline int alchemy_irq_to_gpio(int irq)
* 2 (1 for Au1000) gpio_chips are registered.
*
*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
- * the boards' gpio.h must provide the linux gpio wrapper functions,
+ * the boards' gpio.h must provide the linux gpio wrapper functions,
*
*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
* inlinable gpio functions are provided which enable access to the
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index fb9975c74c5..ce02894271c 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -130,7 +130,7 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio)
* A gpiochip for the 75 GPIOs is registered.
*
*(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y:
-* the boards' gpio.h must provide the linux gpio wrapper functions,
+* the boards' gpio.h must provide the linux gpio wrapper functions,
*
*(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n:
* inlinable gpio functions are provided which enable access to the
diff --git a/arch/mips/include/asm/mach-bcm47xx/nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
index 69ef3efe06e..b8e7be8f34d 100644
--- a/arch/mips/include/asm/mach-bcm47xx/nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
@@ -8,8 +8,8 @@
* option) any later version.
*/
-#ifndef __NVRAM_H
-#define __NVRAM_H
+#ifndef __BCM47XX_NVRAM_H
+#define __BCM47XX_NVRAM_H
#include <linux/types.h>
#include <linux/kernel.h>
@@ -32,12 +32,9 @@ struct nvram_header {
#define NVRAM_MAX_VALUE_LEN 255
#define NVRAM_MAX_PARAM_LEN 64
-#define NVRAM_ERR_INV_PARAM -8
-#define NVRAM_ERR_ENVNOTFOUND -9
+extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len);
-extern int nvram_getenv(char *name, char *val, size_t val_len);
-
-static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
+static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
{
if (strchr(buf, ':'))
sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
@@ -51,4 +48,4 @@ static inline void nvram_parse_macaddr(char *buf, u8 macaddr[6])
printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
}
-#endif
+#endif /* __BCM47XX_NVRAM_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
index dbd5b5ad07a..cb922b9cb0e 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h
@@ -182,7 +182,7 @@ enum bcm63xx_regs_set {
#define BCM_6328_PERF_BASE (0xb0000000)
#define BCM_6328_TIMER_BASE (0xb0000040)
#define BCM_6328_WDT_BASE (0xb000005c)
-#define BCM_6328_UART0_BASE (0xb0000100)
+#define BCM_6328_UART0_BASE (0xb0000100)
#define BCM_6328_UART1_BASE (0xb0000120)
#define BCM_6328_GPIO_BASE (0xb0000080)
#define BCM_6328_SPI_BASE (0xdeadbeef)
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
index 03a54df5fb8..7033144aab2 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_io.h
@@ -88,7 +88,7 @@
#define bcm_mpi_readl(o) bcm_rset_readl(RSET_MPI, (o))
#define bcm_mpi_writel(v, o) bcm_rset_writel(RSET_MPI, (v), (o))
#define bcm_pcmcia_readl(o) bcm_rset_readl(RSET_PCMCIA, (o))
-#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
+#define bcm_pcmcia_writel(v, o) bcm_rset_writel(RSET_PCMCIA, (v), (o))
#define bcm_pcie_readl(o) bcm_rset_readl(RSET_PCIE, (o))
#define bcm_pcie_writel(v, o) bcm_rset_writel(RSET_PCIE, (v), (o))
#define bcm_sdram_readl(o) bcm_rset_readl(RSET_SDRAM, (o))
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
index a5bbff31c89..1e89df7244b 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_iudma.h
@@ -19,7 +19,7 @@ struct bcm_enet_desc {
#define DMADESC_SOP_MASK (1 << 13)
#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK)
#define DMADESC_WRAP_MASK (1 << 12)
-#define DMADESC_USB_NOZERO_MASK (1 << 1)
+#define DMADESC_USB_NOZERO_MASK (1 << 1)
#define DMADESC_USB_ZERO_MASK (1 << 0)
/* status */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
index c3eeb90b480..81b4702f792 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h
@@ -143,7 +143,7 @@
CKCTL_6368_NAND_EN | \
CKCTL_6368_IPSEC_EN)
-/* System PLL Control register */
+/* System PLL Control register */
#define PERF_SYS_PLL_CTL_REG 0x8
#define SYS_PLL_SOFT_RESET 0x1
@@ -219,7 +219,7 @@
#define SOFTRESET_6338_DMAMEM_MASK (1 << 6)
#define SOFTRESET_6338_SAR_MASK (1 << 7)
#define SOFTRESET_6338_ACLC_MASK (1 << 8)
-#define SOFTRESET_6338_ADSLMIPSPLL_MASK (1 << 10)
+#define SOFTRESET_6338_ADSLMIPSPLL_MASK (1 << 10)
#define SOFTRESET_6338_ALL (SOFTRESET_6338_SPI_MASK | \
SOFTRESET_6338_ENET_MASK | \
SOFTRESET_6338_USBH_MASK | \
@@ -238,7 +238,7 @@
#define SOFTRESET_6348_DMAMEM_MASK (1 << 6)
#define SOFTRESET_6348_SAR_MASK (1 << 7)
#define SOFTRESET_6348_ACLC_MASK (1 << 8)
-#define SOFTRESET_6348_ADSLMIPSPLL_MASK (1 << 10)
+#define SOFTRESET_6348_ADSLMIPSPLL_MASK (1 << 10)
#define SOFTRESET_6348_ALL (SOFTRESET_6348_SPI_MASK | \
SOFTRESET_6348_ENET_MASK | \
@@ -560,7 +560,7 @@
#define GPIO_PINMUX_OTHR_REG 0x24
-#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
+#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
#define GPIO_PINMUX_OTHR_6328_USB_MASK (3 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
#define GPIO_PINMUX_OTHR_6328_USB_HOST (1 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
#define GPIO_PINMUX_OTHR_6328_USB_DEV (2 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
@@ -572,12 +572,12 @@
/* those bits must be kept as read in gpio basemode register*/
#define GPIO_STRAPBUS_REG 0x40
-#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
+#define STRAPBUS_6358_BOOT_SEL_PARALLEL (1 << 1)
#define STRAPBUS_6358_BOOT_SEL_SERIAL (0 << 1)
#define STRAPBUS_6368_BOOT_SEL_MASK 0x3
#define STRAPBUS_6368_BOOT_SEL_NAND 0
#define STRAPBUS_6368_BOOT_SEL_SERIAL 1
-#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
+#define STRAPBUS_6368_BOOT_SEL_PARALLEL 3
/*************************************************************************
@@ -812,7 +812,7 @@
#define USBH_PRIV_SWAP_OHCI_DATA_MASK (1 << USBH_PRIV_SWAP_OHCI_DATA_SHIFT)
#define USBH_PRIV_UTMI_CTL_6368_REG 0x10
-#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT 12
+#define USBH_PRIV_UTMI_CTL_NODRIV_SHIFT 12
#define USBH_PRIV_UTMI_CTL_NODRIV_MASK (0xf << USBH_PRIV_UTMI_CTL_NODRIV_SHIFT)
#define USBH_PRIV_UTMI_CTL_HOSTB_SHIFT 0
#define USBH_PRIV_UTMI_CTL_HOSTB_MASK (0xf << USBH_PRIV_UTMI_CTL_HOSTB_SHIFT)
@@ -841,7 +841,7 @@
#define USBD_CONTROL_INIT_SEL_MASK (0xf << USBD_CONTROL_INIT_SEL_SHIFT)
#define USBD_CONTROL_FIFO_RESET_SHIFT 6
#define USBD_CONTROL_FIFO_RESET_MASK (3 << USBD_CONTROL_FIFO_RESET_SHIFT)
-#define USBD_CONTROL_SETUPERRLOCK_SHIFT 5
+#define USBD_CONTROL_SETUPERRLOCK_SHIFT 5
#define USBD_CONTROL_SETUPERRLOCK_MASK (1 << USBD_CONTROL_SETUPERRLOCK_SHIFT)
#define USBD_CONTROL_DONE_CSRS_SHIFT 0
#define USBD_CONTROL_DONE_CSRS_MASK (1 << USBD_CONTROL_DONE_CSRS_SHIFT)
@@ -852,7 +852,7 @@
#define USBD_STRAPS_APP_SELF_PWR_MASK (1 << USBD_STRAPS_APP_SELF_PWR_SHIFT)
#define USBD_STRAPS_APP_DISCON_SHIFT 9
#define USBD_STRAPS_APP_DISCON_MASK (1 << USBD_STRAPS_APP_DISCON_SHIFT)
-#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT 8
+#define USBD_STRAPS_APP_CSRPRGSUP_SHIFT 8
#define USBD_STRAPS_APP_CSRPRGSUP_MASK (1 << USBD_STRAPS_APP_CSRPRGSUP_SHIFT)
#define USBD_STRAPS_APP_RMTWKUP_SHIFT 6
#define USBD_STRAPS_APP_RMTWKUP_MASK (1 << USBD_STRAPS_APP_RMTWKUP_SHIFT)
@@ -943,7 +943,7 @@
#define USBD_EPNUM_TYPEMAP_REG 0x50
#define USBD_EPNUM_TYPEMAP_TYPE_SHIFT 8
#define USBD_EPNUM_TYPEMAP_TYPE_MASK (0x3 << USBD_EPNUM_TYPEMAP_TYPE_SHIFT)
-#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT 0
+#define USBD_EPNUM_TYPEMAP_DMA_CH_SHIFT 0
#define USBD_EPNUM_TYPEMAP_DMA_CH_MASK (0xf << USBD_EPNUM_TYPEMAP_DMACH_SHIFT)
/* Misc per-endpoint settings */
@@ -1048,8 +1048,8 @@
#define MPI_L2PREMAP_IS_CARDBUS_MASK (1 << 2)
#define MPI_PCIMODESEL_REG 0x144
-#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK (1 << 0)
-#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK (1 << 1)
+#define MPI_PCIMODESEL_BAR1_NOSWAP_MASK (1 << 0)
+#define MPI_PCIMODESEL_BAR2_NOSWAP_MASK (1 << 1)
#define MPI_PCIMODESEL_EXT_ARB_MASK (1 << 2)
#define MPI_PCIMODESEL_PREFETCH_SHIFT 4
#define MPI_PCIMODESEL_PREFETCH_MASK (0xf << MPI_PCIMODESEL_PREFETCH_SHIFT)
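
The 6328 USB pinmux field a few hunks above is a typical two-bit shift/mask pair: MASK is 3 << shift, with HOST (01) and DEV (10) as the two legal values. A minimal, self-contained sketch of the read-modify-write such a field implies; the register is simulated with a plain variable and the helper name is illustrative, not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

#define GPIO_PINMUX_OTHR_6328_USB_SHIFT 12
#define GPIO_PINMUX_OTHR_6328_USB_MASK  (3 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
#define GPIO_PINMUX_OTHR_6328_USB_HOST  (1 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)
#define GPIO_PINMUX_OTHR_6328_USB_DEV   (2 << GPIO_PINMUX_OTHR_6328_USB_SHIFT)

/* Stand-in for the pinmux register; on hardware this would be an MMIO access. */
static uint32_t pinmux_othr_reg;

/* Select host or device mode by clearing the field and writing the new value. */
static void set_usb_mode(uint32_t mode_bits)
{
	uint32_t val = pinmux_othr_reg;            /* read */
	val &= ~GPIO_PINMUX_OTHR_6328_USB_MASK;    /* clear the two-bit field */
	val |= mode_bits;                          /* set HOST (01) or DEV (10) */
	pinmux_othr_reg = val;                     /* write back */
}

int main(void)
{
	set_usb_mode(GPIO_PINMUX_OTHR_6328_USB_HOST);
	printf("pinmux = 0x%08x\n", (unsigned int)pinmux_othr_reg);  /* 0x00001000 */
	return 0;
}

The same clear-then-set pattern applies to every *_SHIFT/*_MASK pair in this header.
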
diff --git a/arch/mips/include/asm/mach-bcm63xx/irq.h b/arch/mips/include/asm/mach-bcm63xx/irq.h
index 9332e788a5c..2bbfc8d1f30 100644
--- a/arch/mips/include/asm/mach-bcm63xx/irq.h
+++ b/arch/mips/include/asm/mach-bcm63xx/irq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_BCM63XX_IRQ_H
#define __ASM_MACH_BCM63XX_IRQ_H
-#define NR_IRQS 128
+#define NR_IRQS 128
#define MIPS_CPU_IRQ_BASE 0
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index 502bb1815ae..60fc4c347c4 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -51,8 +51,8 @@ enum octeon_irq {
/* 256 - 511 represent the MSI interrupts 0-255 */
#define OCTEON_IRQ_MSI_BIT0 (256)
-#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
-#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
+#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
+#define OCTEON_IRQ_LAST (OCTEON_IRQ_MSI_LAST + 1)
#endif
#endif
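
The hunk above only re-tabs these defines, but the arithmetic they encode is easy to state: MSI vectors 0-255 sit at IRQ numbers 256-511, so OCTEON_IRQ_MSI_LAST is 511 and OCTEON_IRQ_LAST is 512. A standalone sketch of that mapping (illustrative, not kernel code):

#include <stdio.h>

#define OCTEON_IRQ_MSI_BIT0 (256)
#define OCTEON_IRQ_MSI_LAST (OCTEON_IRQ_MSI_BIT0 + 255)
#define OCTEON_IRQ_LAST     (OCTEON_IRQ_MSI_LAST + 1)

/* MSI vector n (0..255) lands on Linux IRQ number OCTEON_IRQ_MSI_BIT0 + n. */
static int msi_vector_to_irq(int vector)
{
	return OCTEON_IRQ_MSI_BIT0 + vector;
}

int main(void)
{
	printf("MSI 0   -> IRQ %d\n", msi_vector_to_irq(0));    /* 256 */
	printf("MSI 255 -> IRQ %d\n", msi_vector_to_irq(255));  /* 511 == OCTEON_IRQ_MSI_LAST */
	printf("OCTEON_IRQ_LAST = %d\n", OCTEON_IRQ_LAST);      /* 512 */
	return 0;
}
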
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index dedef7d2b01..1e7dbb19265 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -16,7 +16,7 @@
#define CP0_PRID_OCTEON_PASS1 0x000d0000
#define CP0_PRID_OCTEON_CN30XX 0x000d0200
-.macro kernel_entry_setup
+.macro kernel_entry_setup
# Registers set by bootloader:
# (only 32 bits set by bootloader, all addresses are physical
# addresses, and need to have the appropriate memory region set
@@ -28,12 +28,12 @@
.set push
.set arch=octeon
# Read the cavium mem control register
- dmfc0 v0, CP0_CVMMEMCTL_REG
+ dmfc0 v0, CP0_CVMMEMCTL_REG
# Clear the lower 6 bits, the CVMSEG size
- dins v0, $0, 0, 6
- ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
- dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register
- dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register
+ dins v0, $0, 0, 6
+ ori v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+ dmtc0 v0, CP0_CVMMEMCTL_REG # Write the cavium mem control register
+ dmfc0 v0, CP0_CVMCTL_REG # Read the cavium control register
#ifdef CONFIG_CAVIUM_OCTEON_HW_FIX_UNALIGNED
# Disable unaligned load/store support but leave HW fixup enabled
or v0, v0, 0x5001
@@ -69,14 +69,14 @@ skip:
and v0, v0, v1
ori v0, v0, (6 << 7)
# Write the cavium control register
- dmtc0 v0, CP0_CVMCTL_REG
+ dmtc0 v0, CP0_CVMCTL_REG
sync
# Flush dcache after config change
- cache 9, 0($0)
+ cache 9, 0($0)
# Get my core id
- rdhwr v0, $0
+ rdhwr v0, $0
# Jump the master to kernel_entry
- bne a2, zero, octeon_main_processor
+ bne a2, zero, octeon_main_processor
nop
#ifdef CONFIG_SMP
@@ -87,21 +87,21 @@ skip:
#
# This is the variable where the next core to boot os stored
- PTR_LA t0, octeon_processor_boot
+ PTR_LA t0, octeon_processor_boot
octeon_spin_wait_boot:
# Get the core id of the next to be booted
- LONG_L t1, (t0)
+ LONG_L t1, (t0)
# Keep looping if it isn't me
bne t1, v0, octeon_spin_wait_boot
nop
# Get my GP from the global variable
- PTR_LA t0, octeon_processor_gp
- LONG_L gp, (t0)
+ PTR_LA t0, octeon_processor_gp
+ LONG_L gp, (t0)
# Get my SP from the global variable
- PTR_LA t0, octeon_processor_sp
- LONG_L sp, (t0)
+ PTR_LA t0, octeon_processor_sp
+ LONG_L sp, (t0)
# Set the SP global variable to zero so the master knows we've started
- LONG_S zero, (t0)
+ LONG_S zero, (t0)
#ifdef __OCTEON__
syncw
syncw
@@ -130,7 +130,7 @@ octeon_main_processor:
/*
* Do SMP slave processor setup necessary before we can savely execute C code.
*/
- .macro smp_slave_setup
+ .macro smp_slave_setup
.endm
#endif /* __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H */
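
The assembly above implements a simple boot handshake: each secondary core spins on octeon_processor_boot until its own core id appears, loads gp and sp from globals, then stores zero to octeon_processor_sp so the master knows the slot has been consumed. A sketch of the same handshake in portable C with two threads and C11 atomics; the variable names mirror the assembly, everything else (thread setup, the fake stack value) is illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Mirror of octeon_processor_boot / octeon_processor_sp in the assembly above. */
static atomic_int  processor_boot = ATOMIC_VAR_INIT(-1);  /* core id allowed to boot */
static atomic_long processor_sp   = ATOMIC_VAR_INIT(0);   /* stack pointer for that core */

static void *secondary_core(void *arg)
{
	int my_id = *(int *)arg;
	long sp;

	/* Spin until the master publishes our core id (the bne loop in the assembly). */
	while (atomic_load(&processor_boot) != my_id)
		;

	sp = atomic_load(&processor_sp);   /* "LONG_L sp, (t0)" */
	atomic_store(&processor_sp, 0);    /* "LONG_S zero, (t0)": tell the master we started */

	printf("core %d booted with sp=0x%lx\n", my_id, (unsigned long)sp);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int core_id = 1;

	pthread_create(&t, NULL, secondary_core, &core_id);

	atomic_store(&processor_sp, 0x8000f000L);  /* master sets up the stack ...    */
	atomic_store(&processor_boot, core_id);    /* ... then releases core 1        */

	while (atomic_load(&processor_sp) != 0)    /* wait until the slave clears it  */
		;

	pthread_join(t, NULL);
	return 0;
}
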
diff --git a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
index babc8374e37..71d4bface1d 100644
--- a/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cobalt/cpu-feature-overrides.h
@@ -32,9 +32,9 @@
#define cpu_scache_line_size() 0
#ifdef CONFIG_64BIT
-#define cpu_has_llsc 0
+#define cpu_has_llsc 0
#else
-#define cpu_has_llsc 1
+#define cpu_has_llsc 1
#endif
#define cpu_has_mips16 0
diff --git a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
index f8afec3f294..6fe475b9e96 100644
--- a/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-cobalt/mach-gt64120.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2006 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/include/asm/mach-db1x00/bcsr.h b/arch/mips/include/asm/mach-db1x00/bcsr.h
index 16f1cf5982b..3c3ed4ae45e 100644
--- a/arch/mips/include/asm/mach-db1x00/bcsr.h
+++ b/arch/mips/include/asm/mach-db1x00/bcsr.h
@@ -110,7 +110,7 @@ enum bcsr_whoami_boards {
BCSR_WHOAMI_DB1300,
};
-/* STATUS reg. Unless otherwise noted, they're valid on all boards.
+/* STATUS reg. Unless otherwise noted, they're valid on all boards.
* PB1200 = DB1200.
*/
#define BCSR_STATUS_PC0VS 0x0003
@@ -190,7 +190,7 @@ enum bcsr_whoami_boards {
#define BCSR_RESETS_OTPWRPROT 0x1000 /* DB1300 */
#define BCSR_RESETS_OTPCSB 0x2000 /* DB1300 */
#define BCSR_RESETS_OTGPWR 0x4000 /* DB1300 */
-#define BCSR_RESETS_USBHPWR 0x8000 /* DB1300 */
+#define BCSR_RESETS_USBHPWR 0x8000 /* DB1300 */
#define BCSR_BOARD_LCDVEE 0x0001
#define BCSR_BOARD_LCDVDD 0x0002
diff --git a/arch/mips/include/asm/mach-db1x00/db1200.h b/arch/mips/include/asm/mach-db1x00/db1200.h
index b2a8319521e..d3cce7326dd 100644
--- a/arch/mips/include/asm/mach-db1x00/db1200.h
+++ b/arch/mips/include/asm/mach-db1x00/db1200.h
@@ -63,7 +63,7 @@
* the interrupt define and subtracting the DB1200_INT_BEGIN value.
*
* Example: IDE bis pos is = 64 - 64
- * ETH bit pos is = 65 - 64
+ * ETH bit pos is = 65 - 64
*/
enum external_db1200_ints {
DB1200_INT_BEGIN = AU1000_MAX_INTR + 1,
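
The comment above shows how a DB1200 interrupt number maps to a bit position in the BCSR interrupt registers: subtract DB1200_INT_BEGIN, so in the example the IDE interrupt (64) lands on bit 0 and Ethernet (65) on bit 1. A standalone sketch of that arithmetic; the value 64 and the IDE/ETH macro names follow the comment's example, not the real AU1000_MAX_INTR:

#include <stdio.h>

/* In the real header DB1200_INT_BEGIN is AU1000_MAX_INTR + 1; the comment's
 * example uses 64, so that value is assumed here purely for illustration. */
#define DB1200_INT_BEGIN 64
#define DB1200_IDE_INT   (DB1200_INT_BEGIN + 0)
#define DB1200_ETH_INT   (DB1200_INT_BEGIN + 1)

static int irq_to_bcsr_bit(int irq)
{
	return irq - DB1200_INT_BEGIN;   /* IDE: 64 - 64 = 0, ETH: 65 - 64 = 1 */
}

int main(void)
{
	printf("IDE bit = %d\n", irq_to_bcsr_bit(DB1200_IDE_INT));  /* 0 */
	printf("ETH bit = %d\n", irq_to_bcsr_bit(DB1200_ETH_INT));  /* 1 */
	return 0;
}
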
diff --git a/arch/mips/include/asm/mach-db1x00/db1300.h b/arch/mips/include/asm/mach-db1x00/db1300.h
index 7fe5fb3ba87..3d1ede46f05 100644
--- a/arch/mips/include/asm/mach-db1x00/db1300.h
+++ b/arch/mips/include/asm/mach-db1x00/db1300.h
@@ -21,7 +21,7 @@
#define DB1300_SD1_INSERT_INT (DB1300_FIRST_INT + 12)
#define DB1300_SD1_EJECT_INT (DB1300_FIRST_INT + 13)
#define DB1300_OTG_VBUS_OC_INT (DB1300_FIRST_INT + 14)
-#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
+#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
#define DB1300_LAST_INT (DB1300_FIRST_INT + 15)
/* SMSC9210 CS */
diff --git a/arch/mips/include/asm/mach-emma2rh/irq.h b/arch/mips/include/asm/mach-emma2rh/irq.h
index 5439eb85646..2f7155dade2 100644
--- a/arch/mips/include/asm/mach-emma2rh/irq.h
+++ b/arch/mips/include/asm/mach-emma2rh/irq.h
@@ -8,7 +8,7 @@
#ifndef __ASM_MACH_EMMA2RH_IRQ_H
#define __ASM_MACH_EMMA2RH_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
index 7c185bb06f1..42be9e9ced2 100644
--- a/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-generic/cpu-feature-overrides.h
@@ -8,6 +8,6 @@
#ifndef __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H
-/* Intentionally empty file ... */
+/* Intentionally empty file ... */
#endif /* __ASM_MACH_GENERIC_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-generic/floppy.h b/arch/mips/include/asm/mach-generic/floppy.h
index a38f4d43e5e..5b5cd689a2f 100644
--- a/arch/mips/include/asm/mach-generic/floppy.h
+++ b/arch/mips/include/asm/mach-generic/floppy.h
@@ -98,7 +98,7 @@ static inline void fd_disable_irq(void)
static inline int fd_request_irq(void)
{
return request_irq(FLOPPY_IRQ, floppy_interrupt,
- 0, "floppy", NULL);
+ 0, "floppy", NULL);
}
static inline void fd_free_irq(void)
@@ -106,7 +106,7 @@ static inline void fd_free_irq(void)
free_irq(FLOPPY_IRQ, NULL);
}
-#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
+#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL);
static inline unsigned long fd_getfdaddr1(void)
diff --git a/arch/mips/include/asm/mach-generic/ide.h b/arch/mips/include/asm/mach-generic/ide.h
index 9c93a5b36f2..affa66f5c2d 100644
--- a/arch/mips/include/asm/mach-generic/ide.h
+++ b/arch/mips/include/asm/mach-generic/ide.h
@@ -51,7 +51,7 @@ static inline void __ide_flush_dcache_range(unsigned long addr, unsigned long si
/*
* insw() and gang might be called with interrupts disabled, so we can't
* send IPIs for flushing due to the potencial of deadlocks, see the comment
- * above smp_call_function() in arch/mips/kernel/smp.c. We work around the
+ * above smp_call_function() in arch/mips/kernel/smp.c. We work around the
* problem by disabling preemption so we know we actually perform the flush
* on the processor that actually has the lines to be flushed which hopefully
* is even better for performance anyway.
@@ -123,7 +123,7 @@ static inline void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
__ide_flush_epilogue();
}
-/* ide_insw calls insw, not __ide_insw. Why? */
+/* ide_insw calls insw, not __ide_insw. Why? */
#undef insw
#undef insl
#undef outsw
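
The comment in the first hunk of this file explains why these string-I/O wrappers cannot broadcast cache flushes with IPIs (a deadlock risk against smp_call_function()) and instead pin the task to the current CPU and flush locally. A userspace-compilable sketch of that prologue/epilogue shape; the preemption and flush calls are stubs standing in for the kernel primitives:

#include <stdint.h>
#include <stdio.h>

/* Stubs standing in for preempt_disable()/preempt_enable() and the local
 * dcache flush; in the kernel these are real primitives, here just markers. */
static void preempt_disable(void) { puts("preemption off"); }
static void preempt_enable(void)  { puts("preemption on"); }
static void flush_local_dcache(const void *addr, unsigned long size)
{
	printf("flush %lu bytes at %p on this CPU only\n", size, addr);
}

/* Shape of __ide_insw(): stay on one CPU for the whole transfer so the
 * flush hits the CPU that actually touched the buffer. */
static void ide_insw_like(const volatile void *port, void *addr, unsigned int count)
{
	(void)port;
	preempt_disable();                     /* prologue */
	/* ... read 'count' 16-bit words from the port into 'addr' ... */
	flush_local_dcache(addr, count * 2);   /* epilogue: local flush, no IPIs */
	preempt_enable();
}

int main(void)
{
	uint16_t buf[8];
	ide_insw_like((void *)0, buf, 8);
	return 0;
}
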
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index e014264b2be..139cd200e79 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -9,12 +9,12 @@
#define __ASM_MACH_GENERIC_IRQ_H
#ifndef NR_IRQS
-#define NR_IRQS 128
+#define NR_IRQS 128
#endif
#ifdef CONFIG_I8259
#ifndef I8259A_IRQ_BASE
-#define I8259A_IRQ_BASE 0
+#define I8259A_IRQ_BASE 0
#endif
#endif
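
These fallbacks show the override pattern used by the mach-* IRQ headers: a platform irq.h defines NR_IRQS (or I8259A_IRQ_BASE) before reaching this generic header through include_next, and the #ifndef here only supplies a default. A compressed, standalone illustration of the mechanism with both headers inlined into one file:

#include <stdio.h>

/* What a platform irq.h does (e.g. mach-malta sets 256, mach-bcm63xx 128): */
#define NR_IRQS 256

/* What mach-generic/irq.h then does: only provide a default if nothing was set. */
#ifndef NR_IRQS
#define NR_IRQS 128
#endif

#ifndef I8259A_IRQ_BASE
#define I8259A_IRQ_BASE 0
#endif

int main(void)
{
	/* The platform value wins; the generic 128 is only a fallback. */
	printf("NR_IRQS = %d, I8259A_IRQ_BASE = %d\n", NR_IRQS, I8259A_IRQ_BASE);
	return 0;
}
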
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index d7a9efd3a5c..73d717a75cb 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -69,7 +69,7 @@
#define HIGHMEM_START (_AC(1, UL) << _AC(59, UL))
#endif
-#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
+#define TO_PHYS(x) ( ((x) & TO_PHYS_MASK))
#define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK))
#define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK))
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index 624d66c7f29..a323efb720d 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -51,8 +51,8 @@
* We might not get launched at the address the kernel is linked to,
* so we jump there.
*/
- PTR_LA t0, 0f
- jr t0
+ PTR_LA t0, 0f
+ jr t0
0:
.endm
diff --git a/arch/mips/include/asm/mach-ip27/mmzone.h b/arch/mips/include/asm/mach-ip27/mmzone.h
index 986a3b9b59a..ebc9377ff87 100644
--- a/arch/mips/include/asm/mach-ip27/mmzone.h
+++ b/arch/mips/include/asm/mach-ip27/mmzone.h
@@ -7,7 +7,7 @@
#define pa_to_nid(addr) NASID_TO_COMPACT_NODEID(NASID_GET(addr))
-#define LEVELS_PER_SLICE 128
+#define LEVELS_PER_SLICE 128
struct slice_data {
unsigned long irq_enable_mask[2];
diff --git a/arch/mips/include/asm/mach-ip27/topology.h b/arch/mips/include/asm/mach-ip27/topology.h
index b2cf641f206..defd135e7ac 100644
--- a/arch/mips/include/asm/mach-ip27/topology.h
+++ b/arch/mips/include/asm/mach-ip27/topology.h
@@ -34,7 +34,7 @@ extern int pcibus_to_node(struct pci_bus *);
extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
-#define node_distance(from, to) (__node_distances[(from)][(to)])
+#define node_distance(from, to) (__node_distances[(from)][(to)])
#include <asm-generic/topology.h>
diff --git a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
index 50d344ca60a..65e9c856390 100644
--- a/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-ip28/cpu-feature-overrides.h
@@ -28,7 +28,7 @@
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_dsp 0
#define cpu_has_dsp2 0
-#define cpu_icache_snoops_remote_store 1
+#define cpu_icache_snoops_remote_store 1
#define cpu_has_mipsmt 0
#define cpu_has_userlocal 0
diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h
index 05aabb27e5e..5edf05d9dad 100644
--- a/arch/mips/include/asm/mach-ip28/spaces.h
+++ b/arch/mips/include/asm/mach-ip28/spaces.h
@@ -6,7 +6,7 @@
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
- * 2004 pf
+ * 2004 pf
*/
#ifndef _ASM_MACH_IP28_SPACES_H
#define _ASM_MACH_IP28_SPACES_H
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index c8fb5aacf50..073f0c4760b 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -50,7 +50,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
return pa;
}
-/* This is almost certainly wrong but it's what dma-ip32.c used to use */
+/* This is almost certainly wrong but it's what dma-ip32.c used to use */
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
diff --git a/arch/mips/include/asm/mach-ip32/war.h b/arch/mips/include/asm/mach-ip32/war.h
index 7237a935a13..9807ecda5a8 100644
--- a/arch/mips/include/asm/mach-ip32/war.h
+++ b/arch/mips/include/asm/mach-ip32/war.h
@@ -17,7 +17,7 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 1
+#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
diff --git a/arch/mips/include/asm/mach-jazz/floppy.h b/arch/mips/include/asm/mach-jazz/floppy.h
index 88b5acb7514..62aa1e287fb 100644
--- a/arch/mips/include/asm/mach-jazz/floppy.h
+++ b/arch/mips/include/asm/mach-jazz/floppy.h
@@ -90,7 +90,7 @@ static inline void fd_disable_irq(void)
static inline int fd_request_irq(void)
{
return request_irq(FLOPPY_IRQ, floppy_interrupt,
- 0, "floppy", NULL);
+ 0, "floppy", NULL);
}
static inline void fd_free_irq(void)
diff --git a/arch/mips/include/asm/mach-jz4740/clock.h b/arch/mips/include/asm/mach-jz4740/clock.h
index 1b7408dd0e2..16659cd76d4 100644
--- a/arch/mips/include/asm/mach-jz4740/clock.h
+++ b/arch/mips/include/asm/mach-jz4740/clock.h
@@ -2,7 +2,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-jz4740/dma.h b/arch/mips/include/asm/mach-jz4740/dma.h
index a3be1218359..98b4e7c0dba 100644
--- a/arch/mips/include/asm/mach-jz4740/dma.h
+++ b/arch/mips/include/asm/mach-jz4740/dma.h
@@ -3,7 +3,7 @@
* JZ7420/JZ4740 DMA definitions
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -40,9 +40,9 @@ enum jz4740_dma_width {
};
enum jz4740_dma_transfer_size {
- JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
- JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
- JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
+ JZ4740_DMA_TRANSFER_SIZE_4BYTE = 0,
+ JZ4740_DMA_TRANSFER_SIZE_1BYTE = 1,
+ JZ4740_DMA_TRANSFER_SIZE_2BYTE = 2,
JZ4740_DMA_TRANSFER_SIZE_16BYTE = 3,
JZ4740_DMA_TRANSFER_SIZE_32BYTE = 4,
};
@@ -87,4 +87,4 @@ uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma);
void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma,
jz4740_dma_complete_callback_t cb);
-#endif /* __ASM_JZ4740_DMA_H__ */
+#endif /* __ASM_JZ4740_DMA_H__ */
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index 1a6482ea0bb..eaacba79cf1 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -198,7 +198,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask);
#define JZ_GPIO_FUNC_MEM_ADDR14 JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_ADDR15 JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_ADDR16 JZ_GPIO_FUNC1
-#define JZ_GPIO_FUNC_LCD_CLS JZ_GPIO_FUNC1
+#define JZ_GPIO_FUNC_LCD_CLS JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_LCD_SPL JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_DCS JZ_GPIO_FUNC1
#define JZ_GPIO_FUNC_MEM_RAS JZ_GPIO_FUNC1
diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h
index 5ad1a9c113c..df50736749c 100644
--- a/arch/mips/include/asm/mach-jz4740/irq.h
+++ b/arch/mips/include/asm/mach-jz4740/irq.h
@@ -3,7 +3,7 @@
* JZ4740 IRQ definitions
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 163e81db880..72cfebdb5a4 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -3,7 +3,7 @@
* JZ4740 platform device definitions
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-jz4740/timer.h b/arch/mips/include/asm/mach-jz4740/timer.h
index a7759fb1f73..8750a1d04e2 100644
--- a/arch/mips/include/asm/mach-jz4740/timer.h
+++ b/arch/mips/include/asm/mach-jz4740/timer.h
@@ -3,7 +3,7 @@
* JZ4740 platform timer support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index fccac359265..98d6a2f14aa 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -44,7 +44,7 @@
/* BOOT_SEL - find what boot media we have */
#define BS_FLASH 0x1
-#define BS_SPI 0x4
+#define BS_SPI 0x4
/* global register ranges */
extern __iomem void *ltq_ebu_membase;
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index 5e8a6e96575..f196cceb732 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -34,6 +34,7 @@ extern spinlock_t ebu_lock;
extern void ltq_disable_irq(struct irq_data *data);
extern void ltq_mask_and_ack_irq(struct irq_data *data);
extern void ltq_enable_irq(struct irq_data *data);
+extern int ltq_eiu_get_irq(int exin);
/* clock handling */
extern int clk_activate(struct clk *clk);
@@ -41,6 +42,7 @@ extern void clk_deactivate(struct clk *clk);
extern struct clk *clk_get_cpu(void);
extern struct clk *clk_get_fpi(void);
extern struct clk *clk_get_io(void);
+extern struct clk *clk_get_ppe(void);
/* find out what bootsource we have */
extern unsigned char ltq_boot_select(void);
diff --git a/arch/mips/include/asm/mach-lantiq/war.h b/arch/mips/include/asm/mach-lantiq/war.h
index b6c568c280e..358ca979c1b 100644
--- a/arch/mips/include/asm/mach-lantiq/war.h
+++ b/arch/mips/include/asm/mach-lantiq/war.h
@@ -7,17 +7,17 @@
#ifndef __ASM_MIPS_MACH_LANTIQ_WAR_H
#define __ASM_MIPS_MACH_LANTIQ_WAR_H
-#define R4600_V1_INDEX_ICACHEOP_WAR 0
-#define R4600_V1_HIT_CACHEOP_WAR 0
-#define R4600_V2_HIT_CACHEOP_WAR 0
-#define R5432_CP0_INTERRUPT_WAR 0
-#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
-#define MIPS4K_ICACHE_REFILL_WAR 0
-#define MIPS_CACHE_SYNC_WAR 0
-#define TX49XX_ICACHE_INDEX_INV_WAR 0
-#define ICACHE_REFILLS_WORKAROUND_WAR 0
-#define R10000_LLSC_WAR 0
-#define MIPS34K_MISSED_ITLB_WAR 0
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 872943a4b90..5f8693d5ab1 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -21,7 +21,7 @@
#define LTQ_DESC_SIZE 0x08 /* each descriptor is 64bit */
#define LTQ_DESC_NUM 0x40 /* 64 descriptors / channel */
-#define LTQ_DMA_OWN BIT(31) /* owner bit */
+#define LTQ_DMA_OWN BIT(31) /* owner bit */
#define LTQ_DMA_C BIT(30) /* complete bit */
#define LTQ_DMA_SOP BIT(29) /* start of packet */
#define LTQ_DMA_EOP BIT(28) /* end of packet */
@@ -38,7 +38,7 @@ struct ltq_dma_channel {
int nr; /* the channel number */
int irq; /* the mapped irq */
int desc; /* the current descriptor */
- struct ltq_dma_desc *desc_base; /* the descriptor base */
+ struct ltq_dma_desc *desc_base; /* the descriptor base */
int phys; /* physical addr */
};
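
LTQ_DMA_OWN, LTQ_DMA_C, LTQ_DMA_SOP and LTQ_DMA_EOP above are control bits in the first word of each 64-bit descriptor. A minimal standalone sketch of how a driver might test completion and hand a descriptor back to the engine; the struct layout and helper are illustrative, only the bit names come from this header:

#include <stdint.h>
#include <stdio.h>

#define BIT(x)      (1u << (x))
#define LTQ_DMA_OWN BIT(31)   /* descriptor is owned by the DMA engine */
#define LTQ_DMA_C   BIT(30)   /* transfer complete */
#define LTQ_DMA_SOP BIT(29)   /* start of packet */
#define LTQ_DMA_EOP BIT(28)   /* end of packet */

/* Illustrative 64-bit descriptor: control/length word plus buffer address. */
struct dma_desc {
	uint32_t ctl;
	uint32_t addr;
};

/* Returns 1 and re-arms the descriptor if the engine has finished with it. */
static int desc_complete(struct dma_desc *d, uint32_t buf_addr, uint32_t len)
{
	if (d->ctl & LTQ_DMA_OWN)
		return 0;                               /* still owned by hardware */
	if (!(d->ctl & LTQ_DMA_C))
		return 0;                               /* not marked complete yet */

	/* Hand it back: whole packet in one buffer, owned by the engine again. */
	d->addr = buf_addr;
	d->ctl  = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP | (len & 0xffff);
	return 1;
}

int main(void)
{
	struct dma_desc d = { .ctl  = LTQ_DMA_C | LTQ_DMA_SOP | LTQ_DMA_EOP | 64,
			      .addr = 0x1000 };
	printf("complete: %d, ctl now 0x%08x\n",
	       desc_complete(&d, 0x2000, 256), (unsigned int)d.ctl);
	return 0;
}
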
diff --git a/arch/mips/include/asm/mach-lasat/mach-gt64120.h b/arch/mips/include/asm/mach-lasat/mach-gt64120.h
index 1a9ad45cc13..c253d3fa516 100644
--- a/arch/mips/include/asm/mach-lasat/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-lasat/mach-gt64120.h
@@ -1,6 +1,6 @@
/*
* This is a direct copy of the ev96100.h file, with a global
- * search and replace. The numbers are the same.
+ * search and replace. The numbers are the same.
*
* The reason I'm duplicating this is so that the 64120/96100
* defines won't be confusing in the source code.
@@ -18,8 +18,8 @@
*
* (Guessing ...)
*/
-#define GT_PCI_MEM_BASE 0x12000000UL
-#define GT_PCI_MEM_SIZE 0x02000000UL
+#define GT_PCI_MEM_BASE 0x12000000UL
+#define GT_PCI_MEM_SIZE 0x02000000UL
#define GT_PCI_IO_BASE 0x10000000UL
#define GT_PCI_IO_SIZE 0x02000000UL
#define GT_ISA_IO_BASE PCI_IO_BASE
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 1a05d854e34..75fd8c0f986 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -8,9 +8,9 @@
* Copyright (C) 2009 Zhang Le <r0bertz@gentoo.org>
*
* reference: /proc/cpuinfo,
- * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
- * arch/mips/kernel/proc.c(show_cpuinfo),
- * loongson2f user manual.
+ * arch/mips/kernel/cpu-probe.c(cpu_probe_legacy),
+ * arch/mips/kernel/proc.c(show_cpuinfo),
+ * loongson2f user manual.
*/
#ifndef __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H
@@ -37,7 +37,7 @@
#define cpu_has_fpu 1
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_inclusive_pcaches 1
-#define cpu_has_llsc 1
+#define cpu_has_llsc 1
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
index 2a8e2bb5d53..a0ee0cb775a 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536.h
@@ -5,8 +5,8 @@
* Author : jlliu <liujl@lemote.com>
*/
-#ifndef _CS5536_H
-#define _CS5536_H
+#ifndef _CS5536_H
+#define _CS5536_H
#include <linux/types.h>
@@ -16,237 +16,237 @@ extern void _wrmsr(u32 msr, u32 hi, u32 lo);
/*
* MSR module base
*/
-#define CS5536_SB_MSR_BASE (0x00000000)
-#define CS5536_GLIU_MSR_BASE (0x10000000)
-#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
-#define CS5536_USB_MSR_BASE (0x40000000)
-#define CS5536_IDE_MSR_BASE (0x60000000)
-#define CS5536_DIVIL_MSR_BASE (0x80000000)
-#define CS5536_ACC_MSR_BASE (0xa0000000)
-#define CS5536_UNUSED_MSR_BASE (0xc0000000)
-#define CS5536_GLCP_MSR_BASE (0xe0000000)
+#define CS5536_SB_MSR_BASE (0x00000000)
+#define CS5536_GLIU_MSR_BASE (0x10000000)
+#define CS5536_ILLEGAL_MSR_BASE (0x20000000)
+#define CS5536_USB_MSR_BASE (0x40000000)
+#define CS5536_IDE_MSR_BASE (0x60000000)
+#define CS5536_DIVIL_MSR_BASE (0x80000000)
+#define CS5536_ACC_MSR_BASE (0xa0000000)
+#define CS5536_UNUSED_MSR_BASE (0xc0000000)
+#define CS5536_GLCP_MSR_BASE (0xe0000000)
-#define SB_MSR_REG(offset) (CS5536_SB_MSR_BASE | (offset))
-#define GLIU_MSR_REG(offset) (CS5536_GLIU_MSR_BASE | (offset))
-#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
-#define USB_MSR_REG(offset) (CS5536_USB_MSR_BASE | (offset))
-#define IDE_MSR_REG(offset) (CS5536_IDE_MSR_BASE | (offset))
-#define DIVIL_MSR_REG(offset) (CS5536_DIVIL_MSR_BASE | (offset))
-#define ACC_MSR_REG(offset) (CS5536_ACC_MSR_BASE | (offset))
-#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
-#define GLCP_MSR_REG(offset) (CS5536_GLCP_MSR_BASE | (offset))
+#define SB_MSR_REG(offset) (CS5536_SB_MSR_BASE | (offset))
+#define GLIU_MSR_REG(offset) (CS5536_GLIU_MSR_BASE | (offset))
+#define ILLEGAL_MSR_REG(offset) (CS5536_ILLEGAL_MSR_BASE | (offset))
+#define USB_MSR_REG(offset) (CS5536_USB_MSR_BASE | (offset))
+#define IDE_MSR_REG(offset) (CS5536_IDE_MSR_BASE | (offset))
+#define DIVIL_MSR_REG(offset) (CS5536_DIVIL_MSR_BASE | (offset))
+#define ACC_MSR_REG(offset) (CS5536_ACC_MSR_BASE | (offset))
+#define UNUSED_MSR_REG(offset) (CS5536_UNUSED_MSR_BASE | (offset))
+#define GLCP_MSR_REG(offset) (CS5536_GLCP_MSR_BASE | (offset))
/*
* BAR SPACE OF VIRTUAL PCI :
* range for pci probe use, length is the actual size.
*/
/* IO space for all DIVIL modules */
-#define CS5536_IRQ_RANGE 0xffffffe0 /* USERD FOR PCI PROBE */
-#define CS5536_IRQ_LENGTH 0x20 /* THE REGS ACTUAL LENGTH */
-#define CS5536_SMB_RANGE 0xfffffff8
-#define CS5536_SMB_LENGTH 0x08
-#define CS5536_GPIO_RANGE 0xffffff00
-#define CS5536_GPIO_LENGTH 0x100
-#define CS5536_MFGPT_RANGE 0xffffffc0
-#define CS5536_MFGPT_LENGTH 0x40
-#define CS5536_ACPI_RANGE 0xffffffe0
-#define CS5536_ACPI_LENGTH 0x20
-#define CS5536_PMS_RANGE 0xffffff80
-#define CS5536_PMS_LENGTH 0x80
+#define CS5536_IRQ_RANGE 0xffffffe0 /* USERD FOR PCI PROBE */
+#define CS5536_IRQ_LENGTH 0x20 /* THE REGS ACTUAL LENGTH */
+#define CS5536_SMB_RANGE 0xfffffff8
+#define CS5536_SMB_LENGTH 0x08
+#define CS5536_GPIO_RANGE 0xffffff00
+#define CS5536_GPIO_LENGTH 0x100
+#define CS5536_MFGPT_RANGE 0xffffffc0
+#define CS5536_MFGPT_LENGTH 0x40
+#define CS5536_ACPI_RANGE 0xffffffe0
+#define CS5536_ACPI_LENGTH 0x20
+#define CS5536_PMS_RANGE 0xffffff80
+#define CS5536_PMS_LENGTH 0x80
/* IO space for IDE */
-#define CS5536_IDE_RANGE 0xfffffff0
-#define CS5536_IDE_LENGTH 0x10
+#define CS5536_IDE_RANGE 0xfffffff0
+#define CS5536_IDE_LENGTH 0x10
/* IO space for ACC */
-#define CS5536_ACC_RANGE 0xffffff80
-#define CS5536_ACC_LENGTH 0x80
+#define CS5536_ACC_RANGE 0xffffff80
+#define CS5536_ACC_LENGTH 0x80
/* MEM space for ALL USB modules */
-#define CS5536_OHCI_RANGE 0xfffff000
-#define CS5536_OHCI_LENGTH 0x1000
-#define CS5536_EHCI_RANGE 0xfffff000
-#define CS5536_EHCI_LENGTH 0x1000
+#define CS5536_OHCI_RANGE 0xfffff000
+#define CS5536_OHCI_LENGTH 0x1000
+#define CS5536_EHCI_RANGE 0xfffff000
+#define CS5536_EHCI_LENGTH 0x1000
/*
* PCI MSR ACCESS
*/
-#define PCI_MSR_CTRL 0xF0
-#define PCI_MSR_ADDR 0xF4
-#define PCI_MSR_DATA_LO 0xF8
-#define PCI_MSR_DATA_HI 0xFC
+#define PCI_MSR_CTRL 0xF0
+#define PCI_MSR_ADDR 0xF4
+#define PCI_MSR_DATA_LO 0xF8
+#define PCI_MSR_DATA_HI 0xFC
/**************** MSR *****************************/
/*
* GLIU STANDARD MSR
*/
-#define GLIU_CAP 0x00
-#define GLIU_CONFIG 0x01
-#define GLIU_SMI 0x02
-#define GLIU_ERROR 0x03
-#define GLIU_PM 0x04
-#define GLIU_DIAG 0x05
+#define GLIU_CAP 0x00
+#define GLIU_CONFIG 0x01
+#define GLIU_SMI 0x02
+#define GLIU_ERROR 0x03
+#define GLIU_PM 0x04
+#define GLIU_DIAG 0x05
/*
* GLIU SPEC. MSR
*/
-#define GLIU_P2D_BM0 0x20
-#define GLIU_P2D_BM1 0x21
-#define GLIU_P2D_BM2 0x22
-#define GLIU_P2D_BMK0 0x23
-#define GLIU_P2D_BMK1 0x24
-#define GLIU_P2D_BM3 0x25
-#define GLIU_P2D_BM4 0x26
-#define GLIU_COH 0x80
-#define GLIU_PAE 0x81
-#define GLIU_ARB 0x82
-#define GLIU_ASMI 0x83
-#define GLIU_AERR 0x84
-#define GLIU_DEBUG 0x85
-#define GLIU_PHY_CAP 0x86
-#define GLIU_NOUT_RESP 0x87
-#define GLIU_NOUT_WDATA 0x88
-#define GLIU_WHOAMI 0x8B
-#define GLIU_SLV_DIS 0x8C
-#define GLIU_IOD_BM0 0xE0
-#define GLIU_IOD_BM1 0xE1
-#define GLIU_IOD_BM2 0xE2
-#define GLIU_IOD_BM3 0xE3
-#define GLIU_IOD_BM4 0xE4
-#define GLIU_IOD_BM5 0xE5
-#define GLIU_IOD_BM6 0xE6
-#define GLIU_IOD_BM7 0xE7
-#define GLIU_IOD_BM8 0xE8
-#define GLIU_IOD_BM9 0xE9
-#define GLIU_IOD_SC0 0xEA
-#define GLIU_IOD_SC1 0xEB
-#define GLIU_IOD_SC2 0xEC
-#define GLIU_IOD_SC3 0xED
-#define GLIU_IOD_SC4 0xEE
-#define GLIU_IOD_SC5 0xEF
-#define GLIU_IOD_SC6 0xF0
-#define GLIU_IOD_SC7 0xF1
+#define GLIU_P2D_BM0 0x20
+#define GLIU_P2D_BM1 0x21
+#define GLIU_P2D_BM2 0x22
+#define GLIU_P2D_BMK0 0x23
+#define GLIU_P2D_BMK1 0x24
+#define GLIU_P2D_BM3 0x25
+#define GLIU_P2D_BM4 0x26
+#define GLIU_COH 0x80
+#define GLIU_PAE 0x81
+#define GLIU_ARB 0x82
+#define GLIU_ASMI 0x83
+#define GLIU_AERR 0x84
+#define GLIU_DEBUG 0x85
+#define GLIU_PHY_CAP 0x86
+#define GLIU_NOUT_RESP 0x87
+#define GLIU_NOUT_WDATA 0x88
+#define GLIU_WHOAMI 0x8B
+#define GLIU_SLV_DIS 0x8C
+#define GLIU_IOD_BM0 0xE0
+#define GLIU_IOD_BM1 0xE1
+#define GLIU_IOD_BM2 0xE2
+#define GLIU_IOD_BM3 0xE3
+#define GLIU_IOD_BM4 0xE4
+#define GLIU_IOD_BM5 0xE5
+#define GLIU_IOD_BM6 0xE6
+#define GLIU_IOD_BM7 0xE7
+#define GLIU_IOD_BM8 0xE8
+#define GLIU_IOD_BM9 0xE9
+#define GLIU_IOD_SC0 0xEA
+#define GLIU_IOD_SC1 0xEB
+#define GLIU_IOD_SC2 0xEC
+#define GLIU_IOD_SC3 0xED
+#define GLIU_IOD_SC4 0xEE
+#define GLIU_IOD_SC5 0xEF
+#define GLIU_IOD_SC6 0xF0
+#define GLIU_IOD_SC7 0xF1
/*
* SB STANDARD
*/
-#define SB_CAP 0x00
-#define SB_CONFIG 0x01
-#define SB_SMI 0x02
-#define SB_ERROR 0x03
-#define SB_MAR_ERR_EN 0x00000001
-#define SB_TAR_ERR_EN 0x00000002
-#define SB_RSVD_BIT1 0x00000004
-#define SB_EXCEP_ERR_EN 0x00000008
-#define SB_SYSE_ERR_EN 0x00000010
-#define SB_PARE_ERR_EN 0x00000020
-#define SB_TAS_ERR_EN 0x00000040
-#define SB_MAR_ERR_FLAG 0x00010000
-#define SB_TAR_ERR_FLAG 0x00020000
-#define SB_RSVD_BIT2 0x00040000
-#define SB_EXCEP_ERR_FLAG 0x00080000
-#define SB_SYSE_ERR_FLAG 0x00100000
-#define SB_PARE_ERR_FLAG 0x00200000
-#define SB_TAS_ERR_FLAG 0x00400000
-#define SB_PM 0x04
-#define SB_DIAG 0x05
+#define SB_CAP 0x00
+#define SB_CONFIG 0x01
+#define SB_SMI 0x02
+#define SB_ERROR 0x03
+#define SB_MAR_ERR_EN 0x00000001
+#define SB_TAR_ERR_EN 0x00000002
+#define SB_RSVD_BIT1 0x00000004
+#define SB_EXCEP_ERR_EN 0x00000008
+#define SB_SYSE_ERR_EN 0x00000010
+#define SB_PARE_ERR_EN 0x00000020
+#define SB_TAS_ERR_EN 0x00000040
+#define SB_MAR_ERR_FLAG 0x00010000
+#define SB_TAR_ERR_FLAG 0x00020000
+#define SB_RSVD_BIT2 0x00040000
+#define SB_EXCEP_ERR_FLAG 0x00080000
+#define SB_SYSE_ERR_FLAG 0x00100000
+#define SB_PARE_ERR_FLAG 0x00200000
+#define SB_TAS_ERR_FLAG 0x00400000
+#define SB_PM 0x04
+#define SB_DIAG 0x05
/*
* SB SPEC.
*/
-#define SB_CTRL 0x10
-#define SB_R0 0x20
-#define SB_R1 0x21
-#define SB_R2 0x22
-#define SB_R3 0x23
-#define SB_R4 0x24
-#define SB_R5 0x25
-#define SB_R6 0x26
-#define SB_R7 0x27
-#define SB_R8 0x28
-#define SB_R9 0x29
-#define SB_R10 0x2A
-#define SB_R11 0x2B
-#define SB_R12 0x2C
-#define SB_R13 0x2D
-#define SB_R14 0x2E
-#define SB_R15 0x2F
+#define SB_CTRL 0x10
+#define SB_R0 0x20
+#define SB_R1 0x21
+#define SB_R2 0x22
+#define SB_R3 0x23
+#define SB_R4 0x24
+#define SB_R5 0x25
+#define SB_R6 0x26
+#define SB_R7 0x27
+#define SB_R8 0x28
+#define SB_R9 0x29
+#define SB_R10 0x2A
+#define SB_R11 0x2B
+#define SB_R12 0x2C
+#define SB_R13 0x2D
+#define SB_R14 0x2E
+#define SB_R15 0x2F
/*
* GLCP STANDARD
*/
-#define GLCP_CAP 0x00
-#define GLCP_CONFIG 0x01
-#define GLCP_SMI 0x02
-#define GLCP_ERROR 0x03
-#define GLCP_PM 0x04
-#define GLCP_DIAG 0x05
+#define GLCP_CAP 0x00
+#define GLCP_CONFIG 0x01
+#define GLCP_SMI 0x02
+#define GLCP_ERROR 0x03
+#define GLCP_PM 0x04
+#define GLCP_DIAG 0x05
/*
* GLCP SPEC.
*/
-#define GLCP_CLK_DIS_DELAY 0x08
-#define GLCP_PM_CLK_DISABLE 0x09
-#define GLCP_GLB_PM 0x0B
-#define GLCP_DBG_OUT 0x0C
-#define GLCP_RSVD1 0x0D
-#define GLCP_SOFT_COM 0x0E
-#define SOFT_BAR_SMB_FLAG 0x00000001
-#define SOFT_BAR_GPIO_FLAG 0x00000002
-#define SOFT_BAR_MFGPT_FLAG 0x00000004
-#define SOFT_BAR_IRQ_FLAG 0x00000008
-#define SOFT_BAR_PMS_FLAG 0x00000010
-#define SOFT_BAR_ACPI_FLAG 0x00000020
-#define SOFT_BAR_IDE_FLAG 0x00000400
-#define SOFT_BAR_ACC_FLAG 0x00000800
-#define SOFT_BAR_OHCI_FLAG 0x00001000
-#define SOFT_BAR_EHCI_FLAG 0x00002000
-#define GLCP_RSVD2 0x0F
-#define GLCP_CLK_OFF 0x10
-#define GLCP_CLK_ACTIVE 0x11
-#define GLCP_CLK_DISABLE 0x12
-#define GLCP_CLK4ACK 0x13
-#define GLCP_SYS_RST 0x14
-#define GLCP_RSVD3 0x15
-#define GLCP_DBG_CLK_CTRL 0x16
-#define GLCP_CHIP_REV_ID 0x17
+#define GLCP_CLK_DIS_DELAY 0x08
+#define GLCP_PM_CLK_DISABLE 0x09
+#define GLCP_GLB_PM 0x0B
+#define GLCP_DBG_OUT 0x0C
+#define GLCP_RSVD1 0x0D
+#define GLCP_SOFT_COM 0x0E
+#define SOFT_BAR_SMB_FLAG 0x00000001
+#define SOFT_BAR_GPIO_FLAG 0x00000002
+#define SOFT_BAR_MFGPT_FLAG 0x00000004
+#define SOFT_BAR_IRQ_FLAG 0x00000008
+#define SOFT_BAR_PMS_FLAG 0x00000010
+#define SOFT_BAR_ACPI_FLAG 0x00000020
+#define SOFT_BAR_IDE_FLAG 0x00000400
+#define SOFT_BAR_ACC_FLAG 0x00000800
+#define SOFT_BAR_OHCI_FLAG 0x00001000
+#define SOFT_BAR_EHCI_FLAG 0x00002000
+#define GLCP_RSVD2 0x0F
+#define GLCP_CLK_OFF 0x10
+#define GLCP_CLK_ACTIVE 0x11
+#define GLCP_CLK_DISABLE 0x12
+#define GLCP_CLK4ACK 0x13
+#define GLCP_SYS_RST 0x14
+#define GLCP_RSVD3 0x15
+#define GLCP_DBG_CLK_CTRL 0x16
+#define GLCP_CHIP_REV_ID 0x17
/* PIC */
-#define PIC_YSEL_LOW 0x20
-#define PIC_YSEL_LOW_USB_SHIFT 8
-#define PIC_YSEL_LOW_ACC_SHIFT 16
-#define PIC_YSEL_LOW_FLASH_SHIFT 24
-#define PIC_YSEL_HIGH 0x21
-#define PIC_ZSEL_LOW 0x22
-#define PIC_ZSEL_HIGH 0x23
-#define PIC_IRQM_PRIM 0x24
-#define PIC_IRQM_LPC 0x25
-#define PIC_XIRR_STS_LOW 0x26
-#define PIC_XIRR_STS_HIGH 0x27
-#define PCI_SHDW 0x34
+#define PIC_YSEL_LOW 0x20
+#define PIC_YSEL_LOW_USB_SHIFT 8
+#define PIC_YSEL_LOW_ACC_SHIFT 16
+#define PIC_YSEL_LOW_FLASH_SHIFT 24
+#define PIC_YSEL_HIGH 0x21
+#define PIC_ZSEL_LOW 0x22
+#define PIC_ZSEL_HIGH 0x23
+#define PIC_IRQM_PRIM 0x24
+#define PIC_IRQM_LPC 0x25
+#define PIC_XIRR_STS_LOW 0x26
+#define PIC_XIRR_STS_HIGH 0x27
+#define PCI_SHDW 0x34
/*
* DIVIL STANDARD
*/
-#define DIVIL_CAP 0x00
-#define DIVIL_CONFIG 0x01
-#define DIVIL_SMI 0x02
-#define DIVIL_ERROR 0x03
-#define DIVIL_PM 0x04
-#define DIVIL_DIAG 0x05
+#define DIVIL_CAP 0x00
+#define DIVIL_CONFIG 0x01
+#define DIVIL_SMI 0x02
+#define DIVIL_ERROR 0x03
+#define DIVIL_PM 0x04
+#define DIVIL_DIAG 0x05
/*
* DIVIL SPEC.
*/
-#define DIVIL_LBAR_IRQ 0x08
-#define DIVIL_LBAR_KEL 0x09
-#define DIVIL_LBAR_SMB 0x0B
-#define DIVIL_LBAR_GPIO 0x0C
-#define DIVIL_LBAR_MFGPT 0x0D
-#define DIVIL_LBAR_ACPI 0x0E
-#define DIVIL_LBAR_PMS 0x0F
-#define DIVIL_LEG_IO 0x14
-#define DIVIL_BALL_OPTS 0x15
-#define DIVIL_SOFT_IRQ 0x16
-#define DIVIL_SOFT_RESET 0x17
+#define DIVIL_LBAR_IRQ 0x08
+#define DIVIL_LBAR_KEL 0x09
+#define DIVIL_LBAR_SMB 0x0B
+#define DIVIL_LBAR_GPIO 0x0C
+#define DIVIL_LBAR_MFGPT 0x0D
+#define DIVIL_LBAR_ACPI 0x0E
+#define DIVIL_LBAR_PMS 0x0F
+#define DIVIL_LEG_IO 0x14
+#define DIVIL_BALL_OPTS 0x15
+#define DIVIL_SOFT_IRQ 0x16
+#define DIVIL_SOFT_RESET 0x17
/* MFGPT */
#define MFGPT_IRQ 0x28
@@ -254,52 +254,52 @@ extern void _wrmsr(u32 msr, u32 hi, u32 lo);
/*
* IDE STANDARD
*/
-#define IDE_CAP 0x00
-#define IDE_CONFIG 0x01
-#define IDE_SMI 0x02
-#define IDE_ERROR 0x03
-#define IDE_PM 0x04
-#define IDE_DIAG 0x05
+#define IDE_CAP 0x00
+#define IDE_CONFIG 0x01
+#define IDE_SMI 0x02
+#define IDE_ERROR 0x03
+#define IDE_PM 0x04
+#define IDE_DIAG 0x05
/*
* IDE SPEC.
*/
-#define IDE_IO_BAR 0x08
-#define IDE_CFG 0x10
-#define IDE_DTC 0x12
-#define IDE_CAST 0x13
-#define IDE_ETC 0x14
-#define IDE_INTERNAL_PM 0x15
+#define IDE_IO_BAR 0x08
+#define IDE_CFG 0x10
+#define IDE_DTC 0x12
+#define IDE_CAST 0x13
+#define IDE_ETC 0x14
+#define IDE_INTERNAL_PM 0x15
/*
* ACC STANDARD
*/
-#define ACC_CAP 0x00
-#define ACC_CONFIG 0x01
-#define ACC_SMI 0x02
-#define ACC_ERROR 0x03
-#define ACC_PM 0x04
-#define ACC_DIAG 0x05
+#define ACC_CAP 0x00
+#define ACC_CONFIG 0x01
+#define ACC_SMI 0x02
+#define ACC_ERROR 0x03
+#define ACC_PM 0x04
+#define ACC_DIAG 0x05
/*
* USB STANDARD
*/
-#define USB_CAP 0x00
-#define USB_CONFIG 0x01
-#define USB_SMI 0x02
-#define USB_ERROR 0x03
-#define USB_PM 0x04
-#define USB_DIAG 0x05
+#define USB_CAP 0x00
+#define USB_CONFIG 0x01
+#define USB_SMI 0x02
+#define USB_ERROR 0x03
+#define USB_PM 0x04
+#define USB_DIAG 0x05
/*
* USB SPEC.
*/
-#define USB_OHCI 0x08
-#define USB_EHCI 0x09
+#define USB_OHCI 0x08
+#define USB_EHCI 0x09
/****************** NATIVE ***************************/
/* GPIO : I/O SPACE; REG : 32BITS */
-#define GPIOL_OUT_VAL 0x00
-#define GPIOL_OUT_EN 0x04
+#define GPIOL_OUT_VAL 0x00
+#define GPIOL_OUT_EN 0x04
#endif /* _CS5536_H */
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
index 4b493d6772c..021d0172dad 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_mfgpt.h
@@ -25,7 +25,7 @@ static inline void __maybe_unused enable_mfgpt0_counter(void)
#endif
#define MFGPT_TICK_RATE 14318000
-#define COMPARE ((MFGPT_TICK_RATE + HZ/2) / HZ)
+#define COMPARE ((MFGPT_TICK_RATE + HZ/2) / HZ)
#define MFGPT_BASE mfgpt_base
#define MFGPT0_CMP2 (MFGPT_BASE + 2)
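
COMPARE above is the MFGPT reload value: the 14.318 MHz tick rate divided by HZ, with HZ/2 added first so the integer division rounds to nearest instead of truncating. A quick standalone check of that arithmetic for a few HZ values (the function-like macro is only for the demo; the header's COMPARE uses the kernel's HZ directly):

#include <stdio.h>

#define MFGPT_TICK_RATE 14318000
#define COMPARE(hz) ((MFGPT_TICK_RATE + (hz) / 2) / (hz))

int main(void)
{
	/* Round-to-nearest division: at HZ=48, 14318000/48 = 298291.66..., and the
	 * +HZ/2 term makes the integer result 298292 instead of 298291. */
	int hz[] = { 100, 250, 1000, 48 };
	for (int i = 0; i < 4; i++)
		printf("HZ=%4d -> COMPARE=%d\n", hz[i], COMPARE(hz[i]));
	return 0;
}
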
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
index 0dca9c89ee7..8a7ecb4d5c6 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_pci.h
@@ -8,8 +8,8 @@
* Author : jlliu, liujl@lemote.com
*/
-#ifndef _CS5536_PCI_H
-#define _CS5536_PCI_H
+#ifndef _CS5536_PCI_H
+#define _CS5536_PCI_H
#include <linux/types.h>
#include <linux/pci_regs.h>
@@ -17,20 +17,20 @@
extern void cs5536_pci_conf_write4(int function, int reg, u32 value);
extern u32 cs5536_pci_conf_read4(int function, int reg);
-#define CS5536_ACC_INTR 9
-#define CS5536_IDE_INTR 14
-#define CS5536_USB_INTR 11
-#define CS5536_MFGPT_INTR 5
-#define CS5536_UART1_INTR 4
-#define CS5536_UART2_INTR 3
+#define CS5536_ACC_INTR 9
+#define CS5536_IDE_INTR 14
+#define CS5536_USB_INTR 11
+#define CS5536_MFGPT_INTR 5
+#define CS5536_UART1_INTR 4
+#define CS5536_UART2_INTR 3
/************** PCI BUS DEVICE FUNCTION ***************/
/*
* PCI bus device function
*/
-#define PCI_BUS_CS5536 0
-#define PCI_IDSEL_CS5536 14
+#define PCI_BUS_CS5536 0
+#define PCI_IDSEL_CS5536 14
/********** STANDARD PCI-2.2 EXPANSION ****************/
@@ -45,21 +45,21 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
(((mod_dev_id) << 16) | (sys_vendor_id))
/* VENDOR ID */
-#define CS5536_VENDOR_ID 0x1022
+#define CS5536_VENDOR_ID 0x1022
/* DEVICE ID */
-#define CS5536_ISA_DEVICE_ID 0x2090
-#define CS5536_IDE_DEVICE_ID 0x209a
-#define CS5536_ACC_DEVICE_ID 0x2093
-#define CS5536_OHCI_DEVICE_ID 0x2094
-#define CS5536_EHCI_DEVICE_ID 0x2095
+#define CS5536_ISA_DEVICE_ID 0x2090
+#define CS5536_IDE_DEVICE_ID 0x209a
+#define CS5536_ACC_DEVICE_ID 0x2093
+#define CS5536_OHCI_DEVICE_ID 0x2094
+#define CS5536_EHCI_DEVICE_ID 0x2095
/* CLASS CODE : CLASS SUB-CLASS INTERFACE */
-#define CS5536_ISA_CLASS_CODE 0x060100
+#define CS5536_ISA_CLASS_CODE 0x060100
#define CS5536_IDE_CLASS_CODE 0x010180
-#define CS5536_ACC_CLASS_CODE 0x040100
-#define CS5536_OHCI_CLASS_CODE 0x0C0310
-#define CS5536_EHCI_CLASS_CODE 0x0C0320
+#define CS5536_ACC_CLASS_CODE 0x040100
+#define CS5536_OHCI_CLASS_CODE 0x0C0310
+#define CS5536_EHCI_CLASS_CODE 0x0C0320
/* BHLC : BIST HEADER-TYPE LATENCY-TIMER CACHE-LINE-SIZE */
@@ -67,40 +67,40 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
((PCI_NONE_BIST << 24) | ((header_type) << 16) \
| ((latency_timer) << 8) | PCI_NORMAL_CACHE_LINE_SIZE);
-#define PCI_NONE_BIST 0x00 /* RO not implemented yet. */
-#define PCI_BRIDGE_HEADER_TYPE 0x80 /* RO */
-#define PCI_NORMAL_HEADER_TYPE 0x00
-#define PCI_NORMAL_LATENCY_TIMER 0x00
-#define PCI_NORMAL_CACHE_LINE_SIZE 0x08 /* RW */
+#define PCI_NONE_BIST 0x00 /* RO not implemented yet. */
+#define PCI_BRIDGE_HEADER_TYPE 0x80 /* RO */
+#define PCI_NORMAL_HEADER_TYPE 0x00
+#define PCI_NORMAL_LATENCY_TIMER 0x00
+#define PCI_NORMAL_CACHE_LINE_SIZE 0x08 /* RW */
/* BAR */
-#define PCI_BAR0_REG 0x10
-#define PCI_BAR1_REG 0x14
-#define PCI_BAR2_REG 0x18
-#define PCI_BAR3_REG 0x1c
-#define PCI_BAR4_REG 0x20
-#define PCI_BAR5_REG 0x24
-#define PCI_BAR_COUNT 6
-#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
+#define PCI_BAR0_REG 0x10
+#define PCI_BAR1_REG 0x14
+#define PCI_BAR2_REG 0x18
+#define PCI_BAR3_REG 0x1c
+#define PCI_BAR4_REG 0x20
+#define PCI_BAR5_REG 0x24
+#define PCI_BAR_COUNT 6
+#define PCI_BAR_RANGE_MASK 0xFFFFFFFF
/* CARDBUS CIS POINTER */
-#define PCI_CARDBUS_CIS_POINTER 0x00000000
+#define PCI_CARDBUS_CIS_POINTER 0x00000000
-/* SUBSYSTEM VENDOR ID */
-#define CS5536_SUB_VENDOR_ID CS5536_VENDOR_ID
+/* SUBSYSTEM VENDOR ID */
+#define CS5536_SUB_VENDOR_ID CS5536_VENDOR_ID
/* SUBSYSTEM ID */
-#define CS5536_ISA_SUB_ID CS5536_ISA_DEVICE_ID
-#define CS5536_IDE_SUB_ID CS5536_IDE_DEVICE_ID
-#define CS5536_ACC_SUB_ID CS5536_ACC_DEVICE_ID
-#define CS5536_OHCI_SUB_ID CS5536_OHCI_DEVICE_ID
-#define CS5536_EHCI_SUB_ID CS5536_EHCI_DEVICE_ID
+#define CS5536_ISA_SUB_ID CS5536_ISA_DEVICE_ID
+#define CS5536_IDE_SUB_ID CS5536_IDE_DEVICE_ID
+#define CS5536_ACC_SUB_ID CS5536_ACC_DEVICE_ID
+#define CS5536_OHCI_SUB_ID CS5536_OHCI_DEVICE_ID
+#define CS5536_EHCI_SUB_ID CS5536_EHCI_DEVICE_ID
/* EXPANSION ROM BAR */
-#define PCI_EXPANSION_ROM_BAR 0x00000000
+#define PCI_EXPANSION_ROM_BAR 0x00000000
/* CAPABILITIES POINTER */
-#define PCI_CAPLIST_POINTER 0x00000000
+#define PCI_CAPLIST_POINTER 0x00000000
#define PCI_CAPLIST_USB_POINTER 0x40
/* INTERRUPT */
@@ -108,46 +108,46 @@ extern u32 cs5536_pci_conf_read4(int function, int reg);
((PCI_MAX_LATENCY << 24) | (PCI_MIN_GRANT << 16) | \
((pin) << 8) | (mod_intr))
-#define PCI_MAX_LATENCY 0x40
-#define PCI_MIN_GRANT 0x00
-#define PCI_DEFAULT_PIN 0x01
+#define PCI_MAX_LATENCY 0x40
+#define PCI_MIN_GRANT 0x00
+#define PCI_DEFAULT_PIN 0x01
/*********** EXPANSION PCI REG ************************/
/*
* ISA EXPANSION
*/
-#define PCI_UART1_INT_REG 0x50
+#define PCI_UART1_INT_REG 0x50
#define PCI_UART2_INT_REG 0x54
-#define PCI_ISA_FIXUP_REG 0x58
+#define PCI_ISA_FIXUP_REG 0x58
/*
* IDE EXPANSION
*/
-#define PCI_IDE_CFG_REG 0x40
-#define CS5536_IDE_FLASH_SIGNATURE 0xDEADBEEF
-#define PCI_IDE_DTC_REG 0x48
-#define PCI_IDE_CAST_REG 0x4C
-#define PCI_IDE_ETC_REG 0x50
-#define PCI_IDE_PM_REG 0x54
-#define PCI_IDE_INT_REG 0x60
+#define PCI_IDE_CFG_REG 0x40
+#define CS5536_IDE_FLASH_SIGNATURE 0xDEADBEEF
+#define PCI_IDE_DTC_REG 0x48
+#define PCI_IDE_CAST_REG 0x4C
+#define PCI_IDE_ETC_REG 0x50
+#define PCI_IDE_PM_REG 0x54
+#define PCI_IDE_INT_REG 0x60
/*
* ACC EXPANSION
*/
-#define PCI_ACC_INT_REG 0x50
+#define PCI_ACC_INT_REG 0x50
/*
* OHCI EXPANSION : INTTERUPT IS IMPLEMENTED BY THE OHCI
*/
-#define PCI_OHCI_PM_REG 0x40
-#define PCI_OHCI_INT_REG 0x50
+#define PCI_OHCI_PM_REG 0x40
+#define PCI_OHCI_INT_REG 0x50
/*
* EHCI EXPANSION
*/
-#define PCI_EHCI_LEGSMIEN_REG 0x50
-#define PCI_EHCI_LEGSMISTS_REG 0x54
-#define PCI_EHCI_FLADJ_REG 0x60
+#define PCI_EHCI_LEGSMIEN_REG 0x50
+#define PCI_EHCI_LEGSMISTS_REG 0x54
+#define PCI_EHCI_FLADJ_REG 0x60
#endif /* _CS5536_PCI_H_ */
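
The CS5536_BHLC_REG() helper visible earlier in this file packs four PCI config-space bytes (BIST, header type, latency timer, cache line size) into one 32-bit word. A standalone sketch of that packing using the constants from this header; the wrapper function name is illustrative:

#include <stdint.h>
#include <stdio.h>

#define PCI_NONE_BIST              0x00
#define PCI_NORMAL_HEADER_TYPE     0x00
#define PCI_BRIDGE_HEADER_TYPE     0x80
#define PCI_NORMAL_LATENCY_TIMER   0x00
#define PCI_NORMAL_CACHE_LINE_SIZE 0x08

/* Same packing as the header's CS5536_BHLC_REG(): BIST | header | latency | cls. */
static uint32_t bhlc_reg(uint32_t header_type, uint32_t latency_timer)
{
	return (PCI_NONE_BIST << 24) | (header_type << 16) |
	       (latency_timer << 8) | PCI_NORMAL_CACHE_LINE_SIZE;
}

int main(void)
{
	/* A normal (non-bridge) function: 0x00 00 00 08 */
	printf("BHLC = 0x%08x\n",
	       (unsigned int)bhlc_reg(PCI_NORMAL_HEADER_TYPE, PCI_NORMAL_LATENCY_TIMER));
	return 0;
}
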
diff --git a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
index 21c4ecedebe..1f17c1815ee 100644
--- a/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
+++ b/arch/mips/include/asm/mach-loongson/cs5536/cs5536_vsm.h
@@ -5,8 +5,8 @@
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*/
-#ifndef _CS5536_VSM_H
-#define _CS5536_VSM_H
+#ifndef _CS5536_VSM_H
+#define _CS5536_VSM_H
#include <linux/types.h>
diff --git a/arch/mips/include/asm/mach-loongson/gpio.h b/arch/mips/include/asm/mach-loongson/gpio.h
index e30e73d443d..211a7b7138f 100644
--- a/arch/mips/include/asm/mach-loongson/gpio.h
+++ b/arch/mips/include/asm/mach-loongson/gpio.h
@@ -10,8 +10,8 @@
* (at your option) any later version.
*/
-#ifndef __STLS2F_GPIO_H
-#define __STLS2F_GPIO_H
+#ifndef __STLS2F_GPIO_H
+#define __STLS2F_GPIO_H
#include <asm-generic/gpio.h>
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index 5222a007bc2..b286534fef0 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -52,7 +52,7 @@ extern void mach_irq_dispatch(unsigned int pending);
extern int mach_i8259_irq(void);
/* We need this in some places... */
-#define delay() ({ \
+#define delay() ({ \
int x; \
for (x = 0; x < 100000; x++) \
__asm__ __volatile__(""); \
@@ -82,13 +82,13 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_BOOT_BASE 0x1fc00000
#define LOONGSON_BOOT_SIZE 0x00100000 /* 1M */
-#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
-#define LOONGSON_REG_BASE 0x1fe00000
-#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
+#define LOONGSON_BOOT_TOP (LOONGSON_BOOT_BASE+LOONGSON_BOOT_SIZE-1)
+#define LOONGSON_REG_BASE 0x1fe00000
+#define LOONGSON_REG_SIZE 0x00100000 /* 256Bytes + 256Bytes + ??? */
#define LOONGSON_REG_TOP (LOONGSON_REG_BASE+LOONGSON_REG_SIZE-1)
-#define LOONGSON_LIO1_BASE 0x1ff00000
-#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
+#define LOONGSON_LIO1_BASE 0x1ff00000
+#define LOONGSON_LIO1_SIZE 0x00100000 /* 1M */
#define LOONGSON_LIO1_TOP (LOONGSON_LIO1_BASE+LOONGSON_LIO1_SIZE-1)
#define LOONGSON_PCILO0_BASE 0x10000000
@@ -115,13 +115,13 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCI_REG(x) LOONGSON_REG(LOONGSON_PCICONFIGBASE + (x))
#define LOONGSON_PCIDID LOONGSON_PCI_REG(0x00)
#define LOONGSON_PCICMD LOONGSON_PCI_REG(0x04)
-#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
+#define LOONGSON_PCICLASS LOONGSON_PCI_REG(0x08)
#define LOONGSON_PCILTIMER LOONGSON_PCI_REG(0x0c)
-#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
-#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
-#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
-#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
-#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
+#define LOONGSON_PCIBASE0 LOONGSON_PCI_REG(0x10)
+#define LOONGSON_PCIBASE1 LOONGSON_PCI_REG(0x14)
+#define LOONGSON_PCIBASE2 LOONGSON_PCI_REG(0x18)
+#define LOONGSON_PCIBASE3 LOONGSON_PCI_REG(0x1c)
+#define LOONGSON_PCIBASE4 LOONGSON_PCI_REG(0x20)
#define LOONGSON_PCIEXPRBASE LOONGSON_PCI_REG(0x30)
#define LOONGSON_PCIINT LOONGSON_PCI_REG(0x3c)
@@ -132,7 +132,7 @@ static inline void do_perfcnt_IRQ(void)
#define LOONGSON_PCICMD_MABORT_CLR 0x20000000
#define LOONGSON_PCICMD_MTABORT_CLR 0x10000000
#define LOONGSON_PCICMD_TABORT_CLR 0x08000000
-#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
+#define LOONGSON_PCICMD_MPERR_CLR 0x01000000
#define LOONGSON_PCICMD_PERRRESPEN 0x00000040
#define LOONGSON_PCICMD_ASTEPEN 0x00000080
#define LOONGSON_PCICMD_SERREN 0x00000100
@@ -142,7 +142,7 @@ static inline void do_perfcnt_IRQ(void)
/* Loongson h/w Configuration */
#define LOONGSON_GENCFG_OFFSET 0x4
-#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
+#define LOONGSON_GENCFG LOONGSON_REG(LOONGSON_REGBASE + LOONGSON_GENCFG_OFFSET)
#define LOONGSON_GENCFG_DEBUGMODE 0x00000001
#define LOONGSON_GENCFG_SNOOPEN 0x00000002
@@ -173,25 +173,25 @@ static inline void do_perfcnt_IRQ(void)
/* GPIO Regs - r/w */
-#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
+#define LOONGSON_GPIODATA LOONGSON_REG(LOONGSON_REGBASE + 0x1c)
#define LOONGSON_GPIOIE LOONGSON_REG(LOONGSON_REGBASE + 0x20)
/* ICU Configuration Regs - r/w */
#define LOONGSON_INTEDGE LOONGSON_REG(LOONGSON_REGBASE + 0x24)
-#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
+#define LOONGSON_INTSTEER LOONGSON_REG(LOONGSON_REGBASE + 0x28)
#define LOONGSON_INTPOL LOONGSON_REG(LOONGSON_REGBASE + 0x2c)
/* ICU Enable Regs - IntEn & IntISR are r/o. */
-#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
-#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
+#define LOONGSON_INTENSET LOONGSON_REG(LOONGSON_REGBASE + 0x30)
+#define LOONGSON_INTENCLR LOONGSON_REG(LOONGSON_REGBASE + 0x34)
#define LOONGSON_INTEN LOONGSON_REG(LOONGSON_REGBASE + 0x38)
#define LOONGSON_INTISR LOONGSON_REG(LOONGSON_REGBASE + 0x3c)
/* ICU */
#define LOONGSON_ICU_MBOXES 0x0000000f
-#define LOONGSON_ICU_MBOXES_SHIFT 0
+#define LOONGSON_ICU_MBOXES_SHIFT 0
#define LOONGSON_ICU_DMARDY 0x00000010
#define LOONGSON_ICU_DMAEMPTY 0x00000020
#define LOONGSON_ICU_COPYRDY 0x00000040
@@ -212,10 +212,10 @@ static inline void do_perfcnt_IRQ(void)
/* PCI prefetch window base & mask */
-#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
-#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
-#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
-#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
+#define LOONGSON_MEM_WIN_BASE_L LOONGSON_REG(LOONGSON_REGBASE + 0x40)
+#define LOONGSON_MEM_WIN_BASE_H LOONGSON_REG(LOONGSON_REGBASE + 0x44)
+#define LOONGSON_MEM_WIN_MASK_L LOONGSON_REG(LOONGSON_REGBASE + 0x48)
+#define LOONGSON_MEM_WIN_MASK_H LOONGSON_REG(LOONGSON_REGBASE + 0x4c)
/* PCI_Hit*_Sel_* */
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 43213388c17..3810d5ca84a 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson/mem.h b/arch/mips/include/asm/mach-loongson/mem.h
index 3b23ee8647d..f4a36d7dbfa 100644
--- a/arch/mips/include/asm/mach-loongson/mem.h
+++ b/arch/mips/include/asm/mach-loongson/mem.h
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin <wuzhangjin@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/irq.h b/arch/mips/include/asm/mach-loongson1/irq.h
index da96ed42f73..96bfb1c1c73 100644
--- a/arch/mips/include/asm/mach-loongson1/irq.h
+++ b/arch/mips/include/asm/mach-loongson1/irq.h
@@ -3,8 +3,8 @@
*
* IRQ mappings for Loongson 1
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/loongson1.h b/arch/mips/include/asm/mach-loongson1/loongson1.h
index 4e18e88cebb..5c437c2ba6b 100644
--- a/arch/mips/include/asm/mach-loongson1/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson1/loongson1.h
@@ -3,8 +3,8 @@
*
* Register mappings for Loongson 1
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h
index 718a1228a4f..30c13e508ff 100644
--- a/arch/mips/include/asm/mach-loongson1/platform.h
+++ b/arch/mips/include/asm/mach-loongson1/platform.h
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/prom.h b/arch/mips/include/asm/mach-loongson1/prom.h
index b871dc41b8d..34859a4d4ac 100644
--- a/arch/mips/include/asm/mach-loongson1/prom.h
+++ b/arch/mips/include/asm/mach-loongson1/prom.h
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h
index a81fa3d0dc9..fb6a3ff9318 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h
@@ -3,8 +3,8 @@
*
* Loongson 1 Clock Register Definitions.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-loongson1/regs-wdt.h b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
index f897de68c52..6574568c208 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-wdt.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
@@ -3,8 +3,8 @@
*
* Loongson 1 watchdog register definitions.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
index 37e3583a9fd..de3b66a3723 100644
--- a/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-malta/cpu-feature-overrides.h
@@ -23,8 +23,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
@@ -50,8 +50,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h
index 9b9da26683c..47cfe64efbb 100644
--- a/arch/mips/include/asm/mach-malta/irq.h
+++ b/arch/mips/include/asm/mach-malta/irq.h
@@ -2,7 +2,7 @@
#define __ASM_MACH_MIPS_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-malta/mach-gt64120.h b/arch/mips/include/asm/mach-malta/mach-gt64120.h
index 0f863148f3b..62a4b2889fa 100644
--- a/arch/mips/include/asm/mach-malta/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-malta/mach-gt64120.h
@@ -1,6 +1,6 @@
/*
* This is a direct copy of the ev96100.h file, with a global
- * search and replace. The numbers are the same.
+ * search and replace. The numbers are the same.
*
* The reason I'm duplicating this is so that the 64120/96100
* defines won't be confusing in the source code.
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h b/arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h
index 016fa9446ba..016fa9446ba 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/cpu-feature-overrides.h
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h
index ebdbab973e4..ebdbab973e4 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/gpio.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/gpio.h
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h
index c84bcf9570b..ac863e2deb6 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_cic_int.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_cic_int.h
@@ -43,14 +43,14 @@
* IRQs directly forwarded to the CPU
*/
#define MSP_MIPS_INTBASE 0
-#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
-#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
-#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
-#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
-#define MSP_INT_USB 4 /* IRQ for USB, C_IRQ2 */
-#define MSP_INT_SAR 5 /* IRQ for ADSL2+ SAR, C_IRQ3 */
-#define MSP_INT_CIC 6 /* IRQ for CIC block, C_IRQ4 */
-#define MSP_INT_SEC 7 /* IRQ for Sec engine, C_IRQ5 */
+#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
+#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
+#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
+#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
+#define MSP_INT_USB 4 /* IRQ for USB, C_IRQ2 */
+#define MSP_INT_SAR 5 /* IRQ for ADSL2+ SAR, C_IRQ3 */
+#define MSP_INT_CIC 6 /* IRQ for CIC block, C_IRQ4 */
+#define MSP_INT_SEC 7 /* IRQ for Sec engine, C_IRQ5 */
/*
* IRQs cascaded on CPU interrupt 4 (CAUSE bit 12, C_IRQ4)
@@ -59,93 +59,93 @@
*/
#define MSP_CIC_INTBASE (MSP_MIPS_INTBASE + 8)
#define MSP_INT_EXT0 (MSP_CIC_INTBASE + 0)
- /* External interrupt 0 */
+ /* External interrupt 0 */
#define MSP_INT_EXT1 (MSP_CIC_INTBASE + 1)
- /* External interrupt 1 */
+ /* External interrupt 1 */
#define MSP_INT_EXT2 (MSP_CIC_INTBASE + 2)
- /* External interrupt 2 */
+ /* External interrupt 2 */
#define MSP_INT_EXT3 (MSP_CIC_INTBASE + 3)
- /* External interrupt 3 */
+ /* External interrupt 3 */
#define MSP_INT_CPUIF (MSP_CIC_INTBASE + 4)
- /* CPU interface interrupt */
+ /* CPU interface interrupt */
#define MSP_INT_EXT4 (MSP_CIC_INTBASE + 5)
- /* External interrupt 4 */
+ /* External interrupt 4 */
#define MSP_INT_CIC_USB (MSP_CIC_INTBASE + 6)
- /* Cascaded IRQ for USB */
+ /* Cascaded IRQ for USB */
#define MSP_INT_MBOX (MSP_CIC_INTBASE + 7)
- /* Sec engine mailbox IRQ */
+ /* Sec engine mailbox IRQ */
#define MSP_INT_EXT5 (MSP_CIC_INTBASE + 8)
- /* External interrupt 5 */
+ /* External interrupt 5 */
#define MSP_INT_TDM (MSP_CIC_INTBASE + 9)
- /* TDM interrupt */
+ /* TDM interrupt */
#define MSP_INT_CIC_MAC0 (MSP_CIC_INTBASE + 10)
- /* Cascaded IRQ for MAC 0 */
+ /* Cascaded IRQ for MAC 0 */
#define MSP_INT_CIC_MAC1 (MSP_CIC_INTBASE + 11)
- /* Cascaded IRQ for MAC 1 */
+ /* Cascaded IRQ for MAC 1 */
#define MSP_INT_CIC_SEC (MSP_CIC_INTBASE + 12)
- /* Cascaded IRQ for sec engine */
-#define MSP_INT_PER (MSP_CIC_INTBASE + 13)
- /* Peripheral interrupt */
-#define MSP_INT_TIMER0 (MSP_CIC_INTBASE + 14)
- /* SLP timer 0 */
-#define MSP_INT_TIMER1 (MSP_CIC_INTBASE + 15)
- /* SLP timer 1 */
-#define MSP_INT_TIMER2 (MSP_CIC_INTBASE + 16)
- /* SLP timer 2 */
-#define MSP_INT_VPE0_TIMER (MSP_CIC_INTBASE + 17)
- /* VPE0 MIPS timer */
+ /* Cascaded IRQ for sec engine */
+#define MSP_INT_PER (MSP_CIC_INTBASE + 13)
+ /* Peripheral interrupt */
+#define MSP_INT_TIMER0 (MSP_CIC_INTBASE + 14)
+ /* SLP timer 0 */
+#define MSP_INT_TIMER1 (MSP_CIC_INTBASE + 15)
+ /* SLP timer 1 */
+#define MSP_INT_TIMER2 (MSP_CIC_INTBASE + 16)
+ /* SLP timer 2 */
+#define MSP_INT_VPE0_TIMER (MSP_CIC_INTBASE + 17)
+ /* VPE0 MIPS timer */
#define MSP_INT_BLKCP (MSP_CIC_INTBASE + 18)
- /* Block Copy */
+ /* Block Copy */
#define MSP_INT_UART0 (MSP_CIC_INTBASE + 19)
- /* UART 0 */
+ /* UART 0 */
#define MSP_INT_PCI (MSP_CIC_INTBASE + 20)
- /* PCI subsystem */
+ /* PCI subsystem */
#define MSP_INT_EXT6 (MSP_CIC_INTBASE + 21)
- /* External interrupt 5 */
+ /* External interrupt 5 */
#define MSP_INT_PCI_MSI (MSP_CIC_INTBASE + 22)
- /* PCI Message Signal */
+ /* PCI Message Signal */
#define MSP_INT_CIC_SAR (MSP_CIC_INTBASE + 23)
- /* Cascaded ADSL2+ SAR IRQ */
+ /* Cascaded ADSL2+ SAR IRQ */
#define MSP_INT_DSL (MSP_CIC_INTBASE + 24)
- /* ADSL2+ IRQ */
+ /* ADSL2+ IRQ */
#define MSP_INT_CIC_ERR (MSP_CIC_INTBASE + 25)
- /* SLP error condition */
+ /* SLP error condition */
#define MSP_INT_VPE1_TIMER (MSP_CIC_INTBASE + 26)
- /* VPE1 MIPS timer */
+ /* VPE1 MIPS timer */
#define MSP_INT_VPE0_PC (MSP_CIC_INTBASE + 27)
- /* VPE0 Performance counter */
+ /* VPE0 Performance counter */
#define MSP_INT_VPE1_PC (MSP_CIC_INTBASE + 28)
- /* VPE1 Performance counter */
+ /* VPE1 Performance counter */
#define MSP_INT_EXT7 (MSP_CIC_INTBASE + 29)
- /* External interrupt 5 */
+ /* External interrupt 5 */
#define MSP_INT_VPE0_SW (MSP_CIC_INTBASE + 30)
- /* VPE0 Software interrupt */
+ /* VPE0 Software interrupt */
#define MSP_INT_VPE1_SW (MSP_CIC_INTBASE + 31)
- /* VPE0 Software interrupt */
+ /* VPE0 Software interrupt */
/*
* IRQs cascaded on CIC PER interrupt (MSP_INT_PER)
*/
#define MSP_PER_INTBASE (MSP_CIC_INTBASE + 32)
-/* Reserved 0-1 */
+/* Reserved 0-1 */
#define MSP_INT_UART1 (MSP_PER_INTBASE + 2)
- /* UART 1 */
-/* Reserved 3-5 */
+ /* UART 1 */
+/* Reserved 3-5 */
#define MSP_INT_2WIRE (MSP_PER_INTBASE + 6)
- /* 2-wire */
+ /* 2-wire */
#define MSP_INT_TM0 (MSP_PER_INTBASE + 7)
/* Peripheral timer block out 0 */
#define MSP_INT_TM1 (MSP_PER_INTBASE + 8)
/* Peripheral timer block out 1 */
-/* Reserved 9 */
+/* Reserved 9 */
#define MSP_INT_SPRX (MSP_PER_INTBASE + 10)
- /* SPI RX complete */
+ /* SPI RX complete */
#define MSP_INT_SPTX (MSP_PER_INTBASE + 11)
- /* SPI TX complete */
+ /* SPI TX complete */
#define MSP_INT_GPIO (MSP_PER_INTBASE + 12)
- /* GPIO */
+ /* GPIO */
#define MSP_INT_PER_ERR (MSP_PER_INTBASE + 13)
- /* Peripheral error */
-/* Reserved 14-31 */
+ /* Peripheral error */
+/* Reserved 14-31 */
#endif /* !_MSP_CIC_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h
index 156f320c69e..daacebb047c 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_gpio_macros.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_gpio_macros.h
@@ -54,7 +54,7 @@ enum msp_gpio_mode {
MSP_GPIO_UART_OUTPUT = 0x9, /* Only GPIO 2 or 3 */
MSP_GPIO_PERIF_TIMERA = 0x9, /* Only GPIO 0 or 1 */
MSP_GPIO_PERIF_TIMERB = 0xa, /* Only GPIO 0 or 1 */
- MSP_GPIO_UNKNOWN = 0xb, /* No such GPIO or mode */
+ MSP_GPIO_UNKNOWN = 0xb, /* No such GPIO or mode */
};
/* -- Static Tables -- */
@@ -148,7 +148,7 @@ static unsigned int MSP_GPIO_MODE_ALLOWED[] = {
BASIC_MODE_REG_VALUE(mode, OFFSET_GPIO_NUMBER(gpio))
#define BASIC_MODE_SHIFT(gpio) \
BASIC_MODE_REG_SHIFT(OFFSET_GPIO_NUMBER(gpio))
-#define BASIC_MODE_FROM_REG(data, gpio) \
+#define BASIC_MODE_FROM_REG(data, gpio) \
BASIC_MODE_REG_FROM_REG(data, OFFSET_GPIO_NUMBER(gpio))
/*
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h
index 1d9f0547482..29f8bf79d7a 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_int.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_int.h
@@ -1,7 +1,7 @@
/*
* Defines for the MSP interrupt handlers.
*
- * Copyright (C) 2005, PMC-Sierra, Inc. All rights reserved.
+ * Copyright (C) 2005, PMC-Sierra, Inc. All rights reserved.
* Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
*
* ########################################################################
@@ -28,7 +28,7 @@
/*
* The PMC-Sierra MSP product line has at least two different interrupt
* controllers, the SLP register based scheme and the CIC interrupt
- * controller block mechanism. This file distinguishes between them
+ * controller block mechanism. This file distinguishes between them
* so that devices see a uniform interface.
*/
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h
index 41560690361..24948cc4246 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_pci.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_pci.h
@@ -26,7 +26,7 @@
#ifndef _MSP_PCI_H_
#define _MSP_PCI_H_
-#define MSP_HAS_PCI(ID) (((u32)(ID) <= 0x4236) && ((u32)(ID) >= 0x4220))
+#define MSP_HAS_PCI(ID) (((u32)(ID) <= 0x4236) && ((u32)(ID) >= 0x4220))
/*
* It is convenient to program the OATRAN register so that
@@ -96,24 +96,24 @@ enum
config_status_command, /* 1 */
config_class_revision, /* 2 */
config_BIST_header_latency_cache, /* 3 */
- config_BAR0, /* 4 */
- config_BAR1, /* 5 */
- config_BAR2, /* 6 */
- config_not_used7, /* 7 */
- config_not_used8, /* 8 */
- config_not_used9, /* 9 */
- config_CIS, /* 10 */
- config_subsystem, /* 11 */
- config_not_used12, /* 12 */
+ config_BAR0, /* 4 */
+ config_BAR1, /* 5 */
+ config_BAR2, /* 6 */
+ config_not_used7, /* 7 */
+ config_not_used8, /* 8 */
+ config_not_used9, /* 9 */
+ config_CIS, /* 10 */
+ config_subsystem, /* 11 */
+ config_not_used12, /* 12 */
config_capabilities, /* 13 */
- config_not_used14, /* 14 */
+ config_not_used14, /* 14 */
config_lat_grant_irq, /* 15 */
config_message_control,/* 16 */
config_message_addr, /* 17 */
config_message_data, /* 18 */
- config_VPD_addr, /* 19 */
- config_VPD_data, /* 20 */
- config_maxregs /* 21 - number of registers */
+ config_VPD_addr, /* 19 */
+ config_VPD_data, /* 20 */
+ config_maxregs /* 21 - number of registers */
};
struct msp_pci_regs
@@ -132,15 +132,15 @@ struct msp_pci_regs
pcireg hop_unused_2C; /* +0x2C */
pcireg hop_unused_30; /* +0x30 */
pcireg hop_unused_34; /* +0x34 */
- pcireg if_control; /* +0x38 */
- pcireg oatran; /* +0x3C */
- pcireg reset_ctl; /* +0x40 */
- pcireg config_addr; /* +0x44 */
+ pcireg if_control; /* +0x38 */
+ pcireg oatran; /* +0x3C */
+ pcireg reset_ctl; /* +0x40 */
+ pcireg config_addr; /* +0x44 */
pcireg hop_unused_48; /* +0x48 */
pcireg msg_signaled_int_status; /* +0x4C */
pcireg msg_signaled_int_mask; /* +0x50 */
- pcireg if_status; /* +0x54 */
- pcireg if_mask; /* +0x58 */
+ pcireg if_status; /* +0x54 */
+ pcireg if_mask; /* +0x58 */
pcireg hop_unused_5C; /* +0x5C */
pcireg hop_unused_60; /* +0x60 */
pcireg hop_unused_64; /* +0x64 */
@@ -190,9 +190,9 @@ struct msp_pci_regs
#define BPCI_IFSTATUS_PEI (1<<30) /* Parity error as initiator */
#define BPCI_IFSTATUS_PET (1<<31) /* Parity error as target */
-#define BPCI_RESETCTL_PR (1<<0) /* True if reset asserted */
-#define BPCI_RESETCTL_RT (1<<4) /* Release time */
-#define BPCI_RESETCTL_CT (1<<8) /* Config time */
+#define BPCI_RESETCTL_PR (1<<0) /* True if reset asserted */
+#define BPCI_RESETCTL_RT (1<<4) /* Release time */
+#define BPCI_RESETCTL_CT (1<<8) /* Config time */
#define BPCI_RESETCTL_PE (1<<12) /* PCI enabled */
#define BPCI_RESETCTL_HM (1<<13) /* PCI host mode */
#define BPCI_RESETCTL_RI (1<<14) /* PCI reset in */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h
index 786d82daf8d..4d3052ab89a 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_prom.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_prom.h
@@ -40,7 +40,7 @@
(((revision >= 0xb0) && (revision < 0xd0)))
#define FPGA_IS_5000(revision) \
((revision >= 0x80) && (revision <= 0x90))
-#define FPGA_IS_ZEUS(revision) ((revision < 0x7f))
+#define FPGA_IS_ZEUS(revision) ((revision < 0x7f))
#define FPGA_IS_DUET(revision) \
(((revision >= 0xa0) && (revision < 0xb0)))
#define FPGA_IS_MSP4200(revision) ((revision >= 0xd0))
@@ -48,7 +48,7 @@
#define MACHINE_TYPE_POLO "POLO"
#define MACHINE_TYPE_DUET "DUET"
-#define MACHINE_TYPE_ZEUS "ZEUS"
+#define MACHINE_TYPE_ZEUS "ZEUS"
#define MACHINE_TYPE_MSP2000REVB "MSP2000REVB"
#define MACHINE_TYPE_MSP5000 "MSP5000"
#define MACHINE_TYPE_MSP4200 "MSP4200"
@@ -58,7 +58,7 @@
#define MACHINE_TYPE_POLO_FPGA "POLO-FPGA"
#define MACHINE_TYPE_DUET_FPGA "DUET-FPGA"
-#define MACHINE_TYPE_ZEUS_FPGA "ZEUS_FPGA"
+#define MACHINE_TYPE_ZEUS_FPGA "ZEUS_FPGA"
#define MACHINE_TYPE_MSP2000REVB_FPGA "MSP2000REVB-FPGA"
#define MACHINE_TYPE_MSP5000_FPGA "MSP5000-FPGA"
#define MACHINE_TYPE_MSP4200_FPGA "MSP4200-FPGA"
@@ -95,7 +95,7 @@
#define ENET_MII 'M'
#define ENET_RMII 'R'
-#define ENETTXD_FALLING 'F'
+#define ENETTXD_FALLING 'F'
#define ENETTXD_RISING 'R'
#define PCI_HOST 'H'
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index 7d41474e548..2dbc7a8cec1 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -233,4 +233,4 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
: "=&r" (tmp), "=m" (*address) \
: "0" (tmp), "m" (*address))
-#endif /* __ASM_REGOPS_H__ */
+#endif /* __ASM_REGOPS_H__ */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h
index 692c1b658b9..da3a8dea228 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_regs.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regs.h
@@ -37,13 +37,13 @@
/*
########################################################################
- # Address space and device base definitions #
+ # Address space and device base definitions #
########################################################################
*/
/*
***************************************************************************
- * System Logic and Peripherals (ELB, UART0, etc) device address space *
+ * System Logic and Peripherals (ELB, UART0, etc) device address space *
***************************************************************************
*/
#define MSP_SLP_BASE 0x1c000000
@@ -53,69 +53,69 @@
#define MSP_RST_SIZE 0x0C /* System reset register space */
#define MSP_WTIMER_BASE (MSP_SLP_BASE + 0x04C)
- /* watchdog timer base */
+ /* watchdog timer base */
#define MSP_ITIMER_BASE (MSP_SLP_BASE + 0x054)
- /* internal timer base */
+ /* internal timer base */
#define MSP_UART0_BASE (MSP_SLP_BASE + 0x100)
- /* UART0 controller base */
+ /* UART0 controller base */
#define MSP_BCPY_CTRL_BASE (MSP_SLP_BASE + 0x120)
- /* Block Copy controller base */
+ /* Block Copy controller base */
#define MSP_BCPY_DESC_BASE (MSP_SLP_BASE + 0x160)
- /* Block Copy descriptor base */
+ /* Block Copy descriptor base */
/*
***************************************************************************
- * PCI address space *
+ * PCI address space *
***************************************************************************
*/
#define MSP_PCI_BASE 0x19000000
/*
***************************************************************************
- * MSbus device address space *
+ * MSbus device address space *
***************************************************************************
*/
#define MSP_MSB_BASE 0x18000000
- /* MSbus address start */
+ /* MSbus address start */
#define MSP_PER_BASE (MSP_MSB_BASE + 0x400000)
- /* Peripheral device registers */
+ /* Peripheral device registers */
#define MSP_MAC0_BASE (MSP_MSB_BASE + 0x600000)
- /* MAC A device registers */
+ /* MAC A device registers */
#define MSP_MAC1_BASE (MSP_MSB_BASE + 0x700000)
- /* MAC B device registers */
+ /* MAC B device registers */
#define MSP_MAC_SIZE 0xE0 /* MAC register space */
#define MSP_SEC_BASE (MSP_MSB_BASE + 0x800000)
- /* Security Engine registers */
+ /* Security Engine registers */
#define MSP_MAC2_BASE (MSP_MSB_BASE + 0x900000)
- /* MAC C device registers */
+ /* MAC C device registers */
#define MSP_ADSL2_BASE (MSP_MSB_BASE + 0xA80000)
- /* ADSL2 device registers */
+ /* ADSL2 device registers */
#define MSP_USB0_BASE (MSP_MSB_BASE + 0xB00000)
- /* USB0 device registers */
+ /* USB0 device registers */
#define MSP_USB1_BASE (MSP_MSB_BASE + 0x300000)
/* USB1 device registers */
#define MSP_CPUIF_BASE (MSP_MSB_BASE + 0xC00000)
- /* CPU interface registers */
+ /* CPU interface registers */
/* Devices within the MSbus peripheral block */
#define MSP_UART1_BASE (MSP_PER_BASE + 0x030)
- /* UART1 controller base */
+ /* UART1 controller base */
#define MSP_SPI_BASE (MSP_PER_BASE + 0x058)
- /* SPI/MPI control registers */
+ /* SPI/MPI control registers */
#define MSP_TWI_BASE (MSP_PER_BASE + 0x090)
- /* Two-wire control registers */
+ /* Two-wire control registers */
#define MSP_PTIMER_BASE (MSP_PER_BASE + 0x0F0)
- /* Programmable timer control */
+ /* Programmable timer control */
/*
***************************************************************************
- * Physical Memory configuration address space *
+ * Physical Memory configuration address space *
***************************************************************************
*/
#define MSP_MEM_CFG_BASE 0x17f00000
-#define MSP_MEM_INDIRECT_CTL_10 0x10
+#define MSP_MEM_INDIRECT_CTL_10 0x10
/*
* Notes:
@@ -130,10 +130,10 @@
* 3) These constants are for physical addresses which means that they
* work correctly with "ioremap" and friends. This means that device
* drivers will need to remap these addresses using ioremap and perhaps
- * the readw/writew macros. Or they could use the regptr() macro
+ * the readw/writew macros. Or they could use the regptr() macro
* defined below, but the readw/writew calls are the correct thing.
* 4) The UARTs have an additional status register offset from the base
- * address. This register isn't used in the standard 8250 driver but
+ * address. This register isn't used in the standard 8250 driver but
* may be used in other software. Consult the hardware datasheet for
* offset details.
* 5) For some unknown reason the security engine (MSP_SEC_BASE) registers
@@ -163,44 +163,44 @@
/*
***************************************************************************
- * System Logic and Peripherals (RESET, ELB, etc) registers *
+ * System Logic and Peripherals (RESET, ELB, etc) registers *
***************************************************************************
*/
/* System Control register definitions */
-#define DEV_ID_REG regptr(MSP_SLP_BASE + 0x00)
- /* Device-ID RO */
-#define FWR_ID_REG regptr(MSP_SLP_BASE + 0x04)
- /* Firmware-ID Register RW */
-#define SYS_ID_REG0 regptr(MSP_SLP_BASE + 0x08)
- /* System-ID Register-0 RW */
-#define SYS_ID_REG1 regptr(MSP_SLP_BASE + 0x0C)
- /* System-ID Register-1 RW */
+#define DEV_ID_REG regptr(MSP_SLP_BASE + 0x00)
+ /* Device-ID RO */
+#define FWR_ID_REG regptr(MSP_SLP_BASE + 0x04)
+ /* Firmware-ID Register RW */
+#define SYS_ID_REG0 regptr(MSP_SLP_BASE + 0x08)
+ /* System-ID Register-0 RW */
+#define SYS_ID_REG1 regptr(MSP_SLP_BASE + 0x0C)
+ /* System-ID Register-1 RW */
/* System Reset register definitions */
-#define RST_STS_REG regptr(MSP_SLP_BASE + 0x10)
- /* System Reset Status RO */
-#define RST_SET_REG regptr(MSP_SLP_BASE + 0x14)
- /* System Set Reset WO */
-#define RST_CLR_REG regptr(MSP_SLP_BASE + 0x18)
- /* System Clear Reset WO */
+#define RST_STS_REG regptr(MSP_SLP_BASE + 0x10)
+ /* System Reset Status RO */
+#define RST_SET_REG regptr(MSP_SLP_BASE + 0x14)
+ /* System Set Reset WO */
+#define RST_CLR_REG regptr(MSP_SLP_BASE + 0x18)
+ /* System Clear Reset WO */
/* System Clock Registers */
#define PCI_SLP_REG regptr(MSP_SLP_BASE + 0x1C)
- /* PCI clock generator RW */
+ /* PCI clock generator RW */
#define URT_SLP_REG regptr(MSP_SLP_BASE + 0x20)
- /* UART clock generator RW */
-/* reserved (MSP_SLP_BASE + 0x24) */
-/* reserved (MSP_SLP_BASE + 0x28) */
+ /* UART clock generator RW */
+/* reserved (MSP_SLP_BASE + 0x24) */
+/* reserved (MSP_SLP_BASE + 0x28) */
#define PLL1_SLP_REG regptr(MSP_SLP_BASE + 0x2C)
- /* PLL1 clock generator RW */
+ /* PLL1 clock generator RW */
#define PLL0_SLP_REG regptr(MSP_SLP_BASE + 0x30)
- /* PLL0 clock generator RW */
+ /* PLL0 clock generator RW */
#define MIPS_SLP_REG regptr(MSP_SLP_BASE + 0x34)
- /* MIPS clock generator RW */
-#define VE_SLP_REG regptr(MSP_SLP_BASE + 0x38)
+ /* MIPS clock generator RW */
+#define VE_SLP_REG regptr(MSP_SLP_BASE + 0x38)
/* Voice Eng clock generator RW */
-/* reserved (MSP_SLP_BASE + 0x3C) */
+/* reserved (MSP_SLP_BASE + 0x3C) */
#define MSB_SLP_REG regptr(MSP_SLP_BASE + 0x40)
/* MS-Bus clock generator RW */
#define SMAC_SLP_REG regptr(MSP_SLP_BASE + 0x44)
@@ -216,108 +216,108 @@
#define SE_MBOX_REG regptr(MSP_SLP_BASE + 0x78)
/* Security Engine mailbox RW */
#define VE_MBOX_REG regptr(MSP_SLP_BASE + 0x7C)
- /* Voice Engine mailbox RW */
+ /* Voice Engine mailbox RW */
/* ELB Controller Registers */
#define CS0_CNFG_REG regptr(MSP_SLP_BASE + 0x80)
- /* ELB CS0 Configuration Reg */
+ /* ELB CS0 Configuration Reg */
#define CS0_ADDR_REG regptr(MSP_SLP_BASE + 0x84)
- /* ELB CS0 Base Address Reg */
+ /* ELB CS0 Base Address Reg */
#define CS0_MASK_REG regptr(MSP_SLP_BASE + 0x88)
- /* ELB CS0 Mask Register */
+ /* ELB CS0 Mask Register */
#define CS0_ACCESS_REG regptr(MSP_SLP_BASE + 0x8C)
- /* ELB CS0 access register */
+ /* ELB CS0 access register */
#define CS1_CNFG_REG regptr(MSP_SLP_BASE + 0x90)
- /* ELB CS1 Configuration Reg */
+ /* ELB CS1 Configuration Reg */
#define CS1_ADDR_REG regptr(MSP_SLP_BASE + 0x94)
- /* ELB CS1 Base Address Reg */
+ /* ELB CS1 Base Address Reg */
#define CS1_MASK_REG regptr(MSP_SLP_BASE + 0x98)
- /* ELB CS1 Mask Register */
+ /* ELB CS1 Mask Register */
#define CS1_ACCESS_REG regptr(MSP_SLP_BASE + 0x9C)
- /* ELB CS1 access register */
+ /* ELB CS1 access register */
#define CS2_CNFG_REG regptr(MSP_SLP_BASE + 0xA0)
- /* ELB CS2 Configuration Reg */
+ /* ELB CS2 Configuration Reg */
#define CS2_ADDR_REG regptr(MSP_SLP_BASE + 0xA4)
- /* ELB CS2 Base Address Reg */
+ /* ELB CS2 Base Address Reg */
#define CS2_MASK_REG regptr(MSP_SLP_BASE + 0xA8)
- /* ELB CS2 Mask Register */
+ /* ELB CS2 Mask Register */
#define CS2_ACCESS_REG regptr(MSP_SLP_BASE + 0xAC)
- /* ELB CS2 access register */
+ /* ELB CS2 access register */
#define CS3_CNFG_REG regptr(MSP_SLP_BASE + 0xB0)
- /* ELB CS3 Configuration Reg */
+ /* ELB CS3 Configuration Reg */
#define CS3_ADDR_REG regptr(MSP_SLP_BASE + 0xB4)
- /* ELB CS3 Base Address Reg */
+ /* ELB CS3 Base Address Reg */
#define CS3_MASK_REG regptr(MSP_SLP_BASE + 0xB8)
- /* ELB CS3 Mask Register */
+ /* ELB CS3 Mask Register */
#define CS3_ACCESS_REG regptr(MSP_SLP_BASE + 0xBC)
- /* ELB CS3 access register */
+ /* ELB CS3 access register */
#define CS4_CNFG_REG regptr(MSP_SLP_BASE + 0xC0)
- /* ELB CS4 Configuration Reg */
+ /* ELB CS4 Configuration Reg */
#define CS4_ADDR_REG regptr(MSP_SLP_BASE + 0xC4)
- /* ELB CS4 Base Address Reg */
+ /* ELB CS4 Base Address Reg */
#define CS4_MASK_REG regptr(MSP_SLP_BASE + 0xC8)
- /* ELB CS4 Mask Register */
+ /* ELB CS4 Mask Register */
#define CS4_ACCESS_REG regptr(MSP_SLP_BASE + 0xCC)
- /* ELB CS4 access register */
+ /* ELB CS4 access register */
#define CS5_CNFG_REG regptr(MSP_SLP_BASE + 0xD0)
- /* ELB CS5 Configuration Reg */
+ /* ELB CS5 Configuration Reg */
#define CS5_ADDR_REG regptr(MSP_SLP_BASE + 0xD4)
- /* ELB CS5 Base Address Reg */
+ /* ELB CS5 Base Address Reg */
#define CS5_MASK_REG regptr(MSP_SLP_BASE + 0xD8)
- /* ELB CS5 Mask Register */
+ /* ELB CS5 Mask Register */
#define CS5_ACCESS_REG regptr(MSP_SLP_BASE + 0xDC)
- /* ELB CS5 access register */
+ /* ELB CS5 access register */
-/* reserved 0xE0 - 0xE8 */
+/* reserved 0xE0 - 0xE8 */
#define ELB_1PC_EN_REG regptr(MSP_SLP_BASE + 0xEC)
- /* ELB single PC card detect */
+ /* ELB single PC card detect */
-/* reserved 0xF0 - 0xF8 */
-#define ELB_CLK_CFG_REG regptr(MSP_SLP_BASE + 0xFC)
- /* SDRAM read/ELB timing Reg */
+/* reserved 0xF0 - 0xF8 */
+#define ELB_CLK_CFG_REG regptr(MSP_SLP_BASE + 0xFC)
+ /* SDRAM read/ELB timing Reg */
/* Extended UART status registers */
#define UART0_STATUS_REG regptr(MSP_UART0_BASE + 0x0c0)
- /* UART Status Register 0 */
+ /* UART Status Register 0 */
#define UART1_STATUS_REG regptr(MSP_UART1_BASE + 0x170)
- /* UART Status Register 1 */
+ /* UART Status Register 1 */
/* Performance monitoring registers */
#define PERF_MON_CTRL_REG regptr(MSP_SLP_BASE + 0x140)
- /* Performance monitor control */
+ /* Performance monitor control */
#define PERF_MON_CLR_REG regptr(MSP_SLP_BASE + 0x144)
- /* Performance monitor clear */
+ /* Performance monitor clear */
#define PERF_MON_CNTH_REG regptr(MSP_SLP_BASE + 0x148)
- /* Perf monitor counter high */
+ /* Perf monitor counter high */
#define PERF_MON_CNTL_REG regptr(MSP_SLP_BASE + 0x14C)
- /* Perf monitor counter low */
+ /* Perf monitor counter low */
/* System control registers */
#define SYS_CTRL_REG regptr(MSP_SLP_BASE + 0x150)
- /* System control register */
+ /* System control register */
#define SYS_ERR1_REG regptr(MSP_SLP_BASE + 0x154)
- /* System Error status 1 */
+ /* System Error status 1 */
#define SYS_ERR2_REG regptr(MSP_SLP_BASE + 0x158)
- /* System Error status 2 */
+ /* System Error status 2 */
#define SYS_INT_CFG_REG regptr(MSP_SLP_BASE + 0x15C)
- /* System Interrupt config */
+ /* System Interrupt config */
/* Voice Engine Memory configuration */
#define VE_MEM_REG regptr(MSP_SLP_BASE + 0x17C)
- /* Voice engine memory config */
+ /* Voice engine memory config */
/* CPU/SLP Error Status registers */
#define CPU_ERR1_REG regptr(MSP_SLP_BASE + 0x180)
- /* CPU/SLP Error status 1 */
+ /* CPU/SLP Error status 1 */
#define CPU_ERR2_REG regptr(MSP_SLP_BASE + 0x184)
- /* CPU/SLP Error status 1 */
+ /* CPU/SLP Error status 1 */
-/* Extended GPIO registers */
+/* Extended GPIO registers */
#define EXTENDED_GPIO1_REG regptr(MSP_SLP_BASE + 0x188)
#define EXTENDED_GPIO2_REG regptr(MSP_SLP_BASE + 0x18c)
#define EXTENDED_GPIO_REG EXTENDED_GPIO1_REG
@@ -325,182 +325,182 @@
/* System Error registers */
#define SLP_ERR_STS_REG regptr(MSP_SLP_BASE + 0x190)
- /* Int status for SLP errors */
+ /* Int status for SLP errors */
#define SLP_ERR_MSK_REG regptr(MSP_SLP_BASE + 0x194)
- /* Int mask for SLP errors */
+ /* Int mask for SLP errors */
#define SLP_ELB_ERST_REG regptr(MSP_SLP_BASE + 0x198)
- /* External ELB reset */
+ /* External ELB reset */
#define SLP_BOOT_STS_REG regptr(MSP_SLP_BASE + 0x19C)
- /* Boot Status */
+ /* Boot Status */
/* Extended ELB addressing */
#define CS0_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1A0)
- /* CS0 Extended address */
+ /* CS0 Extended address */
#define CS1_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1A4)
- /* CS1 Extended address */
+ /* CS1 Extended address */
#define CS2_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1A8)
- /* CS2 Extended address */
+ /* CS2 Extended address */
#define CS3_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1AC)
- /* CS3 Extended address */
-/* reserved 0x1B0 */
+ /* CS3 Extended address */
+/* reserved 0x1B0 */
#define CS5_EXT_ADDR_REG regptr(MSP_SLP_BASE + 0x1B4)
- /* CS5 Extended address */
+ /* CS5 Extended address */
/* PLL Adjustment registers */
#define PLL_LOCK_REG regptr(MSP_SLP_BASE + 0x200)
- /* PLL0 lock status */
+ /* PLL0 lock status */
#define PLL_ARST_REG regptr(MSP_SLP_BASE + 0x204)
- /* PLL Analog reset status */
+ /* PLL Analog reset status */
#define PLL0_ADJ_REG regptr(MSP_SLP_BASE + 0x208)
- /* PLL0 Adjustment value */
+ /* PLL0 Adjustment value */
#define PLL1_ADJ_REG regptr(MSP_SLP_BASE + 0x20C)
- /* PLL1 Adjustment value */
+ /* PLL1 Adjustment value */
/*
***************************************************************************
- * Peripheral Register definitions *
+ * Peripheral Register definitions *
***************************************************************************
*/
/* Peripheral status */
#define PER_CTRL_REG regptr(MSP_PER_BASE + 0x50)
- /* Peripheral control register */
+ /* Peripheral control register */
#define PER_STS_REG regptr(MSP_PER_BASE + 0x54)
- /* Peripheral status register */
+ /* Peripheral status register */
/* SPI/MPI Registers */
#define SMPI_TX_SZ_REG regptr(MSP_PER_BASE + 0x58)
- /* SPI/MPI Tx Size register */
+ /* SPI/MPI Tx Size register */
#define SMPI_RX_SZ_REG regptr(MSP_PER_BASE + 0x5C)
- /* SPI/MPI Rx Size register */
+ /* SPI/MPI Rx Size register */
#define SMPI_CTL_REG regptr(MSP_PER_BASE + 0x60)
- /* SPI/MPI Control register */
+ /* SPI/MPI Control register */
#define SMPI_MS_REG regptr(MSP_PER_BASE + 0x64)
- /* SPI/MPI Chip Select reg */
+ /* SPI/MPI Chip Select reg */
#define SMPI_CORE_DATA_REG regptr(MSP_PER_BASE + 0xC0)
- /* SPI/MPI Core Data reg */
+ /* SPI/MPI Core Data reg */
#define SMPI_CORE_CTRL_REG regptr(MSP_PER_BASE + 0xC4)
- /* SPI/MPI Core Control reg */
+ /* SPI/MPI Core Control reg */
#define SMPI_CORE_STAT_REG regptr(MSP_PER_BASE + 0xC8)
- /* SPI/MPI Core Status reg */
+ /* SPI/MPI Core Status reg */
#define SMPI_CORE_SSEL_REG regptr(MSP_PER_BASE + 0xCC)
- /* SPI/MPI Core Ssel reg */
+ /* SPI/MPI Core Ssel reg */
#define SMPI_FIFO_REG regptr(MSP_PER_BASE + 0xD0)
- /* SPI/MPI Data FIFO reg */
+ /* SPI/MPI Data FIFO reg */
-/* Peripheral Block Error Registers */
+/* Peripheral Block Error Registers */
#define PER_ERR_STS_REG regptr(MSP_PER_BASE + 0x70)
- /* Error Bit Status Register */
+ /* Error Bit Status Register */
#define PER_ERR_MSK_REG regptr(MSP_PER_BASE + 0x74)
- /* Error Bit Mask Register */
+ /* Error Bit Mask Register */
#define PER_HDR1_REG regptr(MSP_PER_BASE + 0x78)
- /* Error Header 1 Register */
+ /* Error Header 1 Register */
#define PER_HDR2_REG regptr(MSP_PER_BASE + 0x7C)
- /* Error Header 2 Register */
+ /* Error Header 2 Register */
-/* Peripheral Block Interrupt Registers */
+/* Peripheral Block Interrupt Registers */
#define PER_INT_STS_REG regptr(MSP_PER_BASE + 0x80)
- /* Interrupt status register */
+ /* Interrupt status register */
#define PER_INT_MSK_REG regptr(MSP_PER_BASE + 0x84)
- /* Interrupt Mask Register */
+ /* Interrupt Mask Register */
#define GPIO_INT_STS_REG regptr(MSP_PER_BASE + 0x88)
- /* GPIO interrupt status reg */
+ /* GPIO interrupt status reg */
#define GPIO_INT_MSK_REG regptr(MSP_PER_BASE + 0x8C)
- /* GPIO interrupt MASK Reg */
+ /* GPIO interrupt MASK Reg */
-/* POLO GPIO registers */
+/* POLO GPIO registers */
#define POLO_GPIO_DAT1_REG regptr(MSP_PER_BASE + 0x0E0)
- /* Polo GPIO[8:0] data reg */
+ /* Polo GPIO[8:0] data reg */
#define POLO_GPIO_CFG1_REG regptr(MSP_PER_BASE + 0x0E4)
- /* Polo GPIO[7:0] config reg */
+ /* Polo GPIO[7:0] config reg */
#define POLO_GPIO_CFG2_REG regptr(MSP_PER_BASE + 0x0E8)
- /* Polo GPIO[15:8] config reg */
+ /* Polo GPIO[15:8] config reg */
#define POLO_GPIO_OD1_REG regptr(MSP_PER_BASE + 0x0EC)
/* Polo GPIO[31:0] output drive */
#define POLO_GPIO_CFG3_REG regptr(MSP_PER_BASE + 0x170)
- /* Polo GPIO[23:16] config reg */
+ /* Polo GPIO[23:16] config reg */
#define POLO_GPIO_DAT2_REG regptr(MSP_PER_BASE + 0x174)
- /* Polo GPIO[15:9] data reg */
+ /* Polo GPIO[15:9] data reg */
#define POLO_GPIO_DAT3_REG regptr(MSP_PER_BASE + 0x178)
- /* Polo GPIO[23:16] data reg */
+ /* Polo GPIO[23:16] data reg */
#define POLO_GPIO_DAT4_REG regptr(MSP_PER_BASE + 0x17C)
- /* Polo GPIO[31:24] data reg */
+ /* Polo GPIO[31:24] data reg */
#define POLO_GPIO_DAT5_REG regptr(MSP_PER_BASE + 0x180)
- /* Polo GPIO[39:32] data reg */
+ /* Polo GPIO[39:32] data reg */
#define POLO_GPIO_DAT6_REG regptr(MSP_PER_BASE + 0x184)
- /* Polo GPIO[47:40] data reg */
+ /* Polo GPIO[47:40] data reg */
#define POLO_GPIO_DAT7_REG regptr(MSP_PER_BASE + 0x188)
- /* Polo GPIO[54:48] data reg */
+ /* Polo GPIO[54:48] data reg */
#define POLO_GPIO_CFG4_REG regptr(MSP_PER_BASE + 0x18C)
- /* Polo GPIO[31:24] config reg */
+ /* Polo GPIO[31:24] config reg */
#define POLO_GPIO_CFG5_REG regptr(MSP_PER_BASE + 0x190)
- /* Polo GPIO[39:32] config reg */
+ /* Polo GPIO[39:32] config reg */
#define POLO_GPIO_CFG6_REG regptr(MSP_PER_BASE + 0x194)
- /* Polo GPIO[47:40] config reg */
+ /* Polo GPIO[47:40] config reg */
#define POLO_GPIO_CFG7_REG regptr(MSP_PER_BASE + 0x198)
- /* Polo GPIO[54:48] config reg */
+ /* Polo GPIO[54:48] config reg */
#define POLO_GPIO_OD2_REG regptr(MSP_PER_BASE + 0x19C)
/* Polo GPIO[54:32] output drive */
-/* Generic GPIO registers */
+/* Generic GPIO registers */
#define GPIO_DATA1_REG regptr(MSP_PER_BASE + 0x170)
- /* GPIO[1:0] data register */
+ /* GPIO[1:0] data register */
#define GPIO_DATA2_REG regptr(MSP_PER_BASE + 0x174)
- /* GPIO[5:2] data register */
+ /* GPIO[5:2] data register */
#define GPIO_DATA3_REG regptr(MSP_PER_BASE + 0x178)
- /* GPIO[9:6] data register */
+ /* GPIO[9:6] data register */
#define GPIO_DATA4_REG regptr(MSP_PER_BASE + 0x17C)
- /* GPIO[15:10] data register */
+ /* GPIO[15:10] data register */
#define GPIO_CFG1_REG regptr(MSP_PER_BASE + 0x180)
- /* GPIO[1:0] config register */
+ /* GPIO[1:0] config register */
#define GPIO_CFG2_REG regptr(MSP_PER_BASE + 0x184)
- /* GPIO[5:2] config register */
+ /* GPIO[5:2] config register */
#define GPIO_CFG3_REG regptr(MSP_PER_BASE + 0x188)
- /* GPIO[9:6] config register */
+ /* GPIO[9:6] config register */
#define GPIO_CFG4_REG regptr(MSP_PER_BASE + 0x18C)
- /* GPIO[15:10] config register */
+ /* GPIO[15:10] config register */
#define GPIO_OD_REG regptr(MSP_PER_BASE + 0x190)
- /* GPIO[15:0] output drive */
+ /* GPIO[15:0] output drive */
/*
***************************************************************************
- * CPU Interface register definitions *
+ * CPU Interface register definitions *
***************************************************************************
*/
#define PCI_FLUSH_REG regptr(MSP_CPUIF_BASE + 0x00)
/* PCI-SDRAM queue flush trigger */
#define OCP_ERR1_REG regptr(MSP_CPUIF_BASE + 0x04)
- /* OCP Error Attribute 1 */
+ /* OCP Error Attribute 1 */
#define OCP_ERR2_REG regptr(MSP_CPUIF_BASE + 0x08)
- /* OCP Error Attribute 2 */
+ /* OCP Error Attribute 2 */
#define OCP_STS_REG regptr(MSP_CPUIF_BASE + 0x0C)
- /* OCP Error Status */
+ /* OCP Error Status */
#define CPUIF_PM_REG regptr(MSP_CPUIF_BASE + 0x10)
- /* CPU policy configuration */
+ /* CPU policy configuration */
#define CPUIF_CFG_REG regptr(MSP_CPUIF_BASE + 0x10)
- /* Misc configuration options */
+ /* Misc configuration options */
/* Central Interrupt Controller Registers */
#define MSP_CIC_BASE (MSP_CPUIF_BASE + 0x8000)
- /* Central Interrupt registers */
+ /* Central Interrupt registers */
#define CIC_EXT_CFG_REG regptr(MSP_CIC_BASE + 0x00)
- /* External interrupt config */
+ /* External interrupt config */
#define CIC_STS_REG regptr(MSP_CIC_BASE + 0x04)
- /* CIC Interrupt Status */
+ /* CIC Interrupt Status */
#define CIC_VPE0_MSK_REG regptr(MSP_CIC_BASE + 0x08)
- /* VPE0 Interrupt Mask */
+ /* VPE0 Interrupt Mask */
#define CIC_VPE1_MSK_REG regptr(MSP_CIC_BASE + 0x0C)
- /* VPE1 Interrupt Mask */
+ /* VPE1 Interrupt Mask */
#define CIC_TC0_MSK_REG regptr(MSP_CIC_BASE + 0x10)
- /* Thread Context 0 Int Mask */
+ /* Thread Context 0 Int Mask */
#define CIC_TC1_MSK_REG regptr(MSP_CIC_BASE + 0x14)
- /* Thread Context 1 Int Mask */
+ /* Thread Context 1 Int Mask */
#define CIC_TC2_MSK_REG regptr(MSP_CIC_BASE + 0x18)
- /* Thread Context 2 Int Mask */
+ /* Thread Context 2 Int Mask */
#define CIC_TC3_MSK_REG regptr(MSP_CIC_BASE + 0x18)
- /* Thread Context 3 Int Mask */
+ /* Thread Context 3 Int Mask */
#define CIC_TC4_MSK_REG regptr(MSP_CIC_BASE + 0x18)
- /* Thread Context 4 Int Mask */
+ /* Thread Context 4 Int Mask */
#define CIC_PCIMSI_STS_REG regptr(MSP_CIC_BASE + 0x18)
#define CIC_PCIMSI_MSK_REG regptr(MSP_CIC_BASE + 0x18)
#define CIC_PCIFLSH_REG regptr(MSP_CIC_BASE + 0x18)
@@ -509,7 +509,7 @@
/*
***************************************************************************
- * Memory controller registers *
+ * Memory controller registers *
***************************************************************************
*/
#define MEM_CFG1_REG regptr(MSP_MEM_CFG_BASE + 0x00)
@@ -519,7 +519,7 @@
/*
***************************************************************************
- * PCI controller registers *
+ * PCI controller registers *
***************************************************************************
*/
#define PCI_BASE_REG regptr(MSP_PCI_BASE + 0x00)
@@ -528,25 +528,25 @@
/*
########################################################################
- # Register content & macro definitions #
+ # Register content & macro definitions #
########################################################################
*/
/*
***************************************************************************
- * DEV_ID defines *
+ * DEV_ID defines *
***************************************************************************
*/
-#define DEV_ID_PCI_DIS (1 << 26) /* Set if PCI disabled */
-#define DEV_ID_PCI_HOST (1 << 20) /* Set if PCI host */
-#define DEV_ID_SINGLE_PC (1 << 19) /* Set if single PC Card */
-#define DEV_ID_FAMILY (0xff << 8) /* family ID code */
-#define POLO_ZEUS_SUB_FAMILY (0x7 << 16) /* sub family for Polo/Zeus */
+#define DEV_ID_PCI_DIS (1 << 26) /* Set if PCI disabled */
+#define DEV_ID_PCI_HOST (1 << 20) /* Set if PCI host */
+#define DEV_ID_SINGLE_PC (1 << 19) /* Set if single PC Card */
+#define DEV_ID_FAMILY (0xff << 8) /* family ID code */
+#define POLO_ZEUS_SUB_FAMILY (0x7 << 16) /* sub family for Polo/Zeus */
-#define MSPFPGA_ID (0x00 << 8) /* you are on your own here */
+#define MSPFPGA_ID (0x00 << 8) /* you are on your own here */
#define MSP5000_ID (0x50 << 8)
-#define MSP4F00_ID (0x4f << 8) /* FPGA version of MSP4200 */
-#define MSP4E00_ID (0x4f << 8) /* FPGA version of MSP7120 */
+#define MSP4F00_ID (0x4f << 8) /* FPGA version of MSP4200 */
+#define MSP4E00_ID (0x4f << 8) /* FPGA version of MSP7120 */
#define MSP4200_ID (0x42 << 8)
#define MSP4000_ID (0x40 << 8)
#define MSP2XXX_ID (0x20 << 8)
@@ -563,27 +563,27 @@
/*
***************************************************************************
- * RESET defines *
+ * RESET defines *
***************************************************************************
*/
-#define MSP_GR_RST (0x01 << 0) /* Global reset bit */
-#define MSP_MR_RST (0x01 << 1) /* MIPS reset bit */
-#define MSP_PD_RST (0x01 << 2) /* PVC DMA reset bit */
-#define MSP_PP_RST (0x01 << 3) /* PVC reset bit */
-/* reserved */
-#define MSP_EA_RST (0x01 << 6) /* Mac A reset bit */
-#define MSP_EB_RST (0x01 << 7) /* Mac B reset bit */
-#define MSP_SE_RST (0x01 << 8) /* Security Eng reset bit */
-#define MSP_PB_RST (0x01 << 9) /* Per block reset bit */
-#define MSP_EC_RST (0x01 << 10) /* Mac C reset bit */
-#define MSP_TW_RST (0x01 << 11) /* TWI reset bit */
-#define MSP_SPI_RST (0x01 << 12) /* SPI/MPI reset bit */
-#define MSP_U1_RST (0x01 << 13) /* UART1 reset bit */
-#define MSP_U0_RST (0x01 << 14) /* UART0 reset bit */
+#define MSP_GR_RST (0x01 << 0) /* Global reset bit */
+#define MSP_MR_RST (0x01 << 1) /* MIPS reset bit */
+#define MSP_PD_RST (0x01 << 2) /* PVC DMA reset bit */
+#define MSP_PP_RST (0x01 << 3) /* PVC reset bit */
+/* reserved */
+#define MSP_EA_RST (0x01 << 6) /* Mac A reset bit */
+#define MSP_EB_RST (0x01 << 7) /* Mac B reset bit */
+#define MSP_SE_RST (0x01 << 8) /* Security Eng reset bit */
+#define MSP_PB_RST (0x01 << 9) /* Per block reset bit */
+#define MSP_EC_RST (0x01 << 10) /* Mac C reset bit */
+#define MSP_TW_RST (0x01 << 11) /* TWI reset bit */
+#define MSP_SPI_RST (0x01 << 12) /* SPI/MPI reset bit */
+#define MSP_U1_RST (0x01 << 13) /* UART1 reset bit */
+#define MSP_U0_RST (0x01 << 14) /* UART0 reset bit */
/*
***************************************************************************
- * UART defines *
+ * UART defines *
***************************************************************************
*/
#define MSP_BASE_BAUD 25000000
@@ -591,15 +591,15 @@
/*
***************************************************************************
- * ELB defines *
+ * ELB defines *
***************************************************************************
*/
-#define PCCARD_32 0x02 /* Set if is PCCARD 32 (Cardbus) */
-#define SINGLE_PCCARD 0x01 /* Set to enable single PC card */
+#define PCCARD_32 0x02 /* Set if is PCCARD 32 (Cardbus) */
+#define SINGLE_PCCARD 0x01 /* Set to enable single PC card */
/*
***************************************************************************
- * CIC defines *
+ * CIC defines *
***************************************************************************
*/
@@ -625,7 +625,7 @@
/*
***************************************************************************
- * Memory Controller defines *
+ * Memory Controller defines *
***************************************************************************
*/
@@ -644,17 +644,17 @@
/*
***************************************************************************
- * SPI/MPI Mode *
+ * SPI/MPI Mode *
***************************************************************************
*/
#define SPI_MPI_RX_BUSY 0x00008000 /* SPI/MPI Receive Busy */
-#define SPI_MPI_FIFO_EMPTY 0x00004000 /* SPI/MPI Fifo Empty */
+#define SPI_MPI_FIFO_EMPTY 0x00004000 /* SPI/MPI Fifo Empty */
#define SPI_MPI_TX_BUSY 0x00002000 /* SPI/MPI Transmit Busy */
-#define SPI_MPI_FIFO_FULL 0x00001000 /* SPI/MPU FIFO full */
+#define SPI_MPI_FIFO_FULL 0x00001000 /* SPI/MPU FIFO full */
/*
***************************************************************************
- * SPI/MPI Control Register *
+ * SPI/MPI Control Register *
***************************************************************************
*/
#define SPI_MPI_RX_START 0x00000004 /* Start receive command */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h
index 96d4c8ce8c8..51a66dcc429 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_slp_int.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_slp_int.h
@@ -27,9 +27,9 @@
/*
* The PMC-Sierra SLP interrupts are arranged in a 3 level cascaded
- * hierarchical system. The first level are the direct MIPS interrupts
+ * hierarchical system. The first level are the direct MIPS interrupts
* and are assigned the interrupt range 0-7. The second level is the SLM
- * interrupt controller and is assigned the range 8-39. The third level
+ * interrupt controller and is assigned the range 8-39. The third level
* comprises the Peripherial block, the PCI block, the PCI MSI block and
* the SLP. The PCI interrupts and the SLP errors are handled by the
* relevant subsystems so the core interrupt code needs only concern
@@ -41,11 +41,11 @@
* IRQs directly connected to CPU
*/
#define MSP_MIPS_INTBASE 0
-#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
-#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
-#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
-#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
-#define MSP_INT_C_IRQ2 4 /* Wired off, C_IRQ2 */
+#define MSP_INT_SW0 0 /* IRQ for swint0, C_SW0 */
+#define MSP_INT_SW1 1 /* IRQ for swint1, C_SW1 */
+#define MSP_INT_MAC0 2 /* IRQ for MAC 0, C_IRQ0 */
+#define MSP_INT_MAC1 3 /* IRQ for MAC 1, C_IRQ1 */
+#define MSP_INT_C_IRQ2 4 /* Wired off, C_IRQ2 */
#define MSP_INT_VE 5 /* IRQ for Voice Engine, C_IRQ3 */
#define MSP_INT_SLP 6 /* IRQ for SLM block, C_IRQ4 */
#define MSP_INT_TIMER 7 /* IRQ for the MIPS timer, C_IRQ5 */
@@ -57,85 +57,85 @@
*/
#define MSP_SLP_INTBASE (MSP_MIPS_INTBASE + 8)
#define MSP_INT_EXT0 (MSP_SLP_INTBASE + 0)
- /* External interrupt 0 */
+ /* External interrupt 0 */
#define MSP_INT_EXT1 (MSP_SLP_INTBASE + 1)
- /* External interrupt 1 */
+ /* External interrupt 1 */
#define MSP_INT_EXT2 (MSP_SLP_INTBASE + 2)
- /* External interrupt 2 */
+ /* External interrupt 2 */
#define MSP_INT_EXT3 (MSP_SLP_INTBASE + 3)
- /* External interrupt 3 */
-/* Reserved 4-7 */
+ /* External interrupt 3 */
+/* Reserved 4-7 */
/*
*************************************************************************
* DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER/DANGER *
- * Some MSP produces have this interrupt labelled as Voice and some are *
- * SEC mbox ... *
+ * Some MSP produces have this interrupt labelled as Voice and some are *
+ * SEC mbox ... *
*************************************************************************
*/
#define MSP_INT_SLP_VE (MSP_SLP_INTBASE + 8)
/* Cascaded IRQ for Voice Engine*/
#define MSP_INT_SLP_TDM (MSP_SLP_INTBASE + 9)
- /* TDM interrupt */
+ /* TDM interrupt */
#define MSP_INT_SLP_MAC0 (MSP_SLP_INTBASE + 10)
- /* Cascaded IRQ for MAC 0 */
+ /* Cascaded IRQ for MAC 0 */
#define MSP_INT_SLP_MAC1 (MSP_SLP_INTBASE + 11)
- /* Cascaded IRQ for MAC 1 */
+ /* Cascaded IRQ for MAC 1 */
#define MSP_INT_SEC (MSP_SLP_INTBASE + 12)
- /* IRQ for security engine */
-#define MSP_INT_PER (MSP_SLP_INTBASE + 13)
- /* Peripheral interrupt */
-#define MSP_INT_TIMER0 (MSP_SLP_INTBASE + 14)
- /* SLP timer 0 */
-#define MSP_INT_TIMER1 (MSP_SLP_INTBASE + 15)
- /* SLP timer 1 */
-#define MSP_INT_TIMER2 (MSP_SLP_INTBASE + 16)
- /* SLP timer 2 */
-#define MSP_INT_SLP_TIMER (MSP_SLP_INTBASE + 17)
- /* Cascaded MIPS timer */
+ /* IRQ for security engine */
+#define MSP_INT_PER (MSP_SLP_INTBASE + 13)
+ /* Peripheral interrupt */
+#define MSP_INT_TIMER0 (MSP_SLP_INTBASE + 14)
+ /* SLP timer 0 */
+#define MSP_INT_TIMER1 (MSP_SLP_INTBASE + 15)
+ /* SLP timer 1 */
+#define MSP_INT_TIMER2 (MSP_SLP_INTBASE + 16)
+ /* SLP timer 2 */
+#define MSP_INT_SLP_TIMER (MSP_SLP_INTBASE + 17)
+ /* Cascaded MIPS timer */
#define MSP_INT_BLKCP (MSP_SLP_INTBASE + 18)
- /* Block Copy */
+ /* Block Copy */
#define MSP_INT_UART0 (MSP_SLP_INTBASE + 19)
- /* UART 0 */
+ /* UART 0 */
#define MSP_INT_PCI (MSP_SLP_INTBASE + 20)
- /* PCI subsystem */
+ /* PCI subsystem */
#define MSP_INT_PCI_DBELL (MSP_SLP_INTBASE + 21)
- /* PCI doorbell */
+ /* PCI doorbell */
#define MSP_INT_PCI_MSI (MSP_SLP_INTBASE + 22)
- /* PCI Message Signal */
+ /* PCI Message Signal */
#define MSP_INT_PCI_BC0 (MSP_SLP_INTBASE + 23)
- /* PCI Block Copy 0 */
+ /* PCI Block Copy 0 */
#define MSP_INT_PCI_BC1 (MSP_SLP_INTBASE + 24)
- /* PCI Block Copy 1 */
+ /* PCI Block Copy 1 */
#define MSP_INT_SLP_ERR (MSP_SLP_INTBASE + 25)
- /* SLP error condition */
+ /* SLP error condition */
#define MSP_INT_MAC2 (MSP_SLP_INTBASE + 26)
- /* IRQ for MAC2 */
-/* Reserved 26-31 */
+ /* IRQ for MAC2 */
+/* Reserved 26-31 */
/*
* IRQs cascaded on SLP PER interrupt (MSP_INT_PER)
*/
#define MSP_PER_INTBASE (MSP_SLP_INTBASE + 32)
-/* Reserved 0-1 */
+/* Reserved 0-1 */
#define MSP_INT_UART1 (MSP_PER_INTBASE + 2)
- /* UART 1 */
-/* Reserved 3-5 */
+ /* UART 1 */
+/* Reserved 3-5 */
#define MSP_INT_2WIRE (MSP_PER_INTBASE + 6)
- /* 2-wire */
+ /* 2-wire */
#define MSP_INT_TM0 (MSP_PER_INTBASE + 7)
/* Peripheral timer block out 0 */
#define MSP_INT_TM1 (MSP_PER_INTBASE + 8)
/* Peripheral timer block out 1 */
-/* Reserved 9 */
+/* Reserved 9 */
#define MSP_INT_SPRX (MSP_PER_INTBASE + 10)
- /* SPI RX complete */
+ /* SPI RX complete */
#define MSP_INT_SPTX (MSP_PER_INTBASE + 11)
- /* SPI TX complete */
+ /* SPI TX complete */
#define MSP_INT_GPIO (MSP_PER_INTBASE + 12)
- /* GPIO */
+ /* GPIO */
#define MSP_INT_PER_ERR (MSP_PER_INTBASE + 13)
- /* Peripheral error */
-/* Reserved 14-31 */
+ /* Peripheral error */
+/* Reserved 14-31 */
#endif /* !_MSP_SLP_INT_H */
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
index 4c9348df9df..aa45e6a0712 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/msp_usb.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_usb.h
@@ -40,7 +40,7 @@
#define MSP_USB0_HS_END (MSP_USB0_BASE + 0x401FF)
/* Register spaces for USB host 1 */
-#define MSP_USB1_MAB_START (MSP_USB1_BASE + 0x0)
+#define MSP_USB1_MAB_START (MSP_USB1_BASE + 0x0)
#define MSP_USB1_MAB_END (MSP_USB1_BASE + 0x17)
#define MSP_USB1_ID_START (MSP_USB1_BASE + 0x40000)
#define MSP_USB1_ID_END (MSP_USB1_BASE + 0x4008f)
diff --git a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
index c74eb1657f5..a60bf9dd14a 100644
--- a/arch/mips/include/asm/pmc-sierra/msp71xx/war.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/war.h
@@ -21,9 +21,9 @@
#define R10000_LLSC_WAR 0
#if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \
defined(CONFIG_PMC_MSP7120_FPGA)
-#define MIPS34K_MISSED_ITLB_WAR 1
+#define MIPS34K_MISSED_ITLB_WAR 1
#else
-#define MIPS34K_MISSED_ITLB_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
#endif
#endif /* __ASM_MIPS_PMC_SIERRA_WAR_H */
diff --git a/arch/mips/include/asm/mach-pnx833x/irq-mapping.h b/arch/mips/include/asm/mach-pnx833x/irq-mapping.h
index 6d70264557b..daa85ce03ef 100644
--- a/arch/mips/include/asm/mach-pnx833x/irq-mapping.h
+++ b/arch/mips/include/asm/mach-pnx833x/irq-mapping.h
@@ -42,15 +42,15 @@
#define PNX833X_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7)
/* Interrupts supported by PIC */
-#define PNX833X_PIC_I2C0_INT (PNX833X_PIC_IRQ_BASE + 1)
-#define PNX833X_PIC_I2C1_INT (PNX833X_PIC_IRQ_BASE + 2)
-#define PNX833X_PIC_UART0_INT (PNX833X_PIC_IRQ_BASE + 3)
-#define PNX833X_PIC_UART1_INT (PNX833X_PIC_IRQ_BASE + 4)
-#define PNX833X_PIC_TS_IN0_DV_INT (PNX833X_PIC_IRQ_BASE + 5)
-#define PNX833X_PIC_TS_IN0_DMA_INT (PNX833X_PIC_IRQ_BASE + 6)
-#define PNX833X_PIC_GPIO_INT (PNX833X_PIC_IRQ_BASE + 7)
-#define PNX833X_PIC_AUDIO_DEC_INT (PNX833X_PIC_IRQ_BASE + 8)
-#define PNX833X_PIC_VIDEO_DEC_INT (PNX833X_PIC_IRQ_BASE + 9)
+#define PNX833X_PIC_I2C0_INT (PNX833X_PIC_IRQ_BASE + 1)
+#define PNX833X_PIC_I2C1_INT (PNX833X_PIC_IRQ_BASE + 2)
+#define PNX833X_PIC_UART0_INT (PNX833X_PIC_IRQ_BASE + 3)
+#define PNX833X_PIC_UART1_INT (PNX833X_PIC_IRQ_BASE + 4)
+#define PNX833X_PIC_TS_IN0_DV_INT (PNX833X_PIC_IRQ_BASE + 5)
+#define PNX833X_PIC_TS_IN0_DMA_INT (PNX833X_PIC_IRQ_BASE + 6)
+#define PNX833X_PIC_GPIO_INT (PNX833X_PIC_IRQ_BASE + 7)
+#define PNX833X_PIC_AUDIO_DEC_INT (PNX833X_PIC_IRQ_BASE + 8)
+#define PNX833X_PIC_VIDEO_DEC_INT (PNX833X_PIC_IRQ_BASE + 9)
#define PNX833X_PIC_CONFIG_INT (PNX833X_PIC_IRQ_BASE + 10)
#define PNX833X_PIC_AOI_INT (PNX833X_PIC_IRQ_BASE + 11)
#define PNX833X_PIC_SYNC_INT (PNX833X_PIC_IRQ_BASE + 12)
diff --git a/arch/mips/include/asm/mach-pnx833x/pnx833x.h b/arch/mips/include/asm/mach-pnx833x/pnx833x.h
index 100f52870e3..e6fc3a9d594 100644
--- a/arch/mips/include/asm/mach-pnx833x/pnx833x.h
+++ b/arch/mips/include/asm/mach-pnx833x/pnx833x.h
@@ -73,7 +73,7 @@
#define PNX833X_RESET_CONTROL PNX833X_REG(0x8004)
-#define PNX833X_RESET_CONTROL_2 PNX833X_REG(0x8014)
+#define PNX833X_RESET_CONTROL_2 PNX833X_REG(0x8014)
#define PNX833X_PIC_REG(offs) PNX833X_REG(0x01000 + (offs))
#define PNX833X_PIC_INT_PRIORITY PNX833X_PIC_REG(0x0)
@@ -82,10 +82,10 @@
#define PNX833X_PIC_INT_SRC_INT_SRC_SHIFT 3
#define PNX833X_PIC_INT_REG(irq) PNX833X_PIC_REG(0x10 + 4*(irq))
-#define PNX833X_CLOCK_CPUCP_CTL PNX833X_REG(0x9228)
+#define PNX833X_CLOCK_CPUCP_CTL PNX833X_REG(0x9228)
#define PNX833X_CLOCK_CPUCP_CTL_EXIT_RESET 0x00000002ul /* bit 1 */
#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_MASK 0x00000018ul /* bits 4:3 */
-#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT 3
+#define PNX833X_CLOCK_CPUCP_CTL_DIV_CLOCK_SHIFT 3
#define PNX8335_CLOCK_PLL_CPU_CTL PNX833X_REG(0x9020)
#define PNX8335_CLOCK_PLL_CPU_CTL_FREQ_MASK 0x1f
@@ -149,7 +149,7 @@
#define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_MASK (1 << 14)
#define PNX833X_MIU_SEL0_SPI_MODE_ENABLE_SHIFT 14
-#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK (1 << 7)
+#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_MASK (1 << 7)
#define PNX833X_MIU_SEL0_BURST_MODE_ENABLE_SHIFT 7
#define PNX833X_MIU_SEL0_BURST_PAGE_LEN_MASK (0xF << 9)
@@ -160,10 +160,10 @@
#define PNX833X_MIU_CONFIG_SPI_OPCODE_MASK (0xFF << 3)
#define PNX833X_MIU_CONFIG_SPI_OPCODE_SHIFT 3
-#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK (1 << 2)
+#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_MASK (1 << 2)
#define PNX833X_MIU_CONFIG_SPI_DATA_ENABLE_SHIFT 2
-#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK (1 << 1)
+#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_MASK (1 << 1)
#define PNX833X_MIU_CONFIG_SPI_ADDR_ENABLE_SHIFT 1
#define PNX833X_MIU_CONFIG_SPI_SYNC_MASK (1 << 0)
diff --git a/arch/mips/include/asm/mach-pnx8550/cm.h b/arch/mips/include/asm/mach-pnx8550/cm.h
deleted file mode 100644
index bb0a56c7d01..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/cm.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Clock module specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_CM_H
-#define __PNX8550_CM_H
-
-#define PNX8550_CM_BASE 0xBBE47000
-
-#define PNX8550_CM_PLL0_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x000)
-#define PNX8550_CM_PLL1_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x004)
-#define PNX8550_CM_PLL2_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x008)
-#define PNX8550_CM_PLL3_CTL *(volatile unsigned long *)(PNX8550_CM_BASE + 0x00C)
-
-// Table not complete.....
-
-#define PNX8550_CM_PLL_BLOCKED_MASK 0x80000000
-#define PNX8550_CM_PLL_LOCK_MASK 0x40000000
-#define PNX8550_CM_PLL_CURRENT_ADJ_MASK 0x3c000000
-#define PNX8550_CM_PLL_N_MASK 0x01ff0000
-#define PNX8550_CM_PLL_M_MASK 0x00003f00
-#define PNX8550_CM_PLL_P_MASK 0x0000000c
-#define PNX8550_CM_PLL_PD_MASK 0x00000002
-
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/glb.h b/arch/mips/include/asm/mach-pnx8550/glb.h
deleted file mode 100644
index 07aa85e609b..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/glb.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * PNX8550 global definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_GLB_H
-#define __PNX8550_GLB_H
-
-#define PNX8550_GLB1_BASE 0xBBE63000
-#define PNX8550_GLB2_BASE 0xBBE4d000
-#define PNX8550_RESET_BASE 0xBBE60000
-
-/* PCI Inta Output Enable Registers */
-#define PNX8550_GLB2_ENAB_INTA_O *(volatile unsigned long *)(PNX8550_GLB2_BASE + 0x050)
-
-/* Bit 1:Enable DAC Powerdown
- 0:DACs are enabled and are working normally
- 1:DACs are powerdown
-*/
-#define PNX8550_GLB_DAC_PD 0x2
-/* Bit 0:Enable of PCI inta output
- 0 = Disable PCI inta output
- 1 = Enable PCI inta output
-*/
-#define PNX8550_GLB_ENABLE_INTA_O 0x1
-
-/* PCI Direct Mappings */
-#define PNX8550_PCIMEM 0x12000000
-#define PNX8550_PCIMEM_SIZE 0x08000000
-#define PNX8550_PCIIO 0x1c000000
-#define PNX8550_PCIIO_SIZE 0x02000000 /* 32M */
-
-#define PNX8550_PORT_BASE KSEG1
-
-// GPIO def
-#define PNX8550_GPIO_BASE 0x1Be00000
-
-#define PNX8550_GPIO_DIRQ0 (PNX8550_GPIO_BASE + 0x104500)
-#define PNX8550_GPIO_MC1 (PNX8550_GPIO_BASE + 0x104004)
-#define PNX8550_GPIO_MC_31_BIT 30
-#define PNX8550_GPIO_MC_30_BIT 28
-#define PNX8550_GPIO_MC_29_BIT 26
-#define PNX8550_GPIO_MC_28_BIT 24
-#define PNX8550_GPIO_MC_27_BIT 22
-#define PNX8550_GPIO_MC_26_BIT 20
-#define PNX8550_GPIO_MC_25_BIT 18
-#define PNX8550_GPIO_MC_24_BIT 16
-#define PNX8550_GPIO_MC_23_BIT 14
-#define PNX8550_GPIO_MC_22_BIT 12
-#define PNX8550_GPIO_MC_21_BIT 10
-#define PNX8550_GPIO_MC_20_BIT 8
-#define PNX8550_GPIO_MC_19_BIT 6
-#define PNX8550_GPIO_MC_18_BIT 4
-#define PNX8550_GPIO_MC_17_BIT 2
-#define PNX8550_GPIO_MC_16_BIT 0
-
-#define PNX8550_GPIO_MODE_PRIMOP 0x1
-#define PNX8550_GPIO_MODE_NO_OPENDR 0x2
-#define PNX8550_GPIO_MODE_OPENDR 0x3
-
-// RESET module
-#define PNX8550_RST_CTL *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x0)
-#define PNX8550_RST_CAUSE *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x4)
-#define PNX8550_RST_EN_WATCHDOG *(volatile unsigned long *)(PNX8550_RESET_BASE + 0x8)
-
-#define PNX8550_RST_REL_MIPS_RST_N 0x8
-#define PNX8550_RST_DO_SW_RST 0x4
-#define PNX8550_RST_REL_SYS_RST_OUT 0x2
-#define PNX8550_RST_ASSERT_SYS_RST_OUT 0x1
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/int.h b/arch/mips/include/asm/mach-pnx8550/int.h
deleted file mode 100644
index 0e0668b524f..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/int.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Interrupt specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_INT_H
-#define __PNX8550_INT_H
-
-#define PNX8550_GIC_BASE 0xBBE3E000
-
-#define PNX8550_GIC_PRIMASK_0 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x000)
-#define PNX8550_GIC_PRIMASK_1 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x004)
-#define PNX8550_GIC_VECTOR_0 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x100)
-#define PNX8550_GIC_VECTOR_1 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x104)
-#define PNX8550_GIC_PEND_1_31 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x200)
-#define PNX8550_GIC_PEND_32_63 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x204)
-#define PNX8550_GIC_PEND_64_70 *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x208)
-#define PNX8550_GIC_FEATURES *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x300)
-#define PNX8550_GIC_REQ(x) *(volatile unsigned long *)(PNX8550_GIC_BASE + 0x400 + (x)*4)
-#define PNX8550_GIC_MOD_ID *(volatile unsigned long *)(PNX8550_GIC_BASE + 0xFFC)
-
-// cp0 is two software + six hw exceptions
-#define PNX8550_INT_CP0_TOTINT 8
-#define PNX8550_INT_CP0_MIN 0
-#define PNX8550_INT_CP0_MAX (PNX8550_INT_CP0_MIN + PNX8550_INT_CP0_TOTINT - 1)
-
-#define MIPS_CPU_GIC_IRQ 2
-#define MIPS_CPU_TIMER_IRQ 7
-
-// GIC are 71 exceptions connected to cp0's first hardware exception
-#define PNX8550_INT_GIC_TOTINT 71
-#define PNX8550_INT_GIC_MIN (PNX8550_INT_CP0_MAX+1)
-#define PNX8550_INT_GIC_MAX (PNX8550_INT_GIC_MIN + PNX8550_INT_GIC_TOTINT - 1)
-
-#define PNX8550_INT_UNDEF (PNX8550_INT_GIC_MIN+0)
-#define PNX8550_INT_IPC_TARGET0_MIPS (PNX8550_INT_GIC_MIN+1)
-#define PNX8550_INT_IPC_TARGET1_TM32_1 (PNX8550_INT_GIC_MIN+2)
-#define PNX8550_INT_IPC_TARGET1_TM32_2 (PNX8550_INT_GIC_MIN+3)
-#define PNX8550_INT_RESERVED_4 (PNX8550_INT_GIC_MIN+4)
-#define PNX8550_INT_USB (PNX8550_INT_GIC_MIN+5)
-#define PNX8550_INT_GPIO_EQ1 (PNX8550_INT_GIC_MIN+6)
-#define PNX8550_INT_GPIO_EQ2 (PNX8550_INT_GIC_MIN+7)
-#define PNX8550_INT_GPIO_EQ3 (PNX8550_INT_GIC_MIN+8)
-#define PNX8550_INT_GPIO_EQ4 (PNX8550_INT_GIC_MIN+9)
-
-#define PNX8550_INT_GPIO_EQ5 (PNX8550_INT_GIC_MIN+10)
-#define PNX8550_INT_GPIO_EQ6 (PNX8550_INT_GIC_MIN+11)
-#define PNX8550_INT_RESERVED_12 (PNX8550_INT_GIC_MIN+12)
-#define PNX8550_INT_QVCP1 (PNX8550_INT_GIC_MIN+13)
-#define PNX8550_INT_QVCP2 (PNX8550_INT_GIC_MIN+14)
-#define PNX8550_INT_I2C1 (PNX8550_INT_GIC_MIN+15)
-#define PNX8550_INT_I2C2 (PNX8550_INT_GIC_MIN+16)
-#define PNX8550_INT_ISO_UART1 (PNX8550_INT_GIC_MIN+17)
-#define PNX8550_INT_ISO_UART2 (PNX8550_INT_GIC_MIN+18)
-#define PNX8550_INT_UART1 (PNX8550_INT_GIC_MIN+19)
-
-#define PNX8550_INT_UART2 (PNX8550_INT_GIC_MIN+20)
-#define PNX8550_INT_QNTR (PNX8550_INT_GIC_MIN+21)
-#define PNX8550_INT_RESERVED22 (PNX8550_INT_GIC_MIN+22)
-#define PNX8550_INT_T_DSC (PNX8550_INT_GIC_MIN+23)
-#define PNX8550_INT_M_DSC (PNX8550_INT_GIC_MIN+24)
-#define PNX8550_INT_RESERVED25 (PNX8550_INT_GIC_MIN+25)
-#define PNX8550_INT_2D_DRAW_ENG (PNX8550_INT_GIC_MIN+26)
-#define PNX8550_INT_MEM_BASED_SCALAR1 (PNX8550_INT_GIC_MIN+27)
-#define PNX8550_INT_VIDEO_MPEG (PNX8550_INT_GIC_MIN+28)
-#define PNX8550_INT_VIDEO_INPUT_P1 (PNX8550_INT_GIC_MIN+29)
-
-#define PNX8550_INT_VIDEO_INPUT_P2 (PNX8550_INT_GIC_MIN+30)
-#define PNX8550_INT_SPDI1 (PNX8550_INT_GIC_MIN+31)
-#define PNX8550_INT_SPDO (PNX8550_INT_GIC_MIN+32)
-#define PNX8550_INT_AUDIO_INPUT1 (PNX8550_INT_GIC_MIN+33)
-#define PNX8550_INT_AUDIO_OUTPUT1 (PNX8550_INT_GIC_MIN+34)
-#define PNX8550_INT_AUDIO_INPUT2 (PNX8550_INT_GIC_MIN+35)
-#define PNX8550_INT_AUDIO_OUTPUT2 (PNX8550_INT_GIC_MIN+36)
-#define PNX8550_INT_MEMBASED_SCALAR2 (PNX8550_INT_GIC_MIN+37)
-#define PNX8550_INT_VPK (PNX8550_INT_GIC_MIN+38)
-#define PNX8550_INT_MPEG1_MIPS (PNX8550_INT_GIC_MIN+39)
-
-#define PNX8550_INT_MPEG1_TM (PNX8550_INT_GIC_MIN+40)
-#define PNX8550_INT_MPEG2_MIPS (PNX8550_INT_GIC_MIN+41)
-#define PNX8550_INT_MPEG2_TM (PNX8550_INT_GIC_MIN+42)
-#define PNX8550_INT_TS_DMA (PNX8550_INT_GIC_MIN+43)
-#define PNX8550_INT_EDMA (PNX8550_INT_GIC_MIN+44)
-#define PNX8550_INT_TM_DEBUG1 (PNX8550_INT_GIC_MIN+45)
-#define PNX8550_INT_TM_DEBUG2 (PNX8550_INT_GIC_MIN+46)
-#define PNX8550_INT_PCI_INTA (PNX8550_INT_GIC_MIN+47)
-#define PNX8550_INT_CLOCK_MODULE (PNX8550_INT_GIC_MIN+48)
-#define PNX8550_INT_PCI_XIO_INTA_PCI (PNX8550_INT_GIC_MIN+49)
-
-#define PNX8550_INT_PCI_XIO_INTB_DMA (PNX8550_INT_GIC_MIN+50)
-#define PNX8550_INT_PCI_XIO_INTC_GPPM (PNX8550_INT_GIC_MIN+51)
-#define PNX8550_INT_PCI_XIO_INTD_GPXIO (PNX8550_INT_GIC_MIN+52)
-#define PNX8550_INT_DVD_CSS (PNX8550_INT_GIC_MIN+53)
-#define PNX8550_INT_VLD (PNX8550_INT_GIC_MIN+54)
-#define PNX8550_INT_GPIO_TSU_7_0 (PNX8550_INT_GIC_MIN+55)
-#define PNX8550_INT_GPIO_TSU_15_8 (PNX8550_INT_GIC_MIN+56)
-#define PNX8550_INT_GPIO_CTU_IR (PNX8550_INT_GIC_MIN+57)
-#define PNX8550_INT_GPIO0 (PNX8550_INT_GIC_MIN+58)
-#define PNX8550_INT_GPIO1 (PNX8550_INT_GIC_MIN+59)
-
-#define PNX8550_INT_GPIO2 (PNX8550_INT_GIC_MIN+60)
-#define PNX8550_INT_GPIO3 (PNX8550_INT_GIC_MIN+61)
-#define PNX8550_INT_GPIO4 (PNX8550_INT_GIC_MIN+62)
-#define PNX8550_INT_GPIO5 (PNX8550_INT_GIC_MIN+63)
-#define PNX8550_INT_GPIO6 (PNX8550_INT_GIC_MIN+64)
-#define PNX8550_INT_GPIO7 (PNX8550_INT_GIC_MIN+65)
-#define PNX8550_INT_PMAN_SECURITY (PNX8550_INT_GIC_MIN+66)
-#define PNX8550_INT_I2C3 (PNX8550_INT_GIC_MIN+67)
-#define PNX8550_INT_RESERVED_68 (PNX8550_INT_GIC_MIN+68)
-#define PNX8550_INT_SPDI2 (PNX8550_INT_GIC_MIN+69)
-
-#define PNX8550_INT_I2C4 (PNX8550_INT_GIC_MIN+70)
-
-// Timer are 3 exceptions connected to cp0's 7th hardware exception
-#define PNX8550_INT_TIMER_TOTINT 3
-#define PNX8550_INT_TIMER_MIN (PNX8550_INT_GIC_MAX+1)
-#define PNX8550_INT_TIMER_MAX (PNX8550_INT_TIMER_MIN + PNX8550_INT_TIMER_TOTINT - 1)
-
-#define PNX8550_INT_TIMER1 (PNX8550_INT_TIMER_MIN+0)
-#define PNX8550_INT_TIMER2 (PNX8550_INT_TIMER_MIN+1)
-#define PNX8550_INT_TIMER3 (PNX8550_INT_TIMER_MIN+2)
-#define PNX8550_INT_WATCHDOG PNX8550_INT_TIMER3
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h b/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h
deleted file mode 100644
index bdde00c9199..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/kernel-entry-init.h
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2005 Embedded Alley Solutions, Inc
- */
-#ifndef __ASM_MACH_KERNEL_ENTRY_INIT_H
-#define __ASM_MACH_KERNEL_ENTRY_INIT_H
-
-#include <asm/cacheops.h>
-#include <asm/addrspace.h>
-
-#define CO_CONFIGPR_VALID 0x3F1F41FF /* valid bits to write to ConfigPR */
-#define HAZARD_CP0 nop; nop; nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;
-#define CACHE_OPC 0xBC000000 /* MIPS cache instruction opcode */
-#define ICACHE_LINE_SIZE 32 /* Instruction cache line size bytes */
-#define DCACHE_LINE_SIZE 32 /* Data cache line size in bytes */
-
-#define ICACHE_SET_COUNT 256 /* Instruction cache set count */
-#define DCACHE_SET_COUNT 128 /* Data cache set count */
-
-#define ICACHE_SET_SIZE (ICACHE_SET_COUNT * ICACHE_LINE_SIZE)
-#define DCACHE_SET_SIZE (DCACHE_SET_COUNT * DCACHE_LINE_SIZE)
-
- .macro kernel_entry_setup
- .set push
- .set noreorder
- /*
- * PNX8550 entry point, when running a non compressed
- * kernel. When loading a zImage, the head.S code in
- * arch/mips/zboot/pnx8550 will init the caches and,
- * decompress the kernel, and branch to kernel_entry.
- */
-cache_begin: li t0, (1<<28)
- mtc0 t0, CP0_STATUS /* cp0 usable */
- HAZARD_CP0
-
- mtc0 zero, CP0_CAUSE
- HAZARD_CP0
-
-
- /* Set static virtual to phys address translation and TLB disabled */
- mfc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-
- and t0, ~((1<<19) | (1<<20)) /* TLB/MAP cleared */
- mtc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-
- /* CPU boots with kseg0 cache algo set to 0x2 -- uncached */
-
- init_icache
- nop
- init_dcache
- nop
-
- cachePr4450ICReset
- nop
-
- cachePr4450DCReset
- nop
-
- /* read ConfigPR into t0 */
- mfc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-
- /* enable the TLB */
- or t0, (1<<19)
-
- /* disable the ICACHE: at least 10x slower */
- /* or t0, (1<<26) */
-
- /* disable the DCACHE; CONFIG_CPU_HAS_LLSC should not be set */
- /* or t0, (1<<27) */
-
- and t0, CO_CONFIGPR_VALID
-
- /* enable TLB. */
- mtc0 t0, CP0_CONFIG, 7
- HAZARD_CP0
-cache_end:
- /* Setup CMEM_0 to MMIO address space, 2MB */
- lui t0, 0x1BE0
- addi t0, t0, 0x3
- mtc0 $8, $22, 4
- nop
-
- /* Setup CMEM_1, 128MB */
- lui t0, 0x1000
- addi t0, t0, 0xf
- mtc0 $8, $22, 5
- nop
-
-
- /* Setup CMEM_2, 32MB */
- lui t0, 0x1C00
- addi t0, t0, 0xb
- mtc0 $8, $22, 6
- nop
-
- /* Setup CMEM_3, 0MB */
- lui t0, 0x0
- addi t0, t0, 0x0
- mtc0 $8, $22, 7
- nop
-
- /* Enable cache */
- mfc0 t0, CP0_CONFIG
- HAZARD_CP0
- and t0, t0, 0xFFFFFFF8
- or t0, t0, 3
- mtc0 t0, CP0_CONFIG
- HAZARD_CP0
- .set pop
- .endm
-
- .macro init_icache
- .set push
- .set noreorder
-
- /* Get Cache Configuration */
- mfc0 t3, CP0_CONFIG, 1
- HAZARD_CP0
-
- /* get cache Line size */
-
- srl t1, t3, 19 /* C0_CONFIGPR_IL_SHIFT */
- andi t1, t1, 0x7 /* C0_CONFIGPR_IL_MASK */
- beq t1, zero, pr4450_instr_cache_invalidated /* if zero instruction cache is absent */
- nop
- addiu t0, t1, 1
- ori t1, zero, 1
- sllv t1, t1, t0
-
- /* get max cache Index */
- srl t2, t3, 22 /* C0_CONFIGPR_IS_SHIFT */
- andi t2, t2, 0x7 /* C0_CONFIGPR_IS_MASK */
- addiu t0, t2, 6
- ori t2, zero, 1
- sllv t2, t2, t0
-
- /* get max cache way */
- srl t3, t3, 16 /* C0_CONFIGPR_IA_SHIFT */
- andi t3, t3, 0x7 /* C0_CONFIGPR_IA_MASK */
- addiu t3, t3, 1
-
- /* total no of cache lines */
- multu t2, t3 /* max index * max way */
- mflo t2
- addiu t2, t2, -1
-
- move t0, zero
-pr4450_next_instruction_cache_set:
- cache Index_Invalidate_I, 0(t0)
- addu t0, t0, t1 /* add bytes in a line */
- bne t2, zero, pr4450_next_instruction_cache_set
- addiu t2, t2, -1 /* reduce no of lines to invalidate by one */
-pr4450_instr_cache_invalidated:
- .set pop
- .endm
-
- .macro init_dcache
- .set push
- .set noreorder
- move t1, zero
-
- /* Store Tag Information */
- mtc0 zero, CP0_TAGLO, 0
- HAZARD_CP0
-
- mtc0 zero, CP0_TAGHI, 0
- HAZARD_CP0
-
- /* Cache size is 16384 = 512 lines x 32 bytes per line */
- or t2, zero, (128*4)-1 /* 512 lines */
- /* Invalidate all lines */
-2:
- cache Index_Store_Tag_D, 0(t1)
- addiu t2, t2, -1
- bne t2, zero, 2b
- addiu t1, t1, 32 /* 32 bytes in a line */
- .set pop
- .endm
-
- .macro cachePr4450ICReset
- .set push
- .set noreorder
-
- /* Save CP0 status reg on entry; */
- /* disable interrupts during cache reset */
- mfc0 t0, CP0_STATUS /* T0 = interrupt status on entry */
- HAZARD_CP0
-
- mtc0 zero, CP0_STATUS /* disable CPU interrupts */
- HAZARD_CP0
-
- or t1, zero, zero /* T1 = starting cache index (0) */
- ori t2, zero, (256 - 1) /* T2 = inst cache set cnt - 1 */
-
- icache_invd_loop:
- /* 9 == register t1 */
- .word CACHE_OPC | (9 << 21) | (Index_Invalidate_I << 16) | \
- (0 * ICACHE_SET_SIZE) /* invalidate inst cache WAY0 */
- .word CACHE_OPC | (9 << 21) | (Index_Invalidate_I << 16) | \
- (1 * ICACHE_SET_SIZE) /* invalidate inst cache WAY1 */
-
- addiu t1, t1, ICACHE_LINE_SIZE /* T1 = next cache line index */
- bne t2, zero, icache_invd_loop /* T2 = 0 if all sets invalidated */
- addiu t2, t2, -1 /* decrement T2 set cnt (delay slot) */
-
- /* Initialize the latches in the instruction cache tag */
- /* that drive the way selection tri-state bus drivers, by doing a */
- /* dummy load while the instruction cache is still disabled. */
- /* TODO: Is this needed ? */
- la t1, KSEG0 /* T1 = cached memory base address */
- lw zero, 0x0000(t1) /* (dummy read of first memory word) */
-
- mtc0 t0, CP0_STATUS /* restore interrupt status on entry */
- HAZARD_CP0
- .set pop
- .endm
-
- .macro cachePr4450DCReset
- .set push
- .set noreorder
- mfc0 t0, CP0_STATUS /* T0 = interrupt status on entry */
- HAZARD_CP0
- mtc0 zero, CP0_STATUS /* disable CPU interrupts */
- HAZARD_CP0
-
- /* Writeback/invalidate entire data cache sets/ways/lines */
- or t1, zero, zero /* T1 = starting cache index (0) */
- ori t2, zero, (DCACHE_SET_COUNT - 1) /* T2 = data cache set cnt - 1 */
-
- dcache_wbinvd_loop:
- /* 9 == register t1 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (0 * DCACHE_SET_SIZE) /* writeback/invalidate WAY0 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (1 * DCACHE_SET_SIZE) /* writeback/invalidate WAY1 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (2 * DCACHE_SET_SIZE) /* writeback/invalidate WAY2 */
- .word CACHE_OPC | (9 << 21) | (Index_Writeback_Inv_D << 16) | \
- (3 * DCACHE_SET_SIZE) /* writeback/invalidate WAY3 */
-
- addiu t1, t1, DCACHE_LINE_SIZE /* T1 = next data cache line index */
- bne t2, zero, dcache_wbinvd_loop /* T2 = 0 when wbinvd entire cache */
- addiu t2, t2, -1 /* decrement T2 set cnt (delay slot) */
-
- /* Initialize the latches in the data cache tag that drive the way
- selection tri-state bus drivers, by doing a dummy load while the
- data cache is still in the disabled mode. TODO: Is this needed ? */
- la t1, KSEG0 /* T1 = cached memory base address */
- lw zero, 0x0000(t1) /* (dummy read of first memory word) */
-
- mtc0 t0, CP0_STATUS /* restore interrupt status on entry */
- HAZARD_CP0
- .set pop
- .endm
-
-#endif /* __ASM_MACH_KERNEL_ENTRY_INIT_H */
diff --git a/arch/mips/include/asm/mach-pnx8550/nand.h b/arch/mips/include/asm/mach-pnx8550/nand.h
deleted file mode 100644
index aefbc514ab0..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/nand.h
+++ /dev/null
@@ -1,121 +0,0 @@
-#ifndef __PNX8550_NAND_H
-#define __PNX8550_NAND_H
-
-#define PNX8550_NAND_BASE_ADDR 0x10000000
-#define PNX8550_PCIXIO_BASE 0xBBE40000
-
-#define PNX8550_DMA_EXT_ADDR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x800)
-#define PNX8550_DMA_INT_ADDR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x804)
-#define PNX8550_DMA_TRANS_SIZE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x808)
-#define PNX8550_DMA_CTRL *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x80c)
-#define PNX8550_XIO_SEL0 *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x814)
-#define PNX8550_GPXIO_ADDR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x820)
-#define PNX8550_GPXIO_WR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x824)
-#define PNX8550_GPXIO_RD *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x828)
-#define PNX8550_GPXIO_CTRL *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x82C)
-#define PNX8550_XIO_FLASH_CTRL *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0x830)
-#define PNX8550_GPXIO_INT_STATUS *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb0)
-#define PNX8550_GPXIO_INT_ENABLE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb4)
-#define PNX8550_GPXIO_INT_CLEAR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfb8)
-#define PNX8550_DMA_INT_STATUS *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd0)
-#define PNX8550_DMA_INT_ENABLE *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd4)
-#define PNX8550_DMA_INT_CLEAR *(volatile unsigned long *)(PNX8550_PCIXIO_BASE + 0xfd8)
-
-#define PNX8550_XIO_SEL0_EN_16BIT 0x00800000
-#define PNX8550_XIO_SEL0_USE_ACK 0x00400000
-#define PNX8550_XIO_SEL0_REN_HIGH 0x00100000
-#define PNX8550_XIO_SEL0_REN_LOW 0x00040000
-#define PNX8550_XIO_SEL0_WEN_HIGH 0x00010000
-#define PNX8550_XIO_SEL0_WEN_LOW 0x00004000
-#define PNX8550_XIO_SEL0_WAIT 0x00000200
-#define PNX8550_XIO_SEL0_OFFSET 0x00000020
-#define PNX8550_XIO_SEL0_TYPE_68360 0x00000000
-#define PNX8550_XIO_SEL0_TYPE_NOR 0x00000008
-#define PNX8550_XIO_SEL0_TYPE_NAND 0x00000010
-#define PNX8550_XIO_SEL0_TYPE_IDE 0x00000018
-#define PNX8550_XIO_SEL0_SIZE_8MB 0x00000000
-#define PNX8550_XIO_SEL0_SIZE_16MB 0x00000002
-#define PNX8550_XIO_SEL0_SIZE_32MB 0x00000004
-#define PNX8550_XIO_SEL0_SIZE_64MB 0x00000006
-#define PNX8550_XIO_SEL0_ENAB 0x00000001
-
-#define PNX8550_SEL0_DEFAULT ((PNX8550_XIO_SEL0_EN_16BIT) | \
- (PNX8550_XIO_SEL0_REN_HIGH*0)| \
- (PNX8550_XIO_SEL0_REN_LOW*2) | \
- (PNX8550_XIO_SEL0_WEN_HIGH*0)| \
- (PNX8550_XIO_SEL0_WEN_LOW*2) | \
- (PNX8550_XIO_SEL0_WAIT*4) | \
- (PNX8550_XIO_SEL0_OFFSET*0) | \
- (PNX8550_XIO_SEL0_TYPE_NAND) | \
- (PNX8550_XIO_SEL0_SIZE_32MB) | \
- (PNX8550_XIO_SEL0_ENAB))
-
-#define PNX8550_GPXIO_PENDING 0x00000200
-#define PNX8550_GPXIO_DONE 0x00000100
-#define PNX8550_GPXIO_CLR_DONE 0x00000080
-#define PNX8550_GPXIO_INIT 0x00000040
-#define PNX8550_GPXIO_READ_CMD 0x00000010
-#define PNX8550_GPXIO_BEN 0x0000000F
-
-#define PNX8550_XIO_FLASH_64MB 0x00200000
-#define PNX8550_XIO_FLASH_INC_DATA 0x00100000
-#define PNX8550_XIO_FLASH_CMD_PH 0x000C0000
-#define PNX8550_XIO_FLASH_CMD_PH2 0x00080000
-#define PNX8550_XIO_FLASH_CMD_PH1 0x00040000
-#define PNX8550_XIO_FLASH_CMD_PH0 0x00000000
-#define PNX8550_XIO_FLASH_ADR_PH 0x00030000
-#define PNX8550_XIO_FLASH_ADR_PH3 0x00030000
-#define PNX8550_XIO_FLASH_ADR_PH2 0x00020000
-#define PNX8550_XIO_FLASH_ADR_PH1 0x00010000
-#define PNX8550_XIO_FLASH_ADR_PH0 0x00000000
-#define PNX8550_XIO_FLASH_CMD_B(x) ((x<<8) & 0x0000FF00)
-#define PNX8550_XIO_FLASH_CMD_A(x) (x & 0x000000FF)
-
-#define PNX8550_XIO_INT_ACK 0x00004000
-#define PNX8550_XIO_INT_COMPL 0x00002000
-#define PNX8550_XIO_INT_NONSUP 0x00000200
-#define PNX8550_XIO_INT_ABORT 0x00000004
-
-#define PNX8550_DMA_CTRL_SINGLE_DATA 0x00000400
-#define PNX8550_DMA_CTRL_SND2XIO 0x00000200
-#define PNX8550_DMA_CTRL_FIX_ADDR 0x00000100
-#define PNX8550_DMA_CTRL_BURST_8 0x00000000
-#define PNX8550_DMA_CTRL_BURST_16 0x00000020
-#define PNX8550_DMA_CTRL_BURST_32 0x00000040
-#define PNX8550_DMA_CTRL_BURST_64 0x00000060
-#define PNX8550_DMA_CTRL_BURST_128 0x00000080
-#define PNX8550_DMA_CTRL_BURST_256 0x000000A0
-#define PNX8550_DMA_CTRL_BURST_512 0x000000C0
-#define PNX8550_DMA_CTRL_BURST_NORES 0x000000E0
-#define PNX8550_DMA_CTRL_INIT_DMA 0x00000010
-#define PNX8550_DMA_CTRL_CMD_TYPE 0x0000000F
-
-/* see PCI system arch, page 100 for the full list: */
-#define PNX8550_DMA_CTRL_PCI_CMD_READ 0x00000006
-#define PNX8550_DMA_CTRL_PCI_CMD_WRITE 0x00000007
-
-#define PNX8550_DMA_INT_STAT_ACK_DONE (1<<14)
-#define PNX8550_DMA_INT_STAT_DMA_DONE (1<<12)
-#define PNX8550_DMA_INT_STAT_DMA_ERR (1<<9)
-#define PNX8550_DMA_INT_STAT_PERR5 (1<<5)
-#define PNX8550_DMA_INT_STAT_PERR4 (1<<4)
-#define PNX8550_DMA_INT_STAT_M_ABORT (1<<2)
-#define PNX8550_DMA_INT_STAT_T_ABORT (1<<1)
-
-#define PNX8550_DMA_INT_EN_ACK_DONE (1<<14)
-#define PNX8550_DMA_INT_EN_DMA_DONE (1<<12)
-#define PNX8550_DMA_INT_EN_DMA_ERR (1<<9)
-#define PNX8550_DMA_INT_EN_PERR5 (1<<5)
-#define PNX8550_DMA_INT_EN_PERR4 (1<<4)
-#define PNX8550_DMA_INT_EN_M_ABORT (1<<2)
-#define PNX8550_DMA_INT_EN_T_ABORT (1<<1)
-
-#define PNX8550_DMA_INT_CLR_ACK_DONE (1<<14)
-#define PNX8550_DMA_INT_CLR_DMA_DONE (1<<12)
-#define PNX8550_DMA_INT_CLR_DMA_ERR (1<<9)
-#define PNX8550_DMA_INT_CLR_PERR5 (1<<5)
-#define PNX8550_DMA_INT_CLR_PERR4 (1<<4)
-#define PNX8550_DMA_INT_CLR_M_ABORT (1<<2)
-#define PNX8550_DMA_INT_CLR_T_ABORT (1<<1)
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/pci.h b/arch/mips/include/asm/mach-pnx8550/pci.h
deleted file mode 100644
index b921508d701..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/pci.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * PCI specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_PCI_H
-#define __PNX8550_PCI_H
-
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#define PCI_ACCESS_READ 0
-#define PCI_ACCESS_WRITE 1
-
-#define PCI_CMD_IOR 0x20
-#define PCI_CMD_IOW 0x30
-#define PCI_CMD_CONFIG_READ 0xa0
-#define PCI_CMD_CONFIG_WRITE 0xb0
-
-#define PCI_IO_TIMEOUT 1000
-#define PCI_IO_RETRY 5
-/* Timeout for IO and CFG accesses.
- This is in 1/1024 th of a jiffie(=10ms)
- i.e. approx 10us */
-#define PCI_IO_JIFFIES_TIMEOUT 40
-#define PCI_IO_JIFFIES_SHIFT 10
-
-#define PCI_BYTE_ENABLE_MASK 0x0000000f
-#define PCI_CFG_BUS_SHIFT 16
-#define PCI_CFG_FUNC_SHIFT 8
-#define PCI_CFG_REG_SHIFT 2
-
-#define PCI_BASE 0x1be00000
-#define PCI_SETUP 0x00040010
-#define PCI_DIS_REQGNT (1<<30)
-#define PCI_DIS_REQGNTA (1<<29)
-#define PCI_DIS_REQGNTB (1<<28)
-#define PCI_D2_SUPPORT (1<<27)
-#define PCI_D1_SUPPORT (1<<26)
-#define PCI_EN_TA (1<<24)
-#define PCI_EN_PCI2MMI (1<<23)
-#define PCI_EN_XIO (1<<22)
-#define PCI_BASE18_PREF (1<<21)
-#define SIZE_16M 0x3
-#define SIZE_32M 0x4
-#define SIZE_64M 0x5
-#define SIZE_128M 0x6
-#define PCI_SETUP_BASE18_SIZE(X) (X<<18)
-#define PCI_SETUP_BASE18_EN (1<<17)
-#define PCI_SETUP_BASE14_PREF (1<<16)
-#define PCI_SETUP_BASE14_SIZE(X) (X<<12)
-#define PCI_SETUP_BASE14_EN (1<<11)
-#define PCI_SETUP_BASE10_PREF (1<<10)
-#define PCI_SETUP_BASE10_SIZE(X) (X<<7)
-#define PCI_SETUP_CFGMANAGE_EN (1<<1)
-#define PCI_SETUP_PCIARB_EN (1<<0)
-
-#define PCI_CTRL 0x040014
-#define PCI_SWPB_DCS_PCI (1<<16)
-#define PCI_SWPB_PCI_PCI (1<<15)
-#define PCI_SWPB_PCI_DCS (1<<14)
-#define PCI_REG_WR_POST (1<<13)
-#define PCI_XIO_WR_POST (1<<12)
-#define PCI_PCI2_WR_POST (1<<13)
-#define PCI_PCI1_WR_POST (1<<12)
-#define PCI_SERR_SEEN (1<<11)
-#define PCI_B10_SPEC_RD (1<<6)
-#define PCI_B14_SPEC_RD (1<<5)
-#define PCI_B18_SPEC_RD (1<<4)
-#define PCI_B10_NOSUBWORD (1<<3)
-#define PCI_B14_NOSUBWORD (1<<2)
-#define PCI_B18_NOSUBWORD (1<<1)
-#define PCI_RETRY_TMREN (1<<0)
-
-#define PCI_BASE1_LO 0x040018
-#define PCI_BASE1_HI 0x04001C
-#define PCI_BASE2_LO 0x040020
-#define PCI_BASE2_HI 0x040024
-#define PCI_RDLIFETIM 0x040028
-#define PCI_GPPM_ADDR 0x04002C
-#define PCI_GPPM_WDAT 0x040030
-#define PCI_GPPM_RDAT 0x040034
-#define PCI_GPPM_CTRL 0x040038
-#define GPPM_DONE (1<<10)
-#define INIT_PCI_CYCLE (1<<9)
-#define GPPM_CMD(X) (((X)&0xf)<<4)
-#define GPPM_BYTEEN(X) ((X)&0xf)
-#define PCI_UNLOCKREG 0x04003C
-#define UNLOCK_SSID(X) (((X)&0xff)<<8)
-#define UNLOCK_SETUP(X) (((X)&0xff)<<0)
-#define UNLOCK_MAGIC 0xCA
-#define PCI_DEV_VEND_ID 0x040040
-#define DEVICE_ID(X) (((X)>>16)&0xffff)
-#define VENDOR_ID(X) (((X)&0xffff))
-#define PCI_CFG_CMDSTAT 0x040044
-#define PCI_CFG_STATUS(X) (((X)>>16)&0xffff)
-#define PCI_CFG_COMMAND(X) ((X)&0xffff)
-#define PCI_CLASS_REV 0x040048
-#define PCI_CLASSCODE(X) (((X)>>8)&0xffffff)
-#define PCI_REVID(X) ((X)&0xff)
-#define PCI_LAT_TMR 0x04004c
-#define PCI_BASE10 0x040050
-#define PCI_BASE14 0x040054
-#define PCI_BASE18 0x040058
-#define PCI_SUBSYS_ID 0x04006c
-#define PCI_CAP_PTR 0x040074
-#define PCI_CFG_MISC 0x04007c
-#define PCI_PMC 0x040080
-#define PCI_PWR_STATE 0x040084
-#define PCI_IO 0x040088
-#define PCI_SLVTUNING 0x04008C
-#define PCI_DMATUNING 0x040090
-#define PCI_DMAEADDR 0x040800
-#define PCI_DMAIADDR 0x040804
-#define PCI_DMALEN 0x040808
-#define PCI_DMACTRL 0x04080C
-#define PCI_XIOCTRL 0x040810
-#define PCI_SEL0PROF 0x040814
-#define PCI_SEL1PROF 0x040818
-#define PCI_SEL2PROF 0x04081C
-#define PCI_GPXIOADDR 0x040820
-#define PCI_NANDCTRLS 0x400830
-#define PCI_SEL3PROF 0x040834
-#define PCI_SEL4PROF 0x040838
-#define PCI_GPXIO_STAT 0x040FB0
-#define PCI_GPXIO_IMASK 0x040FB4
-#define PCI_GPXIO_ICLR 0x040FB8
-#define PCI_GPXIO_ISET 0x040FBC
-#define PCI_GPPM_STATUS 0x040FC0
-#define GPPM_DONE (1<<10)
-#define GPPM_ERR (1<<9)
-#define GPPM_MPAR_ERR (1<<8)
-#define GPPM_PAR_ERR (1<<7)
-#define GPPM_R_MABORT (1<<2)
-#define GPPM_R_TABORT (1<<1)
-#define PCI_GPPM_IMASK 0x040FC4
-#define PCI_GPPM_ICLR 0x040FC8
-#define PCI_GPPM_ISET 0x040FCC
-#define PCI_DMA_STATUS 0x040FD0
-#define PCI_DMA_IMASK 0x040FD4
-#define PCI_DMA_ICLR 0x040FD8
-#define PCI_DMA_ISET 0x040FDC
-#define PCI_ISTATUS 0x040FE0
-#define PCI_IMASK 0x040FE4
-#define PCI_ICLR 0x040FE8
-#define PCI_ISET 0x040FEC
-#define PCI_MOD_ID 0x040FFC
-
-/*
- * PCI configuration cycle AD bus definition
- */
-/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF 0
-#define PCI_CFG_TYPE0_FUNC_SHF 8
-
-/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF 0
-#define PCI_CFG_TYPE1_FUNC_SHF 8
-#define PCI_CFG_TYPE1_DEV_SHF 11
-#define PCI_CFG_TYPE1_BUS_SHF 16
-
-/*
- * Ethernet device DP83816 definition
- */
-#define DP83816_IRQ_ETHER 66
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/uart.h b/arch/mips/include/asm/mach-pnx8550/uart.h
deleted file mode 100644
index ad7608d4487..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/uart.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef __IP3106_UART_H
-#define __IP3106_UART_H
-
-#include <int.h>
-
-/* early macros for kgdb use. fixme: clean this up */
-
-#define UART_BASE 0xbbe4a000 /* PNX8550 */
-
-#define PNX8550_UART_PORT0 (UART_BASE)
-#define PNX8550_UART_PORT1 (UART_BASE + 0x1000)
-
-#define PNX8550_UART_INT(x) (PNX8550_INT_GIC_MIN+19+x)
-#define IRQ_TO_UART(x) (x-PNX8550_INT_GIC_MIN-19)
-
-/* early macros needed for prom/kgdb */
-
-#define ip3106_lcr(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x000)
-#define ip3106_mcr(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x004)
-#define ip3106_baud(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x008)
-#define ip3106_cfg(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x00C)
-#define ip3106_fifo(base, port) *(volatile u32 *)(base+(port*0x1000) + 0x028)
-#define ip3106_istat(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE0)
-#define ip3106_ien(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE4)
-#define ip3106_iclr(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFE8)
-#define ip3106_iset(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFEC)
-#define ip3106_pd(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFF4)
-#define ip3106_mid(base, port) *(volatile u32 *)(base+(port*0x1000) + 0xFFC)
-
-#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/usb.h b/arch/mips/include/asm/mach-pnx8550/usb.h
deleted file mode 100644
index 483b7fc65d4..00000000000
--- a/arch/mips/include/asm/mach-pnx8550/usb.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * USB specific definitions
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-
-#ifndef __PNX8550_USB_H
-#define __PNX8550_USB_H
-
-/*
- * USB Host controller
- */
-
-#define PNX8550_USB_OHCI_OP_BASE 0x1be48000
-#define PNX8550_USB_OHCI_OP_LEN 0x1000
-
-#endif
diff --git a/arch/mips/include/asm/mach-powertv/asic.h b/arch/mips/include/asm/mach-powertv/asic.h
index c7077a64b9a..b341108d12f 100644
--- a/arch/mips/include/asm/mach-powertv/asic.h
+++ b/arch/mips/include/asm/mach-powertv/asic.h
@@ -23,9 +23,9 @@
#include <linux/platform_device.h>
#include <asm/mach-powertv/asic_regs.h>
-#define DVR_CAPABLE (1<<0)
-#define PCIE_CAPABLE (1<<1)
-#define FFS_CAPABLE (1<<2)
+#define DVR_CAPABLE (1<<0)
+#define PCIE_CAPABLE (1<<1)
+#define FFS_CAPABLE (1<<2)
#define DISPLAY_CAPABLE (1<<3)
/* Platform Family types
@@ -111,7 +111,7 @@ enum sys_reboot_type {
* Older drivers may report as
* userReboot. */
sys_hardware_reset = 0x09, /* HW watchdog or front-panel
- * reset button reset. Older
+ * reset button reset. Older
* drivers may report as
* userReboot. */
sys_watchdogInterrupt = 0x0A /* Pre-watchdog interrupt */
diff --git a/arch/mips/include/asm/mach-powertv/asic_regs.h b/arch/mips/include/asm/mach-powertv/asic_regs.h
index deecb26a077..06712abb3e5 100644
--- a/arch/mips/include/asm/mach-powertv/asic_regs.h
+++ b/arch/mips/include/asm/mach-powertv/asic_regs.h
@@ -49,8 +49,8 @@ enum asic_type {
#define UART1_INTEN uart1_inten
#define UART1_CONFIG1 uart1_config1
#define UART1_CONFIG2 uart1_config2
-#define UART1_DIVISORHI uart1_divisorhi
-#define UART1_DIVISORLO uart1_divisorlo
+#define UART1_DIVISORHI uart1_divisorhi
+#define UART1_DIVISORLO uart1_divisorlo
#define UART1_DATA uart1_data
#define UART1_STATUS uart1_status
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 35371641575..f8316720a21 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -4,7 +4,7 @@
* for more details.
*
* Version from mach-generic modified to support PowerTV port
- * Portions Copyright (C) 2009 Cisco Systems, Inc.
+ * Portions Copyright (C) 2009 Cisco Systems, Inc.
* Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
*
*/
diff --git a/arch/mips/include/asm/mach-powertv/interrupts.h b/arch/mips/include/asm/mach-powertv/interrupts.h
index 4fd652ceb52..6c463be6215 100644
--- a/arch/mips/include/asm/mach-powertv/interrupts.h
+++ b/arch/mips/include/asm/mach-powertv/interrupts.h
@@ -16,7 +16,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
+#ifndef _ASM_MACH_POWERTV_INTERRUPTS_H_
#define _ASM_MACH_POWERTV_INTERRUPTS_H_
/*
@@ -49,9 +49,9 @@
* glue logic inside SPARC ILC
* (see INT_SBAG_STAT, below,
* for individual interrupts) */
-#define irq_qam_b_fec (ibase+116) /* QAM B FEC Interrupt */
+#define irq_qam_b_fec (ibase+116) /* QAM B FEC Interrupt */
#define irq_qam_a_fec (ibase+115) /* QAM A FEC Interrupt */
-/* 114 unused (bit 18) */
+/* 114 unused (bit 18) */
#define irq_mailbox (ibase+113) /* Mailbox Debug Interrupt --
* Ored by glue logic inside
* SPARC ILC (see
@@ -99,9 +99,9 @@
#define irq_sata1 (ibase+87) /* SATA 1 Interrupt */
#define irq_dtcp (ibase+86) /* DTCP Interrupt */
#define irq_pciexp1 (ibase+85) /* PCI Express 1 Interrupt */
-/* 84 unused (bit 20) */
-/* 83 unused (bit 19) */
-/* 82 unused (bit 18) */
+/* 84 unused (bit 20) */
+/* 83 unused (bit 19) */
+/* 82 unused (bit 18) */
#define irq_sata2 (ibase+81) /* SATA2 Interrupt */
#define irq_uart2 (ibase+80) /* UART2 Interrupt */
#define irq_legacy_usb (ibase+79) /* Legacy USB Host ISR (1.1
@@ -117,22 +117,22 @@
#define irq_mod_dma (ibase+70) /* Modulator DMA Interrupt */
#define irq_byte_eng1 (ibase+69) /* Byte Engine Interrupt [1] */
#define irq_byte_eng0 (ibase+68) /* Byte Engine Interrupt [0] */
-/* 67 unused (bit 03) */
-/* 66 unused (bit 02) */
-/* 65 unused (bit 01) */
-/* 64 unused (bit 00) */
+/* 67 unused (bit 03) */
+/* 66 unused (bit 02) */
+/* 65 unused (bit 01) */
+/* 64 unused (bit 00) */
/*------------- Register: int_stat_1 */
-/* 63 unused (bit 31) */
-/* 62 unused (bit 30) */
-/* 61 unused (bit 29) */
-/* 60 unused (bit 28) */
-/* 59 unused (bit 27) */
-/* 58 unused (bit 26) */
-/* 57 unused (bit 25) */
-/* 56 unused (bit 24) */
+/* 63 unused (bit 31) */
+/* 62 unused (bit 30) */
+/* 61 unused (bit 29) */
+/* 60 unused (bit 28) */
+/* 59 unused (bit 27) */
+/* 58 unused (bit 26) */
+/* 57 unused (bit 25) */
+/* 56 unused (bit 24) */
#define irq_buf_dma_mem2mem (ibase+55) /* BufDMA Memory to Memory
* Interrupt */
-#define irq_buf_dma_usbtransmit (ibase+54) /* BufDMA USB Transmit
+#define irq_buf_dma_usbtransmit (ibase+54) /* BufDMA USB Transmit
* Interrupt */
#define irq_buf_dma_qpskpodtransmit (ibase+53) /* BufDMA QPSK/POD Tramsit
* Interrupt */
@@ -140,7 +140,7 @@
* Interrupt */
#define irq_buf_dma_usbrecv (ibase+51) /* BufDMA USB Receive
* Interrupt */
-#define irq_buf_dma_qpskpodrecv (ibase+50) /* BufDMA QPSK/POD Receive
+#define irq_buf_dma_qpskpodrecv (ibase+50) /* BufDMA QPSK/POD Receive
* Interrupt */
#define irq_buf_dma_recv_error (ibase+49) /* BufDMA Receive Error
* Interrupt */
@@ -166,7 +166,7 @@
* Module */
#define irq_gpio2 (ibase+37) /* GP I/O IRQ 2 - From GP I/O
* Module (ABE_intN) */
-#define irq_pcrcmplt1 (ibase+36) /* PCR Capture Complete or
+#define irq_pcrcmplt1 (ibase+36) /* PCR Capture Complete or
* Discontinuity 1 */
#define irq_pcrcmplt2 (ibase+35) /* PCR Capture Complete or
* Discontinuity 2 */
@@ -217,18 +217,18 @@
#define irq_qpsk_hecerr (ibase+11) /* QPSK HEC Error Interrupt */
#define irq_qpsk_crcerr (ibase+10) /* QPSK AAL-5 CRC Error
* Interrupt */
-/* 9 unused (bit 09) */
-/* 8 unused (bit 08) */
-#define irq_psicrcerr (ibase+7) /* QAM PSI CRC Error
+/* 9 unused (bit 09) */
+/* 8 unused (bit 08) */
+#define irq_psicrcerr (ibase+7) /* QAM PSI CRC Error
* Interrupt */
-#define irq_psilength_err (ibase+6) /* QAM PSI Length Error
+#define irq_psilength_err (ibase+6) /* QAM PSI Length Error
* Interrupt */
-#define irq_esfforward (ibase+5) /* ESF Interrupt Mark From
+#define irq_esfforward (ibase+5) /* ESF Interrupt Mark From
* Forward Path Reference -
* every 3ms when forward Mbits
* and forward slot control
* bytes are updated. */
-#define irq_esfreverse (ibase+4) /* ESF Interrupt Mark from
+#define irq_esfreverse (ibase+4) /* ESF Interrupt Mark from
* Reverse Path Reference -
* delayed from forward mark by
* the ranging delay plus a
@@ -239,15 +239,15 @@
* 1.554 M upstream rates and
* every 6 ms for 256K upstream
* rate. */
-#define irq_aloha_timeout (ibase+3) /* Slotted-Aloha timeout on
+#define irq_aloha_timeout (ibase+3) /* Slotted-Aloha timeout on
* Channel 1. */
-#define irq_reservation (ibase+2) /* Partial (or Incremental)
+#define irq_reservation (ibase+2) /* Partial (or Incremental)
* Reservation Message Completed
* or Slotted aloha verify for
* channel 1. */
-#define irq_aloha3 (ibase+1) /* Slotted-Aloha Message Verify
+#define irq_aloha3 (ibase+1) /* Slotted-Aloha Message Verify
* Interrupt or Reservation
* increment completed for
* channel 3. */
-#define irq_mpeg_d (ibase+0) /* MPEG Decoder Interrupt */
+#define irq_mpeg_d (ibase+0) /* MPEG Decoder Interrupt */
#endif /* _ASM_MACH_POWERTV_INTERRUPTS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
new file mode 100644
index 00000000000..5a508f9f943
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -0,0 +1,39 @@
+/*
+ * Ralink SoC register definitions
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef _RALINK_REGS_H_
+#define _RALINK_REGS_H_
+
+extern __iomem void *rt_sysc_membase;
+extern __iomem void *rt_memc_membase;
+
+static inline void rt_sysc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_sysc_membase + reg);
+}
+
+static inline u32 rt_sysc_r32(unsigned reg)
+{
+ return __raw_readl(rt_sysc_membase + reg);
+}
+
+static inline void rt_memc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_memc_membase + reg);
+}
+
+static inline u32 rt_memc_r32(unsigned reg)
+{
+ return __raw_readl(rt_memc_membase + reg);
+}
+
+#endif /* _RALINK_REGS_H_ */
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
new file mode 100644
index 00000000000..7d344f2d7d0
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -0,0 +1,139 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RT305X_REGS_H_
+#define _RT305X_REGS_H_
+
+enum rt305x_soc_type {
+ RT305X_SOC_UNKNOWN = 0,
+ RT305X_SOC_RT3050,
+ RT305X_SOC_RT3052,
+ RT305X_SOC_RT3350,
+ RT305X_SOC_RT3352,
+ RT305X_SOC_RT5350,
+};
+
+extern enum rt305x_soc_type rt305x_soc;
+
+static inline int soc_is_rt3050(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3050;
+}
+
+static inline int soc_is_rt3052(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3052;
+}
+
+static inline int soc_is_rt305x(void)
+{
+ return soc_is_rt3050() || soc_is_rt3052();
+}
+
+static inline int soc_is_rt3350(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3350;
+}
+
+static inline int soc_is_rt3352(void)
+{
+ return rt305x_soc == RT305X_SOC_RT3352;
+}
+
+static inline int soc_is_rt5350(void)
+{
+ return rt305x_soc == RT305X_SOC_RT5350;
+}
+
+#define RT305X_SYSC_BASE 0x10000000
+
+#define SYSC_REG_CHIP_NAME0 0x00
+#define SYSC_REG_CHIP_NAME1 0x04
+#define SYSC_REG_CHIP_ID 0x0c
+#define SYSC_REG_SYSTEM_CONFIG 0x10
+
+#define RT3052_CHIP_NAME0 0x30335452
+#define RT3052_CHIP_NAME1 0x20203235
+
+#define RT3350_CHIP_NAME0 0x33335452
+#define RT3350_CHIP_NAME1 0x20203035
+
+#define RT3352_CHIP_NAME0 0x33335452
+#define RT3352_CHIP_NAME1 0x20203235
+
+#define RT5350_CHIP_NAME0 0x33355452
+#define RT5350_CHIP_NAME1 0x20203035
+
+#define CHIP_ID_ID_MASK 0xff
+#define CHIP_ID_ID_SHIFT 8
+#define CHIP_ID_REV_MASK 0xff
+
+#define RT305X_SYSCFG_CPUCLK_SHIFT 18
+#define RT305X_SYSCFG_CPUCLK_MASK 0x1
+#define RT305X_SYSCFG_CPUCLK_LOW 0x0
+#define RT305X_SYSCFG_CPUCLK_HIGH 0x1
+
+#define RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT 2
+#define RT305X_SYSCFG_CPUCLK_MASK 0x1
+#define RT305X_SYSCFG_SRAM_CS0_MODE_WDT 0x1
+
+#define RT3352_SYSCFG0_CPUCLK_SHIFT 8
+#define RT3352_SYSCFG0_CPUCLK_MASK 0x1
+#define RT3352_SYSCFG0_CPUCLK_LOW 0x0
+#define RT3352_SYSCFG0_CPUCLK_HIGH 0x1
+
+#define RT5350_SYSCFG0_CPUCLK_SHIFT 8
+#define RT5350_SYSCFG0_CPUCLK_MASK 0x3
+#define RT5350_SYSCFG0_CPUCLK_360 0x0
+#define RT5350_SYSCFG0_CPUCLK_320 0x2
+#define RT5350_SYSCFG0_CPUCLK_300 0x3
+
+/* multi function gpio pins */
+#define RT305X_GPIO_I2C_SD 1
+#define RT305X_GPIO_I2C_SCLK 2
+#define RT305X_GPIO_SPI_EN 3
+#define RT305X_GPIO_SPI_CLK 4
+/* GPIO 7-14 is shared between UART0, PCM and I2S interfaces */
+#define RT305X_GPIO_7 7
+#define RT305X_GPIO_10 10
+#define RT305X_GPIO_14 14
+#define RT305X_GPIO_UART1_TXD 15
+#define RT305X_GPIO_UART1_RXD 16
+#define RT305X_GPIO_JTAG_TDO 17
+#define RT305X_GPIO_JTAG_TDI 18
+#define RT305X_GPIO_MDIO_MDC 22
+#define RT305X_GPIO_MDIO_MDIO 23
+#define RT305X_GPIO_SDRAM_MD16 24
+#define RT305X_GPIO_SDRAM_MD31 39
+#define RT305X_GPIO_GE0_TXD0 40
+#define RT305X_GPIO_GE0_RXCLK 51
+
+#define RT305X_GPIO_MODE_I2C BIT(0)
+#define RT305X_GPIO_MODE_SPI BIT(1)
+#define RT305X_GPIO_MODE_UART0_SHIFT 2
+#define RT305X_GPIO_MODE_UART0_MASK 0x7
+#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
+#define RT305X_GPIO_MODE_UARTF 0x0
+#define RT305X_GPIO_MODE_PCM_UARTF 0x1
+#define RT305X_GPIO_MODE_PCM_I2S 0x2
+#define RT305X_GPIO_MODE_I2S_UARTF 0x3
+#define RT305X_GPIO_MODE_PCM_GPIO 0x4
+#define RT305X_GPIO_MODE_GPIO_UARTF 0x5
+#define RT305X_GPIO_MODE_GPIO_I2S 0x6
+#define RT305X_GPIO_MODE_GPIO 0x7
+#define RT305X_GPIO_MODE_UART1 BIT(5)
+#define RT305X_GPIO_MODE_JTAG BIT(6)
+#define RT305X_GPIO_MODE_MDIO BIT(7)
+#define RT305X_GPIO_MODE_SDRAM BIT(8)
+#define RT305X_GPIO_MODE_RGMII BIT(9)
+
+#endif
diff --git a/arch/mips/include/asm/mach-pnx8550/war.h b/arch/mips/include/asm/mach-ralink/war.h
index de8894c4668..a7b712cf2d2 100644
--- a/arch/mips/include/asm/mach-pnx8550/war.h
+++ b/arch/mips/include/asm/mach-ralink/war.h
@@ -5,8 +5,8 @@
*
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
*/
-#ifndef __ASM_MIPS_MACH_PNX8550_WAR_H
-#define __ASM_MIPS_MACH_PNX8550_WAR_H
+#ifndef __ASM_MACH_RALINK_WAR_H
+#define __ASM_MACH_RALINK_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
@@ -17,8 +17,9 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MACH_RALINK_WAR_H */
diff --git a/arch/mips/include/asm/mach-rc32434/ddr.h b/arch/mips/include/asm/mach-rc32434/ddr.h
index 291e2cf9dde..e1cad0c7fd5 100644
--- a/arch/mips/include/asm/mach-rc32434/ddr.h
+++ b/arch/mips/include/asm/mach-rc32434/ddr.h
@@ -138,4 +138,4 @@ struct ddr_ram {
#define RC32434_DLLED_DBE_BIT 0
#define RC32434_DLLED_DTE_BIT 1
-#endif /* _ASM_RC32434_DDR_H_ */
+#endif /* _ASM_RC32434_DDR_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/dma.h b/arch/mips/include/asm/mach-rc32434/dma.h
index 5f898b5873f..4322191e46b 100644
--- a/arch/mips/include/asm/mach-rc32434/dma.h
+++ b/arch/mips/include/asm/mach-rc32434/dma.h
@@ -5,7 +5,7 @@
* DMA register definition.
*
* Author : ryan.holmQVist@idt.com
- * Date : 20011005
+ * Date : 20011005
*/
#ifndef __ASM_RC32434_DMA_H
@@ -71,10 +71,10 @@ struct dma_reg {
#define DMA_CHAN_DONE_BIT (1 << 1)
#define DMA_CHAN_MODE_BIT (1 << 2)
#define DMA_CHAN_MODE_MSK 0x0000000c
-#define DMA_CHAN_MODE_AUTO 0
-#define DMA_CHAN_MODE_BURST 1
-#define DMA_CHAN_MODE_XFRT 2
-#define DMA_CHAN_MODE_RSVD 3
+#define DMA_CHAN_MODE_AUTO 0
+#define DMA_CHAN_MODE_BURST 1
+#define DMA_CHAN_MODE_XFRT 2
+#define DMA_CHAN_MODE_RSVD 3
#define DMA_CHAN_ACT_BIT (1 << 4)
/* DMA status registers */
@@ -100,4 +100,4 @@ struct dma_channel {
struct dma_reg ch[DMA_CHAN_COUNT];
};
-#endif /* __ASM_RC32434_DMA_H */
+#endif /* __ASM_RC32434_DMA_H */
diff --git a/arch/mips/include/asm/mach-rc32434/dma_v.h b/arch/mips/include/asm/mach-rc32434/dma_v.h
index 173a9f9146c..28c54063a34 100644
--- a/arch/mips/include/asm/mach-rc32434/dma_v.h
+++ b/arch/mips/include/asm/mach-rc32434/dma_v.h
@@ -5,7 +5,7 @@
* DMA register definition.
*
* Author : ryan.holmQVist@idt.com
- * Date : 20011005
+ * Date : 20011005
*/
#ifndef _ASM_RC32434_DMA_V_H_
@@ -49,4 +49,4 @@ static inline void rc32434_chain_dma(struct dma_reg *ch, u32 dma_addr)
__raw_writel(dma_addr, &ch->dmandptr);
}
-#endif /* _ASM_RC32434_DMA_V_H_ */
+#endif /* _ASM_RC32434_DMA_V_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/eth.h b/arch/mips/include/asm/mach-rc32434/eth.h
index a25cbc56173..c2645faadf5 100644
--- a/arch/mips/include/asm/mach-rc32434/eth.h
+++ b/arch/mips/include/asm/mach-rc32434/eth.h
@@ -26,8 +26,8 @@
*
*/
-#ifndef __ASM_RC32434_ETH_H
-#define __ASM_RC32434_ETH_H
+#ifndef __ASM_RC32434_ETH_H
+#define __ASM_RC32434_ETH_H
#define ETH0_BASE_ADDR 0x18060000
@@ -217,4 +217,4 @@ struct eth_regs {
#define ETH_TX_LE (1 << 16)
#define ETH_TX_CC 0x001E0000
-#endif /* __ASM_RC32434_ETH_H */
+#endif /* __ASM_RC32434_ETH_H */
diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h
index 12ee8d51016..4dee0a34250 100644
--- a/arch/mips/include/asm/mach-rc32434/gpio.h
+++ b/arch/mips/include/asm/mach-rc32434/gpio.h
@@ -5,7 +5,7 @@
* GPIO register definition.
*
* Author : ryan.holmQVist@idt.com
- * Date : 20011005
+ * Date : 20011005
* Copyright (C) 2001, 2002 Ryan Holm <ryan.holmQVist@idt.com>
* Copyright (C) 2008 Florian Fainelli <florian@openwrt.org>
*/
@@ -26,9 +26,9 @@
#define irq_to_gpio(irq) (irq - (8 + 4 * 32))
struct rb532_gpio_reg {
- u32 gpiofunc; /* GPIO Function Register
+ u32 gpiofunc; /* GPIO Function Register
* gpiofunc[x]==0 bit = gpio
- * func[x]==1 bit = altfunc
+ * func[x]==1 bit = altfunc
*/
u32 gpiocfg; /* GPIO Configuration Register
* gpiocfg[x]==0 bit = input
diff --git a/arch/mips/include/asm/mach-rc32434/irq.h b/arch/mips/include/asm/mach-rc32434/irq.h
index 023a5b100ed..b76dec95c04 100644
--- a/arch/mips/include/asm/mach-rc32434/irq.h
+++ b/arch/mips/include/asm/mach-rc32434/irq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_RC32434_IRQ_H
#define __ASM_RC32434_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include <asm/mach-generic/irq.h>
#include <asm/mach-rc32434/rb.h>
@@ -25,12 +25,12 @@
#define UART0_IRQ (GROUP3_IRQ_BASE + 0)
-#define ETH0_DMA_RX_IRQ (GROUP1_IRQ_BASE + 0)
-#define ETH0_DMA_TX_IRQ (GROUP1_IRQ_BASE + 1)
-#define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9)
-#define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10)
+#define ETH0_DMA_RX_IRQ (GROUP1_IRQ_BASE + 0)
+#define ETH0_DMA_TX_IRQ (GROUP1_IRQ_BASE + 1)
+#define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9)
+#define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10)
#define GPIO_MAPPED_IRQ_BASE GROUP4_IRQ_BASE
#define GPIO_MAPPED_IRQ_GROUP 4
-#endif /* __ASM_RC32434_IRQ_H */
+#endif /* __ASM_RC32434_IRQ_H */
diff --git a/arch/mips/include/asm/mach-rc32434/pci.h b/arch/mips/include/asm/mach-rc32434/pci.h
index 410638f2af7..6f40d151558 100644
--- a/arch/mips/include/asm/mach-rc32434/pci.h
+++ b/arch/mips/include/asm/mach-rc32434/pci.h
@@ -151,11 +151,11 @@ struct pci_msu {
#define PCI_CFGA_REG_PBA2 (0x18 >> 2) /* use PCIPBA_ */
#define PCI_CFGA_REG_PBA3 (0x1c >> 2) /* use PCIPBA_ */
#define PCI_CFGA_REG_SUBSYS (0x2c >> 2) /* use PCFGSS_ */
-#define PCI_CFGA_REG_3C (0x3C >> 2) /* use PCFG3C_ */
+#define PCI_CFGA_REG_3C (0x3C >> 2) /* use PCFG3C_ */
#define PCI_CFGA_REG_PBBA0C (0x44 >> 2) /* use PCIPBAC_ */
-#define PCI_CFGA_REG_PBA0M (0x48 >> 2)
+#define PCI_CFGA_REG_PBA0M (0x48 >> 2)
#define PCI_CFGA_REG_PBA1C (0x4c >> 2) /* use PCIPBAC_ */
-#define PCI_CFGA_REG_PBA1M (0x50 >> 2)
+#define PCI_CFGA_REG_PBA1M (0x50 >> 2)
#define PCI_CFGA_REG_PBA2C (0x54 >> 2) /* use PCIPBAC_ */
#define PCI_CFGA_REG_PBA2M (0x58 >> 2)
#define PCI_CFGA_REG_PBA3C (0x5c >> 2) /* use PCIPBAC_ */
@@ -164,9 +164,9 @@ struct pci_msu {
#define PCI_CFGA_FUNC_BIT 8
#define PCI_CFGA_FUNC 0x00000700
#define PCI_CFGA_DEV_BIT 11
-#define PCI_CFGA_DEV 0x0000f800
-#define PCI_CFGA_DEV_INTERN 0
-#define PCI_CFGA_BUS_BIT 16
+#define PCI_CFGA_DEV 0x0000f800
+#define PCI_CFGA_DEV_INTERN 0
+#define PCI_CFGA_BUS_BIT 16
#define PCI CFGA_BUS 0x00ff0000
#define PCI_CFGA_BUS_TYPE0 0
#define PCI_CFGA_EN (1 << 31)
@@ -201,13 +201,13 @@ struct pci_msu {
#define PCI_PBAC_P (1 << 1)
#define PCI_PBAC_SIZE_BIT 2
#define PCI_PBAC_SIZE 0x0000007c
-#define PCI_PBAC_SB (1 << 7)
-#define PCI_PBAC_PP (1 << 8)
+#define PCI_PBAC_SB (1 << 7)
+#define PCI_PBAC_PP (1 << 8)
#define PCI_PBAC_MR_BIT 9
#define PCI_PBAC_MR 0x00000600
#define PCI_PBAC_MR_RD 0
#define PCI_PBAC_MR_RD_LINE 1
-#define PCI_PBAC_MR_RD_MULT 2
+#define PCI_PBAC_MR_RD_MULT 2
#define PCI_PBAC_MRL (1 << 11)
#define PCI_PBAC_MRM (1 << 12)
#define PCI_PBAC_TRP (1 << 13)
@@ -227,14 +227,14 @@ struct pci_msu {
*/
#define PCI_LBAC_MSI (1 << 0)
-#define PCI_LBAC_MSI_MEM 0
-#define PCI_LBAC_MSI_IO 1
+#define PCI_LBAC_MSI_MEM 0
+#define PCI_LBAC_MSI_IO 1
#define PCI_LBAC_SIZE_BIT 2
#define PCI_LBAC_SIZE 0x0000007c
#define PCI_LBAC_SB (1 << 7)
#define PCI_LBAC_RT (1 << 8)
-#define PCI_LBAC_RT_NO_PREF 0
-#define PCI_LBAC_RT_PREF 1
+#define PCI_LBAC_RT_NO_PREF 0
+#define PCI_LBAC_RT_PREF 1
/*
* PCI Local Base Address [0|1|2|3] Mapping Register
@@ -279,16 +279,16 @@ struct pci_msu {
#define PCI_DMAD_PT 0x00c00000 /* preferred transaction field */
/* These are for reads (DMA channel 8) */
#define PCI_DMAD_DEVCMD_MR 0 /* memory read */
-#define PCI_DMAD_DEVCMD_MRL 1 /* memory read line */
-#define PCI_DMAD_DEVCMD_MRM 2 /* memory read multiple */
-#define PCI_DMAD_DEVCMD_IOR 3 /* I/O read */
+#define PCI_DMAD_DEVCMD_MRL 1 /* memory read line */
+#define PCI_DMAD_DEVCMD_MRM 2 /* memory read multiple */
+#define PCI_DMAD_DEVCMD_IOR 3 /* I/O read */
/* These are for writes (DMA channel 9) */
#define PCI_DMAD_DEVCMD_MW 0 /* memory write */
-#define PCI_DMAD_DEVCMD_MWI 1 /* memory write invalidate */
-#define PCI_DMAD_DEVCMD_IOW 3 /* I/O write */
+#define PCI_DMAD_DEVCMD_MWI 1 /* memory write invalidate */
+#define PCI_DMAD_DEVCMD_IOW 3 /* I/O write */
/* Swap byte field applies to both DMA channel 8 and 9 */
-#define PCI_DMAD_SB (1 << 24) /* swap byte field */
+#define PCI_DMAD_SB (1 << 24) /* swap byte field */
/*
@@ -309,7 +309,7 @@ struct pci_msu {
#define PCI_MSU_M1 (1 << 1)
#define PCI_MSU_DB (1 << 2)
-#define PCI_MSG_ADDR 0xB8088010
+#define PCI_MSG_ADDR 0xB8088010
#define PCI0_ADDR 0xB8080000
#define rc32434_pci ((struct pci_reg *) PCI0_ADDR)
#define rc32434_pci_msg ((struct pci_msu *) PCI_MSG_ADDR)
@@ -331,9 +331,9 @@ struct pci_msu {
#define PCILBA_SIZE_MASK 0x1F
#define SIZE_256MB 0x1C
#define SIZE_128MB 0x1B
-#define SIZE_64MB 0x1A
+#define SIZE_64MB 0x1A
#define SIZE_32MB 0x19
-#define SIZE_16MB 0x18
+#define SIZE_16MB 0x18
#define SIZE_4MB 0x16
#define SIZE_2MB 0x15
#define SIZE_1MB 0x14
@@ -363,7 +363,7 @@ struct pci_msu {
#define KORINA_CONFIG23_ADDR 0x8000005C
#define KORINA_CONFIG24_ADDR 0x80000060
#define KORINA_CONFIG25_ADDR 0x80000064
-#define KORINA_CMD (PCI_CFG04_CMD_IO_ENA | \
+#define KORINA_CMD (PCI_CFG04_CMD_IO_ENA | \
PCI_CFG04_CMD_MEM_ENA | \
PCI_CFG04_CMD_BM_ENA | \
PCI_CFG04_CMD_MW_INV | \
@@ -401,8 +401,8 @@ struct pci_msu {
#define KORINA_BAR3 0x48000008 /* Spare 128 MB Memory */
#define KORINA_CNFG4 KORINA_BAR0
-#define KORINA_CNFG5 KORINA_BAR1
-#define KORINA_CNFG6 KORINA_BAR2
+#define KORINA_CNFG5 KORINA_BAR1
+#define KORINA_CNFG6 KORINA_BAR2
#define KORINA_CNFG7 KORINA_BAR3
#define KORINA_SUBSYS_VENDOR_ID 0x011d
@@ -410,20 +410,20 @@ struct pci_msu {
#define KORINA_CNFG8 0
#define KORINA_CNFG9 0
#define KORINA_CNFG10 0
-#define KORINA_CNFG11 ((KORINA_SUBSYS_VENDOR_ID<<16) | \
+#define KORINA_CNFG11 ((KORINA_SUBSYS_VENDOR_ID<<16) | \
KORINA_SUBSYSTEM_ID)
#define KORINA_INT_LINE 1
#define KORINA_INT_PIN 1
#define KORINA_MIN_GNT 8
#define KORINA_MAX_LAT 0x38
#define KORINA_CNFG12 0
-#define KORINA_CNFG13 0
+#define KORINA_CNFG13 0
#define KORINA_CNFG14 0
#define KORINA_CNFG15 ((KORINA_MAX_LAT<<24) | \
(KORINA_MIN_GNT<<16) | \
(KORINA_INT_PIN<<8) | \
KORINA_INT_LINE)
-#define KORINA_RETRY_LIMIT 0x80
+#define KORINA_RETRY_LIMIT 0x80
#define KORINA_TRDY_LIMIT 0x80
#define KORINA_CNFG16 ((KORINA_RETRY_LIMIT<<8) | \
KORINA_TRDY_LIMIT)
@@ -475,7 +475,7 @@ struct pci_msu {
#define KORINA_PBA3M 0
#define KORINA_CNFG24 KORINA_PBA3M
-#define PCITC_DTIMER_VAL 8
+#define PCITC_DTIMER_VAL 8
#define PCITC_RTIMER_VAL 0x10
-#endif /* __ASM_RC32434_PCI_H */
+#endif /* __ASM_RC32434_PCI_H */
diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h
index 6dc5f8df1f3..aac8ce8902e 100644
--- a/arch/mips/include/asm/mach-rc32434/rb.h
+++ b/arch/mips/include/asm/mach-rc32434/rb.h
@@ -18,7 +18,7 @@
#include <linux/genhd.h>
#define REGBASE 0x18000000
-#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
+#define IDT434_REG_BASE ((volatile void *) KSEG1ADDR(REGBASE))
#define UART0BASE 0x58000
#define RST (1 << 15)
#define DEV0BASE 0x010000
@@ -80,10 +80,10 @@ struct cf_device {
struct mpmc_device {
unsigned char state;
spinlock_t lock;
- void __iomem *base;
+ void __iomem *base;
};
extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask);
extern unsigned char get_latch_u5(void);
-#endif /* __ASM_RC32434_RB_H */
+#endif /* __ASM_RC32434_RB_H */
diff --git a/arch/mips/include/asm/mach-rc32434/rc32434.h b/arch/mips/include/asm/mach-rc32434/rc32434.h
index fce25d4231f..02fd32b4be1 100644
--- a/arch/mips/include/asm/mach-rc32434/rc32434.h
+++ b/arch/mips/include/asm/mach-rc32434/rc32434.h
@@ -16,4 +16,4 @@ static inline void rc32434_sync(void)
__asm__ volatile ("sync");
}
-#endif /* _ASM_RC32434_RC32434_H_ */
+#endif /* _ASM_RC32434_RC32434_H_ */
diff --git a/arch/mips/include/asm/mach-rc32434/timer.h b/arch/mips/include/asm/mach-rc32434/timer.h
index e49b1d57a01..cda26bb9eea 100644
--- a/arch/mips/include/asm/mach-rc32434/timer.h
+++ b/arch/mips/include/asm/mach-rc32434/timer.h
@@ -51,15 +51,15 @@ struct timer {
#define RC32434_CTC_TO_BIT 1
/* Real time clock registers */
-#define RC32434_RTC_MSK(x) BIT_TO_MASK(x)
-#define RC32434_RTC_CE_BIT 0
-#define RC32434_RTC_TO_BIT 1
-#define RC32434_RTC_RQE_BIT 2
+#define RC32434_RTC_MSK(x) BIT_TO_MASK(x)
+#define RC32434_RTC_CE_BIT 0
+#define RC32434_RTC_TO_BIT 1
+#define RC32434_RTC_RQE_BIT 2
/* Counter registers */
-#define RC32434_RCOUNT_BIT 0
-#define RC32434_RCOUNT_MSK 0x0000ffff
-#define RC32434_RCOMP_BIT 0
-#define RC32434_RCOMP_MSK 0x0000ffff
+#define RC32434_RCOUNT_BIT 0
+#define RC32434_RCOUNT_MSK 0x0000ffff
+#define RC32434_RCOMP_BIT 0
+#define RC32434_RCOMP_MSK 0x0000ffff
-#endif /* __ASM_RC32434_TIMER_H */
+#endif /* __ASM_RC32434_TIMER_H */
diff --git a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 7f3e3f9bd23..d9c82841903 100644
--- a/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -23,8 +23,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
@@ -53,8 +53,8 @@
/* #define cpu_has_watch ? */
#define cpu_has_divec 1
#define cpu_has_vce 0
-/* #define cpu_has_cache_cdex_p ? */
-/* #define cpu_has_cache_cdex_s ? */
+/* #define cpu_has_cache_cdex_p ? */
+/* #define cpu_has_cache_cdex_s ? */
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h
index 652ea4c38cd..5d154cfbcf4 100644
--- a/arch/mips/include/asm/mach-sead3/irq.h
+++ b/arch/mips/include/asm/mach-sead3/irq.h
@@ -1,7 +1,7 @@
#ifndef __ASM_MACH_MIPS_IRQ_H
#define __ASM_MACH_MIPS_IRQ_H
-#define NR_IRQS 256
+#define NR_IRQS 256
#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-sibyte/war.h b/arch/mips/include/asm/mach-sibyte/war.h
index 176f5b32dc6..0a227d426b9 100644
--- a/arch/mips/include/asm/mach-sibyte/war.h
+++ b/arch/mips/include/asm/mach-sibyte/war.h
@@ -21,12 +21,12 @@ extern int sb1250_m3_workaround_needed(void);
#endif
#define BCM1250_M3_WAR sb1250_m3_workaround_needed()
-#define SIBYTE_1956_WAR 1
+#define SIBYTE_1956_WAR 1
#else
#define BCM1250_M3_WAR 0
-#define SIBYTE_1956_WAR 0
+#define SIBYTE_1956_WAR 0
#endif
diff --git a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h b/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
index 83746b84a5e..00fa3684ac9 100644
--- a/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
+++ b/arch/mips/include/asm/mach-wrppmc/mach-gt64120.h
@@ -1,6 +1,6 @@
/*
* This is a direct copy of the ev96100.h file, with a global
- * search and replace. The numbers are the same.
+ * search and replace. The numbers are the same.
*
* The reason I'm duplicating this is so that the 64120/96100
* defines won't be confusing in the source code.
@@ -11,11 +11,11 @@
/*
* This is the CPU physical memory map of PPMC Board:
*
- * 0x00000000-0x03FFFFFF - 64MB SDRAM (SCS[0]#)
- * 0x1C000000-0x1C000000 - LED (CS0)
- * 0x1C800000-0x1C800007 - UART 16550 port (CS1)
- * 0x1F000000-0x1F000000 - MailBox (CS3)
- * 0x1FC00000-0x20000000 - 4MB Flash (BOOT CS)
+ * 0x00000000-0x03FFFFFF - 64MB SDRAM (SCS[0]#)
+ * 0x1C000000-0x1C000000 - LED (CS0)
+ * 0x1C800000-0x1C800007 - UART 16550 port (CS1)
+ * 0x1F000000-0x1F000000 - MailBox (CS3)
+ * 0x1FC00000-0x20000000 - 4MB Flash (BOOT CS)
*/
#define WRPPMC_SDRAM_SCS0_BASE 0x00000000
@@ -39,8 +39,8 @@
*
* NOTE: We only have PCI_0 hose interface
*/
-#define GT_PCI_MEM_BASE 0x13000000UL
-#define GT_PCI_MEM_SIZE 0x02000000UL
+#define GT_PCI_MEM_BASE 0x13000000UL
+#define GT_PCI_MEM_SIZE 0x02000000UL
#define GT_PCI_IO_BASE 0x11000000UL
#define GT_PCI_IO_SIZE 0x02000000UL
diff --git a/arch/mips/include/asm/mc146818-time.h b/arch/mips/include/asm/mc146818-time.h
index 4a08dbe37db..9e1ad26abdc 100644
--- a/arch/mips/include/asm/mc146818-time.h
+++ b/arch/mips/include/asm/mc146818-time.h
@@ -26,7 +26,7 @@
* MC146818A or Dallas DS12887 data sheet for details.
*
* BUG: This routine does not handle hour overflow properly; it just
- * sets the minutes. Usually you'll only notice that after reboot!
+ * sets the minutes. Usually you'll only notice that after reboot!
*/
static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
{
@@ -77,7 +77,7 @@ static inline int mc146818_set_rtc_mmss(unsigned long nowtime)
* battery and quartz) will not reset the oscillator and will not
* update precisely 500 ms later. You won't find this mentioned in
* the Dallas Semiconductor data sheets, but who believes data
- * sheets anyway ... -- Markus Kuhn
+ * sheets anyway ... -- Markus Kuhn
*/
CMOS_WRITE(save_control, RTC_CONTROL);
CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
diff --git a/arch/mips/include/asm/mips-boards/bonito64.h b/arch/mips/include/asm/mips-boards/bonito64.h
index d14e2adc4be..b2048d1bcc1 100644
--- a/arch/mips/include/asm/mips-boards/bonito64.h
+++ b/arch/mips/include/asm/mips-boards/bonito64.h
@@ -41,18 +41,18 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_BOOT_BASE 0x1fc00000
#define BONITO_BOOT_SIZE 0x00100000
-#define BONITO_BOOT_TOP (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
+#define BONITO_BOOT_TOP (BONITO_BOOT_BASE+BONITO_BOOT_SIZE-1)
#define BONITO_FLASH_BASE 0x1c000000
#define BONITO_FLASH_SIZE 0x03000000
#define BONITO_FLASH_TOP (BONITO_FLASH_BASE+BONITO_FLASH_SIZE-1)
#define BONITO_SOCKET_BASE 0x1f800000
#define BONITO_SOCKET_SIZE 0x00400000
#define BONITO_SOCKET_TOP (BONITO_SOCKET_BASE+BONITO_SOCKET_SIZE-1)
-#define BONITO_REG_BASE 0x1fe00000
-#define BONITO_REG_SIZE 0x00040000
+#define BONITO_REG_BASE 0x1fe00000
+#define BONITO_REG_SIZE 0x00040000
#define BONITO_REG_TOP (BONITO_REG_BASE+BONITO_REG_SIZE-1)
-#define BONITO_DEV_BASE 0x1ff00000
-#define BONITO_DEV_SIZE 0x00100000
+#define BONITO_DEV_BASE 0x1ff00000
+#define BONITO_DEV_SIZE 0x00100000
#define BONITO_DEV_TOP (BONITO_DEV_BASE+BONITO_DEV_SIZE-1)
#define BONITO_PCILO_BASE 0x10000000
#define BONITO_PCILO_SIZE 0x0c000000
@@ -79,14 +79,14 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* PCI Configuration Registers */
-#define BONITO_PCI_REG(x) BONITO(BONITO_PCICONFIGBASE + (x))
+#define BONITO_PCI_REG(x) BONITO(BONITO_PCICONFIGBASE + (x))
#define BONITO_PCIDID BONITO_PCI_REG(0x00)
#define BONITO_PCICMD BONITO_PCI_REG(0x04)
-#define BONITO_PCICLASS BONITO_PCI_REG(0x08)
+#define BONITO_PCICLASS BONITO_PCI_REG(0x08)
#define BONITO_PCILTIMER BONITO_PCI_REG(0x0c)
-#define BONITO_PCIBASE0 BONITO_PCI_REG(0x10)
-#define BONITO_PCIBASE1 BONITO_PCI_REG(0x14)
-#define BONITO_PCIBASE2 BONITO_PCI_REG(0x18)
+#define BONITO_PCIBASE0 BONITO_PCI_REG(0x10)
+#define BONITO_PCIBASE1 BONITO_PCI_REG(0x14)
+#define BONITO_PCIBASE2 BONITO_PCI_REG(0x18)
#define BONITO_PCIEXPRBASE BONITO_PCI_REG(0x30)
#define BONITO_PCIINT BONITO_PCI_REG(0x3c)
@@ -95,7 +95,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCICMD_MABORT_CLR 0x20000000
#define BONITO_PCICMD_MTABORT_CLR 0x10000000
#define BONITO_PCICMD_TABORT_CLR 0x08000000
-#define BONITO_PCICMD_MPERR_CLR 0x01000000
+#define BONITO_PCICMD_MPERR_CLR 0x01000000
#define BONITO_PCICMD_PERRRESPEN 0x00000040
#define BONITO_PCICMD_ASTEPEN 0x00000080
#define BONITO_PCICMD_SERREN 0x00000100
@@ -139,7 +139,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* Other Bonito configuration */
-#define BONITO_BONGENCFG_OFFSET 0x4
+#define BONITO_BONGENCFG_OFFSET 0x4
#define BONITO_BONGENCFG BONITO(BONITO_REGBASE + BONITO_BONGENCFG_OFFSET)
#define BONITO_BONGENCFG_DEBUGMODE 0x00000001
@@ -165,7 +165,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* 2. IO & IDE configuration */
-#define BONITO_IODEVCFG BONITO(BONITO_REGBASE + 0x08)
+#define BONITO_IODEVCFG BONITO(BONITO_REGBASE + 0x08)
/* 3. IO & IDE configuration */
@@ -181,33 +181,33 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* GPIO Regs - r/w */
-#define BONITO_GPIODATA_OFFSET 0x1c
-#define BONITO_GPIODATA BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
+#define BONITO_GPIODATA_OFFSET 0x1c
+#define BONITO_GPIODATA BONITO(BONITO_REGBASE + BONITO_GPIODATA_OFFSET)
#define BONITO_GPIOIE BONITO(BONITO_REGBASE + 0x20)
/* ICU Configuration Regs - r/w */
#define BONITO_INTEDGE BONITO(BONITO_REGBASE + 0x24)
-#define BONITO_INTSTEER BONITO(BONITO_REGBASE + 0x28)
+#define BONITO_INTSTEER BONITO(BONITO_REGBASE + 0x28)
#define BONITO_INTPOL BONITO(BONITO_REGBASE + 0x2c)
/* ICU Enable Regs - IntEn & IntISR are r/o. */
-#define BONITO_INTENSET BONITO(BONITO_REGBASE + 0x30)
-#define BONITO_INTENCLR BONITO(BONITO_REGBASE + 0x34)
+#define BONITO_INTENSET BONITO(BONITO_REGBASE + 0x30)
+#define BONITO_INTENCLR BONITO(BONITO_REGBASE + 0x34)
#define BONITO_INTEN BONITO(BONITO_REGBASE + 0x38)
#define BONITO_INTISR BONITO(BONITO_REGBASE + 0x3c)
/* PCI mail boxes */
-#define BONITO_PCIMAIL0_OFFSET 0x40
-#define BONITO_PCIMAIL1_OFFSET 0x44
-#define BONITO_PCIMAIL2_OFFSET 0x48
-#define BONITO_PCIMAIL3_OFFSET 0x4c
-#define BONITO_PCIMAIL0 BONITO(BONITO_REGBASE + 0x40)
-#define BONITO_PCIMAIL1 BONITO(BONITO_REGBASE + 0x44)
-#define BONITO_PCIMAIL2 BONITO(BONITO_REGBASE + 0x48)
-#define BONITO_PCIMAIL3 BONITO(BONITO_REGBASE + 0x4c)
+#define BONITO_PCIMAIL0_OFFSET 0x40
+#define BONITO_PCIMAIL1_OFFSET 0x44
+#define BONITO_PCIMAIL2_OFFSET 0x48
+#define BONITO_PCIMAIL3_OFFSET 0x4c
+#define BONITO_PCIMAIL0 BONITO(BONITO_REGBASE + 0x40)
+#define BONITO_PCIMAIL1 BONITO(BONITO_REGBASE + 0x44)
+#define BONITO_PCIMAIL2 BONITO(BONITO_REGBASE + 0x48)
+#define BONITO_PCIMAIL3 BONITO(BONITO_REGBASE + 0x4c)
/* 6. PCI cache */
@@ -216,7 +216,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCICACHETAG BONITO(BONITO_REGBASE + 0x54)
#define BONITO_PCIBADADDR BONITO(BONITO_REGBASE + 0x58)
-#define BONITO_PCIMSTAT BONITO(BONITO_REGBASE + 0x5c)
+#define BONITO_PCIMSTAT BONITO(BONITO_REGBASE + 0x5c)
/*
@@ -228,20 +228,20 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_CONFIGBASE 0x000
#define BONITO_BONITOBASE 0x100
-#define BONITO_LDMABASE 0x200
+#define BONITO_LDMABASE 0x200
#define BONITO_COPBASE 0x300
#define BONITO_REG_BLOCKMASK 0x300
-#define BONITO_LDMACTRL BONITO(BONITO_LDMABASE + 0x0)
-#define BONITO_LDMASTAT BONITO(BONITO_LDMABASE + 0x0)
-#define BONITO_LDMAADDR BONITO(BONITO_LDMABASE + 0x4)
+#define BONITO_LDMACTRL BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMASTAT BONITO(BONITO_LDMABASE + 0x0)
+#define BONITO_LDMAADDR BONITO(BONITO_LDMABASE + 0x4)
#define BONITO_LDMAGO BONITO(BONITO_LDMABASE + 0x8)
-#define BONITO_LDMADATA BONITO(BONITO_LDMABASE + 0xc)
+#define BONITO_LDMADATA BONITO(BONITO_LDMABASE + 0xc)
#define BONITO_COPCTRL BONITO(BONITO_COPBASE + 0x0)
#define BONITO_COPSTAT BONITO(BONITO_COPBASE + 0x0)
-#define BONITO_COPPADDR BONITO(BONITO_COPBASE + 0x4)
-#define BONITO_COPDADDR BONITO(BONITO_COPBASE + 0x8)
+#define BONITO_COPPADDR BONITO(BONITO_COPBASE + 0x4)
+#define BONITO_COPDADDR BONITO(BONITO_COPBASE + 0x8)
#define BONITO_COPGO BONITO(BONITO_COPBASE + 0xc)
@@ -257,7 +257,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_IDECOPGO_DMA_SIZE_SHIFT 0
#define BONITO_IDECOPGO_DMA_WRITE 0x00010000
#define BONITO_IDECOPGO_DMAWCOUNT 0x000f0000
-#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT 16
+#define BONITO_IDECOPGO_DMAWCOUNT_SHIFT 16
#define BONITO_IDECOPCTRL_DMA_STARTBIT 0x80000000
#define BONITO_IDECOPCTRL_DMA_RSTBIT 0x40000000
@@ -291,11 +291,11 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_SDCFG_DRAMMODESET 0x00200000
/* --- */
#define BONITO_SDCFG_DRAMEXTREGS 0x00400000
-#define BONITO_SDCFG_DRAMPARITY 0x00800000
+#define BONITO_SDCFG_DRAMPARITY 0x00800000
/* Added by RPF 11-9-00 */
-#define BONITO_SDCFG_DRAMBURSTLEN 0x03000000
-#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT 24
-#define BONITO_SDCFG_DRAMMODESET_DONE 0x80000000
+#define BONITO_SDCFG_DRAMBURSTLEN 0x03000000
+#define BONITO_SDCFG_DRAMBURSTLEN_SHIFT 24
+#define BONITO_SDCFG_DRAMMODESET_DONE 0x80000000
/* --- */
/* PCI Cache - pciCacheCtrl */
@@ -308,7 +308,7 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCICACHECTRL_IOBCCOH_PRES 0x00000100
#define BONITO_PCICACHECTRL_IOBCCOH_EN 0x00000200
-#define BONITO_PCICACHECTRL_CPUCOH_PRES 0x00000400
+#define BONITO_PCICACHECTRL_CPUCOH_PRES 0x00000400
#define BONITO_PCICACHECTRL_CPUCOH_EN 0x00000800
#define BONITO_IODEVCFG_BUFFBIT_CS0 0x00000001
@@ -343,18 +343,18 @@ extern unsigned long _pcictrl_bonito_pcicfg;
/* gpio */
#define BONITO_GPIO_GPIOW 0x000003ff
-#define BONITO_GPIO_GPIOW_SHIFT 0
+#define BONITO_GPIO_GPIOW_SHIFT 0
#define BONITO_GPIO_GPIOR 0x01ff0000
-#define BONITO_GPIO_GPIOR_SHIFT 16
+#define BONITO_GPIO_GPIOR_SHIFT 16
#define BONITO_GPIO_GPINR 0xfe000000
-#define BONITO_GPIO_GPINR_SHIFT 25
+#define BONITO_GPIO_GPINR_SHIFT 25
#define BONITO_GPIO_IOW(N) (1<<(BONITO_GPIO_GPIOW_SHIFT+(N)))
#define BONITO_GPIO_IOR(N) (1<<(BONITO_GPIO_GPIOR_SHIFT+(N)))
#define BONITO_GPIO_INR(N) (1<<(BONITO_GPIO_GPINR_SHIFT+(N)))
/* ICU */
#define BONITO_ICU_MBOXES 0x0000000f
-#define BONITO_ICU_MBOXES_SHIFT 0
+#define BONITO_ICU_MBOXES_SHIFT 0
#define BONITO_ICU_DMARDY 0x00000010
#define BONITO_ICU_DMAEMPTY 0x00000020
#define BONITO_ICU_COPYRDY 0x00000040
@@ -384,13 +384,13 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCIMAP_PCIMAP_2 0x00040000
#define BONITO_PCIMAP_WIN(WIN, ADDR) ((((ADDR)>>26) & BONITO_PCIMAP_PCIMAP_LO0) << ((WIN)*6))
-#define BONITO_PCIMAP_WINSIZE (1<<26)
+#define BONITO_PCIMAP_WINSIZE (1<<26)
#define BONITO_PCIMAP_WINOFFSET(ADDR) ((ADDR) & (BONITO_PCIMAP_WINSIZE - 1))
#define BONITO_PCIMAP_WINBASE(ADDR) ((ADDR) << 26)
/* pcimembaseCfg */
-#define BONITO_PCIMEMBASECFG_MASK 0xf0000000
+#define BONITO_PCIMEMBASECFG_MASK 0xf0000000
#define BONITO_PCIMEMBASECFG_MEMBASE0_MASK 0x0000001f
#define BONITO_PCIMEMBASECFG_MEMBASE0_MASK_SHIFT 0
#define BONITO_PCIMEMBASECFG_MEMBASE0_TRANS 0x000003e0
@@ -406,21 +406,21 @@ extern unsigned long _pcictrl_bonito_pcicfg;
#define BONITO_PCIMEMBASECFG_MEMBASE1_IO 0x00800000
#define BONITO_PCIMEMBASECFG_ASHIFT 23
-#define BONITO_PCIMEMBASECFG_AMASK 0x007fffff
+#define BONITO_PCIMEMBASECFG_AMASK 0x007fffff
#define BONITO_PCIMEMBASECFGSIZE(WIN, SIZE) (((~((SIZE)-1))>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)
#define BONITO_PCIMEMBASECFGBASE(WIN, BASE) (((BASE)>>(BONITO_PCIMEMBASECFG_ASHIFT-BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS)
#define BONITO_PCIMEMBASECFG_SIZE(WIN, CFG) (((((~(CFG)) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK)) << (BONITO_PCIMEMBASECFG_ASHIFT - BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT)) | BONITO_PCIMEMBASECFG_AMASK)
-#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
-#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
+#define BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_MASK_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
#define BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG) ((((CFG) & BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS) >> BONITO_PCIMEMBASECFG_MEMBASE##WIN##_TRANS_SHIFT) << BONITO_PCIMEMBASECFG_ASHIFT)
-#define BONITO_PCITOPHYS(WIN, ADDR, CFG) ( \
- (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
- (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
- )
+#define BONITO_PCITOPHYS(WIN, ADDR, CFG) ( \
+ (((ADDR) & (~(BONITO_PCIMEMBASECFG_MASK))) & (~(BONITO_PCIMEMBASECFG_ADDRMASK(WIN, CFG)))) | \
+ (BONITO_PCIMEMBASECFG_ADDRTRANS(WIN, CFG)) \
+ )
/* PCICmd */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 6e23ceb0ba8..44a09a64160 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -1,21 +1,14 @@
/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
* Defines of the MIPS boards specific address-MAP, registers, etc.
+ *
+ * Copyright (C) 2000,2012 MIPS Technologies, Inc.
+ * All rights reserved.
+ * Authors: Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#ifndef __ASM_MIPS_BOARDS_GENERIC_H
#define __ASM_MIPS_BOARDS_GENERIC_H
@@ -27,39 +20,32 @@
/*
* Display register base.
*/
-#define ASCII_DISPLAY_WORD_BASE 0x1f000410
-#define ASCII_DISPLAY_POS_BASE 0x1f000418
-
-
-/*
- * Yamon Prom print address.
- */
-#define YAMON_PROM_PRINT_ADDR 0x1fc00504
-
+#define ASCII_DISPLAY_WORD_BASE 0x1f000410
+#define ASCII_DISPLAY_POS_BASE 0x1f000418
/*
* Reset register.
*/
-#define SOFTRES_REG 0x1f000500
-#define GORESET 0x42
+#define SOFTRES_REG 0x1f000500
+#define GORESET 0x42
/*
* Revision register.
*/
-#define MIPS_REVISION_REG 0x1fc00010
-#define MIPS_REVISION_CORID_QED_RM5261 0
-#define MIPS_REVISION_CORID_CORE_LV 1
-#define MIPS_REVISION_CORID_BONITO64 2
-#define MIPS_REVISION_CORID_CORE_20K 3
-#define MIPS_REVISION_CORID_CORE_FPGA 4
-#define MIPS_REVISION_CORID_CORE_MSC 5
-#define MIPS_REVISION_CORID_CORE_EMUL 6
-#define MIPS_REVISION_CORID_CORE_FPGA2 7
-#define MIPS_REVISION_CORID_CORE_FPGAR2 8
-#define MIPS_REVISION_CORID_CORE_FPGA3 9
-#define MIPS_REVISION_CORID_CORE_24K 10
-#define MIPS_REVISION_CORID_CORE_FPGA4 11
-#define MIPS_REVISION_CORID_CORE_FPGA5 12
+#define MIPS_REVISION_REG 0x1fc00010
+#define MIPS_REVISION_CORID_QED_RM5261 0
+#define MIPS_REVISION_CORID_CORE_LV 1
+#define MIPS_REVISION_CORID_BONITO64 2
+#define MIPS_REVISION_CORID_CORE_20K 3
+#define MIPS_REVISION_CORID_CORE_FPGA 4
+#define MIPS_REVISION_CORID_CORE_MSC 5
+#define MIPS_REVISION_CORID_CORE_EMUL 6
+#define MIPS_REVISION_CORID_CORE_FPGA2 7
+#define MIPS_REVISION_CORID_CORE_FPGAR2 8
+#define MIPS_REVISION_CORID_CORE_FPGA3 9
+#define MIPS_REVISION_CORID_CORE_24K 10
+#define MIPS_REVISION_CORID_CORE_FPGA4 11
+#define MIPS_REVISION_CORID_CORE_FPGA5 12
/**** Artificial corid defines ****/
/*
@@ -87,10 +73,14 @@
extern int mips_revision_sconid;
+#ifdef CONFIG_OF
+extern struct boot_param_header __dtb_start;
+#endif
+
#ifdef CONFIG_PCI
extern void mips_pcibios_init(void);
#else
#define mips_pcibios_init() do { } while (0)
#endif
-#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
+#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/mips-boards/launch.h b/arch/mips/include/asm/mips-boards/launch.h
index d8ae7f95a52..653477e4074 100644
--- a/arch/mips/include/asm/mips-boards/launch.h
+++ b/arch/mips/include/asm/mips-boards/launch.h
@@ -16,11 +16,11 @@ struct cpulaunch {
#else
#define LOG2CPULAUNCH 5
-#define LAUNCH_PC 0
-#define LAUNCH_GP 4
-#define LAUNCH_SP 8
-#define LAUNCH_A0 12
-#define LAUNCH_FLAGS 28
+#define LAUNCH_PC 0
+#define LAUNCH_GP 4
+#define LAUNCH_SP 8
+#define LAUNCH_A0 12
+#define LAUNCH_FLAGS 28
#endif
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
index c1891578fa6..722bc889eab 100644
--- a/arch/mips/include/asm/mips-boards/malta.h
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -33,9 +33,9 @@
* Malta I/O ports base address for the Galileo GT64120 and Algorithmics
* Bonito system controllers.
*/
-#define MALTA_GT_PORT_BASE get_gt_port_base(GT_PCI0IOLD_OFS)
-#define MALTA_BONITO_PORT_BASE ((unsigned long)ioremap (0x1fd00000, 0x10000))
-#define MALTA_MSC_PORT_BASE get_msc_port_base(MSC01_PCI_SC2PIOBASL)
+#define MALTA_GT_PORT_BASE get_gt_port_base(GT_PCI0IOLD_OFS)
+#define MALTA_BONITO_PORT_BASE ((unsigned long)ioremap (0x1fd00000, 0x10000))
+#define MALTA_MSC_PORT_BASE get_msc_port_base(MSC01_PCI_SC2PIOBASL)
static inline unsigned long get_gt_port_base(unsigned long reg)
{
@@ -77,8 +77,8 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
/*
* Malta RTC-device indirect register access.
*/
-#define MALTA_RTC_ADR_REG 0x70
-#define MALTA_RTC_DAT_REG 0x71
+#define MALTA_RTC_ADR_REG 0x70
+#define MALTA_RTC_DAT_REG 0x71
/*
* Malta SMSC FDC37M817 Super I/O Controller register.
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index 66924481575..e330732ddf9 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -4,8 +4,8 @@
* for more details.
*
* Copyright (C) 2000,2012 MIPS Technologies, Inc. All rights reserved.
- * Carsten Langgaard <carstenl@mips.com>
- * Steven J. Hill <sjhill@mips.com>
+ * Carsten Langgaard <carstenl@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#ifndef _MIPS_MALTAINT_H
#define _MIPS_MALTAINT_H
@@ -24,9 +24,9 @@
#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
#define MIPSCPU_INT_MB1 3
#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
-#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
+#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
#define MIPSCPU_INT_MB2 4
-#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
+#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
#define MIPSCPU_INT_MB3 5
#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
#define MIPSCPU_INT_MB4 6
diff --git a/arch/mips/include/asm/mips-boards/piix4.h b/arch/mips/include/asm/mips-boards/piix4.h
index 2971d60f2e9..a02596cf1ab 100644
--- a/arch/mips/include/asm/mips-boards/piix4.h
+++ b/arch/mips/include/asm/mips-boards/piix4.h
@@ -53,7 +53,7 @@
#define PIIX4_OCW2_SP (0x6 << 5)
#define PIIX4_OCW2_NOP (0x2 << 5)
-#define PIIX4_OCW2_SEL (0x0 << 3)
+#define PIIX4_OCW2_SEL (0x0 << 3)
#define PIIX4_OCW2_ILS_0 0
#define PIIX4_OCW2_ILS_1 1
@@ -72,9 +72,9 @@
#define PIIX4_OCW2_ILS_14 6
#define PIIX4_OCW2_ILS_15 7
-#define PIIX4_OCW3_SEL (0x1 << 3)
+#define PIIX4_OCW3_SEL (0x1 << 3)
-#define PIIX4_OCW3_IRR 0x2
-#define PIIX4_OCW3_ISR 0x3
+#define PIIX4_OCW3_IRR 0x2
+#define PIIX4_OCW3_ISR 0x3
#endif /* __ASM_MIPS_BOARDS_PIIX4_H */
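For illustration only (not part of the patch above), a minimal sketch of how the PIIX4_OCW3_* values from this header combine into an 8259-style "read in-service register" command; the command-port number and the use of outb()/inb() from <asm/io.h> are assumptions made for the example:

#include <asm/io.h>

/* Hypothetical 8259 command-port number, used only for this sketch. */
#define PIC_CMD_PORT	0x20

static inline unsigned char piix4_read_isr(void)
{
	/* Select OCW3 and request the in-service register ... */
	outb(PIIX4_OCW3_SEL | PIIX4_OCW3_ISR, PIC_CMD_PORT);
	/* ... the next read of the command port then returns the ISR. */
	return inb(PIC_CMD_PORT);
}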
diff --git a/arch/mips/include/asm/mips-boards/prom.h b/arch/mips/include/asm/mips-boards/prom.h
index a9db576a976..e7aed3e4ff5 100644
--- a/arch/mips/include/asm/mips-boards/prom.h
+++ b/arch/mips/include/asm/mips-boards/prom.h
@@ -39,9 +39,9 @@ extern int get_ethernet_addr(char *ethernet_addr);
/* Memory descriptor management. */
#define PROM_MAX_PMEMBLOCKS 32
struct prom_pmemblock {
- unsigned long base; /* Within KSEG0. */
- unsigned int size; /* In bytes. */
- unsigned int type; /* free or prom memory */
+ unsigned long base; /* Within KSEG0. */
+ unsigned int size; /* In bytes. */
+ unsigned int type; /* free or prom memory */
};
#endif /* !(_MIPS_PROM_H) */
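For illustration only (not part of the patch), a minimal sketch of walking an array of the prom_pmemblock descriptors declared above; the array, its element count and the numeric "free" type value are assumptions invented for the example, since the real descriptors are filled in by platform boot code:

static unsigned long count_free_bytes(const struct prom_pmemblock *blk, int nr)
{
	unsigned long bytes = 0;
	int i;

	/* Sum the sizes of the blocks marked free (type value assumed). */
	for (i = 0; i < nr && i < PROM_MAX_PMEMBLOCKS; i++)
		if (blk[i].type == 0 /* hypothetical "free" type */)
			bytes += blk[i].size;

	return bytes;
}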
diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h
index d634d9a807f..6b17aaf7d90 100644
--- a/arch/mips/include/asm/mips-boards/sead3int.h
+++ b/arch/mips/include/asm/mips-boards/sead3int.h
@@ -4,8 +4,8 @@
* for more details.
*
* Copyright (C) 2000,2012 MIPS Technologies, Inc. All rights reserved.
- * Douglas Leung <douglas@mips.com>
- * Steven J. Hill <sjhill@mips.com>
+ * Douglas Leung <douglas@mips.com>
+ * Steven J. Hill <sjhill@mips.com>
*/
#ifndef _MIPS_SEAD3INT_H
#define _MIPS_SEAD3INT_H
diff --git a/arch/mips/include/asm/mips-boards/sim.h b/arch/mips/include/asm/mips-boards/sim.h
index acb7c2331d9..b112fdc9f77 100644
--- a/arch/mips/include/asm/mips-boards/sim.h
+++ b/arch/mips/include/asm/mips-boards/sim.h
@@ -19,18 +19,18 @@
#ifndef _ASM_MIPS_BOARDS_SIM_H
#define _ASM_MIPS_BOARDS_SIM_H
-#define STATS_ON 1
-#define STATS_OFF 2
-#define STATS_CLEAR 3
-#define STATS_DUMP 4
+#define STATS_ON 1
+#define STATS_OFF 2
+#define STATS_CLEAR 3
+#define STATS_DUMP 4
#define TRACE_ON 5
-#define TRACE_OFF 6
+#define TRACE_OFF 6
#define simcfg(code) \
({ \
- __asm__ __volatile__( \
- "sltiu $0,$0, %0" \
+ __asm__ __volatile__( \
+ "sltiu $0,$0, %0" \
::"i"(code) \
); \
})
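For illustration only (not part of the patch): simcfg() above encodes its argument as the immediate of a harmless "sltiu $0, $0, imm", which writes only to the zero register and so does nothing on real hardware, but which an instruction-set simulator can trap as a directive. A sketch of how a region of interest might be bracketed with the codes defined above:

static void profile_region(void (*fn)(void))
{
	simcfg(STATS_CLEAR);	/* reset the simulator's statistics       */
	simcfg(TRACE_ON);	/* start instruction tracing              */
	fn();			/* run the code being measured            */
	simcfg(TRACE_OFF);	/* stop tracing                           */
	simcfg(STATS_DUMP);	/* ask the simulator to emit its counters */
}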
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 5b3cb8553e9..38b7704ee37 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -270,14 +270,14 @@ static inline void ehb(void)
#define mftc0(rt,sel) \
({ \
- unsigned long __res; \
+ unsigned long __res; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set mips32r2 \n" \
" .set noat \n" \
- " # mftc0 $1, $" #rt ", " #sel " \n" \
- " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
+ " # mftc0 $1, $" #rt ", " #sel " \n" \
+ " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res)); \
@@ -334,7 +334,7 @@ do { \
" .set noat \n" \
" move $1, %0 \n" \
" # mttc0 %0," #rd ", " #sel " \n" \
- " .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \
+ " .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \
" .set pop \n" \
: \
: "r" (v)); \
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 7e4e6f8fab3..12b70c25906 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -123,16 +123,16 @@
* Status Register Values
*/
-#define FPU_CSR_FLUSH 0x01000000 /* flush denormalised results to 0 */
-#define FPU_CSR_COND 0x00800000 /* $fcc0 */
-#define FPU_CSR_COND0 0x00800000 /* $fcc0 */
-#define FPU_CSR_COND1 0x02000000 /* $fcc1 */
-#define FPU_CSR_COND2 0x04000000 /* $fcc2 */
-#define FPU_CSR_COND3 0x08000000 /* $fcc3 */
-#define FPU_CSR_COND4 0x10000000 /* $fcc4 */
-#define FPU_CSR_COND5 0x20000000 /* $fcc5 */
-#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
-#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
+#define FPU_CSR_FLUSH 0x01000000 /* flush denormalised results to 0 */
+#define FPU_CSR_COND 0x00800000 /* $fcc0 */
+#define FPU_CSR_COND0 0x00800000 /* $fcc0 */
+#define FPU_CSR_COND1 0x02000000 /* $fcc1 */
+#define FPU_CSR_COND2 0x04000000 /* $fcc2 */
+#define FPU_CSR_COND3 0x08000000 /* $fcc3 */
+#define FPU_CSR_COND4 0x10000000 /* $fcc4 */
+#define FPU_CSR_COND5 0x20000000 /* $fcc5 */
+#define FPU_CSR_COND6 0x40000000 /* $fcc6 */
+#define FPU_CSR_COND7 0x80000000 /* $fcc7 */
/*
* Bits 18 - 20 of the FPU Status Register will be read as 0,
@@ -145,34 +145,34 @@
* E the exception enable
* S the sticky/flag bit
*/
-#define FPU_CSR_ALL_X 0x0003f000
-#define FPU_CSR_UNI_X 0x00020000
-#define FPU_CSR_INV_X 0x00010000
-#define FPU_CSR_DIV_X 0x00008000
-#define FPU_CSR_OVF_X 0x00004000
-#define FPU_CSR_UDF_X 0x00002000
-#define FPU_CSR_INE_X 0x00001000
-
-#define FPU_CSR_ALL_E 0x00000f80
-#define FPU_CSR_INV_E 0x00000800
-#define FPU_CSR_DIV_E 0x00000400
-#define FPU_CSR_OVF_E 0x00000200
-#define FPU_CSR_UDF_E 0x00000100
-#define FPU_CSR_INE_E 0x00000080
-
-#define FPU_CSR_ALL_S 0x0000007c
-#define FPU_CSR_INV_S 0x00000040
-#define FPU_CSR_DIV_S 0x00000020
-#define FPU_CSR_OVF_S 0x00000010
-#define FPU_CSR_UDF_S 0x00000008
-#define FPU_CSR_INE_S 0x00000004
+#define FPU_CSR_ALL_X 0x0003f000
+#define FPU_CSR_UNI_X 0x00020000
+#define FPU_CSR_INV_X 0x00010000
+#define FPU_CSR_DIV_X 0x00008000
+#define FPU_CSR_OVF_X 0x00004000
+#define FPU_CSR_UDF_X 0x00002000
+#define FPU_CSR_INE_X 0x00001000
+
+#define FPU_CSR_ALL_E 0x00000f80
+#define FPU_CSR_INV_E 0x00000800
+#define FPU_CSR_DIV_E 0x00000400
+#define FPU_CSR_OVF_E 0x00000200
+#define FPU_CSR_UDF_E 0x00000100
+#define FPU_CSR_INE_E 0x00000080
+
+#define FPU_CSR_ALL_S 0x0000007c
+#define FPU_CSR_INV_S 0x00000040
+#define FPU_CSR_DIV_S 0x00000020
+#define FPU_CSR_OVF_S 0x00000010
+#define FPU_CSR_UDF_S 0x00000008
+#define FPU_CSR_INE_S 0x00000004
/* Bits 0 and 1 of FPU Status Register specify the rounding mode */
#define FPU_CSR_RM 0x00000003
-#define FPU_CSR_RN 0x0 /* nearest */
-#define FPU_CSR_RZ 0x1 /* towards zero */
-#define FPU_CSR_RU 0x2 /* towards +Infinity */
-#define FPU_CSR_RD 0x3 /* towards -Infinity */
+#define FPU_CSR_RN 0x0 /* nearest */
+#define FPU_CSR_RZ 0x1 /* towards zero */
+#define FPU_CSR_RU 0x2 /* towards +Infinity */
+#define FPU_CSR_RD 0x3 /* towards -Infinity */
/*
@@ -214,15 +214,15 @@
* Default page size for a given kernel configuration
*/
#ifdef CONFIG_PAGE_SIZE_4KB
-#define PM_DEFAULT_MASK PM_4K
+#define PM_DEFAULT_MASK PM_4K
#elif defined(CONFIG_PAGE_SIZE_8KB)
-#define PM_DEFAULT_MASK PM_8K
+#define PM_DEFAULT_MASK PM_8K
#elif defined(CONFIG_PAGE_SIZE_16KB)
-#define PM_DEFAULT_MASK PM_16K
+#define PM_DEFAULT_MASK PM_16K
#elif defined(CONFIG_PAGE_SIZE_32KB)
-#define PM_DEFAULT_MASK PM_32K
+#define PM_DEFAULT_MASK PM_32K
#elif defined(CONFIG_PAGE_SIZE_64KB)
-#define PM_DEFAULT_MASK PM_64K
+#define PM_DEFAULT_MASK PM_64K
#else
#error Bad page size configuration!
#endif
@@ -260,34 +260,34 @@
/*
* PageGrain bits
*/
-#define PG_RIE (_ULCAST_(1) << 31)
-#define PG_XIE (_ULCAST_(1) << 30)
-#define PG_ELPA (_ULCAST_(1) << 29)
-#define PG_ESP (_ULCAST_(1) << 28)
+#define PG_RIE (_ULCAST_(1) << 31)
+#define PG_XIE (_ULCAST_(1) << 30)
+#define PG_ELPA (_ULCAST_(1) << 29)
+#define PG_ESP (_ULCAST_(1) << 28)
/*
* R4x00 interrupt enable / cause bits
*/
-#define IE_SW0 (_ULCAST_(1) << 8)
-#define IE_SW1 (_ULCAST_(1) << 9)
-#define IE_IRQ0 (_ULCAST_(1) << 10)
-#define IE_IRQ1 (_ULCAST_(1) << 11)
-#define IE_IRQ2 (_ULCAST_(1) << 12)
-#define IE_IRQ3 (_ULCAST_(1) << 13)
-#define IE_IRQ4 (_ULCAST_(1) << 14)
-#define IE_IRQ5 (_ULCAST_(1) << 15)
+#define IE_SW0 (_ULCAST_(1) << 8)
+#define IE_SW1 (_ULCAST_(1) << 9)
+#define IE_IRQ0 (_ULCAST_(1) << 10)
+#define IE_IRQ1 (_ULCAST_(1) << 11)
+#define IE_IRQ2 (_ULCAST_(1) << 12)
+#define IE_IRQ3 (_ULCAST_(1) << 13)
+#define IE_IRQ4 (_ULCAST_(1) << 14)
+#define IE_IRQ5 (_ULCAST_(1) << 15)
/*
* R4x00 interrupt cause bits
*/
-#define C_SW0 (_ULCAST_(1) << 8)
-#define C_SW1 (_ULCAST_(1) << 9)
-#define C_IRQ0 (_ULCAST_(1) << 10)
-#define C_IRQ1 (_ULCAST_(1) << 11)
-#define C_IRQ2 (_ULCAST_(1) << 12)
-#define C_IRQ3 (_ULCAST_(1) << 13)
-#define C_IRQ4 (_ULCAST_(1) << 14)
-#define C_IRQ5 (_ULCAST_(1) << 15)
+#define C_SW0 (_ULCAST_(1) << 8)
+#define C_SW1 (_ULCAST_(1) << 9)
+#define C_IRQ0 (_ULCAST_(1) << 10)
+#define C_IRQ1 (_ULCAST_(1) << 11)
+#define C_IRQ2 (_ULCAST_(1) << 12)
+#define C_IRQ3 (_ULCAST_(1) << 13)
+#define C_IRQ4 (_ULCAST_(1) << 14)
+#define C_IRQ5 (_ULCAST_(1) << 15)
/*
* Bitfields in the R4xx0 cp0 status register
@@ -301,7 +301,7 @@
# define KSU_KERNEL 0x00000000
#define ST0_UX 0x00000020
#define ST0_SX 0x00000040
-#define ST0_KX 0x00000080
+#define ST0_KX 0x00000080
#define ST0_DE 0x00010000
#define ST0_CE 0x00020000
@@ -315,7 +315,7 @@
/*
* Bitfields in the R[23]000 cp0 status register.
*/
-#define ST0_IEC 0x00000001
+#define ST0_IEC 0x00000001
#define ST0_KUC 0x00000002
#define ST0_IEP 0x00000004
#define ST0_KUP 0x00000008
@@ -329,7 +329,7 @@
/*
* Bits specific to the R4640/R4650
*/
-#define ST0_UM (_ULCAST_(1) << 4)
+#define ST0_UM (_ULCAST_(1) << 4)
#define ST0_IL (_ULCAST_(1) << 23)
#define ST0_DL (_ULCAST_(1) << 24)
@@ -343,22 +343,22 @@
*/
#define TX39_CONF_ICS_SHIFT 19
#define TX39_CONF_ICS_MASK 0x00380000
-#define TX39_CONF_ICS_1KB 0x00000000
-#define TX39_CONF_ICS_2KB 0x00080000
-#define TX39_CONF_ICS_4KB 0x00100000
-#define TX39_CONF_ICS_8KB 0x00180000
-#define TX39_CONF_ICS_16KB 0x00200000
+#define TX39_CONF_ICS_1KB 0x00000000
+#define TX39_CONF_ICS_2KB 0x00080000
+#define TX39_CONF_ICS_4KB 0x00100000
+#define TX39_CONF_ICS_8KB 0x00180000
+#define TX39_CONF_ICS_16KB 0x00200000
#define TX39_CONF_DCS_SHIFT 16
#define TX39_CONF_DCS_MASK 0x00070000
-#define TX39_CONF_DCS_1KB 0x00000000
-#define TX39_CONF_DCS_2KB 0x00010000
-#define TX39_CONF_DCS_4KB 0x00020000
-#define TX39_CONF_DCS_8KB 0x00030000
-#define TX39_CONF_DCS_16KB 0x00040000
-
-#define TX39_CONF_CWFON 0x00004000
-#define TX39_CONF_WBON 0x00002000
+#define TX39_CONF_DCS_1KB 0x00000000
+#define TX39_CONF_DCS_2KB 0x00010000
+#define TX39_CONF_DCS_4KB 0x00020000
+#define TX39_CONF_DCS_8KB 0x00030000
+#define TX39_CONF_DCS_16KB 0x00040000
+
+#define TX39_CONF_CWFON 0x00004000
+#define TX39_CONF_WBON 0x00002000
#define TX39_CONF_RF_SHIFT 10
#define TX39_CONF_RF_MASK 0x00000c00
#define TX39_CONF_DOZE 0x00000200
@@ -375,38 +375,38 @@
* Status register bits available in all MIPS CPUs.
*/
#define ST0_IM 0x0000ff00
-#define STATUSB_IP0 8
-#define STATUSF_IP0 (_ULCAST_(1) << 8)
-#define STATUSB_IP1 9
-#define STATUSF_IP1 (_ULCAST_(1) << 9)
-#define STATUSB_IP2 10
-#define STATUSF_IP2 (_ULCAST_(1) << 10)
-#define STATUSB_IP3 11
-#define STATUSF_IP3 (_ULCAST_(1) << 11)
-#define STATUSB_IP4 12
-#define STATUSF_IP4 (_ULCAST_(1) << 12)
-#define STATUSB_IP5 13
-#define STATUSF_IP5 (_ULCAST_(1) << 13)
-#define STATUSB_IP6 14
-#define STATUSF_IP6 (_ULCAST_(1) << 14)
-#define STATUSB_IP7 15
-#define STATUSF_IP7 (_ULCAST_(1) << 15)
-#define STATUSB_IP8 0
-#define STATUSF_IP8 (_ULCAST_(1) << 0)
-#define STATUSB_IP9 1
-#define STATUSF_IP9 (_ULCAST_(1) << 1)
-#define STATUSB_IP10 2
-#define STATUSF_IP10 (_ULCAST_(1) << 2)
-#define STATUSB_IP11 3
-#define STATUSF_IP11 (_ULCAST_(1) << 3)
-#define STATUSB_IP12 4
-#define STATUSF_IP12 (_ULCAST_(1) << 4)
-#define STATUSB_IP13 5
-#define STATUSF_IP13 (_ULCAST_(1) << 5)
-#define STATUSB_IP14 6
-#define STATUSF_IP14 (_ULCAST_(1) << 6)
-#define STATUSB_IP15 7
-#define STATUSF_IP15 (_ULCAST_(1) << 7)
+#define STATUSB_IP0 8
+#define STATUSF_IP0 (_ULCAST_(1) << 8)
+#define STATUSB_IP1 9
+#define STATUSF_IP1 (_ULCAST_(1) << 9)
+#define STATUSB_IP2 10
+#define STATUSF_IP2 (_ULCAST_(1) << 10)
+#define STATUSB_IP3 11
+#define STATUSF_IP3 (_ULCAST_(1) << 11)
+#define STATUSB_IP4 12
+#define STATUSF_IP4 (_ULCAST_(1) << 12)
+#define STATUSB_IP5 13
+#define STATUSF_IP5 (_ULCAST_(1) << 13)
+#define STATUSB_IP6 14
+#define STATUSF_IP6 (_ULCAST_(1) << 14)
+#define STATUSB_IP7 15
+#define STATUSF_IP7 (_ULCAST_(1) << 15)
+#define STATUSB_IP8 0
+#define STATUSF_IP8 (_ULCAST_(1) << 0)
+#define STATUSB_IP9 1
+#define STATUSF_IP9 (_ULCAST_(1) << 1)
+#define STATUSB_IP10 2
+#define STATUSF_IP10 (_ULCAST_(1) << 2)
+#define STATUSB_IP11 3
+#define STATUSF_IP11 (_ULCAST_(1) << 3)
+#define STATUSB_IP12 4
+#define STATUSF_IP12 (_ULCAST_(1) << 4)
+#define STATUSB_IP13 5
+#define STATUSF_IP13 (_ULCAST_(1) << 5)
+#define STATUSB_IP14 6
+#define STATUSF_IP14 (_ULCAST_(1) << 6)
+#define STATUSB_IP15 7
+#define STATUSF_IP15 (_ULCAST_(1) << 7)
#define ST0_CH 0x00040000
#define ST0_NMI 0x00080000
#define ST0_SR 0x00100000
@@ -436,36 +436,36 @@
*
* Refer to your MIPS R4xx0 manual, chapter 5 for explanation.
*/
-#define CAUSEB_EXCCODE 2
-#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
-#define CAUSEB_IP 8
-#define CAUSEF_IP (_ULCAST_(255) << 8)
-#define CAUSEB_IP0 8
-#define CAUSEF_IP0 (_ULCAST_(1) << 8)
-#define CAUSEB_IP1 9
-#define CAUSEF_IP1 (_ULCAST_(1) << 9)
-#define CAUSEB_IP2 10
-#define CAUSEF_IP2 (_ULCAST_(1) << 10)
-#define CAUSEB_IP3 11
-#define CAUSEF_IP3 (_ULCAST_(1) << 11)
-#define CAUSEB_IP4 12
-#define CAUSEF_IP4 (_ULCAST_(1) << 12)
-#define CAUSEB_IP5 13
-#define CAUSEF_IP5 (_ULCAST_(1) << 13)
-#define CAUSEB_IP6 14
-#define CAUSEF_IP6 (_ULCAST_(1) << 14)
-#define CAUSEB_IP7 15
-#define CAUSEF_IP7 (_ULCAST_(1) << 15)
-#define CAUSEB_IV 23
-#define CAUSEF_IV (_ULCAST_(1) << 23)
-#define CAUSEB_PCI 26
-#define CAUSEF_PCI (_ULCAST_(1) << 26)
-#define CAUSEB_CE 28
-#define CAUSEF_CE (_ULCAST_(3) << 28)
-#define CAUSEB_TI 30
-#define CAUSEF_TI (_ULCAST_(1) << 30)
-#define CAUSEB_BD 31
-#define CAUSEF_BD (_ULCAST_(1) << 31)
+#define CAUSEB_EXCCODE 2
+#define CAUSEF_EXCCODE (_ULCAST_(31) << 2)
+#define CAUSEB_IP 8
+#define CAUSEF_IP (_ULCAST_(255) << 8)
+#define CAUSEB_IP0 8
+#define CAUSEF_IP0 (_ULCAST_(1) << 8)
+#define CAUSEB_IP1 9
+#define CAUSEF_IP1 (_ULCAST_(1) << 9)
+#define CAUSEB_IP2 10
+#define CAUSEF_IP2 (_ULCAST_(1) << 10)
+#define CAUSEB_IP3 11
+#define CAUSEF_IP3 (_ULCAST_(1) << 11)
+#define CAUSEB_IP4 12
+#define CAUSEF_IP4 (_ULCAST_(1) << 12)
+#define CAUSEB_IP5 13
+#define CAUSEF_IP5 (_ULCAST_(1) << 13)
+#define CAUSEB_IP6 14
+#define CAUSEF_IP6 (_ULCAST_(1) << 14)
+#define CAUSEB_IP7 15
+#define CAUSEF_IP7 (_ULCAST_(1) << 15)
+#define CAUSEB_IV 23
+#define CAUSEF_IV (_ULCAST_(1) << 23)
+#define CAUSEB_PCI 26
+#define CAUSEF_PCI (_ULCAST_(1) << 26)
+#define CAUSEB_CE 28
+#define CAUSEF_CE (_ULCAST_(3) << 28)
+#define CAUSEB_TI 30
+#define CAUSEF_TI (_ULCAST_(1) << 30)
+#define CAUSEB_BD 31
+#define CAUSEF_BD (_ULCAST_(1) << 31)
/*
* Bits in the coprocessor 0 config register.
@@ -483,11 +483,11 @@
#define CONF_BE (_ULCAST_(1) << 15)
/* Bits common to various processors. */
-#define CONF_CU (_ULCAST_(1) << 3)
-#define CONF_DB (_ULCAST_(1) << 4)
-#define CONF_IB (_ULCAST_(1) << 5)
-#define CONF_DC (_ULCAST_(7) << 6)
-#define CONF_IC (_ULCAST_(7) << 9)
+#define CONF_CU (_ULCAST_(1) << 3)
+#define CONF_DB (_ULCAST_(1) << 4)
+#define CONF_IB (_ULCAST_(1) << 5)
+#define CONF_DC (_ULCAST_(7) << 6)
+#define CONF_IC (_ULCAST_(7) << 9)
#define CONF_EB (_ULCAST_(1) << 13)
#define CONF_EM (_ULCAST_(1) << 14)
#define CONF_SM (_ULCAST_(1) << 16)
@@ -497,29 +497,29 @@
#define CONF_EC (_ULCAST_(7) << 28)
#define CONF_CM (_ULCAST_(1) << 31)
-/* Bits specific to the R4xx0. */
+/* Bits specific to the R4xx0. */
#define R4K_CONF_SW (_ULCAST_(1) << 20)
#define R4K_CONF_SS (_ULCAST_(1) << 21)
#define R4K_CONF_SB (_ULCAST_(3) << 22)
-/* Bits specific to the R5000. */
+/* Bits specific to the R5000. */
#define R5K_CONF_SE (_ULCAST_(1) << 12)
#define R5K_CONF_SS (_ULCAST_(3) << 20)
-/* Bits specific to the RM7000. */
-#define RM7K_CONF_SE (_ULCAST_(1) << 3)
+/* Bits specific to the RM7000. */
+#define RM7K_CONF_SE (_ULCAST_(1) << 3)
#define RM7K_CONF_TE (_ULCAST_(1) << 12)
#define RM7K_CONF_CLK (_ULCAST_(1) << 16)
#define RM7K_CONF_TC (_ULCAST_(1) << 17)
#define RM7K_CONF_SI (_ULCAST_(3) << 20)
#define RM7K_CONF_SC (_ULCAST_(1) << 31)
-/* Bits specific to the R10000. */
-#define R10K_CONF_DN (_ULCAST_(3) << 3)
-#define R10K_CONF_CT (_ULCAST_(1) << 5)
-#define R10K_CONF_PE (_ULCAST_(1) << 6)
-#define R10K_CONF_PM (_ULCAST_(3) << 7)
-#define R10K_CONF_EC (_ULCAST_(15)<< 9)
+/* Bits specific to the R10000. */
+#define R10K_CONF_DN (_ULCAST_(3) << 3)
+#define R10K_CONF_CT (_ULCAST_(1) << 5)
+#define R10K_CONF_PE (_ULCAST_(1) << 6)
+#define R10K_CONF_PM (_ULCAST_(3) << 7)
+#define R10K_CONF_EC (_ULCAST_(15)<< 9)
#define R10K_CONF_SB (_ULCAST_(1) << 13)
#define R10K_CONF_SK (_ULCAST_(1) << 14)
#define R10K_CONF_SS (_ULCAST_(7) << 16)
@@ -527,14 +527,14 @@
#define R10K_CONF_DC (_ULCAST_(7) << 26)
#define R10K_CONF_IC (_ULCAST_(7) << 29)
-/* Bits specific to the VR41xx. */
+/* Bits specific to the VR41xx. */
#define VR41_CONF_CS (_ULCAST_(1) << 12)
#define VR41_CONF_P4K (_ULCAST_(1) << 13)
#define VR41_CONF_BP (_ULCAST_(1) << 16)
#define VR41_CONF_M16 (_ULCAST_(1) << 20)
#define VR41_CONF_AD (_ULCAST_(1) << 23)
-/* Bits specific to the R30xx. */
+/* Bits specific to the R30xx. */
#define R30XX_CONF_FDM (_ULCAST_(1) << 19)
#define R30XX_CONF_REV (_ULCAST_(1) << 22)
#define R30XX_CONF_AC (_ULCAST_(1) << 23)
@@ -551,8 +551,8 @@
#define TX49_CONF_HALT (_ULCAST_(1) << 18)
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
-/* Bits specific to the MIPS32/64 PRA. */
-#define MIPS_CONF_MT (_ULCAST_(7) << 7)
+/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_MT (_ULCAST_(7) << 7)
#define MIPS_CONF_AR (_ULCAST_(7) << 10)
#define MIPS_CONF_AT (_ULCAST_(3) << 13)
#define MIPS_CONF_M (_ULCAST_(1) << 31)
@@ -560,14 +560,14 @@
/*
* Bits in the MIPS32/64 PRA coprocessor 0 config registers 1 and above.
*/
-#define MIPS_CONF1_FP (_ULCAST_(1) << 0)
-#define MIPS_CONF1_EP (_ULCAST_(1) << 1)
-#define MIPS_CONF1_CA (_ULCAST_(1) << 2)
-#define MIPS_CONF1_WR (_ULCAST_(1) << 3)
-#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
-#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
-#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
-#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
+#define MIPS_CONF1_FP (_ULCAST_(1) << 0)
+#define MIPS_CONF1_EP (_ULCAST_(1) << 1)
+#define MIPS_CONF1_CA (_ULCAST_(1) << 2)
+#define MIPS_CONF1_WR (_ULCAST_(1) << 3)
+#define MIPS_CONF1_PC (_ULCAST_(1) << 4)
+#define MIPS_CONF1_MD (_ULCAST_(1) << 5)
+#define MIPS_CONF1_C2 (_ULCAST_(1) << 6)
+#define MIPS_CONF1_DA (_ULCAST_(7) << 7)
#define MIPS_CONF1_DL (_ULCAST_(7) << 10)
#define MIPS_CONF1_DS (_ULCAST_(7) << 13)
#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
@@ -575,26 +575,28 @@
#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
#define MIPS_CONF1_TLBS (_ULCAST_(63)<< 25)
-#define MIPS_CONF2_SA (_ULCAST_(15)<< 0)
-#define MIPS_CONF2_SL (_ULCAST_(15)<< 4)
-#define MIPS_CONF2_SS (_ULCAST_(15)<< 8)
+#define MIPS_CONF2_SA (_ULCAST_(15)<< 0)
+#define MIPS_CONF2_SL (_ULCAST_(15)<< 4)
+#define MIPS_CONF2_SS (_ULCAST_(15)<< 8)
#define MIPS_CONF2_SU (_ULCAST_(15)<< 12)
#define MIPS_CONF2_TA (_ULCAST_(15)<< 16)
#define MIPS_CONF2_TL (_ULCAST_(15)<< 20)
#define MIPS_CONF2_TS (_ULCAST_(15)<< 24)
#define MIPS_CONF2_TU (_ULCAST_(7) << 28)
-#define MIPS_CONF3_TL (_ULCAST_(1) << 0)
-#define MIPS_CONF3_SM (_ULCAST_(1) << 1)
-#define MIPS_CONF3_MT (_ULCAST_(1) << 2)
-#define MIPS_CONF3_SP (_ULCAST_(1) << 4)
-#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
-#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
-#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
+#define MIPS_CONF3_TL (_ULCAST_(1) << 0)
+#define MIPS_CONF3_SM (_ULCAST_(1) << 1)
+#define MIPS_CONF3_MT (_ULCAST_(1) << 2)
+#define MIPS_CONF3_SP (_ULCAST_(1) << 4)
+#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
+#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
+#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
#define MIPS_CONF3_DSP (_ULCAST_(1) << 10)
#define MIPS_CONF3_DSP2P (_ULCAST_(1) << 11)
#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
+#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
+#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14)
@@ -621,7 +623,7 @@
#ifndef __ASSEMBLY__
/*
- * Functions to access the R10000 performance counters. These are basically
+ * Functions to access the R10000 performance counters. These are basically
* mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit
* performance counter number encoded into bits 1 ... 5 of the instruction.
* Only performance counters 0 to 1 actually exist, so for a non-R10000 aware
@@ -632,13 +634,13 @@
unsigned int __res; \
__asm__ __volatile__( \
"mfpc\t%0, %1" \
- : "=r" (__res) \
+ : "=r" (__res) \
: "i" (counter)); \
\
- __res; \
+ __res; \
})
-#define write_r10k_perf_cntr(counter,val) \
+#define write_r10k_perf_cntr(counter,val) \
do { \
__asm__ __volatile__( \
"mtpc\t%0, %1" \
@@ -651,13 +653,13 @@ do { \
unsigned int __res; \
__asm__ __volatile__( \
"mfps\t%0, %1" \
- : "=r" (__res) \
+ : "=r" (__res) \
: "i" (counter)); \
\
- __res; \
+ __res; \
})
-#define write_r10k_perf_cntl(counter,val) \
+#define write_r10k_perf_cntl(counter,val) \
do { \
__asm__ __volatile__( \
"mtps\t%0, %1" \
@@ -847,20 +849,20 @@ do { \
#define write_c0_context(val) __write_ulong_c0_register($4, 0, val)
#define read_c0_userlocal() __read_ulong_c0_register($4, 2)
-#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
+#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
#define read_c0_pagegrain() __read_32bit_c0_register($5, 1)
-#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val)
+#define write_c0_pagegrain(val) __write_32bit_c0_register($5, 1, val)
#define read_c0_wired() __read_32bit_c0_register($6, 0)
#define write_c0_wired(val) __write_32bit_c0_register($6, 0, val)
#define read_c0_info() __read_32bit_c0_register($7, 0)
-#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */
+#define read_c0_cache() __read_32bit_c0_register($7, 0) /* TX39xx */
#define write_c0_cache(val) __write_32bit_c0_register($7, 0, val)
#define read_c0_badvaddr() __read_ulong_c0_register($8, 0)
@@ -975,7 +977,7 @@ do { \
#define write_c0_intcontrol(val) __write_32bit_c0_ctrl_register($20, val)
#define read_c0_framemask() __read_32bit_c0_register($21, 0)
-#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
+#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
#define read_c0_diag() __read_32bit_c0_register($22, 0)
#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
@@ -1005,27 +1007,27 @@ do { \
* MIPS32 / MIPS64 performance counters
*/
#define read_c0_perfctrl0() __read_32bit_c0_register($25, 0)
-#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val)
+#define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val)
#define read_c0_perfcntr0() __read_32bit_c0_register($25, 1)
-#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val)
+#define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val)
#define read_c0_perfcntr0_64() __read_64bit_c0_register($25, 1)
#define write_c0_perfcntr0_64(val) __write_64bit_c0_register($25, 1, val)
#define read_c0_perfctrl1() __read_32bit_c0_register($25, 2)
-#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val)
+#define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val)
#define read_c0_perfcntr1() __read_32bit_c0_register($25, 3)
-#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val)
+#define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val)
#define read_c0_perfcntr1_64() __read_64bit_c0_register($25, 3)
#define write_c0_perfcntr1_64(val) __write_64bit_c0_register($25, 3, val)
#define read_c0_perfctrl2() __read_32bit_c0_register($25, 4)
-#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val)
+#define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val)
#define read_c0_perfcntr2() __read_32bit_c0_register($25, 5)
-#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val)
+#define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val)
#define read_c0_perfcntr2_64() __read_64bit_c0_register($25, 5)
#define write_c0_perfcntr2_64(val) __write_64bit_c0_register($25, 5, val)
#define read_c0_perfctrl3() __read_32bit_c0_register($25, 6)
-#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val)
+#define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val)
#define read_c0_perfcntr3() __read_32bit_c0_register($25, 7)
-#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val)
+#define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val)
#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
@@ -1033,12 +1035,12 @@ do { \
#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)
#define read_c0_derraddr0() __read_ulong_c0_register($26, 1)
-#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
+#define write_c0_derraddr0(val) __write_ulong_c0_register($26, 1, val)
#define read_c0_cacheerr() __read_32bit_c0_register($27, 0)
#define read_c0_derraddr1() __read_ulong_c0_register($27, 1)
-#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val)
+#define write_c0_derraddr1(val) __write_ulong_c0_register($27, 1, val)
#define read_c0_taglo() __read_32bit_c0_register($28, 0)
#define write_c0_taglo(val) __write_32bit_c0_register($28, 0, val)
@@ -1083,9 +1085,9 @@ do { \
#define write_c0_cvmctl(val) __write_64bit_c0_register($9, 7, val)
#define read_c0_cvmmemctl() __read_64bit_c0_register($11, 7)
-#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
+#define write_c0_cvmmemctl(val) __write_64bit_c0_register($11, 7, val)
/*
- * The cacheerr registers are not standardized. On OCTEON, they are
+ * The cacheerr registers are not standardized. On OCTEON, they are
* 64 bits wide.
*/
#define read_octeon_c0_icacheerr() __read_64bit_c0_register($27, 0)
@@ -1142,48 +1144,42 @@ do { \
/*
* Macros to access the floating point coprocessor control registers
*/
-#define read_32bit_cp1_register(source) \
-({ int __res; \
- __asm__ __volatile__( \
- ".set\tpush\n\t" \
- ".set\treorder\n\t" \
- /* gas fails to assemble cfc1 for some archs (octeon).*/ \
- ".set\tmips1\n\t" \
- "cfc1\t%0,"STR(source)"\n\t" \
- ".set\tpop" \
- : "=r" (__res)); \
- __res;})
+#define read_32bit_cp1_register(source) \
+({ \
+ int __res; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set reorder \n" \
+ " # gas fails to assemble cfc1 for some archs, \n" \
+ " # like Octeon. \n" \
+ " .set mips1 \n" \
+ " cfc1 %0,"STR(source)" \n" \
+ " .set pop \n" \
+ : "=r" (__res)); \
+ __res; \
+})
+#ifdef HAVE_AS_DSP
#define rddsp(mask) \
({ \
- unsigned int __res; \
+ unsigned int __dspctl; \
\
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # rddsp $1, %x1 \n" \
- " .word 0x7c000cb8 | (%x1 << 16) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__res) \
+ " rddsp %0, %x1 \n" \
+ : "=r" (__dspctl) \
: "i" (mask)); \
- __res; \
+ __dspctl; \
})
#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # wrdsp $1, %x1 \n" \
- " .word 0x7c2004f8 | (%x1 << 11) \n" \
- " .set pop \n" \
- : \
+ " wrdsp %0, %x1 \n" \
+ : \
: "r" (val), "i" (mask)); \
} while (0)
-#if 0 /* Need DSP ASE capable assembler ... */
#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
@@ -1206,230 +1202,177 @@ do { \
#else
-#define mfhi0() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac0 \n" \
- " .word 0x00000810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mfhi1() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac1 \n" \
- " .word 0x00200810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mfhi2() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac2 \n" \
- " .word 0x00400810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mfhi3() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mfhi %0, $ac3 \n" \
- " .word 0x00600810 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo0() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac0 \n" \
- " .word 0x00000812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo1() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac1 \n" \
- " .word 0x00200812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo2() \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac2 \n" \
- " .word 0x00400812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mflo3() \
+#ifdef CONFIG_CPU_MICROMIPS
+#define rddsp(mask) \
({ \
- unsigned long __treg; \
+ unsigned int __res; \
\
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # mflo %0, $ac3 \n" \
- " .word 0x00600812 \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg)); \
- __treg; \
-})
-
-#define mthi0(x) \
-do { \
- __asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " move $1, %0 \n" \
- " # mthi $1, $ac0 \n" \
- " .word 0x00200011 \n" \
+ " # rddsp $1, %x1 \n" \
+ " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \
+ " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \
+ " move %0, $1 \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ : "=r" (__res) \
+ : "i" (mask)); \
+ __res; \
+})
-#define mthi1(x) \
+#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mthi $1, $ac1 \n" \
- " .word 0x00200811 \n" \
+ " # wrdsp $1, %x1 \n" \
+ " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \
+ " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \
" .set pop \n" \
: \
- : "r" (x)); \
+ : "r" (val), "i" (mask)); \
} while (0)
-#define mthi2(x) \
-do { \
+#define _umips_dsp_mfxxx(ins) \
+({ \
+ unsigned long __treg; \
+ \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " move $1, %0 \n" \
- " # mthi $1, $ac2 \n" \
- " .word 0x00201011 \n" \
+ " .hword 0x0001 \n" \
+ " .hword %x1 \n" \
+ " move %0, $1 \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ : "=r" (__treg) \
+ : "i" (ins)); \
+ __treg; \
+})
-#define mthi3(x) \
+#define _umips_dsp_mtxxx(val, ins) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mthi $1, $ac3 \n" \
- " .word 0x00201811 \n" \
+ " .hword 0x0001 \n" \
+ " .hword %x1 \n" \
" .set pop \n" \
: \
- : "r" (x)); \
+ : "r" (val), "i" (ins)); \
} while (0)
-#define mtlo0(x) \
-do { \
+#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
+#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
+
+#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
+#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
+
+#define mflo0() _umips_dsp_mflo(0)
+#define mflo1() _umips_dsp_mflo(1)
+#define mflo2() _umips_dsp_mflo(2)
+#define mflo3() _umips_dsp_mflo(3)
+
+#define mfhi0() _umips_dsp_mfhi(0)
+#define mfhi1() _umips_dsp_mfhi(1)
+#define mfhi2() _umips_dsp_mfhi(2)
+#define mfhi3() _umips_dsp_mfhi(3)
+
+#define mtlo0(x) _umips_dsp_mtlo(x, 0)
+#define mtlo1(x) _umips_dsp_mtlo(x, 1)
+#define mtlo2(x) _umips_dsp_mtlo(x, 2)
+#define mtlo3(x) _umips_dsp_mtlo(x, 3)
+
+#define mthi0(x) _umips_dsp_mthi(x, 0)
+#define mthi1(x) _umips_dsp_mthi(x, 1)
+#define mthi2(x) _umips_dsp_mthi(x, 2)
+#define mthi3(x) _umips_dsp_mthi(x, 3)
+
+#else /* !CONFIG_CPU_MICROMIPS */
+#define rddsp(mask) \
+({ \
+ unsigned int __res; \
+ \
__asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # mtlo $1, $ac0 \n" \
- " .word 0x00200013 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ " .set push \n" \
+ " .set noat \n" \
+ " # rddsp $1, %x1 \n" \
+ " .word 0x7c000cb8 | (%x1 << 16) \n" \
+ " move %0, $1 \n" \
+ " .set pop \n" \
+ : "=r" (__res) \
+ : "i" (mask)); \
+ __res; \
+})
-#define mtlo1(x) \
+#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mtlo $1, $ac1 \n" \
- " .word 0x00200813 \n" \
+ " # wrdsp $1, %x1 \n" \
+ " .word 0x7c2004f8 | (%x1 << 11) \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
+ : \
+ : "r" (val), "i" (mask)); \
} while (0)
-#define mtlo2(x) \
-do { \
+#define _dsp_mfxxx(ins) \
+({ \
+ unsigned long __treg; \
+ \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " move $1, %0 \n" \
- " # mtlo $1, $ac2 \n" \
- " .word 0x00201013 \n" \
+ " .word (0x00000810 | %1) \n" \
+ " move %0, $1 \n" \
" .set pop \n" \
- : \
- : "r" (x)); \
-} while (0)
+ : "=r" (__treg) \
+ : "i" (ins)); \
+ __treg; \
+})
-#define mtlo3(x) \
+#define _dsp_mtxxx(val, ins) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " # mtlo $1, $ac3 \n" \
- " .word 0x00201813 \n" \
+ " .word (0x00200011 | %1) \n" \
" .set pop \n" \
: \
- : "r" (x)); \
+ : "r" (val), "i" (ins)); \
} while (0)
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
+
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+
+#define mflo0() _dsp_mflo(0)
+#define mflo1() _dsp_mflo(1)
+#define mflo2() _dsp_mflo(2)
+#define mflo3() _dsp_mflo(3)
+
+#define mfhi0() _dsp_mfhi(0)
+#define mfhi1() _dsp_mfhi(1)
+#define mfhi2() _dsp_mfhi(2)
+#define mfhi3() _dsp_mfhi(3)
+
+#define mtlo0(x) _dsp_mtlo(x, 0)
+#define mtlo1(x) _dsp_mtlo(x, 1)
+#define mtlo2(x) _dsp_mtlo(x, 2)
+#define mtlo3(x) _dsp_mtlo(x, 3)
+
+#define mthi0(x) _dsp_mthi(x, 0)
+#define mthi1(x) _dsp_mthi(x, 1)
+#define mthi2(x) _dsp_mthi(x, 2)
+#define mthi3(x) _dsp_mthi(x, 3)
+
+#endif /* CONFIG_CPU_MICROMIPS */
#endif
/*
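For illustration only (not part of the patch), a minimal sketch decoding the rounding-mode field of an FCSR value with the FPU_CSR_* masks from this file; the FCSR value itself is assumed to have been read elsewhere (for instance through read_32bit_cp1_register()):

static const char *fpu_rounding_mode(unsigned int fcsr)
{
	switch (fcsr & FPU_CSR_RM) {	/* bits 1:0 select the rounding mode */
	case FPU_CSR_RN:
		return "round to nearest";
	case FPU_CSR_RZ:
		return "round towards zero";
	case FPU_CSR_RU:
		return "round towards +infinity";
	case FPU_CSR_RD:
		return "round towards -infinity";
	}
	return "unknown";		/* not reachable: RM is a two-bit field */
}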
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 45cfa1ad86a..e81d719efcd 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -77,7 +77,7 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK (smtc_asid_mask)
-#define HW_ASID_MASK 0xff
+#define HW_ASID_MASK 0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */
@@ -140,7 +140,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+ struct task_struct *tsk)
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
@@ -238,7 +238,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
- cpu_asid(cpu, next));
+ cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
diff --git a/arch/mips/include/asm/msc01_ic.h b/arch/mips/include/asm/msc01_ic.h
index d92406ae284..ff7f074d073 100644
--- a/arch/mips/include/asm/msc01_ic.h
+++ b/arch/mips/include/asm/msc01_ic.h
@@ -15,45 +15,45 @@
* Register offset addresses
*****************************************************************************/
-#define MSC01_IC_RST_OFS 0x00008 /* Software reset */
-#define MSC01_IC_ENAL_OFS 0x00100 /* Int_in enable mask 31:0 */
-#define MSC01_IC_ENAH_OFS 0x00108 /* Int_in enable mask 63:32 */
-#define MSC01_IC_DISL_OFS 0x00120 /* Int_in disable mask 31:0 */
-#define MSC01_IC_DISH_OFS 0x00128 /* Int_in disable mask 63:32 */
-#define MSC01_IC_ISBL_OFS 0x00140 /* Raw int_in 31:0 */
-#define MSC01_IC_ISBH_OFS 0x00148 /* Raw int_in 63:32 */
-#define MSC01_IC_ISAL_OFS 0x00160 /* Masked int_in 31:0 */
-#define MSC01_IC_ISAH_OFS 0x00168 /* Masked int_in 63:32 */
-#define MSC01_IC_LVL_OFS 0x00180 /* Disable priority int_out */
-#define MSC01_IC_RAMW_OFS 0x00180 /* Shadow set RAM (EI) */
-#define MSC01_IC_OSB_OFS 0x00188 /* Raw int_out */
-#define MSC01_IC_OSA_OFS 0x00190 /* Masked int_out */
-#define MSC01_IC_GENA_OFS 0x00198 /* Global HW int enable */
-#define MSC01_IC_BASE_OFS 0x001a0 /* Base address of IC_VEC */
-#define MSC01_IC_VEC_OFS 0x001b0 /* Active int's vector address */
-#define MSC01_IC_EOI_OFS 0x001c0 /* Enable lower level ints */
-#define MSC01_IC_CFG_OFS 0x001c8 /* Configuration register */
-#define MSC01_IC_TRLD_OFS 0x001d0 /* Interval timer reload val */
-#define MSC01_IC_TVAL_OFS 0x001e0 /* Interval timer current val */
-#define MSC01_IC_TCFG_OFS 0x001f0 /* Interval timer config */
-#define MSC01_IC_SUP_OFS 0x00200 /* Set up int_in line 0 */
-#define MSC01_IC_ENA_OFS 0x00800 /* Int_in enable mask 63:0 */
-#define MSC01_IC_DIS_OFS 0x00820 /* Int_in disable mask 63:0 */
-#define MSC01_IC_ISB_OFS 0x00840 /* Raw int_in 63:0 */
-#define MSC01_IC_ISA_OFS 0x00860 /* Masked int_in 63:0 */
+#define MSC01_IC_RST_OFS 0x00008 /* Software reset */
+#define MSC01_IC_ENAL_OFS 0x00100 /* Int_in enable mask 31:0 */
+#define MSC01_IC_ENAH_OFS 0x00108 /* Int_in enable mask 63:32 */
+#define MSC01_IC_DISL_OFS 0x00120 /* Int_in disable mask 31:0 */
+#define MSC01_IC_DISH_OFS 0x00128 /* Int_in disable mask 63:32 */
+#define MSC01_IC_ISBL_OFS 0x00140 /* Raw int_in 31:0 */
+#define MSC01_IC_ISBH_OFS 0x00148 /* Raw int_in 63:32 */
+#define MSC01_IC_ISAL_OFS 0x00160 /* Masked int_in 31:0 */
+#define MSC01_IC_ISAH_OFS 0x00168 /* Masked int_in 63:32 */
+#define MSC01_IC_LVL_OFS 0x00180 /* Disable priority int_out */
+#define MSC01_IC_RAMW_OFS 0x00180 /* Shadow set RAM (EI) */
+#define MSC01_IC_OSB_OFS 0x00188 /* Raw int_out */
+#define MSC01_IC_OSA_OFS 0x00190 /* Masked int_out */
+#define MSC01_IC_GENA_OFS 0x00198 /* Global HW int enable */
+#define MSC01_IC_BASE_OFS 0x001a0 /* Base address of IC_VEC */
+#define MSC01_IC_VEC_OFS 0x001b0 /* Active int's vector address */
+#define MSC01_IC_EOI_OFS 0x001c0 /* Enable lower level ints */
+#define MSC01_IC_CFG_OFS 0x001c8 /* Configuration register */
+#define MSC01_IC_TRLD_OFS 0x001d0 /* Interval timer reload val */
+#define MSC01_IC_TVAL_OFS 0x001e0 /* Interval timer current val */
+#define MSC01_IC_TCFG_OFS 0x001f0 /* Interval timer config */
+#define MSC01_IC_SUP_OFS 0x00200 /* Set up int_in line 0 */
+#define MSC01_IC_ENA_OFS 0x00800 /* Int_in enable mask 63:0 */
+#define MSC01_IC_DIS_OFS 0x00820 /* Int_in disable mask 63:0 */
+#define MSC01_IC_ISB_OFS 0x00840 /* Raw int_in 63:0 */
+#define MSC01_IC_ISA_OFS 0x00860 /* Masked int_in 63:0 */
/*****************************************************************************
* Register field encodings
*****************************************************************************/
-#define MSC01_IC_RST_RST_SHF 0
-#define MSC01_IC_RST_RST_MSK 0x00000001
-#define MSC01_IC_RST_RST_BIT MSC01_IC_RST_RST_MSK
-#define MSC01_IC_LVL_LVL_SHF 0
-#define MSC01_IC_LVL_LVL_MSK 0x000000ff
-#define MSC01_IC_LVL_SPUR_SHF 16
-#define MSC01_IC_LVL_SPUR_MSK 0x00010000
-#define MSC01_IC_LVL_SPUR_BIT MSC01_IC_LVL_SPUR_MSK
+#define MSC01_IC_RST_RST_SHF 0
+#define MSC01_IC_RST_RST_MSK 0x00000001
+#define MSC01_IC_RST_RST_BIT MSC01_IC_RST_RST_MSK
+#define MSC01_IC_LVL_LVL_SHF 0
+#define MSC01_IC_LVL_LVL_MSK 0x000000ff
+#define MSC01_IC_LVL_SPUR_SHF 16
+#define MSC01_IC_LVL_SPUR_MSK 0x00010000
+#define MSC01_IC_LVL_SPUR_BIT MSC01_IC_LVL_SPUR_MSK
#define MSC01_IC_RAMW_RIPL_SHF 0
#define MSC01_IC_RAMW_RIPL_MSK 0x0000003f
#define MSC01_IC_RAMW_DATA_SHF 6
@@ -63,33 +63,33 @@
#define MSC01_IC_RAMW_READ_SHF 31
#define MSC01_IC_RAMW_READ_MSK 0x80000000
#define MSC01_IC_RAMW_READ_BIT MSC01_IC_RAMW_READ_MSK
-#define MSC01_IC_OSB_OSB_SHF 0
-#define MSC01_IC_OSB_OSB_MSK 0x000000ff
-#define MSC01_IC_OSA_OSA_SHF 0
-#define MSC01_IC_OSA_OSA_MSK 0x000000ff
-#define MSC01_IC_GENA_GENA_SHF 0
-#define MSC01_IC_GENA_GENA_MSK 0x00000001
-#define MSC01_IC_GENA_GENA_BIT MSC01_IC_GENA_GENA_MSK
-#define MSC01_IC_CFG_DIS_SHF 0
-#define MSC01_IC_CFG_DIS_MSK 0x00000001
-#define MSC01_IC_CFG_DIS_BIT MSC01_IC_CFG_DIS_MSK
-#define MSC01_IC_CFG_SHFT_SHF 8
-#define MSC01_IC_CFG_SHFT_MSK 0x00000f00
-#define MSC01_IC_TCFG_ENA_SHF 0
-#define MSC01_IC_TCFG_ENA_MSK 0x00000001
-#define MSC01_IC_TCFG_ENA_BIT MSC01_IC_TCFG_ENA_MSK
-#define MSC01_IC_TCFG_INT_SHF 8
-#define MSC01_IC_TCFG_INT_MSK 0x00000100
-#define MSC01_IC_TCFG_INT_BIT MSC01_IC_TCFG_INT_MSK
-#define MSC01_IC_TCFG_EDGE_SHF 16
-#define MSC01_IC_TCFG_EDGE_MSK 0x00010000
-#define MSC01_IC_TCFG_EDGE_BIT MSC01_IC_TCFG_EDGE_MSK
-#define MSC01_IC_SUP_PRI_SHF 0
-#define MSC01_IC_SUP_PRI_MSK 0x00000007
-#define MSC01_IC_SUP_EDGE_SHF 8
-#define MSC01_IC_SUP_EDGE_MSK 0x00000100
-#define MSC01_IC_SUP_EDGE_BIT MSC01_IC_SUP_EDGE_MSK
-#define MSC01_IC_SUP_STEP 8
+#define MSC01_IC_OSB_OSB_SHF 0
+#define MSC01_IC_OSB_OSB_MSK 0x000000ff
+#define MSC01_IC_OSA_OSA_SHF 0
+#define MSC01_IC_OSA_OSA_MSK 0x000000ff
+#define MSC01_IC_GENA_GENA_SHF 0
+#define MSC01_IC_GENA_GENA_MSK 0x00000001
+#define MSC01_IC_GENA_GENA_BIT MSC01_IC_GENA_GENA_MSK
+#define MSC01_IC_CFG_DIS_SHF 0
+#define MSC01_IC_CFG_DIS_MSK 0x00000001
+#define MSC01_IC_CFG_DIS_BIT MSC01_IC_CFG_DIS_MSK
+#define MSC01_IC_CFG_SHFT_SHF 8
+#define MSC01_IC_CFG_SHFT_MSK 0x00000f00
+#define MSC01_IC_TCFG_ENA_SHF 0
+#define MSC01_IC_TCFG_ENA_MSK 0x00000001
+#define MSC01_IC_TCFG_ENA_BIT MSC01_IC_TCFG_ENA_MSK
+#define MSC01_IC_TCFG_INT_SHF 8
+#define MSC01_IC_TCFG_INT_MSK 0x00000100
+#define MSC01_IC_TCFG_INT_BIT MSC01_IC_TCFG_INT_MSK
+#define MSC01_IC_TCFG_EDGE_SHF 16
+#define MSC01_IC_TCFG_EDGE_MSK 0x00010000
+#define MSC01_IC_TCFG_EDGE_BIT MSC01_IC_TCFG_EDGE_MSK
+#define MSC01_IC_SUP_PRI_SHF 0
+#define MSC01_IC_SUP_PRI_MSK 0x00000007
+#define MSC01_IC_SUP_EDGE_SHF 8
+#define MSC01_IC_SUP_EDGE_MSK 0x00000100
+#define MSC01_IC_SUP_EDGE_BIT MSC01_IC_SUP_EDGE_MSK
+#define MSC01_IC_SUP_STEP 8
/*
* MIPS System controller interrupt register base.
@@ -100,32 +100,32 @@
* Absolute register addresses
*****************************************************************************/
-#define MSC01_IC_RST (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
-#define MSC01_IC_ENAL (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
-#define MSC01_IC_ENAH (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
-#define MSC01_IC_DISL (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
-#define MSC01_IC_DISH (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
-#define MSC01_IC_ISBL (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
-#define MSC01_IC_ISBH (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
-#define MSC01_IC_ISAL (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
-#define MSC01_IC_ISAH (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
-#define MSC01_IC_LVL (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
-#define MSC01_IC_RAMW (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
-#define MSC01_IC_OSB (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
-#define MSC01_IC_OSA (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
-#define MSC01_IC_GENA (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
-#define MSC01_IC_BASE (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
-#define MSC01_IC_VEC (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
-#define MSC01_IC_EOI (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
-#define MSC01_IC_CFG (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
-#define MSC01_IC_TRLD (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
-#define MSC01_IC_TVAL (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
-#define MSC01_IC_TCFG (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
-#define MSC01_IC_SUP (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
-#define MSC01_IC_ENA (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
-#define MSC01_IC_DIS (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
-#define MSC01_IC_ISB (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
-#define MSC01_IC_ISA (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
+#define MSC01_IC_RST (MSC01_IC_REG_BASE + MSC01_IC_RST_OFS)
+#define MSC01_IC_ENAL (MSC01_IC_REG_BASE + MSC01_IC_ENAL_OFS)
+#define MSC01_IC_ENAH (MSC01_IC_REG_BASE + MSC01_IC_ENAH_OFS)
+#define MSC01_IC_DISL (MSC01_IC_REG_BASE + MSC01_IC_DISL_OFS)
+#define MSC01_IC_DISH (MSC01_IC_REG_BASE + MSC01_IC_DISH_OFS)
+#define MSC01_IC_ISBL (MSC01_IC_REG_BASE + MSC01_IC_ISBL_OFS)
+#define MSC01_IC_ISBH (MSC01_IC_REG_BASE + MSC01_IC_ISBH_OFS)
+#define MSC01_IC_ISAL (MSC01_IC_REG_BASE + MSC01_IC_ISAL_OFS)
+#define MSC01_IC_ISAH (MSC01_IC_REG_BASE + MSC01_IC_ISAH_OFS)
+#define MSC01_IC_LVL (MSC01_IC_REG_BASE + MSC01_IC_LVL_OFS)
+#define MSC01_IC_RAMW (MSC01_IC_REG_BASE + MSC01_IC_RAMW_OFS)
+#define MSC01_IC_OSB (MSC01_IC_REG_BASE + MSC01_IC_OSB_OFS)
+#define MSC01_IC_OSA (MSC01_IC_REG_BASE + MSC01_IC_OSA_OFS)
+#define MSC01_IC_GENA (MSC01_IC_REG_BASE + MSC01_IC_GENA_OFS)
+#define MSC01_IC_BASE (MSC01_IC_REG_BASE + MSC01_IC_BASE_OFS)
+#define MSC01_IC_VEC (MSC01_IC_REG_BASE + MSC01_IC_VEC_OFS)
+#define MSC01_IC_EOI (MSC01_IC_REG_BASE + MSC01_IC_EOI_OFS)
+#define MSC01_IC_CFG (MSC01_IC_REG_BASE + MSC01_IC_CFG_OFS)
+#define MSC01_IC_TRLD (MSC01_IC_REG_BASE + MSC01_IC_TRLD_OFS)
+#define MSC01_IC_TVAL (MSC01_IC_REG_BASE + MSC01_IC_TVAL_OFS)
+#define MSC01_IC_TCFG (MSC01_IC_REG_BASE + MSC01_IC_TCFG_OFS)
+#define MSC01_IC_SUP (MSC01_IC_REG_BASE + MSC01_IC_SUP_OFS)
+#define MSC01_IC_ENA (MSC01_IC_REG_BASE + MSC01_IC_ENA_OFS)
+#define MSC01_IC_DIS (MSC01_IC_REG_BASE + MSC01_IC_DIS_OFS)
+#define MSC01_IC_ISB (MSC01_IC_REG_BASE + MSC01_IC_ISB_OFS)
+#define MSC01_IC_ISA (MSC01_IC_REG_BASE + MSC01_IC_ISA_OFS)
/*
* Soc-it interrupts are configurable.
diff --git a/arch/mips/include/asm/netlogic/common.h b/arch/mips/include/asm/netlogic/common.h
index 42bfd5f1eee..aef560a51a7 100644
--- a/arch/mips/include/asm/netlogic/common.h
+++ b/arch/mips/include/asm/netlogic/common.h
@@ -38,11 +38,11 @@
/*
* Common SMP definitions
*/
-#define RESET_VEC_PHYS 0x1fc00000
-#define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10))
-#define BOOT_THREAD_MODE 0
-#define BOOT_NMI_LOCK 4
-#define BOOT_NMI_HANDLER 8
+#define RESET_VEC_PHYS 0x1fc00000
+#define RESET_DATA_PHYS (RESET_VEC_PHYS + (1<<10))
+#define BOOT_THREAD_MODE 0
+#define BOOT_NMI_LOCK 4
+#define BOOT_NMI_HANDLER 8
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
@@ -80,7 +80,7 @@ extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
struct nlm_soc_info {
- unsigned long coremask; /* cores enabled on the soc */
+ unsigned long coremask; /* cores enabled on the soc */
unsigned long ebase;
uint64_t irqmask;
uint64_t sysbase; /* only for XLP */
@@ -88,9 +88,9 @@ struct nlm_soc_info {
spinlock_t piclock;
};
-#define nlm_get_node(i) (&nlm_nodes[i])
+#define nlm_get_node(i) (&nlm_nodes[i])
#ifdef CONFIG_CPU_XLR
-#define nlm_current_node() (&nlm_nodes[0])
+#define nlm_current_node() (&nlm_nodes[0])
#else
#define nlm_current_node() (&nlm_nodes[nlm_nodeid()])
#endif
diff --git a/arch/mips/include/asm/netlogic/haldefs.h b/arch/mips/include/asm/netlogic/haldefs.h
index 72a0c788b47..419d8aef856 100644
--- a/arch/mips/include/asm/netlogic/haldefs.h
+++ b/arch/mips/include/asm/netlogic/haldefs.h
@@ -48,7 +48,7 @@
* access 64 bit addresses or data.
*
* We need to disable interrupts because we save just the lower 32 bits of
- * registers in interrupt handling. So if we get hit by an interrupt while
+ * registers in interrupt handling. So if we get hit by an interrupt while
* using the upper 32 bits of a register, we lose.
*/
static inline uint32_t nlm_save_flags_kx(void)
diff --git a/arch/mips/include/asm/netlogic/mips-extns.h b/arch/mips/include/asm/netlogic/mips-extns.h
index 32ba6d95d47..8ad2e0f8171 100644
--- a/arch/mips/include/asm/netlogic/mips-extns.h
+++ b/arch/mips/include/asm/netlogic/mips-extns.h
@@ -49,7 +49,7 @@
*/
#define write_c0_eimr(val) \
do { \
- if (sizeof(unsigned long) == 4) { \
+ if (sizeof(unsigned long) == 4) { \
unsigned long __flags; \
\
local_irq_save(__flags); \
@@ -68,6 +68,85 @@ do { \
__write_64bit_c0_register($9, 7, (val)); \
} while (0)
+/*
+ * Handling the 64-bit EIMR and EIRR registers in 32-bit mode with
+ * the standard functions would be very inefficient, so this provides
+ * optimized functions for the common operations on these registers.
+ *
+ * Call with interrupts disabled.
+ */
+static inline void ack_c0_eirr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv $1, $1, %0\n\t"
+ "dmtc0 $1, $9, 6\n\t"
+ ".set pop"
+ : : "r" (irq));
+}
+
+static inline void set_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
+static inline void clear_c0_eimr(int irq)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "li $1, 1\n\t"
+ "dsllv %0, $1, %0\n\t"
+ "dmfc0 $1, $9, 7\n\t"
+ "or $1, %0\n\t"
+ "xor $1, %0\n\t"
+ "dmtc0 $1, $9, 7\n\t"
+ ".set pop"
+ : "+r" (irq));
+}
+
+/*
+ * Read c0 eimr and c0 eirr and AND the two values; the result is
+ * the set of interrupts which are raised and not masked.
+ */
+static inline uint64_t read_c0_eirr_and_eimr(void)
+{
+ uint64_t val;
+
+#ifdef CONFIG_64BIT
+ val = read_c0_eimr() & read_c0_eirr();
+#else
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set mips64\n\t"
+ ".set noat\n\t"
+ "dmfc0 %M0, $9, 6\n\t"
+ "dmfc0 %L0, $9, 7\n\t"
+ "and %M0, %L0\n\t"
+ "dsll %L0, %M0, 32\n\t"
+ "dsra %M0, %M0, 32\n\t"
+ "dsra %L0, %L0, 32\n\t"
+ ".set pop"
+ : "=r" (val));
+#endif
+
+ return val;
+}
+
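The new ack_c0_eirr()/set_c0_eimr()/clear_c0_eimr()/read_c0_eirr_and_eimr() helpers above manipulate the 64-bit EIRR/EIMR pair without relying on 64-bit C arithmetic on 32-bit kernels. A hedged sketch of how a top-level dispatcher could use them, assuming interrupts are already disabled as the comment requires; __ffs64() and do_IRQ() are standard kernel helpers, and the IRQ numbering here is a placeholder, not taken from this diff:

	static void example_nlm_dispatch(void)
	{
		uint64_t pending;
		int bit;

		/* raised-and-unmasked interrupt sources */
		pending = read_c0_eirr_and_eimr();
		while (pending) {
			bit = __ffs64(pending);
			pending &= pending - 1;	/* drop it from the local copy */
			ack_c0_eirr(bit);	/* clear the EIRR bit */
			do_IRQ(bit);		/* placeholder irq number */
		}
	}
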
static inline int hard_smp_processor_id(void)
{
return __read_32bit_c0_register($15, 1) & 0x3ff;
@@ -208,7 +287,7 @@ do { \
".set\tmips0\n\t" \
: : "Jr" (value)); \
else \
- __asm__ __volatile__( \
+ __asm__ __volatile__( \
".set\tmips32\n\t" \
"mtc2\t%z0, " #reg ", " #sel "\n\t" \
".set\tmips0\n\t" \
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
index ca95133f1ad..790f0f1e55c 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/bridge.h
@@ -178,9 +178,9 @@
#define nlm_read_bridge_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_bridge_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_bridge_pcibase(node) \
+#define nlm_get_bridge_pcibase(node) \
nlm_pcicfg_base(XLP_IO_BRIDGE_OFFSET(node))
-#define nlm_get_bridge_regbase(node) \
+#define nlm_get_bridge_regbase(node) \
(nlm_get_bridge_pcibase(node) + XLP_IO_PCI_HDRSZ)
#endif /* __ASSEMBLY__ */
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
index 7b63a6b722a..6d2e58a9a54 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/cpucontrol.h
@@ -46,6 +46,8 @@
#define CPU_BLOCKID_FPU 9
#define CPU_BLOCKID_MAP 10
+#define ICU_DEFEATURE 0x100
+
#define LSU_DEFEATURE 0x304
#define LSU_DEBUG_ADDR 0x305
#define LSU_DEBUG_DATA0 0x306
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
index 2c63f975464..9fac46fb791 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/iomap.h
@@ -35,12 +35,12 @@
#ifndef __NLM_HAL_IOMAP_H__
#define __NLM_HAL_IOMAP_H__
-#define XLP_DEFAULT_IO_BASE 0x18000000
+#define XLP_DEFAULT_IO_BASE 0x18000000
#define XLP_DEFAULT_PCI_ECFG_BASE XLP_DEFAULT_IO_BASE
#define XLP_DEFAULT_PCI_CFG_BASE 0x1c000000
#define NMI_BASE 0xbfc00000
-#define XLP_IO_CLK 133333333
+#define XLP_IO_CLK 133333333
#define XLP_PCIE_CFG_SIZE 0x1000 /* 4K */
#define XLP_PCIE_DEV_BLK_SIZE (8 * XLP_PCIE_CFG_SIZE)
@@ -96,8 +96,8 @@
#define XLP_IO_NAND_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 1)
#define XLP_IO_SPI_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 2)
/* SD flash */
-#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
-#define XLP_IO_MMC_OFFSET(node, slot) \
+#define XLP_IO_SD_OFFSET(node) XLP_HDR_OFFSET(node, 0, 7, 3)
+#define XLP_IO_MMC_OFFSET(node, slot) \
((XLP_IO_SD_OFFSET(node))+(slot*0x100)+XLP_IO_PCI_HDRSZ)
/* PCI config header register id's */
@@ -125,26 +125,26 @@
#define XLP_PCI_SBB_WT_REG 0x3f
/* PCI IDs for SoC device */
-#define PCI_VENDOR_NETLOGIC 0x184e
-
-#define PCI_DEVICE_ID_NLM_ROOT 0x1001
-#define PCI_DEVICE_ID_NLM_ICI 0x1002
-#define PCI_DEVICE_ID_NLM_PIC 0x1003
-#define PCI_DEVICE_ID_NLM_PCIE 0x1004
-#define PCI_DEVICE_ID_NLM_EHCI 0x1007
-#define PCI_DEVICE_ID_NLM_OHCI 0x1008
-#define PCI_DEVICE_ID_NLM_NAE 0x1009
-#define PCI_DEVICE_ID_NLM_POE 0x100A
-#define PCI_DEVICE_ID_NLM_FMN 0x100B
-#define PCI_DEVICE_ID_NLM_RAID 0x100D
-#define PCI_DEVICE_ID_NLM_SAE 0x100D
-#define PCI_DEVICE_ID_NLM_RSA 0x100E
-#define PCI_DEVICE_ID_NLM_CMP 0x100F
-#define PCI_DEVICE_ID_NLM_UART 0x1010
-#define PCI_DEVICE_ID_NLM_I2C 0x1011
-#define PCI_DEVICE_ID_NLM_NOR 0x1015
-#define PCI_DEVICE_ID_NLM_NAND 0x1016
-#define PCI_DEVICE_ID_NLM_MMC 0x1018
+#define PCI_VENDOR_NETLOGIC 0x184e
+
+#define PCI_DEVICE_ID_NLM_ROOT 0x1001
+#define PCI_DEVICE_ID_NLM_ICI 0x1002
+#define PCI_DEVICE_ID_NLM_PIC 0x1003
+#define PCI_DEVICE_ID_NLM_PCIE 0x1004
+#define PCI_DEVICE_ID_NLM_EHCI 0x1007
+#define PCI_DEVICE_ID_NLM_OHCI 0x1008
+#define PCI_DEVICE_ID_NLM_NAE 0x1009
+#define PCI_DEVICE_ID_NLM_POE 0x100A
+#define PCI_DEVICE_ID_NLM_FMN 0x100B
+#define PCI_DEVICE_ID_NLM_RAID 0x100D
+#define PCI_DEVICE_ID_NLM_SAE 0x100D
+#define PCI_DEVICE_ID_NLM_RSA 0x100E
+#define PCI_DEVICE_ID_NLM_CMP 0x100F
+#define PCI_DEVICE_ID_NLM_UART 0x1010
+#define PCI_DEVICE_ID_NLM_I2C 0x1011
+#define PCI_DEVICE_ID_NLM_NOR 0x1015
+#define PCI_DEVICE_ID_NLM_NAND 0x1016
+#define PCI_DEVICE_ID_NLM_MMC 0x1018
#ifndef __ASSEMBLY__
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
index 66c323d1bd7..b559cb9f56e 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pcibus.h
@@ -33,42 +33,42 @@
*/
#ifndef __NLM_HAL_PCIBUS_H__
-#define __NLM_HAL_PCIBUS_H__
+#define __NLM_HAL_PCIBUS_H__
/* PCIE Memory and IO regions */
-#define PCIE_MEM_BASE 0xd0000000ULL
-#define PCIE_MEM_LIMIT 0xdfffffffULL
-#define PCIE_IO_BASE 0x14000000ULL
-#define PCIE_IO_LIMIT 0x15ffffffULL
+#define PCIE_MEM_BASE 0xd0000000ULL
+#define PCIE_MEM_LIMIT 0xdfffffffULL
+#define PCIE_IO_BASE 0x14000000ULL
+#define PCIE_IO_LIMIT 0x15ffffffULL
-#define PCIE_BRIDGE_CMD 0x1
-#define PCIE_BRIDGE_MSI_CAP 0x14
-#define PCIE_BRIDGE_MSI_ADDRL 0x15
-#define PCIE_BRIDGE_MSI_ADDRH 0x16
-#define PCIE_BRIDGE_MSI_DATA 0x17
+#define PCIE_BRIDGE_CMD 0x1
+#define PCIE_BRIDGE_MSI_CAP 0x14
+#define PCIE_BRIDGE_MSI_ADDRL 0x15
+#define PCIE_BRIDGE_MSI_ADDRH 0x16
+#define PCIE_BRIDGE_MSI_DATA 0x17
/* XLP Global PCIE configuration space registers */
-#define PCIE_BYTE_SWAP_MEM_BASE 0x247
-#define PCIE_BYTE_SWAP_MEM_LIM 0x248
-#define PCIE_BYTE_SWAP_IO_BASE 0x249
-#define PCIE_BYTE_SWAP_IO_LIM 0x24A
-#define PCIE_MSI_STATUS 0x25A
-#define PCIE_MSI_EN 0x25B
-#define PCIE_INT_EN0 0x261
+#define PCIE_BYTE_SWAP_MEM_BASE 0x247
+#define PCIE_BYTE_SWAP_MEM_LIM 0x248
+#define PCIE_BYTE_SWAP_IO_BASE 0x249
+#define PCIE_BYTE_SWAP_IO_LIM 0x24A
+#define PCIE_MSI_STATUS 0x25A
+#define PCIE_MSI_EN 0x25B
+#define PCIE_INT_EN0 0x261
/* PCIE_MSI_EN */
-#define PCIE_MSI_VECTOR_INT_EN 0xFFFFFFFF
+#define PCIE_MSI_VECTOR_INT_EN 0xFFFFFFFF
/* PCIE_INT_EN0 */
-#define PCIE_MSI_INT_EN (1 << 9)
+#define PCIE_MSI_INT_EN (1 << 9)
#ifndef __ASSEMBLY__
-#define nlm_read_pcie_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_pcie_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_pcie_base(node, inst) \
+#define nlm_read_pcie_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_pcie_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_pcie_base(node, inst) \
nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, inst))
-#define nlm_get_pcie_regbase(node, inst) \
+#define nlm_get_pcie_regbase(node, inst) \
(nlm_get_pcie_base(node, inst) + XLP_IO_PCI_HDRSZ)
int xlp_pcie_link_irt(int link);
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
index b2e53a5383a..3df53017fe5 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/pic.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/pic.h
@@ -36,7 +36,7 @@
#define _NLM_HAL_PIC_H
/* PIC Specific registers */
-#define PIC_CTRL 0x00
+#define PIC_CTRL 0x00
/* PIC control register defines */
#define PIC_CTRL_ITV 32 /* interrupt timeout value */
@@ -71,41 +71,41 @@
#define PIC_IRT_DB 16 /* Destination base */
#define PIC_IRT_DTE 0 /* Destination thread enables */
-#define PIC_BYTESWAP 0x02
-#define PIC_STATUS 0x04
+#define PIC_BYTESWAP 0x02
+#define PIC_STATUS 0x04
#define PIC_INTR_TIMEOUT 0x06
#define PIC_ICI0_INTR_TIMEOUT 0x08
#define PIC_ICI1_INTR_TIMEOUT 0x0a
#define PIC_ICI2_INTR_TIMEOUT 0x0c
#define PIC_IPI_CTL 0x0e
-#define PIC_INT_ACK 0x10
-#define PIC_INT_PENDING0 0x12
-#define PIC_INT_PENDING1 0x14
-#define PIC_INT_PENDING2 0x16
-
-#define PIC_WDOG0_MAXVAL 0x18
-#define PIC_WDOG0_COUNT 0x1a
-#define PIC_WDOG0_ENABLE0 0x1c
-#define PIC_WDOG0_ENABLE1 0x1e
-#define PIC_WDOG0_BEATCMD 0x20
-#define PIC_WDOG0_BEAT0 0x22
-#define PIC_WDOG0_BEAT1 0x24
-
-#define PIC_WDOG1_MAXVAL 0x26
-#define PIC_WDOG1_COUNT 0x28
-#define PIC_WDOG1_ENABLE0 0x2a
-#define PIC_WDOG1_ENABLE1 0x2c
-#define PIC_WDOG1_BEATCMD 0x2e
-#define PIC_WDOG1_BEAT0 0x30
-#define PIC_WDOG1_BEAT1 0x32
-
-#define PIC_WDOG_MAXVAL(i) (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
-#define PIC_WDOG_COUNT(i) (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
-#define PIC_WDOG_ENABLE0(i) (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
-#define PIC_WDOG_ENABLE1(i) (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
-#define PIC_WDOG_BEATCMD(i) (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
-#define PIC_WDOG_BEAT0(i) (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
-#define PIC_WDOG_BEAT1(i) (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
+#define PIC_INT_ACK 0x10
+#define PIC_INT_PENDING0 0x12
+#define PIC_INT_PENDING1 0x14
+#define PIC_INT_PENDING2 0x16
+
+#define PIC_WDOG0_MAXVAL 0x18
+#define PIC_WDOG0_COUNT 0x1a
+#define PIC_WDOG0_ENABLE0 0x1c
+#define PIC_WDOG0_ENABLE1 0x1e
+#define PIC_WDOG0_BEATCMD 0x20
+#define PIC_WDOG0_BEAT0 0x22
+#define PIC_WDOG0_BEAT1 0x24
+
+#define PIC_WDOG1_MAXVAL 0x26
+#define PIC_WDOG1_COUNT 0x28
+#define PIC_WDOG1_ENABLE0 0x2a
+#define PIC_WDOG1_ENABLE1 0x2c
+#define PIC_WDOG1_BEATCMD 0x2e
+#define PIC_WDOG1_BEAT0 0x30
+#define PIC_WDOG1_BEAT1 0x32
+
+#define PIC_WDOG_MAXVAL(i) (PIC_WDOG0_MAXVAL + ((i) ? 7 : 0))
+#define PIC_WDOG_COUNT(i) (PIC_WDOG0_COUNT + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE0(i) (PIC_WDOG0_ENABLE0 + ((i) ? 7 : 0))
+#define PIC_WDOG_ENABLE1(i) (PIC_WDOG0_ENABLE1 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEATCMD(i) (PIC_WDOG0_BEATCMD + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT0(i) (PIC_WDOG0_BEAT0 + ((i) ? 7 : 0))
+#define PIC_WDOG_BEAT1(i) (PIC_WDOG0_BEAT1 + ((i) ? 7 : 0))
#define PIC_TIMER0_MAXVAL 0x34
#define PIC_TIMER1_MAXVAL 0x36
@@ -127,28 +127,28 @@
#define PIC_TIMER7_COUNT 0x52
#define PIC_TIMER_COUNT(i) (PIC_TIMER0_COUNT + ((i) * 2))
-#define PIC_ITE0_N0_N1 0x54
-#define PIC_ITE1_N0_N1 0x58
-#define PIC_ITE2_N0_N1 0x5c
-#define PIC_ITE3_N0_N1 0x60
-#define PIC_ITE4_N0_N1 0x64
-#define PIC_ITE5_N0_N1 0x68
-#define PIC_ITE6_N0_N1 0x6c
-#define PIC_ITE7_N0_N1 0x70
-#define PIC_ITE_N0_N1(i) (PIC_ITE0_N0_N1 + ((i) * 4))
-
-#define PIC_ITE0_N2_N3 0x56
-#define PIC_ITE1_N2_N3 0x5a
-#define PIC_ITE2_N2_N3 0x5e
-#define PIC_ITE3_N2_N3 0x62
-#define PIC_ITE4_N2_N3 0x66
-#define PIC_ITE5_N2_N3 0x6a
-#define PIC_ITE6_N2_N3 0x6e
-#define PIC_ITE7_N2_N3 0x72
-#define PIC_ITE_N2_N3(i) (PIC_ITE0_N2_N3 + ((i) * 4))
-
-#define PIC_IRT0 0x74
-#define PIC_IRT(i) (PIC_IRT0 + ((i) * 2))
+#define PIC_ITE0_N0_N1 0x54
+#define PIC_ITE1_N0_N1 0x58
+#define PIC_ITE2_N0_N1 0x5c
+#define PIC_ITE3_N0_N1 0x60
+#define PIC_ITE4_N0_N1 0x64
+#define PIC_ITE5_N0_N1 0x68
+#define PIC_ITE6_N0_N1 0x6c
+#define PIC_ITE7_N0_N1 0x70
+#define PIC_ITE_N0_N1(i) (PIC_ITE0_N0_N1 + ((i) * 4))
+
+#define PIC_ITE0_N2_N3 0x56
+#define PIC_ITE1_N2_N3 0x5a
+#define PIC_ITE2_N2_N3 0x5e
+#define PIC_ITE3_N2_N3 0x62
+#define PIC_ITE4_N2_N3 0x66
+#define PIC_ITE5_N2_N3 0x6a
+#define PIC_ITE6_N2_N3 0x6e
+#define PIC_ITE7_N2_N3 0x72
+#define PIC_ITE_N2_N3(i) (PIC_ITE0_N2_N3 + ((i) * 4))
+
+#define PIC_IRT0 0x74
+#define PIC_IRT(i) (PIC_IRT0 + ((i) * 2))
#define TIMER_CYCLES_MAXVAL 0xffffffffffffffffULL
@@ -261,6 +261,8 @@
#define PIC_LOCAL_SCHEDULING 1
#define PIC_GLOBAL_SCHEDULING 0
+#define PIC_CLK_HZ 133333333
+
#define nlm_read_pic_reg(b, r) nlm_read_reg64(b, r)
#define nlm_write_pic_reg(b, r, v) nlm_write_reg64(b, r, v)
#define nlm_get_pic_pcibase(node) nlm_pcicfg_base(XLP_IO_PIC_OFFSET(node))
@@ -315,6 +317,12 @@ nlm_pic_read_timer(uint64_t base, int timer)
return nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
}
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+ return (uint32_t)nlm_read_pic_reg(base, PIC_TIMER_COUNT(timer));
+}
+
static inline void
nlm_pic_write_timer(uint64_t base, int timer, uint64_t value)
{
@@ -376,9 +384,9 @@ nlm_pic_ack(uint64_t base, int irt_num)
}
static inline void
-nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
{
- nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt);
+ nlm_pic_write_irt_direct(base, irt, en, 0, 0, irq, hwt);
}
int nlm_irq_to_irt(int irq);
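nlm_pic_init_irt() now takes an explicit enable flag that is written straight into the IRT entry, so the caller decides whether the routing entry starts out enabled. A minimal sketch with illustrative argument names (the wrapper itself is hypothetical, not part of this diff):

	/* Route IRT entry 'irt' to hardware thread 'hwt' as interrupt 'irq',
	 * leaving the entry disabled until it is explicitly enabled later. */
	static inline void example_setup_irt(uint64_t picbase, int irt, int irq, int hwt)
	{
		nlm_pic_init_irt(picbase, irt, irq, hwt, 0);
	}
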
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/sys.h b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
index 258e8cc00e9..470e52bfc06 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/sys.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/sys.h
@@ -40,89 +40,89 @@
* @author Netlogic Microsystems
* @brief HAL for System configuration registers
*/
-#define SYS_CHIP_RESET 0x00
-#define SYS_POWER_ON_RESET_CFG 0x01
-#define SYS_EFUSE_DEVICE_CFG_STATUS0 0x02
-#define SYS_EFUSE_DEVICE_CFG_STATUS1 0x03
-#define SYS_EFUSE_DEVICE_CFG_STATUS2 0x04
-#define SYS_EFUSE_DEVICE_CFG3 0x05
-#define SYS_EFUSE_DEVICE_CFG4 0x06
-#define SYS_EFUSE_DEVICE_CFG5 0x07
-#define SYS_EFUSE_DEVICE_CFG6 0x08
-#define SYS_EFUSE_DEVICE_CFG7 0x09
-#define SYS_PLL_CTRL 0x0a
-#define SYS_CPU_RESET 0x0b
-#define SYS_CPU_NONCOHERENT_MODE 0x0d
-#define SYS_CORE_DFS_DIS_CTRL 0x0e
-#define SYS_CORE_DFS_RST_CTRL 0x0f
-#define SYS_CORE_DFS_BYP_CTRL 0x10
-#define SYS_CORE_DFS_PHA_CTRL 0x11
-#define SYS_CORE_DFS_DIV_INC_CTRL 0x12
-#define SYS_CORE_DFS_DIV_DEC_CTRL 0x13
-#define SYS_CORE_DFS_DIV_VALUE 0x14
-#define SYS_RESET 0x15
-#define SYS_DFS_DIS_CTRL 0x16
-#define SYS_DFS_RST_CTRL 0x17
-#define SYS_DFS_BYP_CTRL 0x18
-#define SYS_DFS_DIV_INC_CTRL 0x19
-#define SYS_DFS_DIV_DEC_CTRL 0x1a
-#define SYS_DFS_DIV_VALUE0 0x1b
-#define SYS_DFS_DIV_VALUE1 0x1c
-#define SYS_SENSE_AMP_DLY 0x1d
-#define SYS_SOC_SENSE_AMP_DLY 0x1e
-#define SYS_CTRL0 0x1f
-#define SYS_CTRL1 0x20
-#define SYS_TIMEOUT_BS1 0x21
-#define SYS_BYTE_SWAP 0x22
-#define SYS_VRM_VID 0x23
-#define SYS_PWR_RAM_CMD 0x24
-#define SYS_PWR_RAM_ADDR 0x25
-#define SYS_PWR_RAM_DATA0 0x26
-#define SYS_PWR_RAM_DATA1 0x27
-#define SYS_PWR_RAM_DATA2 0x28
-#define SYS_PWR_UCODE 0x29
-#define SYS_CPU0_PWR_STATUS 0x2a
-#define SYS_CPU1_PWR_STATUS 0x2b
-#define SYS_CPU2_PWR_STATUS 0x2c
-#define SYS_CPU3_PWR_STATUS 0x2d
-#define SYS_CPU4_PWR_STATUS 0x2e
-#define SYS_CPU5_PWR_STATUS 0x2f
-#define SYS_CPU6_PWR_STATUS 0x30
-#define SYS_CPU7_PWR_STATUS 0x31
-#define SYS_STATUS 0x32
-#define SYS_INT_POL 0x33
-#define SYS_INT_TYPE 0x34
-#define SYS_INT_STATUS 0x35
-#define SYS_INT_MASK0 0x36
-#define SYS_INT_MASK1 0x37
-#define SYS_UCO_S_ECC 0x38
-#define SYS_UCO_M_ECC 0x39
-#define SYS_UCO_ADDR 0x3a
-#define SYS_UCO_INSTR 0x3b
-#define SYS_MEM_BIST0 0x3c
-#define SYS_MEM_BIST1 0x3d
-#define SYS_MEM_BIST2 0x3e
-#define SYS_MEM_BIST3 0x3f
-#define SYS_MEM_BIST4 0x40
-#define SYS_MEM_BIST5 0x41
-#define SYS_MEM_BIST6 0x42
-#define SYS_MEM_BIST7 0x43
-#define SYS_MEM_BIST8 0x44
-#define SYS_MEM_BIST9 0x45
-#define SYS_MEM_BIST10 0x46
-#define SYS_MEM_BIST11 0x47
-#define SYS_MEM_BIST12 0x48
-#define SYS_SCRTCH0 0x49
-#define SYS_SCRTCH1 0x4a
-#define SYS_SCRTCH2 0x4b
-#define SYS_SCRTCH3 0x4c
+#define SYS_CHIP_RESET 0x00
+#define SYS_POWER_ON_RESET_CFG 0x01
+#define SYS_EFUSE_DEVICE_CFG_STATUS0 0x02
+#define SYS_EFUSE_DEVICE_CFG_STATUS1 0x03
+#define SYS_EFUSE_DEVICE_CFG_STATUS2 0x04
+#define SYS_EFUSE_DEVICE_CFG3 0x05
+#define SYS_EFUSE_DEVICE_CFG4 0x06
+#define SYS_EFUSE_DEVICE_CFG5 0x07
+#define SYS_EFUSE_DEVICE_CFG6 0x08
+#define SYS_EFUSE_DEVICE_CFG7 0x09
+#define SYS_PLL_CTRL 0x0a
+#define SYS_CPU_RESET 0x0b
+#define SYS_CPU_NONCOHERENT_MODE 0x0d
+#define SYS_CORE_DFS_DIS_CTRL 0x0e
+#define SYS_CORE_DFS_RST_CTRL 0x0f
+#define SYS_CORE_DFS_BYP_CTRL 0x10
+#define SYS_CORE_DFS_PHA_CTRL 0x11
+#define SYS_CORE_DFS_DIV_INC_CTRL 0x12
+#define SYS_CORE_DFS_DIV_DEC_CTRL 0x13
+#define SYS_CORE_DFS_DIV_VALUE 0x14
+#define SYS_RESET 0x15
+#define SYS_DFS_DIS_CTRL 0x16
+#define SYS_DFS_RST_CTRL 0x17
+#define SYS_DFS_BYP_CTRL 0x18
+#define SYS_DFS_DIV_INC_CTRL 0x19
+#define SYS_DFS_DIV_DEC_CTRL 0x1a
+#define SYS_DFS_DIV_VALUE0 0x1b
+#define SYS_DFS_DIV_VALUE1 0x1c
+#define SYS_SENSE_AMP_DLY 0x1d
+#define SYS_SOC_SENSE_AMP_DLY 0x1e
+#define SYS_CTRL0 0x1f
+#define SYS_CTRL1 0x20
+#define SYS_TIMEOUT_BS1 0x21
+#define SYS_BYTE_SWAP 0x22
+#define SYS_VRM_VID 0x23
+#define SYS_PWR_RAM_CMD 0x24
+#define SYS_PWR_RAM_ADDR 0x25
+#define SYS_PWR_RAM_DATA0 0x26
+#define SYS_PWR_RAM_DATA1 0x27
+#define SYS_PWR_RAM_DATA2 0x28
+#define SYS_PWR_UCODE 0x29
+#define SYS_CPU0_PWR_STATUS 0x2a
+#define SYS_CPU1_PWR_STATUS 0x2b
+#define SYS_CPU2_PWR_STATUS 0x2c
+#define SYS_CPU3_PWR_STATUS 0x2d
+#define SYS_CPU4_PWR_STATUS 0x2e
+#define SYS_CPU5_PWR_STATUS 0x2f
+#define SYS_CPU6_PWR_STATUS 0x30
+#define SYS_CPU7_PWR_STATUS 0x31
+#define SYS_STATUS 0x32
+#define SYS_INT_POL 0x33
+#define SYS_INT_TYPE 0x34
+#define SYS_INT_STATUS 0x35
+#define SYS_INT_MASK0 0x36
+#define SYS_INT_MASK1 0x37
+#define SYS_UCO_S_ECC 0x38
+#define SYS_UCO_M_ECC 0x39
+#define SYS_UCO_ADDR 0x3a
+#define SYS_UCO_INSTR 0x3b
+#define SYS_MEM_BIST0 0x3c
+#define SYS_MEM_BIST1 0x3d
+#define SYS_MEM_BIST2 0x3e
+#define SYS_MEM_BIST3 0x3f
+#define SYS_MEM_BIST4 0x40
+#define SYS_MEM_BIST5 0x41
+#define SYS_MEM_BIST6 0x42
+#define SYS_MEM_BIST7 0x43
+#define SYS_MEM_BIST8 0x44
+#define SYS_MEM_BIST9 0x45
+#define SYS_MEM_BIST10 0x46
+#define SYS_MEM_BIST11 0x47
+#define SYS_MEM_BIST12 0x48
+#define SYS_SCRTCH0 0x49
+#define SYS_SCRTCH1 0x4a
+#define SYS_SCRTCH2 0x4b
+#define SYS_SCRTCH3 0x4c
#ifndef __ASSEMBLY__
-#define nlm_read_sys_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_sys_reg(b, r, v) nlm_write_reg(b, r, v)
-#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
-#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
+#define nlm_read_sys_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sys_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
+#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
#endif
#endif
diff --git a/arch/mips/include/asm/netlogic/xlp-hal/uart.h b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
index 6a7046ca094..86d16e1e607 100644
--- a/arch/mips/include/asm/netlogic/xlp-hal/uart.h
+++ b/arch/mips/include/asm/netlogic/xlp-hal/uart.h
@@ -91,8 +91,8 @@
#if !defined(LOCORE) && !defined(__ASSEMBLY__)
-#define nlm_read_uart_reg(b, r) nlm_read_reg(b, r)
-#define nlm_write_uart_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_read_uart_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_uart_reg(b, r, v) nlm_write_reg(b, r, v)
#define nlm_get_uart_pcibase(node, inst) \
nlm_pcicfg_base(XLP_IO_UART_OFFSET(node, inst))
#define nlm_get_uart_regbase(node, inst) \
diff --git a/arch/mips/include/asm/netlogic/xlr/fmn.h b/arch/mips/include/asm/netlogic/xlr/fmn.h
index 68d5167c86b..2a78929cef7 100644
--- a/arch/mips/include/asm/netlogic/xlr/fmn.h
+++ b/arch/mips/include/asm/netlogic/xlr/fmn.h
@@ -38,108 +38,108 @@
#include <asm/netlogic/mips-extns.h> /* for COP2 access */
/* Station IDs */
-#define FMN_STNID_CPU0 0x00
-#define FMN_STNID_CPU1 0x08
-#define FMN_STNID_CPU2 0x10
-#define FMN_STNID_CPU3 0x18
-#define FMN_STNID_CPU4 0x20
-#define FMN_STNID_CPU5 0x28
-#define FMN_STNID_CPU6 0x30
-#define FMN_STNID_CPU7 0x38
-
-#define FMN_STNID_XGS0_TX 64
-#define FMN_STNID_XMAC0_00_TX 64
-#define FMN_STNID_XMAC0_01_TX 65
-#define FMN_STNID_XMAC0_02_TX 66
-#define FMN_STNID_XMAC0_03_TX 67
-#define FMN_STNID_XMAC0_04_TX 68
-#define FMN_STNID_XMAC0_05_TX 69
-#define FMN_STNID_XMAC0_06_TX 70
-#define FMN_STNID_XMAC0_07_TX 71
-#define FMN_STNID_XMAC0_08_TX 72
-#define FMN_STNID_XMAC0_09_TX 73
-#define FMN_STNID_XMAC0_10_TX 74
-#define FMN_STNID_XMAC0_11_TX 75
-#define FMN_STNID_XMAC0_12_TX 76
-#define FMN_STNID_XMAC0_13_TX 77
-#define FMN_STNID_XMAC0_14_TX 78
-#define FMN_STNID_XMAC0_15_TX 79
-
-#define FMN_STNID_XGS1_TX 80
-#define FMN_STNID_XMAC1_00_TX 80
-#define FMN_STNID_XMAC1_01_TX 81
-#define FMN_STNID_XMAC1_02_TX 82
-#define FMN_STNID_XMAC1_03_TX 83
-#define FMN_STNID_XMAC1_04_TX 84
-#define FMN_STNID_XMAC1_05_TX 85
-#define FMN_STNID_XMAC1_06_TX 86
-#define FMN_STNID_XMAC1_07_TX 87
-#define FMN_STNID_XMAC1_08_TX 88
-#define FMN_STNID_XMAC1_09_TX 89
-#define FMN_STNID_XMAC1_10_TX 90
-#define FMN_STNID_XMAC1_11_TX 91
-#define FMN_STNID_XMAC1_12_TX 92
-#define FMN_STNID_XMAC1_13_TX 93
-#define FMN_STNID_XMAC1_14_TX 94
-#define FMN_STNID_XMAC1_15_TX 95
-
-#define FMN_STNID_GMAC 96
-#define FMN_STNID_GMACJFR_0 96
-#define FMN_STNID_GMACRFR_0 97
-#define FMN_STNID_GMACTX0 98
-#define FMN_STNID_GMACTX1 99
-#define FMN_STNID_GMACTX2 100
-#define FMN_STNID_GMACTX3 101
-#define FMN_STNID_GMACJFR_1 102
-#define FMN_STNID_GMACRFR_1 103
-
-#define FMN_STNID_DMA 104
-#define FMN_STNID_DMA_0 104
-#define FMN_STNID_DMA_1 105
-#define FMN_STNID_DMA_2 106
-#define FMN_STNID_DMA_3 107
-
-#define FMN_STNID_XGS0FR 112
-#define FMN_STNID_XMAC0JFR 112
-#define FMN_STNID_XMAC0RFR 113
-
-#define FMN_STNID_XGS1FR 114
-#define FMN_STNID_XMAC1JFR 114
-#define FMN_STNID_XMAC1RFR 115
-#define FMN_STNID_SEC 120
-#define FMN_STNID_SEC0 120
-#define FMN_STNID_SEC1 121
-#define FMN_STNID_SEC2 122
-#define FMN_STNID_SEC3 123
-#define FMN_STNID_PK0 124
-#define FMN_STNID_SEC_RSA 124
-#define FMN_STNID_SEC_RSVD0 125
-#define FMN_STNID_SEC_RSVD1 126
-#define FMN_STNID_SEC_RSVD2 127
-
-#define FMN_STNID_GMAC1 80
-#define FMN_STNID_GMAC1_FR_0 81
-#define FMN_STNID_GMAC1_TX0 82
-#define FMN_STNID_GMAC1_TX1 83
-#define FMN_STNID_GMAC1_TX2 84
-#define FMN_STNID_GMAC1_TX3 85
-#define FMN_STNID_GMAC1_FR_1 87
-#define FMN_STNID_GMAC0 96
-#define FMN_STNID_GMAC0_FR_0 97
-#define FMN_STNID_GMAC0_TX0 98
-#define FMN_STNID_GMAC0_TX1 99
-#define FMN_STNID_GMAC0_TX2 100
-#define FMN_STNID_GMAC0_TX3 101
-#define FMN_STNID_GMAC0_FR_1 103
-#define FMN_STNID_CMP_0 108
-#define FMN_STNID_CMP_1 109
-#define FMN_STNID_CMP_2 110
-#define FMN_STNID_CMP_3 111
-#define FMN_STNID_PCIE_0 116
-#define FMN_STNID_PCIE_1 117
-#define FMN_STNID_PCIE_2 118
-#define FMN_STNID_PCIE_3 119
-#define FMN_STNID_XLS_PK0 121
+#define FMN_STNID_CPU0 0x00
+#define FMN_STNID_CPU1 0x08
+#define FMN_STNID_CPU2 0x10
+#define FMN_STNID_CPU3 0x18
+#define FMN_STNID_CPU4 0x20
+#define FMN_STNID_CPU5 0x28
+#define FMN_STNID_CPU6 0x30
+#define FMN_STNID_CPU7 0x38
+
+#define FMN_STNID_XGS0_TX 64
+#define FMN_STNID_XMAC0_00_TX 64
+#define FMN_STNID_XMAC0_01_TX 65
+#define FMN_STNID_XMAC0_02_TX 66
+#define FMN_STNID_XMAC0_03_TX 67
+#define FMN_STNID_XMAC0_04_TX 68
+#define FMN_STNID_XMAC0_05_TX 69
+#define FMN_STNID_XMAC0_06_TX 70
+#define FMN_STNID_XMAC0_07_TX 71
+#define FMN_STNID_XMAC0_08_TX 72
+#define FMN_STNID_XMAC0_09_TX 73
+#define FMN_STNID_XMAC0_10_TX 74
+#define FMN_STNID_XMAC0_11_TX 75
+#define FMN_STNID_XMAC0_12_TX 76
+#define FMN_STNID_XMAC0_13_TX 77
+#define FMN_STNID_XMAC0_14_TX 78
+#define FMN_STNID_XMAC0_15_TX 79
+
+#define FMN_STNID_XGS1_TX 80
+#define FMN_STNID_XMAC1_00_TX 80
+#define FMN_STNID_XMAC1_01_TX 81
+#define FMN_STNID_XMAC1_02_TX 82
+#define FMN_STNID_XMAC1_03_TX 83
+#define FMN_STNID_XMAC1_04_TX 84
+#define FMN_STNID_XMAC1_05_TX 85
+#define FMN_STNID_XMAC1_06_TX 86
+#define FMN_STNID_XMAC1_07_TX 87
+#define FMN_STNID_XMAC1_08_TX 88
+#define FMN_STNID_XMAC1_09_TX 89
+#define FMN_STNID_XMAC1_10_TX 90
+#define FMN_STNID_XMAC1_11_TX 91
+#define FMN_STNID_XMAC1_12_TX 92
+#define FMN_STNID_XMAC1_13_TX 93
+#define FMN_STNID_XMAC1_14_TX 94
+#define FMN_STNID_XMAC1_15_TX 95
+
+#define FMN_STNID_GMAC 96
+#define FMN_STNID_GMACJFR_0 96
+#define FMN_STNID_GMACRFR_0 97
+#define FMN_STNID_GMACTX0 98
+#define FMN_STNID_GMACTX1 99
+#define FMN_STNID_GMACTX2 100
+#define FMN_STNID_GMACTX3 101
+#define FMN_STNID_GMACJFR_1 102
+#define FMN_STNID_GMACRFR_1 103
+
+#define FMN_STNID_DMA 104
+#define FMN_STNID_DMA_0 104
+#define FMN_STNID_DMA_1 105
+#define FMN_STNID_DMA_2 106
+#define FMN_STNID_DMA_3 107
+
+#define FMN_STNID_XGS0FR 112
+#define FMN_STNID_XMAC0JFR 112
+#define FMN_STNID_XMAC0RFR 113
+
+#define FMN_STNID_XGS1FR 114
+#define FMN_STNID_XMAC1JFR 114
+#define FMN_STNID_XMAC1RFR 115
+#define FMN_STNID_SEC 120
+#define FMN_STNID_SEC0 120
+#define FMN_STNID_SEC1 121
+#define FMN_STNID_SEC2 122
+#define FMN_STNID_SEC3 123
+#define FMN_STNID_PK0 124
+#define FMN_STNID_SEC_RSA 124
+#define FMN_STNID_SEC_RSVD0 125
+#define FMN_STNID_SEC_RSVD1 126
+#define FMN_STNID_SEC_RSVD2 127
+
+#define FMN_STNID_GMAC1 80
+#define FMN_STNID_GMAC1_FR_0 81
+#define FMN_STNID_GMAC1_TX0 82
+#define FMN_STNID_GMAC1_TX1 83
+#define FMN_STNID_GMAC1_TX2 84
+#define FMN_STNID_GMAC1_TX3 85
+#define FMN_STNID_GMAC1_FR_1 87
+#define FMN_STNID_GMAC0 96
+#define FMN_STNID_GMAC0_FR_0 97
+#define FMN_STNID_GMAC0_TX0 98
+#define FMN_STNID_GMAC0_TX1 99
+#define FMN_STNID_GMAC0_TX2 100
+#define FMN_STNID_GMAC0_TX3 101
+#define FMN_STNID_GMAC0_FR_1 103
+#define FMN_STNID_CMP_0 108
+#define FMN_STNID_CMP_1 109
+#define FMN_STNID_CMP_2 110
+#define FMN_STNID_CMP_3 111
+#define FMN_STNID_PCIE_0 116
+#define FMN_STNID_PCIE_1 117
+#define FMN_STNID_PCIE_2 118
+#define FMN_STNID_PCIE_3 119
+#define FMN_STNID_XLS_PK0 121
#define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s)
#define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s)
@@ -175,25 +175,25 @@
#define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v)
#define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v)
-#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
-#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
-#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
-#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
-#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
-
-#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
-#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
-#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
-#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
-
-#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
-#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
-#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
-#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
-
-#define FMN_STN_RX_QSIZE 256
-#define FMN_NSTATIONS 128
-#define FMN_CORE_NBUCKETS 8
+#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
+#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
+#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
+#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
+#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
+
+#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
+#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
+#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
+#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
+
+#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
+#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
+#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
+#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
+
+#define FMN_STN_RX_QSIZE 256
+#define FMN_NSTATIONS 128
+#define FMN_CORE_NBUCKETS 8
static inline void nlm_msgsnd(unsigned int stid)
{
diff --git a/arch/mips/include/asm/netlogic/xlr/iomap.h b/arch/mips/include/asm/netlogic/xlr/iomap.h
index 2e768f032e8..ff4533d6ee6 100644
--- a/arch/mips/include/asm/netlogic/xlr/iomap.h
+++ b/arch/mips/include/asm/netlogic/xlr/iomap.h
@@ -35,66 +35,66 @@
#ifndef _ASM_NLM_IOMAP_H
#define _ASM_NLM_IOMAP_H
-#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000)
-#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000
-#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000
-#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000
-#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000
-#define NETLOGIC_IO_PIC_OFFSET 0x08000
-#define NETLOGIC_IO_UART_0_OFFSET 0x14000
-#define NETLOGIC_IO_UART_1_OFFSET 0x15100
+#define DEFAULT_NETLOGIC_IO_BASE CKSEG1ADDR(0x1ef00000)
+#define NETLOGIC_IO_DDR2_CHN0_OFFSET 0x01000
+#define NETLOGIC_IO_DDR2_CHN1_OFFSET 0x02000
+#define NETLOGIC_IO_DDR2_CHN2_OFFSET 0x03000
+#define NETLOGIC_IO_DDR2_CHN3_OFFSET 0x04000
+#define NETLOGIC_IO_PIC_OFFSET 0x08000
+#define NETLOGIC_IO_UART_0_OFFSET 0x14000
+#define NETLOGIC_IO_UART_1_OFFSET 0x15100
-#define NETLOGIC_IO_SIZE 0x1000
+#define NETLOGIC_IO_SIZE 0x1000
-#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000
+#define NETLOGIC_IO_BRIDGE_OFFSET 0x00000
-#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000
-#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000
+#define NETLOGIC_IO_RLD2_CHN0_OFFSET 0x05000
+#define NETLOGIC_IO_RLD2_CHN1_OFFSET 0x06000
-#define NETLOGIC_IO_SRAM_OFFSET 0x07000
+#define NETLOGIC_IO_SRAM_OFFSET 0x07000
-#define NETLOGIC_IO_PCIX_OFFSET 0x09000
-#define NETLOGIC_IO_HT_OFFSET 0x0A000
+#define NETLOGIC_IO_PCIX_OFFSET 0x09000
+#define NETLOGIC_IO_HT_OFFSET 0x0A000
-#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000
+#define NETLOGIC_IO_SECURITY_OFFSET 0x0B000
-#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000
-#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000
-#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000
-#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000
+#define NETLOGIC_IO_GMAC_0_OFFSET 0x0C000
+#define NETLOGIC_IO_GMAC_1_OFFSET 0x0D000
+#define NETLOGIC_IO_GMAC_2_OFFSET 0x0E000
+#define NETLOGIC_IO_GMAC_3_OFFSET 0x0F000
/* XLS devices */
-#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000
-#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000
-#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000
-#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000
+#define NETLOGIC_IO_GMAC_4_OFFSET 0x20000
+#define NETLOGIC_IO_GMAC_5_OFFSET 0x21000
+#define NETLOGIC_IO_GMAC_6_OFFSET 0x22000
+#define NETLOGIC_IO_GMAC_7_OFFSET 0x23000
-#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000
-#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000
-#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000
-#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000
+#define NETLOGIC_IO_PCIE_0_OFFSET 0x1E000
+#define NETLOGIC_IO_PCIE_1_OFFSET 0x1F000
+#define NETLOGIC_IO_SRIO_0_OFFSET 0x1E000
+#define NETLOGIC_IO_SRIO_1_OFFSET 0x1F000
-#define NETLOGIC_IO_USB_0_OFFSET 0x24000
-#define NETLOGIC_IO_USB_1_OFFSET 0x25000
+#define NETLOGIC_IO_USB_0_OFFSET 0x24000
+#define NETLOGIC_IO_USB_1_OFFSET 0x25000
-#define NETLOGIC_IO_COMP_OFFSET 0x1D000
+#define NETLOGIC_IO_COMP_OFFSET 0x1D000
/* end XLS devices */
/* XLR devices */
-#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000
-#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000
-#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000
-#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000
+#define NETLOGIC_IO_SPI4_0_OFFSET 0x10000
+#define NETLOGIC_IO_XGMAC_0_OFFSET 0x11000
+#define NETLOGIC_IO_SPI4_1_OFFSET 0x12000
+#define NETLOGIC_IO_XGMAC_1_OFFSET 0x13000
/* end XLR devices */
-#define NETLOGIC_IO_I2C_0_OFFSET 0x16000
-#define NETLOGIC_IO_I2C_1_OFFSET 0x17000
+#define NETLOGIC_IO_I2C_0_OFFSET 0x16000
+#define NETLOGIC_IO_I2C_1_OFFSET 0x17000
-#define NETLOGIC_IO_GPIO_OFFSET 0x18000
-#define NETLOGIC_IO_FLASH_OFFSET 0x19000
-#define NETLOGIC_IO_TB_OFFSET 0x1C000
+#define NETLOGIC_IO_GPIO_OFFSET 0x18000
+#define NETLOGIC_IO_FLASH_OFFSET 0x19000
+#define NETLOGIC_IO_TB_OFFSET 0x1C000
-#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000)
+#define NETLOGIC_CPLD_OFFSET KSEG1ADDR(0x1d840000)
/*
* Base Address (Virtual) of the PCI Config address space
@@ -102,8 +102,8 @@
* Config space spans 256 (num of buses) * 256 (num functions) * 256 bytes
* ie 1<<24 = 16M
*/
-#define DEFAULT_PCI_CONFIG_BASE 0x18000000
-#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000
-#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000
+#define DEFAULT_PCI_CONFIG_BASE 0x18000000
+#define DEFAULT_HT_TYPE0_CFG_BASE 0x16000000
+#define DEFAULT_HT_TYPE1_CFG_BASE 0x17000000
#endif
diff --git a/arch/mips/include/asm/netlogic/xlr/msidef.h b/arch/mips/include/asm/netlogic/xlr/msidef.h
index 7e39d40be4f..c95d18edf12 100644
--- a/arch/mips/include/asm/netlogic/xlr/msidef.h
+++ b/arch/mips/include/asm/netlogic/xlr/msidef.h
@@ -45,21 +45,21 @@
*/
#define MSI_DATA_VECTOR_SHIFT 0
-#define MSI_DATA_VECTOR_MASK 0x000000ff
+#define MSI_DATA_VECTOR_MASK 0x000000ff
#define MSI_DATA_VECTOR(v) (((v) << MSI_DATA_VECTOR_SHIFT) & \
MSI_DATA_VECTOR_MASK)
#define MSI_DATA_DELIVERY_MODE_SHIFT 8
-#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
-#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_FIXED (0 << MSI_DATA_DELIVERY_MODE_SHIFT)
+#define MSI_DATA_DELIVERY_LOWPRI (1 << MSI_DATA_DELIVERY_MODE_SHIFT)
#define MSI_DATA_LEVEL_SHIFT 14
#define MSI_DATA_LEVEL_DEASSERT (0 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_LEVEL_ASSERT (1 << MSI_DATA_LEVEL_SHIFT)
#define MSI_DATA_TRIGGER_SHIFT 15
-#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
-#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_EDGE (0 << MSI_DATA_TRIGGER_SHIFT)
+#define MSI_DATA_TRIGGER_LEVEL (1 << MSI_DATA_TRIGGER_SHIFT)
/*
* Shift/mask fields for msi address
@@ -69,16 +69,16 @@
#define MSI_ADDR_BASE_LO 0xfee00000
#define MSI_ADDR_DEST_MODE_SHIFT 2
-#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
+#define MSI_ADDR_DEST_MODE_PHYSICAL (0 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_DEST_MODE_LOGICAL (1 << MSI_ADDR_DEST_MODE_SHIFT)
#define MSI_ADDR_REDIRECTION_SHIFT 3
-#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
-#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_CPU (0 << MSI_ADDR_REDIRECTION_SHIFT)
+#define MSI_ADDR_REDIRECTION_LOWPRI (1 << MSI_ADDR_REDIRECTION_SHIFT)
#define MSI_ADDR_DEST_ID_SHIFT 12
#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
-#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
+#define MSI_ADDR_DEST_ID(dest) (((dest) << MSI_ADDR_DEST_ID_SHIFT) & \
MSI_ADDR_DEST_ID_MASK)
#endif /* ASM_RMI_MSIDEF_H */
diff --git a/arch/mips/include/asm/netlogic/xlr/pic.h b/arch/mips/include/asm/netlogic/xlr/pic.h
index 9a691b1f91b..63c99176dff 100644
--- a/arch/mips/include/asm/netlogic/xlr/pic.h
+++ b/arch/mips/include/asm/netlogic/xlr/pic.h
@@ -35,10 +35,11 @@
#ifndef _ASM_NLM_XLR_PIC_H
#define _ASM_NLM_XLR_PIC_H
-#define PIC_CLKS_PER_SEC 66666666ULL
+#define PIC_CLK_HZ 66666666
/* PIC hardware interrupt numbers */
#define PIC_IRT_WD_INDEX 0
#define PIC_IRT_TIMER_0_INDEX 1
+#define PIC_IRT_TIMER_INDEX(i) ((i) + PIC_IRT_TIMER_0_INDEX)
#define PIC_IRT_TIMER_1_INDEX 2
#define PIC_IRT_TIMER_2_INDEX 3
#define PIC_IRT_TIMER_3_INDEX 4
@@ -99,6 +100,7 @@
/* PIC Registers */
#define PIC_CTRL 0x00
+#define PIC_CTRL_STE 8 /* timer enable start bit */
#define PIC_IPI 0x04
#define PIC_INT_ACK 0x06
@@ -116,7 +118,7 @@
#define PIC_TIMER_COUNT_0_BASE 0x120
#define PIC_TIMER_COUNT_1_BASE 0x130
-#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr))
+#define PIC_IRT_0(picintr) (PIC_IRT_0_BASE + (picintr))
#define PIC_IRT_1(picintr) (PIC_IRT_1_BASE + (picintr))
#define PIC_TIMER_MAXVAL_0(i) (PIC_TIMER_MAXVAL_0_BASE + (i))
@@ -130,9 +132,9 @@
* 8-39. This leaves the IRQ 0-7 for cpu interrupts like
* count/compare and FMN
*/
-#define PIC_IRQ_BASE 8
-#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i))
-#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE)
+#define PIC_IRQ_BASE 8
+#define PIC_INTR_TO_IRQ(i) (PIC_IRQ_BASE + (i))
+#define PIC_IRQ_TO_INTR(i) ((i) - PIC_IRQ_BASE)
#define PIC_IRT_FIRST_IRQ PIC_IRQ_BASE
#define PIC_WD_IRQ PIC_INTR_TO_IRQ(PIC_IRT_WD_INDEX)
@@ -168,7 +170,7 @@
#define PIC_BRIDGE_AERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_INDEX)
#define PIC_BRIDGE_BERR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_BERR_INDEX)
#define PIC_BRIDGE_TB_XLR_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_TB_XLR_INDEX)
-#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
+#define PIC_BRIDGE_AERR_NMI_IRQ PIC_INTR_TO_IRQ(PIC_IRT_BRIDGE_AERR_NMI_INDEX)
/* XLS defines */
#define PIC_GMAC_4_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC4_INDEX)
#define PIC_GMAC_5_IRQ PIC_INTR_TO_IRQ(PIC_IRT_GMAC5_INDEX)
@@ -251,12 +253,52 @@ nlm_pic_ack(uint64_t base, int irt)
}
static inline void
-nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
+nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt, int en)
{
nlm_write_reg(base, PIC_IRT_0(irt), (1u << hwt));
/* local scheduling, invalid, level by default */
nlm_write_reg(base, PIC_IRT_1(irt),
- (1 << 30) | (1 << 6) | irq);
+ (en << 30) | (1 << 6) | irq);
+}
+
+static inline uint64_t
+nlm_pic_read_timer(uint64_t base, int timer)
+{
+ uint32_t up1, up2, low;
+
+ up1 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+ low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+ up2 = nlm_read_reg(base, PIC_TIMER_COUNT_1(timer));
+
+ if (up1 != up2) /* wrapped, get the new low */
+ low = nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+ return ((uint64_t)up2 << 32) | low;
+
+}
+
+static inline uint32_t
+nlm_pic_read_timer32(uint64_t base, int timer)
+{
+ return nlm_read_reg(base, PIC_TIMER_COUNT_0(timer));
+}
+
+static inline void
+nlm_pic_set_timer(uint64_t base, int timer, uint64_t value, int irq, int cpu)
+{
+ uint32_t up, low;
+ uint64_t pic_ctrl = nlm_read_reg(base, PIC_CTRL);
+ int en;
+
+ en = (irq > 0);
+ up = value >> 32;
+ low = value & 0xFFFFFFFF;
+ nlm_write_reg(base, PIC_TIMER_MAXVAL_0(timer), low);
+ nlm_write_reg(base, PIC_TIMER_MAXVAL_1(timer), up);
+ nlm_pic_init_irt(base, PIC_IRT_TIMER_INDEX(timer), irq, cpu, 0);
+
+ /* enable the timer */
+ pic_ctrl |= (1 << (PIC_CTRL_STE + timer));
+ nlm_write_reg(base, PIC_CTRL, pic_ctrl);
}
#endif
#endif /* _ASM_NLM_XLR_PIC_H */
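The XLR PIC additions above provide 64-bit and 32-bit timer reads plus a timer setup helper. nlm_pic_read_timer() reads the high word before and after the low word and re-reads the low word when the high word changed, so a low-word wrap between the two reads cannot produce a torn count. A usage sketch under stated assumptions (the PIC base variable and the choice of timer 7 are illustrative, not taken from this diff):

	static uint64_t example_picbase;	/* set up by platform init code */

	/* 64-bit snapshot of an example PIC timer, safe against low-word wrap. */
	static uint64_t example_read_pic_cycles(void)
	{
		return nlm_pic_read_timer(example_picbase, 7);
	}

	/* Program the same timer to wrap every PIC_CLK_HZ ticks (roughly one
	 * second) and route its interrupt to 'irq' on hardware thread 'cpu'. */
	static void example_start_pic_timer(int irq, int cpu)
	{
		nlm_pic_set_timer(example_picbase, 7, (uint64_t)PIC_CLK_HZ, irq, cpu);
	}
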
diff --git a/arch/mips/include/asm/nile4.h b/arch/mips/include/asm/nile4.h
index af0e51a9f68..2e2436d0e94 100644
--- a/arch/mips/include/asm/nile4.h
+++ b/arch/mips/include/asm/nile4.h
@@ -2,7 +2,7 @@
* asm-mips/nile4.h -- NEC Vrc-5074 Nile 4 definitions
*
* Copyright (C) 2000 Geert Uytterhoeven <geert@sonycom.com>
- * Sony Software Development Center Europe (SDCE), Brussels
+ * Sony Software Development Center Europe (SDCE), Brussels
*
* This file is based on the following documentation:
*
@@ -17,7 +17,7 @@
/*
- * Physical Device Address Registers (PDARs)
+ * Physical Device Address Registers (PDARs)
*/
#define NILE4_SDRAM0 0x0000 /* SDRAM Bank 0 [R/W] */
@@ -37,7 +37,7 @@
/*
- * CPU Interface Registers
+ * CPU Interface Registers
*/
#define NILE4_CPUSTAT 0x0080 /* CPU Status [R/W] */
@@ -50,7 +50,7 @@
/*
- * Memory-Interface Registers
+ * Memory-Interface Registers
*/
#define NILE4_MEMCTRL 0x00C0 /* Memory Control */
@@ -59,7 +59,7 @@
/*
- * PCI-Bus Registers
+ * PCI-Bus Registers
*/
#define NILE4_PCICTRL 0x00E0 /* PCI Control [R/W] */
@@ -70,7 +70,7 @@
/*
- * Local-Bus Registers
+ * Local-Bus Registers
*/
#define NILE4_LCNFG 0x0100 /* Local Bus Configuration [R/W] */
@@ -88,7 +88,7 @@
/*
- * DMA Registers
+ * DMA Registers
*/
#define NILE4_DMACTRL0 0x0180 /* DMA Control 0 [R/W] */
@@ -100,7 +100,7 @@
/*
- * Timer Registers
+ * Timer Registers
*/
#define NILE4_T0CTRL 0x01C0 /* SDRAM Refresh Control [R/W] */
@@ -114,7 +114,7 @@
/*
- * PCI Configuration Space Registers
+ * PCI Configuration Space Registers
*/
#define NILE4_PCI_BASE 0x0200
@@ -153,10 +153,10 @@
/*
- * Serial-Port Registers
+ * Serial-Port Registers
*/
-#define NILE4_UART_BASE 0x0300
+#define NILE4_UART_BASE 0x0300
#define NILE4_UARTRBR 0x0300 /* UART Receiver Data Buffer [R] */
#define NILE4_UARTTHR 0x0300 /* UART Transmitter Data Holding [W] */
@@ -175,7 +175,7 @@
/*
- * Interrupt Lines
+ * Interrupt Lines
*/
#define NILE4_INT_CPCE 0 /* CPU-Interface Parity-Error Interrupt */
@@ -185,7 +185,7 @@
#define NILE4_INT_UART 4 /* UART Interrupt */
#define NILE4_INT_WDOG 5 /* Watchdog Timer Interrupt */
#define NILE4_INT_GPT 6 /* General-Purpose Timer Interrupt */
-#define NILE4_INT_LBRTD 7 /* Local-Bus Ready Timer Interrupt */
+#define NILE4_INT_LBRTD 7 /* Local-Bus Ready Timer Interrupt */
#define NILE4_INT_INTA 8 /* PCI Interrupt Signal INTA# */
#define NILE4_INT_INTB 9 /* PCI Interrupt Signal INTB# */
#define NILE4_INT_INTC 10 /* PCI Interrupt Signal INTC# */
@@ -197,7 +197,7 @@
/*
- * Nile 4 Register Access
+ * Nile 4 Register Access
*/
static inline void nile4_sync(void)
@@ -247,7 +247,7 @@ static inline u8 nile4_in8(u32 offset)
/*
- * Physical Device Address Registers
+ * Physical Device Address Registers
*/
extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
@@ -255,7 +255,7 @@ extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
/*
- * PCI Master Registers
+ * PCI Master Registers
*/
#define NILE4_PCICMD_IACK 0 /* PCI Interrupt Acknowledge */
@@ -265,9 +265,9 @@ extern void nile4_set_pdar(u32 pdar, u32 phys, u32 size, int width,
/*
- * PCI Address Spaces
+ * PCI Address Spaces
*
- * Note that these are multiplexed using PCIINIT[01]!
+ * Note that these are multiplexed using PCIINIT[01]!
*/
#define NILE4_PCI_IO_BASE 0xa6000000
@@ -280,7 +280,7 @@ extern void nile4_set_pmr(u32 pmr, u32 type, u32 addr);
/*
- * Interrupt Programming
+ * Interrupt Programming
*/
#define NUM_I8259_INTERRUPTS 16
diff --git a/arch/mips/include/asm/octeon/cvmx-address.h b/arch/mips/include/asm/octeon/cvmx-address.h
index 3c74d826e2e..e2d874e681f 100644
--- a/arch/mips/include/asm/octeon/cvmx-address.h
+++ b/arch/mips/include/asm/octeon/cvmx-address.h
@@ -84,20 +84,20 @@ typedef enum {
* Octeon-I HW never interprets this X (<39:36> reserved
* for future expansion), software should set to 0.
*
- * - 0x0 XXX0 0000 0000 to DRAM Cached
+ * - 0x0 XXX0 0000 0000 to DRAM Cached
* - 0x0 XXX0 0FFF FFFF
*
- * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
- * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
+ * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
+ * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
*
- * - 0x0 XXX0 2000 0000 to DRAM Cached
+ * - 0x0 XXX0 2000 0000 to DRAM Cached
* - 0x0 XXXF FFFF FFFF
*
- * - 0x1 00X0 0000 0000 to Boot Bus Uncached
+ * - 0x1 00X0 0000 0000 to Boot Bus Uncached
* - 0x1 00XF FFFF FFFF
*
- * - 0x1 01X0 0000 0000 to Other NCB Uncached
- * - 0x1 FFXF FFFF FFFF devices
+ * - 0x1 01X0 0000 0000 to Other NCB Uncached
+ * - 0x1 FFXF FFFF FFFF devices
*
* Decode of all Octeon addresses
*/
@@ -129,9 +129,9 @@ typedef union {
*/
struct {
uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
- uint64_t cca:3; /* ignored by octeon */
+ uint64_t cca:3; /* ignored by octeon */
uint64_t mbz:10;
- uint64_t pa:49; /* physical address */
+ uint64_t pa:49; /* physical address */
} sxkphys;
/* physical address */
@@ -253,22 +253,22 @@ typedef union {
#define CVMX_OCT_DID_ASX1 23ULL
#define CVMX_OCT_DID_IOB 30ULL
-#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
-#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
-#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
-#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
-#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
+#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
+#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
+#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
+#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
+#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
-#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
-#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
-#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
-#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
-#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
-#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
-#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
-#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
-#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
-#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
-#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
+#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
+#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
+#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
+#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
+#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
+#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
+#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
+#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
+#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
+#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
+#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
#endif /* __CVMX_ADDRESS_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index 1db1dc2724c..284fa8d773b 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -91,11 +91,11 @@ struct cvmx_bootinfo {
#if (CVMX_BOOTINFO_MIN_VER >= 1)
/*
* Several boards support compact flash on the Octeon boot
- * bus. The CF memory spaces may be mapped to different
+ * bus. The CF memory spaces may be mapped to different
* addresses on different boards. These are the physical
* addresses, so care must be taken to use the correct
* XKPHYS/KSEG0 addressing depending on the application's
- * ABI. These values will be 0 if CF is not present.
+ * ABI. These values will be 0 if CF is not present.
*/
uint64_t compact_flash_common_base_addr;
uint64_t compact_flash_attribute_base_addr;
@@ -131,7 +131,7 @@ struct cvmx_bootinfo {
#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
/* This flag is set if the TLB mappings are not contained in the
* 0x10000000 - 0x20000000 boot bus region. */
-#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
+#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
@@ -164,9 +164,9 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_EBT5600 = 22,
CVMX_BOARD_TYPE_EBH5201 = 23,
CVMX_BOARD_TYPE_EBT5200 = 24,
- CVMX_BOARD_TYPE_CB5600 = 25,
- CVMX_BOARD_TYPE_CB5601 = 26,
- CVMX_BOARD_TYPE_CB5200 = 27,
+ CVMX_BOARD_TYPE_CB5600 = 25,
+ CVMX_BOARD_TYPE_CB5601 = 26,
+ CVMX_BOARD_TYPE_CB5200 = 27,
/* Special 'generic' board type, supports many boards */
CVMX_BOARD_TYPE_GENERIC = 28,
CVMX_BOARD_TYPE_EBH5610 = 29,
@@ -223,7 +223,7 @@ enum cvmx_board_types_enum {
CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
/*
- * Set aside a range for customer private use. The SDK won't
+ * Set aside a range for customer private use. The SDK won't
* use any numbers in this range.
*/
CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
diff --git a/arch/mips/include/asm/octeon/cvmx-bootmem.h b/arch/mips/include/asm/octeon/cvmx-bootmem.h
index 42db2be663f..352f1dc2508 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootmem.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootmem.h
@@ -39,7 +39,7 @@
#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
/* minimum alignment of bootmem alloced blocks */
-#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
+#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
/* Flags for cvmx_bootmem_phy_mem* functions */
/* Allocate from end of block instead of beginning */
@@ -151,8 +151,8 @@ extern void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment);
* memory cannot be allocated at the specified address.
*
* @size: Size in bytes of block to allocate
- * @address: Physical address to allocate memory at. If this memory is not
- * available, the allocation fails.
+ * @address: Physical address to allocate memory at. If this memory is not
+ * available, the allocation fails.
* @alignment: Alignment required - must be power of 2
* Returns pointer to block of memory, NULL on error
*/
@@ -181,7 +181,7 @@ extern void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
* @name: name of block to free
*
* Returns 0 on failure,
- * !0 on success
+ * !0 on success
*/
@@ -210,9 +210,9 @@ extern void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment,
*
* @size: Size in bytes of block to allocate
* @address: Physical address to allocate memory at. If this
- * memory is not available, the allocation fails.
+ * memory is not available, the allocation fails.
* @name: name of block - must be less than CVMX_BOOTMEM_NAME_LEN
- * bytes
+ * bytes
*
* Returns a pointer to block of memory, NULL on error
*/
@@ -249,7 +249,7 @@ extern int cvmx_bootmem_free_named(char *name);
* @name: name of block to free
*
* Returns pointer to named block descriptor on success
- * 0 on failure
+ * 0 on failure
*/
struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
@@ -258,20 +258,20 @@ struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name);
* (optional) requested address and alignment.
*
* @req_size: size of region to allocate. All requests are rounded up
- * to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
+ * to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE bytes in size
*
* @address_min: Minimum address that block can occupy.
*
* @address_max: Specifies the maximum address_min (inclusive) that
- * the allocation can use.
+ * the allocation can use.
*
* @alignment: Requested alignment of the block. If this alignment
- * cannot be met, the allocation fails. This must be a
- * power of 2. (Note: Alignment of
- * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- * internally enforced. Requested alignments of less than
- * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * cannot be met, the allocation fails. This must be a
+ * power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less than
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
*
* @flags: Flags to control options for the allocation.
*
@@ -285,21 +285,21 @@ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
* Allocates a named block of physical memory from the free list, at
* (optional) requested address and alignment.
*
- * @param size size of region to allocate. All requests are rounded
- * up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE
- * bytes size
+ * @param size size of region to allocate. All requests are rounded
+ * up to be a multiple of CVMX_BOOTMEM_ALIGNMENT_SIZE
+ * bytes in size
* @param min_addr Minimum address that block can occupy.
* @param max_addr Specifies the maximum address_min (inclusive) that
- * the allocation can use.
+ * the allocation can use.
* @param alignment Requested alignment of the block. If this
- * alignment cannot be met, the allocation fails.
- * This must be a power of 2. (Note: Alignment of
- * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
- * internally enforced. Requested alignments of less
- * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
- * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
- * @param name name to assign to named block
- * @param flags Flags to control options for the allocation.
+ * alignment cannot be met, the allocation fails.
+ * This must be a power of 2. (Note: Alignment of
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
+ * internally enforced. Requested alignments of less
+ * than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
+ * CVMX_BOOTMEM_ALIGNMENT_SIZE.)
+ * @param name name to assign to named block
+ * @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
@@ -312,14 +312,14 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
- * @name: Name of memory block to find. If NULL pointer given, then
- * finds unused descriptor, if available.
+ * @name: Name of memory block to find. If NULL pointer given, then
+ * finds unused descriptor, if available.
*
* @flags: Flags to control options for the allocation.
*
* Returns Pointer to memory block descriptor, NULL if not found.
- * If NULL returned when name parameter is NULL, then no memory
- * block descriptors are available.
+ * If NULL returned when name parameter is NULL, then no memory
+ * block descriptors are available.
*/
struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
@@ -331,31 +331,31 @@ cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags);
* @flags: flags for passing options
*
* Returns 0 on failure
- * 1 on success
+ * 1 on success
*/
int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags);
/**
- * Frees a block to the bootmem allocator list. This must
+ * Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
* corrupted.
*
* IMPORTANT: This is only intended to be used as part of named block
* frees and initial population of the free memory list.
- * *
+ * *
*
* @phy_addr: physical address of block
* @size: size of block in bytes.
* @flags: flags for passing options
*
* Returns 1 on success,
- * 0 on failure
+ * 0 on failure
*/
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags);
/**
- * Locks the bootmem allocator. This is useful in certain situations
+ * Locks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
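
A minimal usage sketch of the named-block bootmem API documented in this header, assuming an Octeon kernel or simple-executive build where these headers are available; the block name, size and alignment below are illustrative and not taken from the patch:

#include <asm/octeon/cvmx-bootmem.h>

static void *shared_area;

/* Reserve a 64 KiB, 128-byte-aligned block that other cores can later
 * look up by name with cvmx_bootmem_find_named_block(). */
static int example_bootmem_setup(void)
{
        shared_area = cvmx_bootmem_alloc_named(0x10000, 128,
                                               "example-shared");
        return shared_area ? 0 : -1;
}

static void example_bootmem_teardown(void)
{
        /* Returns 0 on failure, non-zero on success, as noted above. */
        cvmx_bootmem_free_named("example-shared");
}
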
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index fed91125317..024a71b2bff 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -244,33 +244,33 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
".set noreorder\n"
"1:\n"
/* Atomic add one to ticket_ptr */
- "ll %[my_ticket], %[ticket_ptr]\n"
+ "ll %[my_ticket], %[ticket_ptr]\n"
/* and store the original value */
- "li %[ticket], 1\n"
+ "li %[ticket], 1\n"
/* in my_ticket */
- "baddu %[ticket], %[my_ticket]\n"
- "sc %[ticket], %[ticket_ptr]\n"
- "beqz %[ticket], 1b\n"
+ "baddu %[ticket], %[my_ticket]\n"
+ "sc %[ticket], %[ticket_ptr]\n"
+ "beqz %[ticket], 1b\n"
" nop\n"
/* Load the current now_serving ticket */
- "lbu %[ticket], %[now_serving]\n"
+ "lbu %[ticket], %[now_serving]\n"
"2:\n"
/* Jump out if now_serving == my_ticket */
- "beq %[ticket], %[my_ticket], 4f\n"
+ "beq %[ticket], %[my_ticket], 4f\n"
/* Find out how many tickets are in front of me */
- " subu %[ticket], %[my_ticket], %[ticket]\n"
+ " subu %[ticket], %[my_ticket], %[ticket]\n"
/* Use tickets in front of me minus one to delay */
"subu %[ticket], 1\n"
/* Delay will be ((tickets in front)-1)*32 loops */
- "cins %[ticket], %[ticket], 5, 7\n"
+ "cins %[ticket], %[ticket], 5, 7\n"
"3:\n"
/* Loop here until our ticket might be up */
- "bnez %[ticket], 3b\n"
- " subu %[ticket], 1\n"
+ "bnez %[ticket], 3b\n"
+ " subu %[ticket], 1\n"
/* Jump back up to check out ticket again */
- "b 2b\n"
+ "b 2b\n"
/* Load the current now_serving ticket */
- " lbu %[ticket], %[now_serving]\n"
+ " lbu %[ticket], %[now_serving]\n"
"4:\n"
".set pop\n" :
[ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
@@ -313,9 +313,9 @@ static inline __cvmx_cmd_queue_state_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd_count: Number of command words to write
* @cmds: Array of commands to write
*
@@ -411,9 +411,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
*
@@ -510,9 +510,9 @@ static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
*
* @queue_id: Hardware command queue to write to
* @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
+ * Use internal locking to ensure exclusive access for queue
+ * updates. If you don't use this locking you must ensure
+ * exclusivity some other way. Locking is strongly recommended.
* @cmd1: Command
* @cmd2: Command
* @cmd3: Command
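
The ll/sc sequence in __cvmx_cmd_queue_lock() above is a ticket lock with a back-off delay. As a plain-C illustration of the same idea (an editorial sketch using GCC builtins, memory barriers omitted; this is not the code from this header):

#include <stdint.h>

struct ticket_lock {
        volatile uint8_t ticket;        /* next ticket to hand out */
        volatile uint8_t now_serving;   /* ticket allowed to proceed */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
        /* Atomically take a ticket; the asm does this with ll/sc + baddu. */
        uint8_t my_ticket = __sync_fetch_and_add(&l->ticket, 1);

        /* Spin until it is our turn. The asm additionally waits roughly
         * 32 cycles per ticket still ahead of us before re-reading
         * now_serving, which is what its cins/subu delay loop computes. */
        while (l->now_serving != my_ticket)
                ;
}

static void ticket_lock_release(struct ticket_lock *l)
{
        l->now_serving++;
}
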
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index 26835d1b43b..f7dd17d0dc2 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -31,13 +31,13 @@
/* Pools in use */
/* Packet buffers */
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
+#define CVMX_FPA_PACKET_POOL (0)
+#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
/* Work queue entrys */
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
+#define CVMX_FPA_WQE_POOL (1)
+#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/* PKO queue command buffers */
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
+#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
/************************* FAU allocation ********************************/
@@ -45,7 +45,7 @@
* in order of descending size so that all alignment constraints are
* automatically met. The enums are linked so that the following enum
* continues allocating where the previous one left off, so the
- * numbering within each enum always starts with zero. The macros
+ * numbering within each enum always starts with zero. The macros
* take care of the address increment size, so the values entered
* always increase by 1. FAU registers are accessed with byte
* addresses.
@@ -90,9 +90,9 @@ typedef enum {
* be taken into account.
*/
/* Generic scratch iobdma area */
-#define CVMX_SCR_SCRATCH (0)
+#define CVMX_SCR_SCRATCH (0)
/* First location available after cvmx-config.h allocated region. */
-#define CVMX_SCR_REG_AVAIL_BASE (8)
+#define CVMX_SCR_REG_AVAIL_BASE (8)
/*
* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
@@ -145,14 +145,14 @@ typedef enum {
* 1: include
*/
#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
+#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
diff --git a/arch/mips/include/asm/octeon/cvmx-fau.h b/arch/mips/include/asm/octeon/cvmx-fau.h
index a6939fc8ba1..ef98f7fc102 100644
--- a/arch/mips/include/asm/octeon/cvmx-fau.h
+++ b/arch/mips/include/asm/octeon/cvmx-fau.h
@@ -37,13 +37,13 @@
*/
#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
-#define CVMX_FAU_BITS_SCRADDR 63, 56
-#define CVMX_FAU_BITS_LEN 55, 48
-#define CVMX_FAU_BITS_INEVAL 35, 14
-#define CVMX_FAU_BITS_TAGWAIT 13, 13
-#define CVMX_FAU_BITS_NOADD 13, 13
-#define CVMX_FAU_BITS_SIZE 12, 11
-#define CVMX_FAU_BITS_REGISTER 10, 0
+#define CVMX_FAU_BITS_SCRADDR 63, 56
+#define CVMX_FAU_BITS_LEN 55, 48
+#define CVMX_FAU_BITS_INEVAL 35, 14
+#define CVMX_FAU_BITS_TAGWAIT 13, 13
+#define CVMX_FAU_BITS_NOADD 13, 13
+#define CVMX_FAU_BITS_SIZE 12, 11
+#define CVMX_FAU_BITS_REGISTER 10, 0
typedef enum {
CVMX_FAU_OP_SIZE_8 = 0,
@@ -109,11 +109,11 @@ typedef union {
* Builds a store I/O address for writing to the FAU
*
* @noadd: 0 = Store value is atomically added to the current value
- * 1 = Store value is atomically written over the current value
+ * 1 = Store value is atomically written over the current value
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* Returns Address to store for atomic update
*/
static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
@@ -127,16 +127,16 @@ static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
* Builds a I/O address for accessing the FAU
*
* @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
* Returns Address to read from for atomic update
*/
static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
@@ -152,9 +152,9 @@ static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
@@ -167,9 +167,9 @@ static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Value of the register before the update
*/
static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
@@ -182,7 +182,7 @@ static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Value of the register before the update
*/
@@ -209,12 +209,12 @@ static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait64_t
cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -233,12 +233,12 @@ cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait32_t
cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -257,11 +257,11 @@ cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
* completes
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait16_t
cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -282,8 +282,8 @@ cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
+ * the value of the register before the update will be
+ * returned
*/
static inline cvmx_fau_tagwait8_t
cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
@@ -301,21 +301,21 @@ cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
*
* @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
* @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
+ * Note: When performing 32 and 64 bit access, only the low
+ * 22 bits are available.
* @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
+ * operation to complete.
+ * - 0 = Don't wait
+ * - 1 = Wait for tag switch to complete
* @size: The size of the operation:
- * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
- * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
- * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
- * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
+ * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
+ * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
+ * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
+ * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
+ * - Step by 2 for 16 bit access.
+ * - Step by 4 for 32 bit access.
+ * - Step by 8 for 64 bit access.
* Returns Data to write using cvmx_send_single
*/
static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
@@ -337,11 +337,11 @@ static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
@@ -357,11 +357,11 @@ static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
@@ -377,9 +377,9 @@ static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
*/
@@ -396,7 +396,7 @@ static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
* placed in the scratch memory at byte address scraddr.
*
* @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
+ * Must be 8 byte aligned.
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
* Returns Placed in the scratch pad register
@@ -414,14 +414,14 @@ static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
@@ -437,14 +437,14 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
- * Note: Only the low 22 bits are available.
+ * Note: Only the low 22 bits are available.
* Returns Placed in the scratch pad register
*/
static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
@@ -460,12 +460,12 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
*
* Returns Placed in the scratch pad register
@@ -483,9 +483,9 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
* switch completes.
*
* @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
+ * 8 byte aligned. If a timeout occurs, the error bit (63)
+ * will be set. Otherwise the value of the register before
+ * the update will be returned
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
* @value: Signed value to add.
@@ -504,7 +504,7 @@ static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
* Perform an atomic 64 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -516,7 +516,7 @@ static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
* Perform an atomic 32 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -528,7 +528,7 @@ static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
* Perform an atomic 16 bit add
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to add.
*/
static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
@@ -551,7 +551,7 @@ static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
* Perform an atomic 64 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
+ * - Step by 8 for 64 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
@@ -563,7 +563,7 @@ static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
* Perform an atomic 32 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
+ * - Step by 4 for 32 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
@@ -575,7 +575,7 @@ static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
* Perform an atomic 16 bit write
*
* @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
+ * - Step by 2 for 16 bit access.
* @value: Signed value to write.
*/
static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
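
A short sketch of how the fetch-and-add helpers above are typically used (editorial illustration; the register offset EXAMPLE_FAU_PKT_COUNT is hypothetical and would normally come from the FAU allocation enums in cvmx-config.h):

#include <asm/octeon/cvmx-fau.h>

/* Hypothetical 64-bit FAU counter at byte offset 0; 64-bit registers
 * step by 8, as the comments above require. */
#define EXAMPLE_FAU_PKT_COUNT ((cvmx_fau_reg_64_t)0)

static int64_t example_count_packet(void)
{
        /* Atomic add; only the low 22 bits of the value are usable. */
        cvmx_fau_atomic_add64(EXAMPLE_FAU_PKT_COUNT, 1);

        /* A fetch-and-add of 0 reads the counter without modifying it. */
        return cvmx_fau_fetch_and_add64(EXAMPLE_FAU_PKT_COUNT, 0);
}
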
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa.h b/arch/mips/include/asm/octeon/cvmx-fpa.h
index 541a1ae02b6..aa26a2ce5a0 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa.h
@@ -39,9 +39,9 @@
#include <asm/octeon/cvmx-address.h>
#include <asm/octeon/cvmx-fpa-defs.h>
-#define CVMX_FPA_NUM_POOLS 8
+#define CVMX_FPA_NUM_POOLS 8
#define CVMX_FPA_MIN_BLOCK_SIZE 128
-#define CVMX_FPA_ALIGNMENT 128
+#define CVMX_FPA_ALIGNMENT 128
/**
* Structure describing the data format used for stores to the FPA.
@@ -186,8 +186,8 @@ static inline void *cvmx_fpa_alloc(uint64_t pool)
/**
* Asynchronously get a new block from the FPA
*
- * @scr_addr: Local scratch address to put response in. This is a byte address,
- * but must be 8 byte aligned.
+ * @scr_addr: Local scratch address to put response in. This is a byte address,
+ * but must be 8 byte aligned.
* @pool: Pool to get the block from
*/
static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
@@ -212,7 +212,7 @@ static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
- * Cache lines to invalidate
+ * Cache lines to invalidate
*/
static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
@@ -234,7 +234,7 @@ static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
* @ptr: Block to free
* @pool: Pool to put it in
* @num_cache_lines:
- * Cache lines to invalidate
+ * Cache lines to invalidate
*/
static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
uint64_t num_cache_lines)
@@ -245,7 +245,7 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
/*
* Make sure that any previous writes to memory go out before
- * we free this buffer. This also serves as a barrier to
+ * we free this buffer. This also serves as a barrier to
* prevent GCC from reordering operations to after the
* free.
*/
@@ -259,17 +259,17 @@ static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
* This can only be called once per pool. Make sure proper
* locking enforces this.
*
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
+ * @pool: Pool to initialize
+ * 0 <= pool < 8
+ * @name: Constant character string to name this pool.
+ * String is not copied.
+ * @buffer: Pointer to the block of memory to use. This must be
+ * accessible by all processors and external hardware.
* @block_size: Size for each block controlled by the FPA
* @num_blocks: Number of blocks
*
* Returns 0 on Success,
- * -1 on failure
+ * -1 on failure
*/
extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
uint64_t block_size, uint64_t num_blocks);
@@ -282,8 +282,8 @@ extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
*
* @pool: Pool to shutdown
* Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
+ * - Positive is count of missing buffers
+ * - Negative is too many buffers or corrupted pointers
*/
extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
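
A brief sketch of bringing up an FPA pool with the functions declared above (editorial illustration; pool number 3, the pool name and the 2048-byte block size are assumptions, and the backing memory must already be CVMX_FPA_ALIGNMENT-aligned and visible to all cores and hardware units):

#include <asm/octeon/cvmx-fpa.h>

static int example_fpa(void *backing, uint64_t num_blocks)
{
        void *buf;

        if (cvmx_fpa_setup_pool(3, "example-pool", backing,
                                2048, num_blocks))
                return -1;

        buf = cvmx_fpa_alloc(3);
        if (!buf)
                return -1;

        /* Give the buffer back; 0 = do not invalidate any cache lines. */
        cvmx_fpa_free(buf, 3, 0);
        return 0;
}
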
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 442f508eaac..41785dd0ddd 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -48,7 +48,7 @@ typedef enum {
* Fake IPD port, the RGMII/MII interface may use different PHY, use
* this macro to return appropriate MIX address to read the PHY.
*/
-#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
+#define CVMX_HELPER_BOARD_MGMT_IPD_PORT -10
/**
* cvmx_override_board_link_get(int ipd_port) is a function
@@ -86,10 +86,10 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
*
* @phy_addr: The address of the PHY to program
* @link_flags:
- * Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backware compatibility.
+ * Flags to control autonegotiation. Bit 0 is autonegotiation
+ * enable/disable to maintain backward compatibility.
* @link_info: Link speed to program. If the speed is zero and autonegotiation
- * is enabled, all possible negotiation speeds are advertised.
+ * is enabled, all possible negotiation speeds are advertised.
*
* Returns Zero on success, negative on failure
*/
@@ -111,10 +111,10 @@ int cvmx_helper_board_link_set_phy(int phy_addr,
* enumeration from the bootloader.
*
* @ipd_port: IPD input port associated with the port we want to get link
- * status for.
+ * status for.
*
* Returns The ports link status. If the link isn't fully resolved, this must
- * return zero.
+ * return zero.
*/
extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
@@ -134,10 +134,10 @@ extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
*
* @interface: Interface to probe
* @supported_ports:
- * Number of ports Octeon supports.
+ * Number of ports Octeon supports.
*
* Returns Number of ports the actual board supports. Many times this will
- * simple be "support_ports".
+ * simply be "supported_ports".
*/
extern int __cvmx_helper_board_interface_probe(int interface,
int supported_ports);
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
index 78295ba0050..4d7a3db3a9f 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-rgmii.h
@@ -98,9 +98,9 @@ extern int __cvmx_helper_rgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
index 9a9b6c103ed..4debb1c5153 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-sgmii.h
@@ -92,9 +92,9 @@ extern int __cvmx_helper_sgmii_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-util.h b/arch/mips/include/asm/octeon/cvmx-helper-util.h
index 01c8ddd84ff..f446f212bbd 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-util.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-util.h
@@ -57,11 +57,11 @@ extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
*
* @queue: Input queue to setup RED on (0-7)
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
@@ -71,11 +71,11 @@ extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
* Setup Random Early Drop to automatically begin dropping packets.
*
* @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
+ * Packets will begin slowly dropping when there are less than
+ * this many packet buffers free in FPA 0.
* @drop_thresh:
- * All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
+ * All incoming packets will be dropped when there are less
+ * than this many free packet buffers in FPA 0.
* Returns Zero on success. Negative on failure
*/
extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
@@ -84,7 +84,7 @@ extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
* Get the version of the CVMX libraries.
*
* Returns Version string. Note this buffer is allocated statically
- * and will be shared by all callers.
+ * and will be shared by all callers.
*/
extern const char *cvmx_helper_get_version(void);
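
For reference, a one-call sketch of the RED setup described above (the thresholds 256 and 128 are arbitrary illustrations, expressed in free FPA pool 0 packet buffers):

#include <asm/octeon/cvmx-helper-util.h>

static int example_enable_red(void)
{
        /* Start dropping probabilistically below 256 free packet buffers,
         * drop everything below 128. Returns zero on success. */
        return cvmx_helper_setup_red(256, 128);
}
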
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
index f6fbc4f45b5..5e89ed703ea 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-xaui.h
@@ -92,9 +92,9 @@ extern int __cvmx_helper_xaui_link_set(int ipd_port,
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-helper.h b/arch/mips/include/asm/octeon/cvmx-helper.h
index 691c8142cd4..5a3090dc6f2 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper.h
@@ -93,12 +93,12 @@ extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
/**
* This function enables the IPD and also enables the packet interfaces.
* The packet interfaces (RGMII and SPI) must be enabled after the
- * IPD. This should be called by the user program after any additional
+ * IPD. This should be called by the user program after any additional
* IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
* is not set in the executive-config.h file.
*
* Returns 0 on success
- * -1 on failure
+ * -1 on failure
*/
extern int cvmx_helper_ipd_and_packet_input_enable(void);
@@ -128,7 +128,7 @@ extern int cvmx_helper_initialize_packet_io_local(void);
* @interface: Which interface to return port count for.
*
* Returns Port count for interface
- * -1 for uninitialized interface
+ * -1 for uninitialized interface
*/
extern int cvmx_helper_ports_on_interface(int interface);
@@ -150,7 +150,7 @@ extern int cvmx_helper_get_number_of_interfaces(void);
* @interface: Interface to probe
*
* Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
+ * DISABLED.
*/
extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
interface);
@@ -214,9 +214,9 @@ extern int cvmx_helper_interface_enumerate(int interface);
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
- * Non zero if you want internal loopback
+ * Non zero if you want internal loopback
* @enable_external:
- * Non zero if you want external loopback
+ * Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index 115a552c5c7..e13490ebbb2 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -38,8 +38,8 @@
#include <asm/octeon/cvmx-ipd-defs.h>
enum cvmx_ipd_mode {
- CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
+ CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
};
@@ -60,17 +60,17 @@ typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
*
* @mbuff_size: Packets buffer size in 8 byte words
* @first_mbuff_skip:
- * Number of 8 byte words to skip in the first buffer
+ * Number of 8 byte words to skip in the first buffer
* @not_first_mbuff_skip:
- * Number of 8 byte words to skip in each following buffer
+ * Number of 8 byte words to skip in each following buffer
* @first_back: Must be same as first_mbuff_skip / 128
* @second_back:
- * Must be same as not_first_mbuff_skip / 128
+ * Must be same as not_first_mbuff_skip / 128
* @wqe_fpa_pool:
- * FPA pool to get work entries from
+ * FPA pool to get work entries from
* @cache_mode:
* @back_pres_enable_flag:
- * Enable or disable port back pressure
+ * Enable or disable port back pressure
*/
static inline void cvmx_ipd_config(uint64_t mbuff_size,
uint64_t first_mbuff_skip,
diff --git a/arch/mips/include/asm/octeon/cvmx-l2c.h b/arch/mips/include/asm/octeon/cvmx-l2c.h
index 2c8ff9e33ec..11c0a8fa8eb 100644
--- a/arch/mips/include/asm/octeon/cvmx-l2c.h
+++ b/arch/mips/include/asm/octeon/cvmx-l2c.h
@@ -33,13 +33,13 @@
#ifndef __CVMX_L2C_H__
#define __CVMX_L2C_H__
-#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
+#define CVMX_L2_ASSOC cvmx_l2c_get_num_assoc() /* Deprecated macro, use function */
#define CVMX_L2_SET_BITS cvmx_l2c_get_set_bits() /* Deprecated macro, use function */
-#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
+#define CVMX_L2_SETS cvmx_l2c_get_num_sets() /* Deprecated macro, use function */
#define CVMX_L2C_IDX_ADDR_SHIFT 7 /* based on 128 byte cache line size */
-#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
+#define CVMX_L2C_IDX_MASK (cvmx_l2c_get_num_sets() - 1)
/* Defines for index aliasing computations */
#define CVMX_L2C_TAG_ADDR_ALIAS_SHIFT (CVMX_L2C_IDX_ADDR_SHIFT + cvmx_l2c_get_set_bits())
@@ -67,91 +67,91 @@ union cvmx_l2c_tag {
/* L2C Performance Counter events. */
enum cvmx_l2c_event {
- CVMX_L2C_EVENT_CYCLES = 0,
+ CVMX_L2C_EVENT_CYCLES = 0,
CVMX_L2C_EVENT_INSTRUCTION_MISS = 1,
- CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
- CVMX_L2C_EVENT_DATA_MISS = 3,
- CVMX_L2C_EVENT_DATA_HIT = 4,
- CVMX_L2C_EVENT_MISS = 5,
- CVMX_L2C_EVENT_HIT = 6,
- CVMX_L2C_EVENT_VICTIM_HIT = 7,
- CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
- CVMX_L2C_EVENT_TAG_PROBE = 9,
- CVMX_L2C_EVENT_TAG_UPDATE = 10,
- CVMX_L2C_EVENT_TAG_COMPLETE = 11,
- CVMX_L2C_EVENT_TAG_DIRTY = 12,
- CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
- CVMX_L2C_EVENT_DATA_STORE_READ = 14,
+ CVMX_L2C_EVENT_INSTRUCTION_HIT = 2,
+ CVMX_L2C_EVENT_DATA_MISS = 3,
+ CVMX_L2C_EVENT_DATA_HIT = 4,
+ CVMX_L2C_EVENT_MISS = 5,
+ CVMX_L2C_EVENT_HIT = 6,
+ CVMX_L2C_EVENT_VICTIM_HIT = 7,
+ CVMX_L2C_EVENT_INDEX_CONFLICT = 8,
+ CVMX_L2C_EVENT_TAG_PROBE = 9,
+ CVMX_L2C_EVENT_TAG_UPDATE = 10,
+ CVMX_L2C_EVENT_TAG_COMPLETE = 11,
+ CVMX_L2C_EVENT_TAG_DIRTY = 12,
+ CVMX_L2C_EVENT_DATA_STORE_NOP = 13,
+ CVMX_L2C_EVENT_DATA_STORE_READ = 14,
CVMX_L2C_EVENT_DATA_STORE_WRITE = 15,
- CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
- CVMX_L2C_EVENT_WRITE_REQUEST = 17,
- CVMX_L2C_EVENT_READ_REQUEST = 18,
+ CVMX_L2C_EVENT_FILL_DATA_VALID = 16,
+ CVMX_L2C_EVENT_WRITE_REQUEST = 17,
+ CVMX_L2C_EVENT_READ_REQUEST = 18,
CVMX_L2C_EVENT_WRITE_DATA_VALID = 19,
- CVMX_L2C_EVENT_XMC_NOP = 20,
- CVMX_L2C_EVENT_XMC_LDT = 21,
- CVMX_L2C_EVENT_XMC_LDI = 22,
- CVMX_L2C_EVENT_XMC_LDD = 23,
- CVMX_L2C_EVENT_XMC_STF = 24,
- CVMX_L2C_EVENT_XMC_STT = 25,
- CVMX_L2C_EVENT_XMC_STP = 26,
- CVMX_L2C_EVENT_XMC_STC = 27,
- CVMX_L2C_EVENT_XMC_DWB = 28,
- CVMX_L2C_EVENT_XMC_PL2 = 29,
- CVMX_L2C_EVENT_XMC_PSL1 = 30,
- CVMX_L2C_EVENT_XMC_IOBLD = 31,
- CVMX_L2C_EVENT_XMC_IOBST = 32,
- CVMX_L2C_EVENT_XMC_IOBDMA = 33,
- CVMX_L2C_EVENT_XMC_IOBRSP = 34,
- CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
- CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
- CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
- CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
- CVMX_L2C_EVENT_RSC_NOP = 39,
- CVMX_L2C_EVENT_RSC_STDN = 40,
- CVMX_L2C_EVENT_RSC_FILL = 41,
- CVMX_L2C_EVENT_RSC_REFL = 42,
- CVMX_L2C_EVENT_RSC_STIN = 43,
- CVMX_L2C_EVENT_RSC_SCIN = 44,
- CVMX_L2C_EVENT_RSC_SCFL = 45,
- CVMX_L2C_EVENT_RSC_SCDN = 46,
- CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
- CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
- CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
- CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
- CVMX_L2C_EVENT_LRF_REQ = 51,
- CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
- CVMX_L2C_EVENT_DT_WR_INVAL = 53,
+ CVMX_L2C_EVENT_XMC_NOP = 20,
+ CVMX_L2C_EVENT_XMC_LDT = 21,
+ CVMX_L2C_EVENT_XMC_LDI = 22,
+ CVMX_L2C_EVENT_XMC_LDD = 23,
+ CVMX_L2C_EVENT_XMC_STF = 24,
+ CVMX_L2C_EVENT_XMC_STT = 25,
+ CVMX_L2C_EVENT_XMC_STP = 26,
+ CVMX_L2C_EVENT_XMC_STC = 27,
+ CVMX_L2C_EVENT_XMC_DWB = 28,
+ CVMX_L2C_EVENT_XMC_PL2 = 29,
+ CVMX_L2C_EVENT_XMC_PSL1 = 30,
+ CVMX_L2C_EVENT_XMC_IOBLD = 31,
+ CVMX_L2C_EVENT_XMC_IOBST = 32,
+ CVMX_L2C_EVENT_XMC_IOBDMA = 33,
+ CVMX_L2C_EVENT_XMC_IOBRSP = 34,
+ CVMX_L2C_EVENT_XMC_BUS_VALID = 35,
+ CVMX_L2C_EVENT_XMC_MEM_DATA = 36,
+ CVMX_L2C_EVENT_XMC_REFL_DATA = 37,
+ CVMX_L2C_EVENT_XMC_IOBRSP_DATA = 38,
+ CVMX_L2C_EVENT_RSC_NOP = 39,
+ CVMX_L2C_EVENT_RSC_STDN = 40,
+ CVMX_L2C_EVENT_RSC_FILL = 41,
+ CVMX_L2C_EVENT_RSC_REFL = 42,
+ CVMX_L2C_EVENT_RSC_STIN = 43,
+ CVMX_L2C_EVENT_RSC_SCIN = 44,
+ CVMX_L2C_EVENT_RSC_SCFL = 45,
+ CVMX_L2C_EVENT_RSC_SCDN = 46,
+ CVMX_L2C_EVENT_RSC_DATA_VALID = 47,
+ CVMX_L2C_EVENT_RSC_VALID_FILL = 48,
+ CVMX_L2C_EVENT_RSC_VALID_STRSP = 49,
+ CVMX_L2C_EVENT_RSC_VALID_REFL = 50,
+ CVMX_L2C_EVENT_LRF_REQ = 51,
+ CVMX_L2C_EVENT_DT_RD_ALLOC = 52,
+ CVMX_L2C_EVENT_DT_WR_INVAL = 53,
CVMX_L2C_EVENT_MAX
};
/* L2C Performance Counter events for Octeon2. */
enum cvmx_l2c_tad_event {
- CVMX_L2C_TAD_EVENT_NONE = 0,
- CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
- CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
- CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
- CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
- CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
- CVMX_L2C_TAD_EVENT_SC_PASS = 6,
- CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
- CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
- CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
- CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
- CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
- CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
- CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
- CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
- CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
- CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
- CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
- CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
- CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
- CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
- CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
- CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
- CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
- CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
- CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
+ CVMX_L2C_TAD_EVENT_NONE = 0,
+ CVMX_L2C_TAD_EVENT_TAG_HIT = 1,
+ CVMX_L2C_TAD_EVENT_TAG_MISS = 2,
+ CVMX_L2C_TAD_EVENT_TAG_NOALLOC = 3,
+ CVMX_L2C_TAD_EVENT_TAG_VICTIM = 4,
+ CVMX_L2C_TAD_EVENT_SC_FAIL = 5,
+ CVMX_L2C_TAD_EVENT_SC_PASS = 6,
+ CVMX_L2C_TAD_EVENT_LFB_VALID = 7,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_LFB = 8,
+ CVMX_L2C_TAD_EVENT_LFB_WAIT_VAB = 9,
+ CVMX_L2C_TAD_EVENT_QUAD0_INDEX = 128,
+ CVMX_L2C_TAD_EVENT_QUAD0_READ = 129,
+ CVMX_L2C_TAD_EVENT_QUAD0_BANK = 130,
+ CVMX_L2C_TAD_EVENT_QUAD0_WDAT = 131,
+ CVMX_L2C_TAD_EVENT_QUAD1_INDEX = 144,
+ CVMX_L2C_TAD_EVENT_QUAD1_READ = 145,
+ CVMX_L2C_TAD_EVENT_QUAD1_BANK = 146,
+ CVMX_L2C_TAD_EVENT_QUAD1_WDAT = 147,
+ CVMX_L2C_TAD_EVENT_QUAD2_INDEX = 160,
+ CVMX_L2C_TAD_EVENT_QUAD2_READ = 161,
+ CVMX_L2C_TAD_EVENT_QUAD2_BANK = 162,
+ CVMX_L2C_TAD_EVENT_QUAD2_WDAT = 163,
+ CVMX_L2C_TAD_EVENT_QUAD3_INDEX = 176,
+ CVMX_L2C_TAD_EVENT_QUAD3_READ = 177,
+ CVMX_L2C_TAD_EVENT_QUAD3_BANK = 178,
+ CVMX_L2C_TAD_EVENT_QUAD3_WDAT = 179,
CVMX_L2C_TAD_EVENT_MAX
};
@@ -159,10 +159,10 @@ enum cvmx_l2c_tad_event {
* Configure one of the four L2 Cache performance counters to capture event
* occurrences.
*
- * @counter: The counter to configure. Range 0..3.
- * @event: The type of L2 Cache event occurrence to count.
+ * @counter: The counter to configure. Range 0..3.
+ * @event: The type of L2 Cache event occurrence to count.
* @clear_on_read: When asserted, any read of the performance counter
- * clears the counter.
+ * clears the counter.
*
* @note The routine does not clear the counter.
*/
@@ -184,8 +184,8 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter);
* @core: The core processor of interest.
*
* Returns The mask specifying the partitioning. 0 bits in mask indicates
- * the cache 'ways' that a core can evict from.
- * -1 on error
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
*/
int cvmx_l2c_get_core_way_partition(uint32_t core);
@@ -194,16 +194,16 @@ int cvmx_l2c_get_core_way_partition(uint32_t core);
*
* @core: The core that the partitioning applies to.
* @mask: The partitioning of the ways expressed as a binary
- * mask. A 0 bit allows the core to evict cache lines from
- * a way, while a 1 bit blocks the core from evicting any
- * lines from that way. There must be at least one allowed
- * way (0 bit) in the mask.
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
- * those ways will never have any cache lines evicted from them.
- * All cores and the hardware blocks are free to read from all
- * ways regardless of the partitioning.
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
@@ -211,8 +211,8 @@ int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask);
* Return the L2 Cache way partitioning for the hw blocks.
*
* Returns The mask specifying the reserved way. 0 bits in mask indicates
- * the cache 'ways' that a core can evict from.
- * -1 on error
+ * the cache 'ways' that a core can evict from.
+ * -1 on error
*/
int cvmx_l2c_get_hw_way_partition(void);
@@ -220,16 +220,16 @@ int cvmx_l2c_get_hw_way_partition(void);
* Partitions the L2 cache for the hardware blocks.
*
* @mask: The partitioning of the ways expressed as a binary
- * mask. A 0 bit allows the core to evict cache lines from
- * a way, while a 1 bit blocks the core from evicting any
- * lines from that way. There must be at least one allowed
- * way (0 bit) in the mask.
+ * mask. A 0 bit allows the core to evict cache lines from
+ * a way, while a 1 bit blocks the core from evicting any
+ * lines from that way. There must be at least one allowed
+ * way (0 bit) in the mask.
*
* @note If any ways are blocked for all cores and the HW blocks, then
- * those ways will never have any cache lines evicted from them.
- * All cores and the hardware blocks are free to read from all
- * ways regardless of the partitioning.
+ * those ways will never have any cache lines evicted from them.
+ * All cores and the hardware blocks are free to read from all
+ * ways regardless of the partitioning.
*/
int cvmx_l2c_set_hw_way_partition(uint32_t mask);
@@ -240,7 +240,7 @@ int cvmx_l2c_set_hw_way_partition(uint32_t mask);
* @addr: physical address of line to lock
*
* Returns 0 on success,
- * 1 if line not locked.
+ * 1 if line not locked.
*/
int cvmx_l2c_lock_line(uint64_t addr);
@@ -258,7 +258,7 @@ int cvmx_l2c_lock_line(uint64_t addr);
* @len: Length (in bytes) of region to lock
*
* Returns Number of requested lines that where not locked.
- * 0 on success (all locked)
+ * 0 on success (all locked)
*/
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
@@ -272,7 +272,7 @@ int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len);
* @address: Physical address to unlock
*
* Returns 0: line not unlocked
- * 1: line unlocked
+ * 1: line unlocked
*/
int cvmx_l2c_unlock_line(uint64_t address);
@@ -290,7 +290,7 @@ int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len);
* Read the L2 controller tag for a given location in L2
*
* @association:
- * Which association to read line from
+ * Which association to read line from
* @index: Which way to read from.
*
* Returns l2c tag structure for line requested.
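
A short sketch of the way-partitioning calls documented above (editorial illustration assuming an 8-way L2; in the mask a 0 bit lets the core evict from that way and a 1 bit blocks it):

#include <asm/octeon/cvmx-l2c.h>

static int example_partition_l2(void)
{
        /* Restrict core 0 to evicting from ways 0-3 by blocking ways 4-7. */
        if (cvmx_l2c_set_core_way_partition(0, 0xf0))
                return -1;

        /* Read the mask back; returns -1 on error. */
        return cvmx_l2c_get_core_way_partition(0);
}
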
diff --git a/arch/mips/include/asm/octeon/cvmx-mdio.h b/arch/mips/include/asm/octeon/cvmx-mdio.h
index 6f0cd182cec..9f6a4f32a83 100644
--- a/arch/mips/include/asm/octeon/cvmx-mdio.h
+++ b/arch/mips/include/asm/octeon/cvmx-mdio.h
@@ -246,21 +246,21 @@ typedef union {
} cvmx_mdio_phy_reg_mmd_address_data_t;
/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE 0
-#define MDIO_CLAUSE_22_READ 1
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
-#define MDIO_CLAUSE_45_ADDRESS 0
-#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ 3
+#define MDIO_CLAUSE_45_READ 3
/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD 1
-#define CVMX_MMD_DEVICE_WIS 2
-#define CVMX_MMD_DEVICE_PCS 3
-#define CVMX_MMD_DEVICE_PHY_XS 4
-#define CVMX_MMD_DEVICE_DTS_XS 5
-#define CVMX_MMD_DEVICE_TC 6
+#define CVMX_MMD_DEVICE_PMA_PMD 1
+#define CVMX_MMD_DEVICE_WIS 2
+#define CVMX_MMD_DEVICE_PCS 3
+#define CVMX_MMD_DEVICE_PHY_XS 4
+#define CVMX_MMD_DEVICE_DTS_XS 5
+#define CVMX_MMD_DEVICE_TC 6
#define CVMX_MMD_DEVICE_CL22_EXT 29
#define CVMX_MMD_DEVICE_VENDOR_1 30
#define CVMX_MMD_DEVICE_VENDOR_2 31
@@ -291,7 +291,7 @@ static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
* registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to read
*
@@ -328,13 +328,13 @@ static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
* registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
- * 0 on success
+ * 0 on success
*/
static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
{
@@ -370,7 +370,7 @@ static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
* read PHY registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @device: MDIO Manageable Device (MMD) id
* @location: Register location to read
@@ -407,7 +407,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
} while (smi_wr.s.pending && --timeout);
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(address)\n",
+ "device %2d register %2d TIME OUT(address)\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -425,7 +425,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
if (timeout <= 0) {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(data)\n",
+ "device %2d register %2d TIME OUT(data)\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -434,7 +434,7 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
return smi_rd.s.dat;
else {
cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d INVALID READ\n",
+ "device %2d register %2d INVALID READ\n",
bus_id, phy_id, device, location);
return -1;
}
@@ -445,14 +445,14 @@ static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
* write PHY registers controlling auto negotiation.
*
* @bus_id: MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
- * support multiple busses.
+ * support multiple busses.
* @phy_id: The MII phy id
* @device: MDIO Manageable Device (MMD) id
* @location: Register location to write
* @val: Value to write
*
* Returns -1 on error
- * 0 on success
+ * 0 on success
*/
static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
int location, int val)
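A short usage sketch of the clause-22 and clause-45 helpers documented above. The register numbers are placeholders, and the assumption that cvmx_mdio_read() also reports failure as -1 mirrors the clause-45 reader shown in this hunk.

#include <asm/octeon/cvmx-mdio.h>

/* Illustrative only: read one clause-22 and one clause-45 PHY register. */
static int example_mdio_poll(int bus_id, int phy_id)
{
	int bmsr, pma_stat;

	bmsr = cvmx_mdio_read(bus_id, phy_id, 1);	/* MII BMSR */
	if (bmsr < 0)
		return -1;

	/* Clause 45: PMA/PMD device, register 1 (placeholder choice). */
	pma_stat = cvmx_mdio_45_read(bus_id, phy_id,
				     CVMX_MMD_DEVICE_PMA_PMD, 1);
	if (pma_stat < 0)
		return -1;

	return 0;
}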
diff --git a/arch/mips/include/asm/octeon/cvmx-pip-defs.h b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
index 05a917d6ebe..e975c7d2e48 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip-defs.h
@@ -44,7 +44,7 @@ enum cvmx_pip_port_parse_mode {
*/
CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
/*
- * Input packets are assumed to be IP. Results from non IP
+ * Input packets are assumed to be IP. Results from non IP
* packets is undefined. Pointers reference the beginning of
* the IP header.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-pip.h b/arch/mips/include/asm/octeon/cvmx-pip.h
index 9e739a64085..a76fe5a57a9 100644
--- a/arch/mips/include/asm/octeon/cvmx-pip.h
+++ b/arch/mips/include/asm/octeon/cvmx-pip.h
@@ -37,8 +37,8 @@
#include <asm/octeon/cvmx-fpa.h>
#include <asm/octeon/cvmx-pip-defs.h>
-#define CVMX_PIP_NUM_INPUT_PORTS 40
-#define CVMX_PIP_NUM_WATCHERS 4
+#define CVMX_PIP_NUM_INPUT_PORTS 40
+#define CVMX_PIP_NUM_WATCHERS 4
/*
* Encodes the different error and exception codes
@@ -92,10 +92,10 @@ typedef enum {
/**
* NOTES
- * late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as bad FCS
- * or carrier extend error which is CVMX_PIP_EXTEND_ERR
+ * late collision (data received before collision)
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as bad FCS
+ * or carrier extend error which is CVMX_PIP_EXTEND_ERR
*/
typedef enum {
/* No error */
@@ -122,11 +122,11 @@ typedef enum {
* error)
*/
CVMX_PIP_UNDER_FCS_ERR = 6ull,
- /* RGM 7 = FCS error */
+ /* RGM 7 = FCS error */
CVMX_PIP_GMX_FCS_ERR = 7ull,
/* RGM+SPI 8 = min frame error (pkt len < min frame len) */
CVMX_PIP_UNDER_ERR = 8ull,
- /* RGM 9 = Frame carrier extend error */
+ /* RGM 9 = Frame carrier extend error */
CVMX_PIP_EXTEND_ERR = 9ull,
/*
* RGM 10 = length mismatch (len did not match len in L2
@@ -161,10 +161,10 @@ typedef enum {
CVMX_PIP_PIP_L2_MAL_HDR = 18L
/*
* NOTES: xx = late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as
- * bad FCS or carrier extend error which is
- * CVMX_PIP_EXTEND_ERR
+ * late collisions cannot be detected by the receiver
+ * they would appear as JAM bits which would appear as
+ * bad FCS or carrier extend error which is
+ * CVMX_PIP_EXTEND_ERR
*/
} cvmx_pip_rcv_err_t;
@@ -192,13 +192,13 @@ typedef struct {
/* Number of packets processed by PIP */
uint32_t packets;
/*
- * Number of indentified L2 multicast packets. Does not
+ * Number of identified L2 multicast packets. Does not
* include broadcast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
uint32_t multicast_packets;
/*
- * Number of indentified L2 broadcast packets. Does not
+ * Number of identified L2 broadcast packets. Does not
* include multicast packets. Only includes packets whose
* parse mode is SKIP_TO_L2
*/
@@ -287,7 +287,7 @@ typedef union {
* @port_num: Port number to configure
* @port_cfg: Port hardware configuration
* @port_tag_cfg:
- * Port POW tagging configuration
+ * Port POW tagging configuration
*/
static inline void cvmx_pip_config_port(uint64_t port_num,
union cvmx_pip_prt_cfgx port_cfg,
@@ -298,20 +298,20 @@ static inline void cvmx_pip_config_port(uint64_t port_num,
}
#if 0
/**
- * @deprecated This function is a thin wrapper around the Pass1 version
- * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
- * setting the group that is incompatible with this function,
- * the preferred upgrade path is to use the CSR directly.
+ * @deprecated This function is a thin wrapper around the Pass1 version
+ * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
+ * setting the group that is incompatible with this function,
+ * the preferred upgrade path is to use the CSR directly.
*
* Configure the global QoS packet watchers. Each watcher is
* capable of matching a field in a packet to determine the
* QoS queue for scheduling.
*
- * @watcher: Watcher number to configure (0 - 3).
+ * @watcher: Watcher number to configure (0 - 3).
* @match_type: Watcher match type
* @match_value:
- * Value the watcher will match against
- * @qos: QoS queue for packets matching this watcher
+ * Value the watcher will match against
+ * @qos: QoS queue for packets matching this watcher
*/
static inline void cvmx_pip_config_watcher(uint64_t watcher,
cvmx_pip_qos_watch_types match_type,
@@ -331,7 +331,7 @@ static inline void cvmx_pip_config_watcher(uint64_t watcher,
* Configure the VLAN priority to QoS queue mapping.
*
* @vlan_priority:
- * VLAN priority (0-7)
+ * VLAN priority (0-7)
* @qos: QoS queue for packets with this VLAN priority
*/
static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
@@ -451,10 +451,10 @@ static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
*
* @interface: Interface to configure (0 or 1)
* @invert_result:
- * Invert the result of the CRC
+ * Invert the result of the CRC
* @reflect: Reflect
* @initialization_vector:
- * CRC initialization vector
+ * CRC initialization vector
*/
static inline void cvmx_pip_config_crc(uint64_t interface,
uint64_t invert_result, uint64_t reflect,
@@ -500,13 +500,13 @@ static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
*
* @mask_index: Which tag mask to modify (0..3)
* @offset: Offset into the bitmask to set bits at. Use the GCC macro
- * offsetof() to determine the offsets into packet headers.
- * For example, offsetof(ethhdr, protocol) returns the offset
- * of the ethernet protocol field. The bitmask selects which
- * bytes to include the the tag, with bit offset X selecting
- * byte at offset X from the beginning of the packet data.
+ * offsetof() to determine the offsets into packet headers.
+ * For example, offsetof(ethhdr, protocol) returns the offset
+ * of the ethernet protocol field. The bitmask selects which
+ * bytes to include in the tag, with bit offset X selecting
+ * byte at offset X from the beginning of the packet data.
* @len: Number of bytes to include. Usually this is the sizeof()
- * the field.
+ * the field.
*/
static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
uint64_t len)
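To make the offsetof() description above concrete, a sketch that includes the EtherType bytes in the tag; in current Linux the structure and field are struct ethhdr/h_proto, and the mask index used is arbitrary.

#include <linux/stddef.h>
#include <linux/if_ether.h>
#include <asm/octeon/cvmx-pip.h>

/* Illustrative only: make the EtherType field contribute to the tag. */
static void example_tag_on_ethertype(void)
{
	cvmx_pip_tag_mask_clear(0);
	cvmx_pip_tag_mask_set(0, offsetof(struct ethhdr, h_proto),
			      sizeof(((struct ethhdr *)0)->h_proto));
}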
diff --git a/arch/mips/include/asm/octeon/cvmx-pko.h b/arch/mips/include/asm/octeon/cvmx-pko.h
index c6daeedf1f8..f7d2a671884 100644
--- a/arch/mips/include/asm/octeon/cvmx-pko.h
+++ b/arch/mips/include/asm/octeon/cvmx-pko.h
@@ -69,16 +69,16 @@
#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
-#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
+#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
(OCTEON_IS_MODEL(OCTEON_CN58XX) || \
OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
-#define CVMX_PKO_NUM_OUTPUT_PORTS 40
+#define CVMX_PKO_NUM_OUTPUT_PORTS 40
/* use this for queues that are not used */
#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
-#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
-#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
+#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
+#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
#define CVMX_PKO_MAX_QUEUE_DEPTH 0
typedef enum {
@@ -269,13 +269,13 @@ extern void cvmx_pko_shutdown(void);
/**
* Configure a output port and the associated queues for use.
*
- * @port: Port to configure.
+ * @port: Port to configure.
* @base_queue: First queue number to associate with this port.
* @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 1-8. A value of 8 get 8 times the traffic
- * of a value of 1. There must be num_queues elements in the
- * array.
+ * @priority: Array of priority levels for each queue. Values are
+ * allowed to be 1-8. A value of 8 gets 8 times the traffic
+ * of a value of 1. There must be num_queues elements in the
+ * array.
*/
extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
uint64_t base_queue,
@@ -285,7 +285,7 @@ extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
/**
* Ring the packet output doorbell. This tells the packet
* output hardware that "len" command words have been added
- * to its pending list. This command includes the required
+ * to its pending list. This command includes the required
* CVMX_SYNCWS before the doorbell ring.
*
* @port: Port the packet is for
@@ -322,18 +322,18 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
* The use_locking parameter allows the caller to use three
* possible locking modes.
* - CVMX_PKO_LOCK_NONE
- * - PKO doesn't do any locking. It is the responsibility
- * of the application to make sure that no other core
- * is accessing the same queue at the same time.
+ * - PKO doesn't do any locking. It is the responsibility
+ * of the application to make sure that no other core
+ * is accessing the same queue at the same time.
* - CVMX_PKO_LOCK_ATOMIC_TAG
- * - PKO performs an atomic tagswitch to insure exclusive
- * access to the output queue. This will maintain
- * packet ordering on output.
+ * - PKO performs an atomic tagswitch to ensure exclusive
+ * access to the output queue. This will maintain
+ * packet ordering on output.
* - CVMX_PKO_LOCK_CMD_QUEUE
- * - PKO uses the common command queue locks to insure
- * exclusive access to the output queue. This is a
- * memory based ll/sc. This is the most portable
- * locking mechanism.
+ * - PKO uses the common command queue locks to ensure
+ * exclusive access to the output queue. This is a
+ * memory based ll/sc. This is the most portable
+ * locking mechanism.
*
* NOTE: If atomic locking is used, the POW entry CANNOT be
* descheduled, as it does not contain a valid WQE pointer.
@@ -341,7 +341,7 @@ static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
* @port: Port to send it on
* @queue: Queue to use
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*/
static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
@@ -351,11 +351,11 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
/*
* Must do a full switch here to handle all cases. We
* use a fake WQE pointer, as the POW does not access
- * this memory. The WQE pointer and group are only
+ * this memory. The WQE pointer and group are only
* used if this work is descheduled, which is not
* supported by the
* cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
- * combination. Note that this is a special case in
+ * combination. Note that this is a special case in
* which these fake values can be used - this is not a
* general technique.
*/
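A hedged sketch of the two-phase send that the locking-mode discussion earlier in this header describes, using the command-queue lock (the most portable mode per that text). The command-word and buffer-pointer types named in the parameters are not shown in these hunks and are used only as placeholders.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-pko.h>

/* Illustrative only: prepare, then finish, a send on one port/queue. */
static void example_pko_send(uint64_t port, uint64_t queue,
			     cvmx_pko_command_word0_t cmd,	/* assumed type */
			     union cvmx_buf_ptr packet)		/* assumed type */
{
	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	/* ... build the command word and packet buffer here ... */

	if (cvmx_pko_send_packet_finish(port, queue, cmd, packet,
					CVMX_PKO_LOCK_CMD_QUEUE) !=
	    CVMX_PKO_SUCCESS)
		cvmx_dprintf("PKO send failed\n");
}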
@@ -377,10 +377,10 @@ static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
- * PKO HW command word
+ * PKO HW command word
* @packet: Packet to send
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*
* Returns CVMX_PKO_SUCCESS on success, or error code on
* failure of output
@@ -418,12 +418,12 @@ static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
* @port: Port to send it on
* @queue: Queue to use
* @pko_command:
- * PKO HW command word
+ * PKO HW command word
* @packet: Packet to send
* @addr: Physical address of a work queue entry or physical address
- * to zero on complete.
+ * to zero on complete.
* @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
+ * CVMX_PKO_LOCK_CMD_QUEUE
*
* Returns CVMX_PKO_SUCCESS on success, or error code on
* failure of output
@@ -588,7 +588,7 @@ static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
* @port: Port to rate limit
* @packets_s: Maximum packet/sec
* @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
@@ -601,7 +601,7 @@ extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
* @port: Port to rate limit
* @bits_s: PKO rate limit in bits/sec
* @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
+ * limiting cuts in.
*
* Returns Zero on success, negative on failure
*/
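A brief sketch of the two rate-limiting entry points documented above; the numbers are placeholders.

#include <asm/octeon/cvmx-pko.h>

/* Illustrative only: cap a port by packet rate, or instead by bit rate. */
static int example_rate_limit(int port)
{
	/* At most 10000 packets/s, allowing bursts of up to 32 packets. */
	if (cvmx_pko_rate_limit_packets(port, 10000, 32) < 0)
		return -1;

	/* Or cap the same port at roughly 100 Mbit/s. */
	return cvmx_pko_rate_limit_bits(port, 100000000, 65536);
}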
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 92742b241a5..4b4d0ecfd9e 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -70,7 +70,7 @@ enum cvmx_pow_tag_type {
* The work queue entry from the order - NEVER tag switch from
* NULL to NULL
*/
- CVMX_POW_TAG_TYPE_NULL = 2L,
+ CVMX_POW_TAG_TYPE_NULL = 2L,
/* A tag switch to NULL, and there is no space reserved in POW
* - NEVER tag switch to NULL_NULL
* - NEVER tag switch from NULL_NULL
@@ -90,7 +90,7 @@ typedef enum {
} cvmx_pow_wait_t;
/**
- * POW tag operations. These are used in the data stored to the POW.
+ * POW tag operations. These are used in the data stored to the POW.
*/
typedef enum {
/*
@@ -341,14 +341,14 @@ typedef union {
* lists. The two memory-input queue lists associated
* with each QOS level are:
*
- * - qosgrp = 0, qosgrp = 8: QOS0
- * - qosgrp = 1, qosgrp = 9: QOS1
- * - qosgrp = 2, qosgrp = 10: QOS2
- * - qosgrp = 3, qosgrp = 11: QOS3
- * - qosgrp = 4, qosgrp = 12: QOS4
- * - qosgrp = 5, qosgrp = 13: QOS5
- * - qosgrp = 6, qosgrp = 14: QOS6
- * - qosgrp = 7, qosgrp = 15: QOS7
+ * - qosgrp = 0, qosgrp = 8: QOS0
+ * - qosgrp = 1, qosgrp = 9: QOS1
+ * - qosgrp = 2, qosgrp = 10: QOS2
+ * - qosgrp = 3, qosgrp = 11: QOS3
+ * - qosgrp = 4, qosgrp = 12: QOS4
+ * - qosgrp = 5, qosgrp = 13: QOS5
+ * - qosgrp = 6, qosgrp = 14: QOS6
+ * - qosgrp = 7, qosgrp = 15: QOS7
*/
uint64_t qosgrp:4;
/*
@@ -942,11 +942,11 @@ typedef union {
* operations.
*
* NOTE: The following is the behavior of the pending switch bit at the PP
- * for POW stores (i.e. when did<7:3> == 0xc)
- * - did<2:0> == 0 => pending switch bit is set
- * - did<2:0> == 1 => no affect on the pending switch bit
- * - did<2:0> == 3 => pending switch bit is cleared
- * - did<2:0> == 7 => no affect on the pending switch bit
+ * for POW stores (i.e. when did<7:3> == 0xc)
+ * - did<2:0> == 0 => pending switch bit is set
+ * - did<2:0> == 1 => no effect on the pending switch bit
+ * - did<2:0> == 3 => pending switch bit is cleared
+ * - did<2:0> == 7 => no effect on the pending switch bit
* - did<2:0> == others => must not be used
* - No other loads/stores have an effect on the pending switch bit
* - The switch bus from POW can clear the pending switch bit
@@ -1053,7 +1053,7 @@ static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
}
#ifndef CVMX_MF_CHORD
-#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
+#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
#endif
/**
@@ -1097,7 +1097,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
* so the caller must ensure that there is not a pending tag switch.
*
* @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
+ * If not set, returns immediately.
*
* Returns the WQE pointer from POW. Returns NULL if no work
* was available.
@@ -1131,7 +1131,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
* requesting the new work.
*
* @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
+ * If not set, returns immediately.
*
* Returns the WQE pointer from POW. Returns NULL if no work
* was available.
@@ -1148,7 +1148,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
}
/**
- * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
+ * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
* This function waits for any previous tag switch to complete before
* requesting the null_rd.
*
@@ -1183,11 +1183,11 @@ static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
* there is not a pending tag switch.
*
* @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
*
* @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
+ * timeout), 0 to cause response to return immediately
*/
static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
cvmx_pow_wait_t wait)
@@ -1212,11 +1212,11 @@ static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
* tag switch to complete before requesting the new work.
*
* @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
+ * to, which is either a valid WQE, or a response with the
+ * invalid bit set. Byte address, must be 8 byte aligned.
*
* @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
+ * timeout), 0 to cause response to return immediately
*/
static inline void cvmx_pow_work_request_async(int scr_addr,
cvmx_pow_wait_t wait)
@@ -1234,7 +1234,7 @@ static inline void cvmx_pow_work_request_async(int scr_addr,
* to wait for the response.
*
* @scr_addr: Scratch memory address to get the result from. Byte address,
- * must be 8 byte aligned.
+ * must be 8 byte aligned.
*
* Returns the WQE from the scratch register, or NULL if no
* work was available.
@@ -1260,7 +1260,7 @@ static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
* @wqe_ptr: pointer to a work queue entry returned by the POW
*
* Returns 0 if pointer is valid
- * 1 if invalid (no work was returned)
+ * 1 if invalid (no work was returned)
*/
static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
{
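Putting the asynchronous request/response pair and the validity check above together; the scratch offset is a placeholder (it must be 8-byte aligned) and the wait-flag constant name is an assumption, since the cvmx_pow_wait_t values are not shown in these hunks.

#include <asm/octeon/cvmx-pow.h>

/* Illustrative only: overlap a work request with unrelated processing. */
static cvmx_wqe_t *example_get_work_async(void)
{
	const int scr = 0;		/* placeholder scratch byte offset */
	cvmx_wqe_t *work;

	cvmx_pow_work_request_async(scr, CVMX_POW_WAIT);	/* assumed name */

	/* ... do other processing while the POW prepares a response ... */

	work = cvmx_pow_work_response_async(scr);
	if (!work || cvmx_pow_work_invalid(work))
		return NULL;		/* no work was available */

	return work;
}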
@@ -1314,7 +1314,7 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1361,7 +1361,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1390,7 +1390,7 @@ static inline void cvmx_pow_tag_sw(uint32_t tag,
* previous tag switch has completed.
*
* @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
+ * updated to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
* @group: group value for the work queue entry.
@@ -1429,7 +1429,7 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
/*
* Note that WQE in DRAM is not updated here, as the POW does
* not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
+ * manual for complete details. It is the application's
* responsibility to keep track of the current tag value if
* that is important.
*/
@@ -1468,10 +1468,10 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
* before requesting the tag switch.
*
* @wqp: pointer to work queue entry to submit. This entry is updated
- * to match the other parameters
+ * to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
- * @group: group value for the work queue entry.
+ * @group: group value for the work queue entry.
*/
static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
enum cvmx_pow_tag_type tag_type,
@@ -1560,7 +1560,7 @@ static inline void cvmx_pow_tag_sw_null(void)
* unrelated to the tag that the core currently holds.
*
* @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
+ * updated to match the other parameters
* @tag: tag value to be assigned to work queue entry
* @tag_type: type of tag
* @qos: Input queue to add to.
@@ -1592,7 +1592,7 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
ptr.sio.offset = cvmx_ptr_to_phys(wqp);
/*
- * SYNC write to memory before the work submit. This is
+ * SYNC write to memory before the work submit. This is
* necessary as POW may read values from DRAM at this time.
*/
CVMX_SYNCWS;
@@ -1604,11 +1604,11 @@ static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
* indicates which groups each core will accept work from. There are
* 16 groups.
*
- * @core_num: core to apply mask to
+ * @core_num: core to apply mask to
* @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
- * representing groups 0-15.
- * Each 1 bit in the mask enables the core to accept work from
- * the corresponding group.
+ * representing groups 0-15.
+ * Each 1 bit in the mask enables the core to accept work from
+ * the corresponding group.
*/
static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
{
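A small sketch of the mask format documented above: each set bit lets the core accept work from the corresponding group.

#include <asm/octeon/cvmx-pow.h>

/* Illustrative only: let one core accept work from groups 0, 2 and 5. */
static void example_accept_groups(uint64_t core_num)
{
	uint64_t mask = (1ull << 0) | (1ull << 2) | (1ull << 5);

	cvmx_pow_set_group_mask(core_num, mask);
}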
@@ -1623,14 +1623,14 @@ static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
* This function sets POW static priorities for a core. Each input queue has
* an associated priority value.
*
- * @core_num: core to apply priorities to
- * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
- * Highest priority is 0 and lowest is 7. A priority value
- * of 0xF instructs POW to skip the Input Queue when
- * scheduling to this specific core.
- * NOTE: priorities should not have gaps in values, meaning
- * {0,1,1,1,1,1,1,1} is a valid configuration while
- * {0,2,2,2,2,2,2,2} is not.
+ * @core_num: core to apply priorities to
+ * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
+ * Highest priority is 0 and lowest is 7. A priority value
+ * of 0xF instructs POW to skip the Input Queue when
+ * scheduling to this specific core.
+ * NOTE: priorities should not have gaps in values, meaning
+ * {0,1,1,1,1,1,1,1} is a valid configuration while
+ * {0,2,2,2,2,2,2,2} is not.
*/
static inline void cvmx_pow_set_priority(uint64_t core_num,
const uint8_t priority[])
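Following the no-gaps rule spelled out above, a sketch using the documented valid vector; a value of 0xF would instead tell the POW to skip that queue for this core.

#include <asm/octeon/cvmx-pow.h>

/* Illustrative only: queue 0 highest priority, queues 1-7 one step lower. */
static void example_set_priorities(uint64_t core_num)
{
	const uint8_t prio[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };

	cvmx_pow_set_priority(core_num, prio);
}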
@@ -1708,8 +1708,8 @@ static inline void cvmx_pow_set_priority(uint64_t core_num,
* @tag_type: New tag type
* @group: New group value
* @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
*/
static inline void cvmx_pow_tag_sw_desched_nocheck(
uint32_t tag,
@@ -1794,8 +1794,8 @@ static inline void cvmx_pow_tag_sw_desched_nocheck(
* @tag_type: New tag type
* @group: New group value
* @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
+ * - 1 : don't schedule this work
+ * - 0 : allow this work to be scheduled.
*/
static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
enum cvmx_pow_tag_type tag_type,
@@ -1819,8 +1819,8 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
* Deschedules the current work queue entry.
*
* @no_sched: no schedule flag value to be set on the work queue
- * entry. If this is set the entry will not be
- * rescheduled.
+ * entry. If this is set the entry will not be
+ * rescheduled.
*/
static inline void cvmx_pow_desched(uint64_t no_sched)
{
@@ -1863,7 +1863,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
*****************************************************/
/*
- * Number of bits of the tag used by software. The SW bits are always
+ * Number of bits of the tag used by software. The SW bits are always
* a contiguous block of the high starting at bit 31. The hardware
* bits are always the low bits. By default, the top 8 bits of the
* tag are reserved for software, and the low 24 are set by the IPD
@@ -1890,7 +1890,7 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
* are defined here.
*/
/* Mask for the value portion of the tag */
-#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
+#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
#define CVMX_TAG_SUBGROUP_SHIFT 16
#define CVMX_TAG_SUBGROUP_PKO 0x1
@@ -1905,12 +1905,12 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
* This function creates a 32 bit tag value from the two values provided.
*
* @sw_bits: The upper bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * hw_bits parameter.
+ * to this value. The remainder of bits are set by the
+ * hw_bits parameter.
*
* @hw_bits: The lower bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * sw_bits parameter.
+ * to this value. The remainder of bits are set by the
+ * sw_bits parameter.
*
* Returns 32 bit value of the combined hw and sw bits.
*/
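A standalone rendering of the split described above, assuming the default layout of 8 software bits above 24 hardware bits; the helper name is ours, not the header's.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: compose a 32 bit tag from SW (top 8) and HW (low 24)
 * bits, mirroring the default split documented earlier in this header. */
static uint32_t example_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
{
	return (uint32_t)(((sw_bits & 0xffull) << 24) |
			  (hw_bits & 0xffffffull));
}

int main(void)
{
	printf("0x%08x\n", example_tag_compose(0x12, 0x345678));
	return 0;
}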
@@ -1957,7 +1957,7 @@ static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
*
* @buffer: Buffer to store capture into
* @buffer_size:
- * The size of the supplied buffer
+ * The size of the supplied buffer
*
* Returns Zero on success, negative on failure
*/
@@ -1968,7 +1968,7 @@ extern int cvmx_pow_capture(void *buffer, int buffer_size);
*
* @buffer: POW capture from cvmx_pow_capture()
* @buffer_size:
- * Size of the buffer
+ * Size of the buffer
*/
extern void cvmx_pow_display(void *buffer, int buffer_size);
diff --git a/arch/mips/include/asm/octeon/cvmx-scratch.h b/arch/mips/include/asm/octeon/cvmx-scratch.h
index 96b70cfd624..8d21cc5e4e4 100644
--- a/arch/mips/include/asm/octeon/cvmx-scratch.h
+++ b/arch/mips/include/asm/octeon/cvmx-scratch.h
@@ -39,7 +39,7 @@
* Note: This define must be a long, not a long long in order to
* compile without warnings for both 32bit and 64bit.
*/
-#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
+#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
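A tiny standalone check of the note above: written as a plain long, the same literal sign-extends to the 0xffffffffffff8000 window on a 64-bit build and still compiles cleanly on 32-bit.

#include <stdio.h>

int main(void)
{
	long base = -32768l;	/* same literal as CVMX_SCRATCH_BASE */

	/* Prints 0xffffffffffff8000 on a 64-bit build, 0xffff8000 on 32-bit. */
	printf("%#lx\n", (unsigned long)base);
	return 0;
}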
/**
* Reads an 8 bit value from the processor local scratchpad memory.
diff --git a/arch/mips/include/asm/octeon/cvmx-spi.h b/arch/mips/include/asm/octeon/cvmx-spi.h
index 3bf53b537bc..d5038cc4b47 100644
--- a/arch/mips/include/asm/octeon/cvmx-spi.h
+++ b/arch/mips/include/asm/octeon/cvmx-spi.h
@@ -84,11 +84,11 @@ static inline int cvmx_spi_is_spi_interface(int interface)
* Initialize and start the SPI interface.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* @num_ports: Number of SPI ports to configure
*
@@ -102,11 +102,11 @@ extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
* with its corresponding system.
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
* Returns Zero on success, negative on failure.
*/
@@ -154,7 +154,7 @@ static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
/**
* Get current SPI4 initialization callbacks
*
- * @callbacks: Pointer to the callbacks structure.to fill
+ * @callbacks: Pointer to the callbacks structure to fill
*
* Returns Pointer to cvmx_spi_callbacks_t structure.
*/
@@ -171,11 +171,11 @@ extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
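A hedged sketch of the get/set pair documented above. Both the getter's name and the member used for the reset hook are assumptions, since neither the getter's declaration nor the cvmx_spi_callbacks_t layout appears in this hunk.

#include <asm/octeon/cvmx-spi.h>

/* Illustrative only: wrap the stock reset callback with our own hook. */
static int my_spi_reset(int interface, cvmx_spi_mode_t mode)
{
	/* ... board-specific work could go here ... */
	return cvmx_spi_reset_cb(interface, mode);
}

static void example_override_spi_reset(void)
{
	cvmx_spi_callbacks_t callbacks;

	cvmx_spi_get_callbacks(&callbacks);	/* assumed getter name */
	callbacks.reset_cb = my_spi_reset;	/* assumed member name */
	cvmx_spi_set_callbacks(&callbacks);
}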
* Callback to perform SPI4 reset
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
@@ -187,11 +187,11 @@ extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
* detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @num_ports: Number of ports to configure on SPI
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -204,11 +204,11 @@ extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform clock detection
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for clock synchronization in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -221,11 +221,11 @@ extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform link training
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for link to be trained (in seconds)
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -238,11 +238,11 @@ extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
* Callback to perform calendar data synchronization
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
* @timeout: Timeout to wait for calendar data in seconds
*
* Returns Zero on success, non-zero error code on failure (will cause
@@ -255,11 +255,11 @@ extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
* Callback to handle interface up
*
* @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
+ * use as a SPI interface.
* @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
+ * can operate as a full duplex (both Tx and Rx data paths
+ * active) or as a halfplex (either the Tx data path is
+ * active or the Rx data path is active, but not both).
*
* Returns Zero on success, non-zero error code on failure (will cause
* SPI initialization to abort)
diff --git a/arch/mips/include/asm/octeon/cvmx-spinlock.h b/arch/mips/include/asm/octeon/cvmx-spinlock.h
index a672abb1bc4..4f09cff8b8c 100644
--- a/arch/mips/include/asm/octeon/cvmx-spinlock.h
+++ b/arch/mips/include/asm/octeon/cvmx-spinlock.h
@@ -26,7 +26,7 @@
***********************license end**************************************/
/**
- * Implementation of spinlocks for Octeon CVMX. Although similar in
+ * Implementation of spinlocks for Octeon CVMX. Although similar in
* function to Linux kernel spinlocks, they are not compatible.
* Octeon CVMX spinlocks are only used to synchronize with the boot
* monitor and other non-Linux programs running in the system.
@@ -50,8 +50,8 @@ typedef struct {
} cvmx_spinlock_t;
/* note - macros not expanded in inline ASM, so values hardcoded */
-#define CVMX_SPINLOCK_UNLOCKED_VAL 0
-#define CVMX_SPINLOCK_LOCKED_VAL 1
+#define CVMX_SPINLOCK_UNLOCKED_VAL 0
+#define CVMX_SPINLOCK_LOCKED_VAL 1
#define CVMX_SPINLOCK_UNLOCKED_INITIALIZER {CVMX_SPINLOCK_UNLOCKED_VAL}
@@ -96,7 +96,7 @@ static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
* @lock: pointer to lock structure
*
* Returns 0: lock successfully taken
- * 1: lock not taken, held by someone else
+ * 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
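For readers less used to the ll/sc sequence that follows, an equivalent of the trylock semantics (0 = taken, 1 = already held) expressed with GCC's builtin atomics; this is purely illustrative and is not how the header implements it.

#include <stdbool.h>
#include <stdint.h>

typedef struct { uint32_t value; } example_spinlock_t;

/* Illustrative only: same return convention as cvmx_spinlock_trylock(). */
static inline unsigned int example_trylock(example_spinlock_t *lock)
{
	uint32_t expected = 0;	/* CVMX_SPINLOCK_UNLOCKED_VAL */

	/* 0 if we atomically moved 0 -> 1, 1 if the lock was already held. */
	return __atomic_compare_exchange_n(&lock->value, &expected, 1, false,
					   __ATOMIC_ACQUIRE,
					   __ATOMIC_RELAXED) ? 0 : 1;
}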
@@ -104,16 +104,16 @@ static inline unsigned int cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
- __asm__ __volatile__(".set noreorder \n"
+ __asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
- " bnez %[tmp], 2f \n"
- " li %[tmp], 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " li %[tmp], 0 \n"
- "2: \n"
- ".set reorder \n" :
+ " bnez %[tmp], 2f \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
@@ -129,14 +129,14 @@ static inline void cvmx_spinlock_lock(cvmx_spinlock_t *lock)
{
unsigned int tmp;
- __asm__ __volatile__(".set noreorder \n"
+ __asm__ __volatile__(".set noreorder \n"
"1: ll %[tmp], %[val] \n"
- " bnez %[tmp], 1b \n"
- " li %[tmp], 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set reorder \n" :
+ " bnez %[tmp], 1b \n"
+ " li %[tmp], 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set reorder \n" :
[val] "+m"(lock->value), [tmp] "=&r"(tmp)
: : "memory");
@@ -163,17 +163,17 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
unsigned int tmp;
unsigned int sav;
- __asm__ __volatile__(".set noreorder \n"
- ".set noat \n"
+ __asm__ __volatile__(".set noreorder \n"
+ ".set noat \n"
"1: ll %[tmp], %[val] \n"
- " bbit1 %[tmp], 31, 1b \n"
- " li $at, 1 \n"
- " ins %[tmp], $at, 31, 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " nop \n"
- ".set at \n"
- ".set reorder \n" :
+ " bbit1 %[tmp], 31, 1b \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp), [sav] "=&r"(sav)
: : "memory");
@@ -187,7 +187,7 @@ static inline void cvmx_spinlock_bit_lock(uint32_t *word)
*
* @word: word to lock bit 31 of
* Returns 0: lock successfully taken
- * 1: lock not taken, held by someone else
+ * 1: lock not taken, held by someone else
* These return values match the Linux semantics.
*/
static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
@@ -198,15 +198,15 @@ static inline unsigned int cvmx_spinlock_bit_trylock(uint32_t *word)
".set noat\n"
"1: ll %[tmp], %[val] \n"
/* if lock held, fail immediately */
- " bbit1 %[tmp], 31, 2f \n"
- " li $at, 1 \n"
- " ins %[tmp], $at, 31, 1 \n"
- " sc %[tmp], %[val] \n"
- " beqz %[tmp], 1b \n"
- " li %[tmp], 0 \n"
- "2: \n"
- ".set at \n"
- ".set reorder \n" :
+ " bbit1 %[tmp], 31, 2f \n"
+ " li $at, 1 \n"
+ " ins %[tmp], $at, 31, 1 \n"
+ " sc %[tmp], %[val] \n"
+ " beqz %[tmp], 1b \n"
+ " li %[tmp], 0 \n"
+ "2: \n"
+ ".set at \n"
+ ".set reorder \n" :
[val] "+m"(*word), [tmp] "=&r"(tmp)
: : "memory");
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
index 61dd5741afe..2131197422e 100644
--- a/arch/mips/include/asm/octeon/cvmx-sysinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -85,7 +85,7 @@ struct cvmx_sysinfo {
char board_serial_number[OCTEON_SERIAL_LEN];
/*
* Several boards support compact flash on the Octeon boot
- * bus. The CF memory spaces may be mapped to different
+ * bus. The CF memory spaces may be mapped to different
* addresses on different boards. These values will be 0 if
* CF is not present. Note that these addresses are physical
* addresses, and it is up to the application to use the
@@ -123,25 +123,25 @@ extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
/**
* This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
+ * Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr: Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
+ * (bootmem descriptor) @board_type: Octeon board
+ * type enumeration
*
* @board_rev_major:
- * Board major revision
+ * Board major revision
* @board_rev_minor:
- * Board minor revision
+ * Board minor revision
* @cpu_clock_hz:
- * CPU clock freqency in hertz
+ * CPU clock frequency in hertz
*
* Returns 0: Failure
- * 1: success
+ * 1: success
*/
extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
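A hedged usage sketch of the initializer declared above; the trailing parameters and their types follow the @-descriptions in the comment but are otherwise assumptions, and the values a caller passes are board specific.

#include <asm/octeon/cvmx-sysinfo.h>

/* Illustrative only: populate the minimal sysinfo fields and check the
 * documented 0 = failure / 1 = success return. Parameter types after
 * board_type are assumed from the comment above. */
static int example_sysinfo_init(void *bootmem_desc, uint16_t board_type,
				uint8_t rev_major, uint8_t rev_minor,
				uint32_t cpu_hz)
{
	if (!cvmx_sysinfo_minimal_initialize(bootmem_desc, board_type,
					     rev_major, rev_minor, cpu_hz))
		return -1;

	return 0;
}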
diff --git a/arch/mips/include/asm/octeon/cvmx-wqe.h b/arch/mips/include/asm/octeon/cvmx-wqe.h
index df762389e27..aa0d3d0de75 100644
--- a/arch/mips/include/asm/octeon/cvmx-wqe.h
+++ b/arch/mips/include/asm/octeon/cvmx-wqe.h
@@ -101,23 +101,23 @@ typedef union {
* - 1 = Malformed L4
* - 2 = L4 Checksum Error: the L4 checksum value is
* - 3 = UDP Length Error: The UDP length field would
- * make the UDP data longer than what remains in
- * the IP packet (as defined by the IP header
- * length field).
+ * make the UDP data longer than what remains in
+ * the IP packet (as defined by the IP header
+ * length field).
* - 4 = Bad L4 Port: either the source or destination
- * TCP/UDP port is 0.
+ * TCP/UDP port is 0.
* - 8 = TCP FIN Only: the packet is TCP and only the
- * FIN flag set.
+ * FIN flag set.
* - 9 = TCP No Flags: the packet is TCP and no flags
- * are set.
+ * are set.
* - 10 = TCP FIN RST: the packet is TCP and both FIN
- * and RST are set.
+ * and RST are set.
* - 11 = TCP SYN URG: the packet is TCP and both SYN
- * and URG are set.
+ * and URG are set.
* - 12 = TCP SYN RST: the packet is TCP and both SYN
- * and RST are set.
+ * and RST are set.
* - 13 = TCP SYN FIN: the packet is TCP and both SYN
- * and FIN are set.
+ * and FIN are set.
*/
uint64_t L4_error:1;
/* set if the packet is a fragment */
@@ -127,16 +127,16 @@ typedef union {
* failure indicated in err_code below, decode:
*
* - 1 = Not IP: the IP version field is neither 4 nor
- * 6.
+ * 6.
* - 2 = IPv4 Header Checksum Error: the IPv4 header
- * has a checksum violation.
+ * has a checksum violation.
* - 3 = IP Malformed Header: the packet is not long
- * enough to contain the IP header.
+ * enough to contain the IP header.
* - 4 = IP Malformed: the packet is not long enough
* to contain the bytes indicated by the IP
* header. Pad is allowed.
* - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
- * Hop Count field are zero.
+ * Hop Count field are zero.
* - 6 = IP Options
*/
uint64_t IP_exc:1;
@@ -243,46 +243,46 @@ typedef union {
* decode:
*
* - 1 = partial error: a packet was partially
- * received, but internal buffering / bandwidth
- * was not adequate to receive the entire
- * packet.
+ * received, but internal buffering / bandwidth
+ * was not adequate to receive the entire
+ * packet.
* - 2 = jabber error: the RGMII packet was too large
- * and is truncated.
+ * and is truncated.
* - 3 = overrun error: the RGMII packet is longer
- * than allowed and had an FCS error.
+ * than allowed and had an FCS error.
* - 4 = oversize error: the RGMII packet is longer
- * than allowed.
+ * than allowed.
* - 5 = alignment error: the RGMII packet is not an
- * integer number of bytes
- * and had an FCS error (100M and 10M only).
+ * integer number of bytes
+ * and had an FCS error (100M and 10M only).
* - 6 = fragment error: the RGMII packet is shorter
- * than allowed and had an FCS error.
+ * than allowed and had an FCS error.
* - 7 = GMX FCS error: the RGMII packet had an FCS
- * error.
+ * error.
* - 8 = undersize error: the RGMII packet is shorter
- * than allowed.
+ * than allowed.
* - 9 = extend error: the RGMII packet had an extend
- * error.
+ * error.
* - 10 = length mismatch error: the RGMII packet had
- * a length that did not match the length field
- * in the L2 HDR.
+ * a length that did not match the length field
+ * in the L2 HDR.
* - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
- * packet had one or more data reception errors
- * (RXERR) or the SPI4 packet had one or more
- * DIP4 errors.
+ * packet had one or more data reception errors
+ * (RXERR) or the SPI4 packet had one or more
+ * DIP4 errors.
* - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
- * packet was not large enough to cover the
- * skipped bytes or the SPI4 packet was
- * terminated with an About EOPS.
+ * packet was not large enough to cover the
+ * skipped bytes or the SPI4 packet was
+ * terminated with an Abort EOPS.
* - 13 = RGMII nibble error/SPI4 Port NXA Error: the
- * RGMII packet had a studder error (data not
- * repeated - 10/100M only) or the SPI4 packet
- * was sent to an NXA.
+ * RGMII packet had a studder error (data not
+ * repeated - 10/100M only) or the SPI4 packet
+ * was sent to an NXA.
* - 16 = FCS error: a SPI4.2 packet had an FCS error.
* - 17 = Skip error: a packet was not large enough to
- * cover the skipped bytes.
+ * cover the skipped bytes.
* - 18 = L2 header malformed: the packet is not long
- * enough to contain the L2.
+ * enough to contain the L2.
*/
uint64_t rcv_error:1;
@@ -309,7 +309,7 @@ typedef struct {
/*****************************************************************
* WORD 0
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
@@ -323,14 +323,14 @@ typedef struct {
/**
* Next pointer used by hardware for list maintenance.
* May be written/read by HW before the work queue
- * entry is scheduled to a PP
+ * entry is scheduled to a PP
* (Only 36 bits used in Octeon 1)
*/
uint64_t next_ptr:40;
/*****************************************************************
* WORD 1
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
+ * HW WRITE: the following 64 bits are filled by HW when a packet arrives
*/
/**
@@ -362,8 +362,8 @@ typedef struct {
/**
* WORD 2 HW WRITE: the following 64-bits are filled in by
- * hardware when a packet arrives This indicates a variety of
- * status and error conditions.
+ * hardware when a packet arrives. This indicates a variety of
+ * status and error conditions.
*/
cvmx_pip_wqe_word2 word2;
@@ -373,15 +373,15 @@ typedef struct {
union cvmx_buf_ptr packet_ptr;
/**
- * HW WRITE: octeon will fill in a programmable amount from the
- * packet, up to (at most, but perhaps less) the amount
- * needed to fill the work queue entry to 128 bytes
+ * HW WRITE: octeon will fill in a programmable amount from the
+ * packet, up to (at most, but perhaps less) the amount
+ * needed to fill the work queue entry to 128 bytes
*
- * If the packet is recognized to be IP, the hardware starts
- * (except that the IPv4 header is padded for appropriate
- * alignment) writing here where the IP header starts. If the
- * packet is not recognized to be IP, the hardware starts
- * writing the beginning of the packet here.
+ * If the packet is recognized to be IP, the hardware starts
+ * (except that the IPv4 header is padded for appropriate
+ * alignment) writing here where the IP header starts. If the
+ * packet is not recognized to be IP, the hardware starts
+ * writing the beginning of the packet here.
*/
uint8_t packet_data[96];
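Tying the fields above together, a hedged sketch that pulls one work queue entry and looks at the bytes the hardware copied into it; the wait-flag constant name is an assumption.

#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-wqe.h>

/* Illustrative only: fetch one WQE and peek at its in-entry packet bytes. */
static void example_inspect_work(void)
{
	cvmx_wqe_t *work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);

	if (!work)
		return;		/* no work was available */

	cvmx_dprintf("first byte %02x of %zu copied into the WQE\n",
		     work->packet_data[0], sizeof(work->packet_data));
}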
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index db58beab6cb..f991e7701d3 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -76,14 +76,14 @@ enum cvmx_mips_space {
#endif
#if CVMX_ENABLE_DEBUG_PRINTS
-#define cvmx_dprintf printk
+#define cvmx_dprintf printk
#else
#define cvmx_dprintf(...) {}
#endif
-#define CVMX_MAX_CORES (16)
-#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
-#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
+#define CVMX_MAX_CORES (16)
+#define CVMX_CACHE_LINE_SIZE (128) /* In bytes */
+#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) /* In bytes */
#define CVMX_CACHE_LINE_ALIGNED __attribute__ ((aligned(CVMX_CACHE_LINE_SIZE)))
#define CAST64(v) ((long long)(long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
@@ -133,8 +133,8 @@ static inline uint64_t cvmx_build_io_address(uint64_t major_did,
*
* Example: cvmx_build_bits(39,24,value)
* <pre>
- * 6 5 4 3 3 2 1
- * 3 5 7 9 1 3 5 7 0
+ * 6 5 4 3 3 2 1
+ * 3 5 7 9 1 3 5 7 0
* +-------+-------+-------+-------+-------+-------+-------+------+
* 000000000000000000000000___________value000000000000000000000000
* </pre>
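A plausible standalone rendering of the helper the diagram above documents: the value is placed into bit positions [high:low] and everything else is zero. The body is inferred from the example, not copied from the header.

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: place 'value' into bits [high_bit:low_bit] of a qword. */
static uint64_t example_build_bits(unsigned high_bit, unsigned low_bit,
				   uint64_t value)
{
	unsigned width = high_bit - low_bit + 1;
	uint64_t mask = (width >= 64) ? ~0ull : ((1ull << width) - 1);

	return (value & mask) << low_bit;
}

int main(void)
{
	/* Mirrors the documented example: cvmx_build_bits(39, 24, value). */
	printf("%#llx\n",
	       (unsigned long long)example_build_bits(39, 24, 0xabc));
	return 0;
}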
@@ -183,7 +183,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
* memory pointer (void *).
*
* @physical_address:
- * Hardware physical address to memory
+ * Hardware physical address to memory
* Returns Pointer to memory
*/
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
@@ -207,10 +207,10 @@ static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
-#define CVMX_BUILD_WRITE64(TYPE, ST) \
-static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
-{ \
- *CASTPTR(volatile TYPE##_t, addr) = val; \
+#define CVMX_BUILD_WRITE64(TYPE, ST) \
+static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
+{ \
+ *CASTPTR(volatile TYPE##_t, addr) = val; \
}
@@ -221,19 +221,19 @@ static inline void cvmx_write64_##TYPE(uint64_t addr, TYPE##_t val) \
/* We have a full 64bit ABI. Writing to a 64bit address can be done with
a simple volatile pointer */
-#define CVMX_BUILD_READ64(TYPE, LT) \
-static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
-{ \
+#define CVMX_BUILD_READ64(TYPE, LT) \
+static inline TYPE##_t cvmx_read64_##TYPE(uint64_t addr) \
+{ \
return *CASTPTR(volatile TYPE##_t, addr); \
}
/* The following defines 8 functions for writing to a 64bit address. Each
takes two arguments, the address and the value to write.
- cvmx_write64_int64 cvmx_write64_uint64
- cvmx_write64_int32 cvmx_write64_uint32
- cvmx_write64_int16 cvmx_write64_uint16
- cvmx_write64_int8 cvmx_write64_uint8 */
+ cvmx_write64_int64 cvmx_write64_uint64
+ cvmx_write64_int32 cvmx_write64_uint32
+ cvmx_write64_int16 cvmx_write64_uint16
+ cvmx_write64_int8 cvmx_write64_uint8 */
CVMX_BUILD_WRITE64(int64, "sd");
CVMX_BUILD_WRITE64(int32, "sw");
CVMX_BUILD_WRITE64(int16, "sh");
@@ -246,10 +246,10 @@ CVMX_BUILD_WRITE64(uint8, "sb");
/* The following defines 8 functions for reading from a 64bit address. Each
takes the address as the only argument
- cvmx_read64_int64 cvmx_read64_uint64
- cvmx_read64_int32 cvmx_read64_uint32
- cvmx_read64_int16 cvmx_read64_uint16
- cvmx_read64_int8 cvmx_read64_uint8 */
+ cvmx_read64_int64 cvmx_read64_uint64
+ cvmx_read64_int32 cvmx_read64_uint32
+ cvmx_read64_int16 cvmx_read64_uint16
+ cvmx_read64_int8 cvmx_read64_uint8 */
CVMX_BUILD_READ64(int64, "ld");
CVMX_BUILD_READ64(int32, "lw");
CVMX_BUILD_READ64(int16, "lh");
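For clarity, what one expansion of the generator macros above produces on the 64-bit ABI path, together with a hypothetical call site (the address is a placeholder).

#include <asm/octeon/cvmx.h>

/*
 * CVMX_BUILD_WRITE64(uint32, "sw") expands, on the 64bit ABI path above, to:
 *
 *	static inline void cvmx_write64_uint32(uint64_t addr, uint32_t val)
 *	{
 *		*CASTPTR(volatile uint32_t, addr) = val;
 *	}
 *
 * so a caller simply uses the typed accessor pair:
 */
static inline void example_poke_csr(uint64_t csr_addr)
{
	cvmx_write64_uint32(csr_addr, 0x1);
	(void)cvmx_read64_uint32(csr_addr);
}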
@@ -389,7 +389,7 @@ static inline void cvmx_wait(uint64_t cycles)
/**
* Reads a chip global cycle counter. This counts CPU cycles since
- * chip reset. The counter is 64 bit.
+ * chip reset. The counter is 64 bit.
* This register does not exist on CN38XX pass 1 silicon
*
* Returns Global chip cycle count since chip reset.
@@ -453,7 +453,7 @@ static inline uint32_t cvmx_octeon_num_cores(void)
/**
* Read a byte of fuse data
- * @byte_addr: address to read
+ * @byte_addr: address to read
*
* Returns fuse value: 0 or 1
*/
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 8008da2f877..90e05a8d4b1 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -35,7 +35,7 @@
#include <asm/octeon/cvmx-rnm-defs.h>
enum octeon_feature {
- /* CN68XX uses port kinds for packet interface */
+ /* CN68XX uses port kinds for packet interface */
OCTEON_FEATURE_PKND,
/* CN68XX has different fields in word0 - word2 */
OCTEON_FEATURE_CN68XX_WQE,
@@ -51,7 +51,7 @@ enum octeon_feature {
OCTEON_FEATURE_DORM_CRYPTO,
/* Does this Octeon support PCI express? */
OCTEON_FEATURE_PCIE,
- /* Does this Octeon support SRIOs */
+ /* Does this Octeon support SRIOs */
OCTEON_FEATURE_SRIO,
/* Does this Octeon support Interlaken */
OCTEON_FEATURE_ILK,
@@ -75,7 +75,7 @@ enum octeon_feature {
/* Octeon MDIO block supports clause 45 transactions for 10
* Gig support */
OCTEON_FEATURE_MDIO_CLAUSE_45,
- /*
+ /*
* CN52XX and CN56XX used a block named NPEI for PCIe
* access. Newer chips replaced this with SLI+DPI.
*/
@@ -94,10 +94,10 @@ static inline int cvmx_fuse_read(int fuse);
* be kept out of fast path code.
*
* @feature: Feature to check for. This should always be a constant so the
- * compiler can remove the switch statement through optimization.
+ * compiler can remove the switch statement through optimization.
*
* Returns Non zero if the feature exists. Zero if the feature does not
- * exist.
+ * exist.
*/
static inline int octeon_has_feature(enum octeon_feature feature)
{
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index 349bb2ba840..e2c122c6a65 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -29,7 +29,7 @@
/*
* The defines below should be used with the OCTEON_IS_MODEL() macro
- * to determine what model of chip the software is running on. Models
+ * to determine what model of chip the software is running on. Models
* ending in 'XX' match multiple models (families), while specific
* models match only that model. If a pass (revision) is specified,
* then only that revision will be matched. Care should be taken when
@@ -40,183 +40,183 @@
* subject to change at anytime without notice.
*
* NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN*
- * macros should be used outside of this file. All other macros are
+ * macros should be used outside of this file. All other macros are
* for internal use only, and may change without notice.
*/
-#define OCTEON_FAMILY_MASK 0x00ffff00
+#define OCTEON_FAMILY_MASK 0x00ffff00
/* Flag bits in top byte */
/* Ignores revision in model checks */
-#define OM_IGNORE_REVISION 0x01000000
+#define OM_IGNORE_REVISION 0x01000000
/* Check submodels */
-#define OM_CHECK_SUBMODEL 0x02000000
+#define OM_CHECK_SUBMODEL 0x02000000
/* Match all models previous than the one specified */
#define OM_MATCH_PREVIOUS_MODELS 0x04000000
/* Ignores the minor revison on newer parts */
#define OM_IGNORE_MINOR_REVISION 0x08000000
-#define OM_FLAG_MASK 0xff000000
+#define OM_FLAG_MASK 0xff000000
/* Match all cn5XXX Octeon models. */
-#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
+#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
/* Match all cn6XXX Octeon models. */
-#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
+#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
/* Match all cnf7XXX Octeon models. */
-#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
+#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
/*
* CNF7XXX models with new revision encoding
*/
-#define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_0 0x000d9400
-#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN6XXX models with new revision encoding
*/
-#define OCTEON_CN68XX_PASS1_0 0x000d9100
-#define OCTEON_CN68XX_PASS1_1 0x000d9101
-#define OCTEON_CN68XX_PASS1_2 0x000d9102
-#define OCTEON_CN68XX_PASS2_0 0x000d9108
+#define OCTEON_CN68XX_PASS1_0 0x000d9100
+#define OCTEON_CN68XX_PASS1_1 0x000d9101
+#define OCTEON_CN68XX_PASS1_2 0x000d9102
+#define OCTEON_CN68XX_PASS2_0 0x000d9108
-#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN68XX_PASS2_X (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
-#define OCTEON_CN66XX_PASS1_0 0x000d9200
-#define OCTEON_CN66XX_PASS1_2 0x000d9202
+#define OCTEON_CN66XX_PASS1_0 0x000d9200
+#define OCTEON_CN66XX_PASS1_2 0x000d9202
-#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN66XX_PASS1_X (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS1_0 0x000d9000
-#define OCTEON_CN63XX_PASS1_1 0x000d9001
-#define OCTEON_CN63XX_PASS1_2 0x000d9002
-#define OCTEON_CN63XX_PASS2_0 0x000d9008
-#define OCTEON_CN63XX_PASS2_1 0x000d9009
-#define OCTEON_CN63XX_PASS2_2 0x000d900a
+#define OCTEON_CN63XX_PASS1_0 0x000d9000
+#define OCTEON_CN63XX_PASS1_1 0x000d9001
+#define OCTEON_CN63XX_PASS1_2 0x000d9002
+#define OCTEON_CN63XX_PASS2_0 0x000d9008
+#define OCTEON_CN63XX_PASS2_1 0x000d9009
+#define OCTEON_CN63XX_PASS2_2 0x000d900a
-#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN61XX_PASS1_0 0x000d9300
+#define OCTEON_CN61XX_PASS1_0 0x000d9300
-#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN5XXX models with new revision encoding
*/
-#define OCTEON_CN58XX_PASS1_0 0x000d0300
-#define OCTEON_CN58XX_PASS1_1 0x000d0301
-#define OCTEON_CN58XX_PASS1_2 0x000d0303
-#define OCTEON_CN58XX_PASS2_0 0x000d0308
-#define OCTEON_CN58XX_PASS2_1 0x000d0309
-#define OCTEON_CN58XX_PASS2_2 0x000d030a
-#define OCTEON_CN58XX_PASS2_3 0x000d030b
-
-#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
-#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
-
-#define OCTEON_CN56XX_PASS1_0 0x000d0400
-#define OCTEON_CN56XX_PASS1_1 0x000d0401
-#define OCTEON_CN56XX_PASS2_0 0x000d0408
-#define OCTEON_CN56XX_PASS2_1 0x000d0409
-
-#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
-#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
-
-#define OCTEON_CN57XX OCTEON_CN56XX
-#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN55XX OCTEON_CN56XX
-#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN54XX OCTEON_CN56XX
-#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
-#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
-
-#define OCTEON_CN50XX_PASS1_0 0x000d0600
-
-#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
+#define OCTEON_CN58XX_PASS1_0 0x000d0300
+#define OCTEON_CN58XX_PASS1_1 0x000d0301
+#define OCTEON_CN58XX_PASS1_2 0x000d0303
+#define OCTEON_CN58XX_PASS2_0 0x000d0308
+#define OCTEON_CN58XX_PASS2_1 0x000d0309
+#define OCTEON_CN58XX_PASS2_2 0x000d030a
+#define OCTEON_CN58XX_PASS2_3 0x000d030b
+
+#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
+#define OCTEON_CN58XX_PASS2 OCTEON_CN58XX_PASS2_X
+
+#define OCTEON_CN56XX_PASS1_0 0x000d0400
+#define OCTEON_CN56XX_PASS1_1 0x000d0401
+#define OCTEON_CN56XX_PASS2_0 0x000d0408
+#define OCTEON_CN56XX_PASS2_1 0x000d0409
+
+#define OCTEON_CN56XX (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN56XX_PASS1_X (OCTEON_CN56XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS2_X (OCTEON_CN56XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN56XX_PASS1 OCTEON_CN56XX_PASS1_X
+#define OCTEON_CN56XX_PASS2 OCTEON_CN56XX_PASS2_X
+
+#define OCTEON_CN57XX OCTEON_CN56XX
+#define OCTEON_CN57XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN57XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN55XX OCTEON_CN56XX
+#define OCTEON_CN55XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN55XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN54XX OCTEON_CN56XX
+#define OCTEON_CN54XX_PASS1 OCTEON_CN56XX_PASS1
+#define OCTEON_CN54XX_PASS2 OCTEON_CN56XX_PASS2
+
+#define OCTEON_CN50XX_PASS1_0 0x000d0600
+
+#define OCTEON_CN50XX (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN50XX_PASS1_X (OCTEON_CN50XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN50XX_PASS1 OCTEON_CN50XX_PASS1_X
/*
* NOTE: Octeon CN5000F model is not identifiable using the
 * OCTEON_IS_MODEL() functions, but is treated as CN50XX.
*/
-#define OCTEON_CN52XX_PASS1_0 0x000d0700
-#define OCTEON_CN52XX_PASS2_0 0x000d0708
+#define OCTEON_CN52XX_PASS1_0 0x000d0700
+#define OCTEON_CN52XX_PASS2_0 0x000d0708
-#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
-#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
-#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
-#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
+#define OCTEON_CN52XX (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN52XX_PASS1_X (OCTEON_CN52XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS2_X (OCTEON_CN52XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN52XX_PASS1 OCTEON_CN52XX_PASS1_X
+#define OCTEON_CN52XX_PASS2 OCTEON_CN52XX_PASS2_X
/*
 * CN3XXX models with old revision encoding
*/
-#define OCTEON_CN38XX_PASS1 0x000d0000
-#define OCTEON_CN38XX_PASS2 0x000d0001
-#define OCTEON_CN38XX_PASS3 0x000d0003
-#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
+#define OCTEON_CN38XX_PASS1 0x000d0000
+#define OCTEON_CN38XX_PASS2 0x000d0001
+#define OCTEON_CN38XX_PASS3 0x000d0003
+#define OCTEON_CN38XX (OCTEON_CN38XX_PASS3 | OM_IGNORE_REVISION)
-#define OCTEON_CN36XX OCTEON_CN38XX
-#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
-#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
+#define OCTEON_CN36XX OCTEON_CN38XX
+#define OCTEON_CN36XX_PASS2 OCTEON_CN38XX_PASS2
+#define OCTEON_CN36XX_PASS3 OCTEON_CN38XX_PASS3
/* The OCTEON_CN31XX matches CN31XX models and the CN3020 */
-#define OCTEON_CN31XX_PASS1 0x000d0100
-#define OCTEON_CN31XX_PASS1_1 0x000d0102
-#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN31XX_PASS1 0x000d0100
+#define OCTEON_CN31XX_PASS1_1 0x000d0102
+#define OCTEON_CN31XX (OCTEON_CN31XX_PASS1 | OM_IGNORE_REVISION)
/*
* This model is only used for internal checks, it is not a valid
* model for the OCTEON_MODEL environment variable. This matches the
* CN3010 and CN3005 but NOT the CN3020.
*/
-#define OCTEON_CN30XX_PASS1 0x000d0200
-#define OCTEON_CN30XX_PASS1_1 0x000d0202
-#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
+#define OCTEON_CN30XX_PASS1 0x000d0200
+#define OCTEON_CN30XX_PASS1_1 0x000d0202
+#define OCTEON_CN30XX (OCTEON_CN30XX_PASS1 | OM_IGNORE_REVISION)
-#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_0 (0x000d0210 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005_PASS1_1 (0x000d0212 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3005 (OCTEON_CN3005_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_0 (0x000d0200 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010_PASS1_1 (0x000d0202 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3010 (OCTEON_CN3010_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
-#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_0 (0x000d0110 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020_PASS1_1 (0x000d0112 | OM_CHECK_SUBMODEL)
+#define OCTEON_CN3020 (OCTEON_CN3020_PASS1 | OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)
/*
* This matches the complete family of CN3xxx CPUs, and not subsequent
* models
*/
-#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
-#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
-#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
+#define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
+#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
+#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
/* These are used to cover entire families of OCTEON processors */
#define OCTEON_FAM_1 (OCTEON_CN3XXX)
@@ -243,18 +243,18 @@
*/
/* Masks used for the various types of model/family/revision matching */
-#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
+#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
-#define OCTEON_38XX_MODEL_MASK 0x00ffff10
+#define OCTEON_38XX_MODEL_MASK 0x00ffff10
#define OCTEON_38XX_MODEL_REV_MASK (OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
/* CN5XXX and later use different layout of bits in the revision ID field */
-#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
+#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK 0x00ffffc0
#define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
-#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
+#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
/* forward declarations */
static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
@@ -264,7 +264,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
/* NOTE: This for internal use only! */
#define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
-((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
+((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_38XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
@@ -276,7 +276,7 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
&& (((chip_model) & OCTEON_38XX_MODEL_MASK) < ((arg_model) & OCTEON_38XX_MODEL_MASK))) \
)) || \
- (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
+ (((arg_model & OCTEON_38XX_FAMILY_MASK) >= OCTEON_CN58XX_PASS1_0) && ( \
((((arg_model) & (OM_FLAG_MASK)) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) \
&& __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
((((arg_model) & (OM_FLAG_MASK)) == 0) \
@@ -320,7 +320,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
* Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
* is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
* I.e.:
- * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
+ * #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
*/
#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
#define OCTEON_IS_COMMON_BINARY() 1
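For illustration only (not part of the patch), a runtime check built on these macros might look like the sketch below; the model names are taken from the defines above.

#include <linux/printk.h>
#include <asm/octeon/octeon-model.h>

/* Sketch: OCTEON_IS_MODEL() expands to __octeon_is_model_runtime__() here,
 * so each test is evaluated against the running chip's processor ID. */
static void example_model_checks(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX))		/* any CN63XX revision */
		pr_info("octeon: CN63XX family detected\n");

	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X))	/* only pass 1.x silicon */
		pr_info("octeon: applying pass 1.x workaround\n");
}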
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index 254e9954ed7..a2eed23c49a 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -75,15 +75,15 @@ struct octeon_boot_descriptor {
uint32_t argc;
uint32_t argv[OCTEON_ARGV_MAX_ARGS];
-#define BOOT_FLAG_INIT_CORE (1 << 0)
-#define OCTEON_BL_FLAG_DEBUG (1 << 1)
-#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
+#define BOOT_FLAG_INIT_CORE (1 << 0)
+#define OCTEON_BL_FLAG_DEBUG (1 << 1)
+#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
/* If set, use uart1 for console */
-#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
+#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
/* If set, use PCI console */
-#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
+#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
/* Call exit on break on serial port */
-#define OCTEON_BL_FLAG_BREAK (1 << 5)
+#define OCTEON_BL_FLAG_BREAK (1 << 5)
uint32_t flags;
uint32_t core_mask;
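A hedged sketch of how these flag bits might be consumed; the descriptor pointer is assumed to be handed over by the platform's early boot code.

/* Sketch: pick a console based on the bootloader flags defined above. */
static const char *example_pick_console(const struct octeon_boot_descriptor *desc)
{
	if (desc->flags & OCTEON_BL_FLAG_CONSOLE_PCI)
		return "pci";			/* PCI console requested */
	if (desc->flags & OCTEON_BL_FLAG_CONSOLE_UART1)
		return "ttyS1";			/* uart1 instead of uart0 */
	return "ttyS0";				/* default serial console */
}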
diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
index c66734bd338..64ba56a0284 100644
--- a/arch/mips/include/asm/octeon/pci-octeon.h
+++ b/arch/mips/include/asm/octeon/pci-octeon.h
@@ -22,7 +22,7 @@
#define CVMX_PCIE_BAR1_PHYS_SIZE (1ull << 28)
/*
- * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
+ * The RC base of BAR1. gen1 has a 39-bit BAR2, gen2 has 41-bit BAR2,
* place BAR1 so it is the same for both.
*/
#define CVMX_PCIE_BAR1_RC_BASE (1ull << 41)
diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h
index 9ce5a1e7e14..2474fc5d175 100644
--- a/arch/mips/include/asm/paccess.h
+++ b/arch/mips/include/asm/paccess.h
@@ -43,7 +43,7 @@ struct __large_pstruct { unsigned long buf[100]; };
case 1: __get_dbe_asm("lb"); break; \
case 2: __get_dbe_asm("lh"); break; \
case 4: __get_dbe_asm("lw"); break; \
- case 8: __get_dbe_asm("ld"); break; \
+ case 8: __get_dbe_asm("ld"); break; \
default: __get_dbe_unknown(); break; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index dbaec94046d..99fc547af9d 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -31,7 +31,7 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK (~(PAGE_SIZE - 1))
+#define PAGE_MASK (~(PAGE_SIZE - 1))
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
@@ -95,11 +95,11 @@ extern void copy_user_highpage(struct page *to, struct page *from,
#ifdef CONFIG_64BIT_PHYS_ADDR
#ifdef CONFIG_CPU_MIPS32
typedef struct { unsigned long pte_low, pte_high; } pte_t;
- #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
+ #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
+ #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#else
typedef struct { unsigned long long pte; } pte_t;
- #define pte_val(x) ((x).pte)
+ #define pte_val(x) ((x).pte)
#define __pte(x) ((pte_t) { (x) } )
#endif
#else
@@ -191,8 +191,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
unsigned long __pfn = (pfn); \
int __n = pfn_to_nid(__pfn); \
((__n >= 0) ? (__pfn < NODE_DATA(__n)->node_start_pfn + \
- NODE_DATA(__n)->node_spanned_pages) \
- : 0); \
+ NODE_DATA(__n)->node_spanned_pages) \
+ : 0); \
})
#endif
@@ -206,7 +206,7 @@ extern int __virt_addr_valid(const volatile void *kaddr);
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
+#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
PHYS_OFFSET)
#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \
PHYS_OFFSET)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index d69ea743272..b8e24fd4cbc 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -12,7 +12,7 @@
/*
* This file essentially defines the interface between board
- * specific PCI code and MIPS common PCI code. Should potentially put
+ * specific PCI code and MIPS common PCI code.	 It should potentially be put
 * into the include/asm/pci.h file.
*/
@@ -20,7 +20,7 @@
#include <linux/of.h>
/*
- * Each pci channel is a top-level PCI bus seem by CPU. A machine with
+ * Each pci channel is a top-level PCI bus seen by the CPU.	A machine with
* multiple PCI channels may have multiple PCI host controllers or a
* single controller supporting multiple channels.
*/
@@ -99,7 +99,7 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
struct pci_dev;
/*
- * The PCI address space does equal the physical memory address space. The
+ * The PCI address space does equal the physical memory address space. The
* networking and block device layers use this boolean for bounce buffer
* decisions. This is set if any hose does not have an IOMMU.
*/
@@ -144,8 +144,13 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
extern char * (*pcibios_plat_setup)(char *str);
+#ifdef CONFIG_OF
/* this function parses memory ranges from a device node */
extern void pci_load_of_ranges(struct pci_controller *hose,
struct device_node *node);
+#else
+static inline void pci_load_of_ranges(struct pci_controller *hose,
+ struct device_node *node) {}
+#endif
#endif /* _ASM_PCI_H */
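The new stub means callers no longer need their own #ifdef CONFIG_OF; a hedged sketch of such a caller follows (hose and node are assumed to be supplied by the board code).

#include <asm/pci.h>

/* Sketch: pci_load_of_ranges() degrades to an empty inline when CONFIG_OF
 * is not set, so this path compiles either way. */
static void example_register_hose(struct pci_controller *hose,
				  struct device_node *node)
{
	pci_load_of_ranges(hose, node);
	register_pci_controller(hose);
}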
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index be44fb0266d..af2c8a351ca 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -85,7 +85,7 @@ typedef volatile struct bridge_s {
#define b_wid_llp b_widget.w_llp_cfg
#define b_wid_tflush b_widget.w_tflush
- /* bridge-specific widget configuration 0x000058-0x00007F */
+ /* bridge-specific widget configuration 0x000058-0x00007F */
bridgereg_t _pad_000058;
bridgereg_t b_wid_aux_err; /* 0x00005C */
bridgereg_t _pad_000060;
@@ -167,8 +167,8 @@ typedef volatile struct bridge_s {
bridgereg_t __pad; /* 0x0002{80,,,88} */
bridgereg_t reg; /* 0x0002{84,,,8C} */
} b_rrb_map[2]; /* 0x000280 */
-#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
-#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
+#define b_even_resp b_rrb_map[0].reg /* 0x000284 */
+#define b_odd_resp b_rrb_map[1].reg /* 0x00028C */
bridgereg_t _pad_000290;
bridgereg_t b_resp_status; /* 0x000294 */
@@ -233,7 +233,7 @@ typedef volatile struct bridge_s {
u8 _pad_030007[0x04fff8]; /* 0x030008-0x07FFFF */
/* External Address Translation Entry RAM 0x080000-0x0FFFFF */
- bridge_ate_t b_ext_ate_ram[0x10000];
+ bridge_ate_t b_ext_ate_ram[0x10000];
/* Reserved 0x100000-0x1FFFFF */
char _pad_100000[0x200000-0x100000];
@@ -400,7 +400,7 @@ typedef struct bridge_err_cmdword_s {
#define BRIDGE_REV_A 0x1
#define BRIDGE_REV_B 0x2
#define BRIDGE_REV_C 0x3
-#define BRIDGE_REV_D 0x4
+#define BRIDGE_REV_D 0x4
/* Bridge widget status register bits definition */
@@ -691,21 +691,21 @@ typedef struct bridge_err_cmdword_s {
#define BRIDGE_CREDIT 3
/* RRB assignment register */
-#define BRIDGE_RRB_EN 0x8 /* after shifting down */
-#define BRIDGE_RRB_DEV 0x7 /* after shifting down */
-#define BRIDGE_RRB_VDEV 0x4 /* after shifting down */
-#define BRIDGE_RRB_PDEV 0x3 /* after shifting down */
+#define BRIDGE_RRB_EN 0x8 /* after shifting down */
+#define BRIDGE_RRB_DEV 0x7 /* after shifting down */
+#define BRIDGE_RRB_VDEV 0x4 /* after shifting down */
+#define BRIDGE_RRB_PDEV 0x3 /* after shifting down */
/* RRB status register */
-#define BRIDGE_RRB_VALID(r) (0x00010000<<(r))
-#define BRIDGE_RRB_INUSE(r) (0x00000001<<(r))
+#define BRIDGE_RRB_VALID(r) (0x00010000<<(r))
+#define BRIDGE_RRB_INUSE(r) (0x00000001<<(r))
/* RRB clear register */
-#define BRIDGE_RRB_CLEAR(r) (0x00000001<<(r))
+#define BRIDGE_RRB_CLEAR(r) (0x00000001<<(r))
/* xbox system controller declarations */
-#define XBOX_BRIDGE_WID 8
-#define FLASH_PROM1_BASE 0xE00000 /* To read the xbox sysctlr status */
+#define XBOX_BRIDGE_WID 8
+#define FLASH_PROM1_BASE 0xE00000 /* To read the xbox sysctlr status */
#define XBOX_RPS_EXISTS 1 << 6 /* RPS bit in status register */
#define XBOX_RPS_FAIL 1 << 4 /* RPS status bit in register */
@@ -838,7 +838,7 @@ struct bridge_controller {
bridge_t *base;
nasid_t nasid;
unsigned int widget_id;
- unsigned int irq_cpu;
+ unsigned int irq_cpu;
u64 baddr;
unsigned int pci_int[8];
};
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 5d56bb23034..b4204c179b9 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -47,7 +47,7 @@
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
-#define VMALLOC_START MAP_BASE
+#define VMALLOC_START MAP_BASE
#define PKMAP_BASE (0xfe000000UL)
@@ -136,7 +136,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + __pte_offset(address))
-#define pte_offset_map(dir, address) \
+#define pte_offset_map(dir, address) \
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
@@ -155,7 +155,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
(((_pte).pte >> 2 ) & 0x38) | \
- (((_pte).pte >> 10) << 6 ))
+ (((_pte).pte >> 10) << 6 ))
#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
(((off) & 0x38) << 2 ) | \
@@ -167,14 +167,14 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
/* Swap entries must have VALID and GLOBAL bits cleared. */
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 7)
+#define __swp_offset(x) ((x).val >> 7)
#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+ ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
#else
#define __swp_type(x) (((x).val >> 8) & 0x1f)
-#define __swp_offset(x) ((x).val >> 13)
+#define __swp_offset(x) ((x).val >> 13)
#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+ ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
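As a worked example (not part of the patch), the default encoding just above packs a 5-bit type at bit 8 and the offset from bit 13 upward, which keeps bits 0-7 (including _PAGE_VALID and _PAGE_GLOBAL) clear, as the comment requires.

#include <linux/bug.h>
#include <linux/mm_types.h>
#include <asm/pgtable.h>

/* Sketch: round trip of the default 32-bit swap-entry layout.
 * (3 << 8) | (0x1234 << 13) == 0x2468300. */
static void example_swp_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
}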
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
@@ -184,7 +184,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define PTE_FILE_MAX_BITS 30
#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
+#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
#else
/*
@@ -194,7 +194,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
(((_pte).pte >> 2) & 0x8) | \
- (((_pte).pte >> 8) << 4))
+ (((_pte).pte >> 8) << 4))
#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
(((off) & 0x8) << 2) | \
@@ -208,7 +208,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
#else
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#endif
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 013d5f78126..e1c49a96807 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -115,7 +115,7 @@
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
#if PGDIR_SIZE >= TASK_SIZE64
-#define USER_PTRS_PER_PGD (1)
+#define USER_PTRS_PER_PGD (1)
#else
#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
#endif
@@ -288,7 +288,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define __swp_type(x) (((x).val >> 32) & 0xff)
#define __swp_offset(x) ((x).val >> 40)
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
-#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index f6a0439a408..32aea4852fb 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -21,7 +21,7 @@
* Similar to the Alpha port, we need to keep track of the ref
* and mod bits in software. We have a software "yeah you can read
* from this page" bit, and a hardware one which actually lets the
- * process read from the page. On the same token we have a software
+ * process read from the page.	By the same token we have a software
* writable bit and the real hardware one which actually lets the
* process write to the page, this keeps a mod bit via the hardware
* dirty bit.
@@ -41,9 +41,9 @@
#define _PAGE_GLOBAL (1 << 0)
#define _PAGE_VALID_SHIFT 1
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ (1 << 1) /* synonym */
+#define _PAGE_SILENT_READ (1 << 1) /* synonym */
#define _PAGE_DIRTY_SHIFT 2
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
+#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
#define _PAGE_SILENT_WRITE (1 << 2)
#define _CACHE_SHIFT 3
#define _CACHE_MASK (7 << 3)
@@ -52,7 +52,7 @@
* The following bits are implemented in software
*
* _PAGE_FILE semantics: set:pagecache unset:swap
- */
+ */
#define _PAGE_PRESENT_SHIFT 6
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT 7
@@ -134,7 +134,7 @@
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#else
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
+#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -143,7 +143,7 @@
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
#else
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
-#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
+#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
#endif
/* Page cannot be executed */
@@ -159,10 +159,10 @@
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-/* synonym */
+/* synonym */
#define _PAGE_SILENT_READ (_PAGE_VALID)
-/* The MIPS dirty bit */
+/* The MIPS dirty bit */
#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
#define _PAGE_SILENT_WRITE (_PAGE_DIRTY)
@@ -175,7 +175,7 @@
#endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
#ifndef _PFN_SHIFT
-#define _PFN_SHIFT PAGE_SHIFT
+#define _PFN_SHIFT PAGE_SHIFT
#endif
#define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
@@ -230,28 +230,28 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
/* No penalty for being coherent on the SB1, so just
use it for "noncoherent" spaces, too. Shouldn't hurt. */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
-#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT)
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#else
-#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
-#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */
-#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */
-#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */
-#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */
-#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */
-#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */
+#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */
+#define _CACHE_CACHABLE_WA (1<<_CACHE_SHIFT) /* R4600 only */
+#define _CACHE_UNCACHED (2<<_CACHE_SHIFT) /* R4[0246]00 */
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT) /* R4[0246]00 */
+#define _CACHE_CACHABLE_CE (4<<_CACHE_SHIFT) /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COW (5<<_CACHE_SHIFT) /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COHERENT (5<<_CACHE_SHIFT) /* MIPS32R2 CMP */
+#define _CACHE_CACHABLE_CUW (6<<_CACHE_SHIFT) /* R4[04]00MC only */
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT) /* R10000 only */
#endif
#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
-#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
#endif /* _ASM_PGTABLE_BITS_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index ec50d52cfb7..fdc62fb5630 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -112,7 +112,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* it better already be global)
*/
if (pte_none(*buddy)) {
- buddy->pte_low |= _PAGE_GLOBAL;
+ buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
}
@@ -319,7 +319,7 @@ static inline int pte_special(pte_t pte) { return 0; }
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
- * Macro to make mark a page protection value as "uncacheable". Note
+ * Macro to mark a page protection value as "uncacheable".	Note
* that "protection" is really a misnomer here as the protection value
* contains the memory attribute bits, dirty bits, and various other
* bits as well.
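For context (not part of the patch), this is the property a driver relies on when it uses pgprot_noncached() for a device mapping; a hedged sketch:

#include <linux/fs.h>
#include <linux/mm.h>

/* Sketch: an mmap handler that remaps a device region uncached. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}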
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index bd98b503f04..2a5fa7abb34 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -112,8 +112,8 @@ struct mips_fpu_struct {
typedef __u32 dspreg_t;
struct mips_dsp_state {
- dspreg_t dspr[NUM_DSP_REGS];
- unsigned int dspcontrol;
+ dspreg_t dspr[NUM_DSP_REGS];
+ unsigned int dspcontrol;
};
#define INIT_CPUMASK { \
@@ -137,46 +137,46 @@ union mips_watch_reg_state {
struct octeon_cop2_state {
/* DMFC2 rt, 0x0201 */
- unsigned long cop2_crc_iv;
+ unsigned long cop2_crc_iv;
/* DMFC2 rt, 0x0202 (Set with DMTC2 rt, 0x1202) */
- unsigned long cop2_crc_length;
+ unsigned long cop2_crc_length;
/* DMFC2 rt, 0x0200 (set with DMTC2 rt, 0x4200) */
- unsigned long cop2_crc_poly;
+ unsigned long cop2_crc_poly;
/* DMFC2 rt, 0x0402; DMFC2 rt, 0x040A */
- unsigned long cop2_llm_dat[2];
+ unsigned long cop2_llm_dat[2];
/* DMFC2 rt, 0x0084 */
- unsigned long cop2_3des_iv;
+ unsigned long cop2_3des_iv;
/* DMFC2 rt, 0x0080; DMFC2 rt, 0x0081; DMFC2 rt, 0x0082 */
- unsigned long cop2_3des_key[3];
+ unsigned long cop2_3des_key[3];
/* DMFC2 rt, 0x0088 (Set with DMTC2 rt, 0x0098) */
- unsigned long cop2_3des_result;
+ unsigned long cop2_3des_result;
/* DMFC2 rt, 0x0111 (FIXME: Read Pass1 Errata) */
- unsigned long cop2_aes_inp0;
+ unsigned long cop2_aes_inp0;
/* DMFC2 rt, 0x0102; DMFC2 rt, 0x0103 */
- unsigned long cop2_aes_iv[2];
+ unsigned long cop2_aes_iv[2];
/* DMFC2 rt, 0x0104; DMFC2 rt, 0x0105; DMFC2 rt, 0x0106; DMFC2
* rt, 0x0107 */
- unsigned long cop2_aes_key[4];
+ unsigned long cop2_aes_key[4];
/* DMFC2 rt, 0x0110 */
- unsigned long cop2_aes_keylen;
+ unsigned long cop2_aes_keylen;
/* DMFC2 rt, 0x0100; DMFC2 rt, 0x0101 */
- unsigned long cop2_aes_result[2];
+ unsigned long cop2_aes_result[2];
/* DMFC2 rt, 0x0240; DMFC2 rt, 0x0241; DMFC2 rt, 0x0242; DMFC2
* rt, 0x0243; DMFC2 rt, 0x0244; DMFC2 rt, 0x0245; DMFC2 rt,
* 0x0246; DMFC2 rt, 0x0247; DMFC2 rt, 0x0248; DMFC2 rt,
* 0x0249; DMFC2 rt, 0x024A; DMFC2 rt, 0x024B; DMFC2 rt,
* 0x024C; DMFC2 rt, 0x024D; DMFC2 rt, 0x024E - Pass2 */
- unsigned long cop2_hsh_datw[15];
+ unsigned long cop2_hsh_datw[15];
/* DMFC2 rt, 0x0250; DMFC2 rt, 0x0251; DMFC2 rt, 0x0252; DMFC2
* rt, 0x0253; DMFC2 rt, 0x0254; DMFC2 rt, 0x0255; DMFC2 rt,
* 0x0256; DMFC2 rt, 0x0257 - Pass2 */
- unsigned long cop2_hsh_ivw[8];
+ unsigned long cop2_hsh_ivw[8];
/* DMFC2 rt, 0x0258; DMFC2 rt, 0x0259 - Pass2 */
- unsigned long cop2_gfm_mult[2];
+ unsigned long cop2_gfm_mult[2];
/* DMFC2 rt, 0x025E - Pass2 */
- unsigned long cop2_gfm_poly;
+ unsigned long cop2_gfm_poly;
/* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
- unsigned long cop2_gfm_result[2];
+ unsigned long cop2_gfm_result[2];
};
#define INIT_OCTEON_COP2 {0,}
@@ -249,9 +249,9 @@ struct thread_struct {
#endif /* CONFIG_CPU_CAVIUM_OCTEON */
#define INIT_THREAD { \
- /* \
- * Saved main processor registers \
- */ \
+ /* \
+ * Saved main processor registers \
+ */ \
.reg16 = 0, \
.reg17 = 0, \
.reg18 = 0, \
@@ -332,7 +332,7 @@ unsigned long get_wchan(struct task_struct *p);
* aborts compilation on some CPUs. It's simply not possible to unwind
* some CPU's stackframes.
*
- * __builtin_return_address works only for non-leaf functions. We avoid the
+ * __builtin_return_address works only for non-leaf functions. We avoid the
* overhead of a function call by forcing the compiler to save the return
* address register on the stack.
*/
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 54ea47da59a..a0b2650516a 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -22,10 +22,10 @@
* for indexed cache operations. Two issues here:
*
* - The MIPS32 and MIPS64 specs permit an implementation to directly derive
- * the index bits from the virtual address. This breaks with tradition
- * set by the R4000. To keep unpleasant surprises from happening we pick
+ * the index bits from the virtual address. This breaks with tradition
+ * set by the R4000. To keep unpleasant surprises from happening we pick
* an address in KSEG0 / CKSEG0.
- * - We need a properly sign extended address for 64-bit code. To get away
+ * - We need a properly sign extended address for 64-bit code. To get away
* without ifdefs we let the compiler do it by a type cast.
*/
#define INDEX_BASE CKSEG0
@@ -347,7 +347,7 @@ static inline void blast_##pfx##cache##lsize(void) \
unsigned long end = start + current_cpu_data.desc.waysize; \
unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
unsigned long ws_end = current_cpu_data.desc.ways << \
- current_cpu_data.desc.waybit; \
+ current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
__##pfx##flush_prologue \
@@ -359,7 +359,7 @@ static inline void blast_##pfx##cache##lsize(void) \
__##pfx##flush_epilogue \
} \
\
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
{ \
unsigned long start = page; \
unsigned long end = page + PAGE_SIZE; \
@@ -381,7 +381,7 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE; \
unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
unsigned long ws_end = current_cpu_data.desc.ways << \
- current_cpu_data.desc.waybit; \
+ current_cpu_data.desc.waybit; \
unsigned long ws, addr; \
\
__##pfx##flush_prologue \
diff --git a/arch/mips/include/asm/regdef.h b/arch/mips/include/asm/regdef.h
index 785a5189b37..3c687df1d51 100644
--- a/arch/mips/include/asm/regdef.h
+++ b/arch/mips/include/asm/regdef.h
@@ -19,44 +19,44 @@
/*
* Symbolic register names for 32 bit ABI
*/
-#define zero $0 /* wired zero */
-#define AT $1 /* assembler temp - uppercase because of ".set at" */
-#define v0 $2 /* return value */
-#define v1 $3
-#define a0 $4 /* argument registers */
-#define a1 $5
-#define a2 $6
-#define a3 $7
-#define t0 $8 /* caller saved */
-#define t1 $9
-#define t2 $10
-#define t3 $11
-#define t4 $12
+#define zero $0 /* wired zero */
+#define AT $1 /* assembler temp - uppercase because of ".set at" */
+#define v0 $2 /* return value */
+#define v1 $3
+#define a0 $4 /* argument registers */
+#define a1 $5
+#define a2 $6
+#define a3 $7
+#define t0 $8 /* caller saved */
+#define t1 $9
+#define t2 $10
+#define t3 $11
+#define t4 $12
#define ta0 $12
-#define t5 $13
+#define t5 $13
#define ta1 $13
-#define t6 $14
+#define t6 $14
#define ta2 $14
-#define t7 $15
+#define t7 $15
#define ta3 $15
-#define s0 $16 /* callee saved */
-#define s1 $17
-#define s2 $18
-#define s3 $19
-#define s4 $20
-#define s5 $21
-#define s6 $22
-#define s7 $23
-#define t8 $24 /* caller saved */
-#define t9 $25
-#define jp $25 /* PIC jump register */
-#define k0 $26 /* kernel scratch */
-#define k1 $27
-#define gp $28 /* global pointer */
-#define sp $29 /* stack pointer */
-#define fp $30 /* frame pointer */
+#define s0 $16 /* callee saved */
+#define s1 $17
+#define s2 $18
+#define s3 $19
+#define s4 $20
+#define s5 $21
+#define s6 $22
+#define s7 $23
+#define t8 $24 /* caller saved */
+#define t9 $25
+#define jp $25 /* PIC jump register */
+#define k0 $26 /* kernel scratch */
+#define k1 $27
+#define gp $28 /* global pointer */
+#define sp $29 /* stack pointer */
+#define fp $30 /* frame pointer */
#define s8 $30 /* same like fp! */
-#define ra $31 /* return address */
+#define ra $31 /* return address */
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
diff --git a/arch/mips/include/asm/rtlx.h b/arch/mips/include/asm/rtlx.h
index 4ca3063ed2c..90985b61dbd 100644
--- a/arch/mips/include/asm/rtlx.h
+++ b/arch/mips/include/asm/rtlx.h
@@ -38,7 +38,7 @@ enum rtlx_state {
#define RTLX_BUFFER_SIZE 2048
/* each channel supports read and write.
- linux (vpe0) reads lx_buffer and writes rt_buffer
+ linux (vpe0) reads lx_buffer and writes rt_buffer
SP (vpe1) reads rt_buffer and writes lx_buffer
*/
struct rtlx_channel {
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index ae6306ebdca..f29c75cf83c 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -10,7 +10,7 @@
/*
* Kludge alert:
*
- * The generic seccomp code currently allows only a single compat ABI. Until
+ * The generic seccomp code currently allows only a single compat ABI. Until
 * this is fixed we prioritize O32 as the compat ABI over N32.
*/
#ifdef CONFIG_MIPS32_O32
diff --git a/arch/mips/include/asm/sgi/gio.h b/arch/mips/include/asm/sgi/gio.h
index 889cf028c95..24be2b425be 100644
--- a/arch/mips/include/asm/sgi/gio.h
+++ b/arch/mips/include/asm/sgi/gio.h
@@ -18,18 +18,18 @@
* three physical connectors, but only two slots, GFX and EXP0.
*
* There is 10MB of GIO address space for GIO64 slot devices
- * slot# slot type address range size
+ * slot# slot type address range size
* ----- --------- ----------------------- -----
- * 0 GFX 0x1f000000 - 0x1f3fffff 4MB
- * 1 EXP0 0x1f400000 - 0x1f5fffff 2MB
- * 2 EXP1 0x1f600000 - 0x1f9fffff 4MB
+ * 0 GFX 0x1f000000 - 0x1f3fffff 4MB
+ * 1 EXP0 0x1f400000 - 0x1f5fffff 2MB
+ * 2 EXP1 0x1f600000 - 0x1f9fffff 4MB
*
* There are un-slotted devices, HPC, I/O and misc devices, which are grouped
* into the HPC address space.
- * - MISC 0x1fb00000 - 0x1fbfffff 1MB
+ * - MISC 0x1fb00000 - 0x1fbfffff 1MB
*
* Following space is reserved and unused
- * - RESERVED 0x18000000 - 0x1effffff 112MB
+ * - RESERVED 0x18000000 - 0x1effffff 112MB
*
* GIO bus IDs
*
@@ -39,10 +39,10 @@
* the slot undefined.
*
* 32-bit IDs are divided into
- * bits 0:6 the product ID; ranges from 0x00 to 0x7F.
+ * bits 0:6 the product ID; ranges from 0x00 to 0x7F.
* bit 7 0=GIO Product ID is 8 bits wide
* 1=GIO Product ID is 32 bits wide.
- * bits 8:15 manufacturer version for the product.
+ * bits 8:15 manufacturer version for the product.
* bit 16 0=GIO32 and GIO32-bis, 1=GIO64.
* bit 17 0=no ROM present
* 1=ROM present on this board AND next three words
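A hedged sketch (not part of the patch) of decoding the ID fields described above from a 32-bit GIO ID word:

#include <linux/types.h>
#include <linux/printk.h>

/* Sketch: pull apart the fields of a 32-bit GIO bus ID. */
static void example_decode_gio_id(u32 id)
{
	u8 product  = id & 0x7f;		/* bits 0:6   product ID */
	bool wide   = id & 0x80;		/* bit 7      1 = 32-bit wide ID */
	u8 revision = (id >> 8) & 0xff;		/* bits 8:15  manufacturer version */
	bool gio64  = id & (1 << 16);		/* bit 16     1 = GIO64 device */
	bool rom    = id & (1 << 17);		/* bit 17     1 = ROM present */

	pr_info("gio id %#x: product %#x rev %u%s%s%s\n", id, product, revision,
		wide ? ", 32-bit id" : "", gio64 ? ", GIO64" : "",
		rom ? ", ROM" : "");
}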
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index c4729f53191..59920b34594 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -65,39 +65,39 @@ struct hpc3_scsiregs {
u32 _unused0[0x1000/4 - 2]; /* padding */
volatile u32 bcd; /* byte count info */
#define HPC3_SBCD_BCNTMSK 0x00003fff /* bytes to transfer from/to memory */
-#define HPC3_SBCD_XIE 0x00004000 /* Send IRQ when done with cur buf */
-#define HPC3_SBCD_EOX 0x00008000 /* Indicates this is last buf in chain */
+#define HPC3_SBCD_XIE 0x00004000 /* Send IRQ when done with cur buf */
+#define HPC3_SBCD_EOX 0x00008000 /* Indicates this is last buf in chain */
volatile u32 ctrl; /* control register */
-#define HPC3_SCTRL_IRQ 0x01 /* IRQ asserted, either dma done or parity */
+#define HPC3_SCTRL_IRQ 0x01 /* IRQ asserted, either dma done or parity */
#define HPC3_SCTRL_ENDIAN 0x02 /* DMA endian mode, 0=big 1=little */
-#define HPC3_SCTRL_DIR 0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
+#define HPC3_SCTRL_DIR 0x04 /* DMA direction, 1=dev2mem 0=mem2dev */
#define HPC3_SCTRL_FLUSH 0x08 /* Tells HPC3 to flush scsi fifos */
#define HPC3_SCTRL_ACTIVE 0x10 /* SCSI DMA channel is active */
#define HPC3_SCTRL_AMASK 0x20 /* DMA active inhibits PIO */
#define HPC3_SCTRL_CRESET 0x40 /* Resets dma channel and external controller */
-#define HPC3_SCTRL_PERR 0x80 /* Bad parity on HPC3 iface to scsi controller */
+#define HPC3_SCTRL_PERR 0x80 /* Bad parity on HPC3 iface to scsi controller */
volatile u32 gfptr; /* current GIO fifo ptr */
volatile u32 dfptr; /* current device fifo ptr */
volatile u32 dconfig; /* DMA configuration register */
#define HPC3_SDCFG_HCLK 0x00001 /* Enable DMA half clock mode */
-#define HPC3_SDCFG_D1 0x00006 /* Cycles to spend in D1 state */
-#define HPC3_SDCFG_D2 0x00038 /* Cycles to spend in D2 state */
-#define HPC3_SDCFG_D3 0x001c0 /* Cycles to spend in D3 state */
+#define HPC3_SDCFG_D1 0x00006 /* Cycles to spend in D1 state */
+#define HPC3_SDCFG_D2 0x00038 /* Cycles to spend in D2 state */
+#define HPC3_SDCFG_D3 0x001c0 /* Cycles to spend in D3 state */
#define HPC3_SDCFG_HWAT 0x00e00 /* DMA high water mark */
-#define HPC3_SDCFG_HW 0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
+#define HPC3_SDCFG_HW 0x01000 /* Enable 16-bit halfword DMA accesses to scsi */
#define HPC3_SDCFG_SWAP 0x02000 /* Byte swap all DMA accesses */
#define HPC3_SDCFG_EPAR 0x04000 /* Enable parity checking for DMA */
#define HPC3_SDCFG_POLL 0x08000 /* hd_dreq polarity control */
#define HPC3_SDCFG_ERLY 0x30000 /* hd_dreq behavior control bits */
volatile u32 pconfig; /* PIO configuration register */
-#define HPC3_SPCFG_P3 0x0003 /* Cycles to spend in P3 state */
-#define HPC3_SPCFG_P2W 0x001c /* Cycles to spend in P2 state for writes */
-#define HPC3_SPCFG_P2R 0x01e0 /* Cycles to spend in P2 state for reads */
-#define HPC3_SPCFG_P1 0x0e00 /* Cycles to spend in P1 state */
-#define HPC3_SPCFG_HW 0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
+#define HPC3_SPCFG_P3 0x0003 /* Cycles to spend in P3 state */
+#define HPC3_SPCFG_P2W 0x001c /* Cycles to spend in P2 state for writes */
+#define HPC3_SPCFG_P2R 0x01e0 /* Cycles to spend in P2 state for reads */
+#define HPC3_SPCFG_P1 0x0e00 /* Cycles to spend in P1 state */
+#define HPC3_SPCFG_HW 0x1000 /* Enable 16-bit halfword PIO accesses to scsi */
#define HPC3_SPCFG_SWAP 0x2000 /* Byte swap all PIO accesses */
#define HPC3_SPCFG_EPAR 0x4000 /* Enable parity checking for PIO */
#define HPC3_SPCFG_FUJI 0x8000 /* Fujitsu scsi controller mode for faster dma/pio */
@@ -108,13 +108,13 @@ struct hpc3_scsiregs {
/* SEEQ ethernet HPC3 registers, only one seeq per HPC3. */
struct hpc3_ethregs {
/* Receiver registers. */
- volatile u32 rx_cbptr; /* current dma buffer ptr, diagnostic use only */
- volatile u32 rx_ndptr; /* next dma descriptor ptr */
+ volatile u32 rx_cbptr; /* current dma buffer ptr, diagnostic use only */
+ volatile u32 rx_ndptr; /* next dma descriptor ptr */
u32 _unused0[0x1000/4 - 2]; /* padding */
volatile u32 rx_bcd; /* byte count info */
#define HPC3_ERXBCD_BCNTMSK 0x00003fff /* bytes to be sent to memory */
-#define HPC3_ERXBCD_XIE 0x20000000 /* HPC3 interrupts cpu at end of this buf */
-#define HPC3_ERXBCD_EOX 0x80000000 /* flags this as end of descriptor chain */
+#define HPC3_ERXBCD_XIE 0x20000000 /* HPC3 interrupts cpu at end of this buf */
+#define HPC3_ERXBCD_EOX 0x80000000 /* flags this as end of descriptor chain */
volatile u32 rx_ctrl; /* control register */
#define HPC3_ERXCTRL_STAT50 0x0000003f /* Receive status reg bits of Seeq8003 */
@@ -131,23 +131,23 @@ struct hpc3_ethregs {
volatile u32 reset; /* reset register */
#define HPC3_ERST_CRESET 0x1 /* Reset dma channel and external controller */
#define HPC3_ERST_CLRIRQ 0x2 /* Clear channel interrupt */
-#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
+#define HPC3_ERST_LBACK 0x4 /* Enable diagnostic loopback mode of Seeq8003 */
- volatile u32 dconfig; /* DMA configuration register */
-#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
-#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
-#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
+ volatile u32 dconfig; /* DMA configuration register */
+#define HPC3_EDCFG_D1 0x0000f /* Cycles to spend in D1 state for PIO */
+#define HPC3_EDCFG_D2 0x000f0 /* Cycles to spend in D2 state for PIO */
+#define HPC3_EDCFG_D3 0x00f00 /* Cycles to spend in D3 state for PIO */
#define HPC3_EDCFG_WCTRL 0x01000 /* Enable writes of desc into ex ctrl port */
#define HPC3_EDCFG_FRXDC 0x02000 /* Clear eop stat bits upon rxdc, hw seeq fix */
-#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
-#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
-#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */
+#define HPC3_EDCFG_FEOP 0x04000 /* Bad packet marker timeout enable */
+#define HPC3_EDCFG_FIRQ 0x08000 /* Another bad packet timeout enable */
+#define HPC3_EDCFG_PTO 0x30000 /* Programmed timeout value for above two */
- volatile u32 pconfig; /* PIO configuration register */
-#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
-#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
-#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */
+ volatile u32 pconfig; /* PIO configuration register */
+#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
+#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
+#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
+#define HPC3_EPCFG_TST	 0x1000	 /* Diagnostic ram test feature bit */
u32 _unused2[0x1000/4 - 8]; /* padding */
@@ -158,9 +158,9 @@ struct hpc3_ethregs {
volatile u32 tx_bcd; /* byte count info */
#define HPC3_ETXBCD_BCNTMSK 0x00003fff /* bytes to be read from memory */
#define HPC3_ETXBCD_ESAMP 0x10000000 /* if set, too late to add descriptor */
-#define HPC3_ETXBCD_XIE 0x20000000 /* Interrupt cpu at end of cur desc */
-#define HPC3_ETXBCD_EOP 0x40000000 /* Last byte of cur buf is end of packet */
-#define HPC3_ETXBCD_EOX 0x80000000 /* This buf is the end of desc chain */
+#define HPC3_ETXBCD_XIE 0x20000000 /* Interrupt cpu at end of cur desc */
+#define HPC3_ETXBCD_EOP 0x40000000 /* Last byte of cur buf is end of packet */
+#define HPC3_ETXBCD_EOX 0x80000000 /* This buf is the end of desc chain */
volatile u32 tx_ctrl; /* control register */
#define HPC3_ETXCTRL_STAT30 0x0000000f /* Rdonly copy of seeq tx stat reg */
@@ -215,10 +215,10 @@ struct hpc3_regs {
volatile u32 istat1; /* Irq status, only bits <9:5> reliable. */
volatile u32 bestat; /* Bus error interrupt status reg. */
-#define HPC3_BESTAT_BLMASK 0x000ff /* Bus lane where bad parity occurred */
-#define HPC3_BESTAT_CTYPE 0x00100 /* Bus cycle type, 0=PIO 1=DMA */
+#define HPC3_BESTAT_BLMASK 0x000ff /* Bus lane where bad parity occurred */
+#define HPC3_BESTAT_CTYPE 0x00100 /* Bus cycle type, 0=PIO 1=DMA */
#define HPC3_BESTAT_PIDSHIFT 9
-#define HPC3_BESTAT_PIDMASK 0x3f700 /* DMA channel parity identifier */
+#define HPC3_BESTAT_PIDMASK 0x3f700 /* DMA channel parity identifier */
u32 _unused1[0x14000/4 - 5]; /* padding */
@@ -259,7 +259,7 @@ struct hpc3_regs {
#define HPC3_DMACFG_RTIME 0x00200000
/* 5 bit burst count for DMA device */
#define HPC3_DMACFG_BURST_MASK 0x07c00000
-#define HPC3_DMACFG_BURST_SHIFT 22
+#define HPC3_DMACFG_BURST_SHIFT 22
/* Use live pbus_dreq unsynchronized signal */
#define HPC3_DMACFG_DRQLIVE 0x08000000
volatile u32 pbus_piocfg[16][64];
@@ -288,20 +288,20 @@ struct hpc3_regs {
/* PBUS PROM control regs. */
volatile u32 pbus_promwe; /* PROM write enable register */
-#define HPC3_PROM_WENAB 0x1 /* Enable writes to the PROM */
+#define HPC3_PROM_WENAB 0x1 /* Enable writes to the PROM */
u32 _unused5[0x0800/4 - 1];
volatile u32 pbus_promswap; /* Chip select swap reg */
#define HPC3_PROM_SWAP 0x1 /* invert GIO addr bit to select prom0 or prom1 */
u32 _unused6[0x0800/4 - 1];
- volatile u32 pbus_gout; /* PROM general purpose output reg */
+ volatile u32 pbus_gout; /* PROM general purpose output reg */
#define HPC3_PROM_STAT 0x1 /* General purpose status bit in gout */
u32 _unused7[0x1000/4 - 1];
volatile u32 rtcregs[14]; /* Dallas clock registers */
u32 _unused8[50];
- volatile u32 bbram[8192-50-14]; /* Battery backed ram */
+ volatile u32 bbram[8192-50-14]; /* Battery backed ram */
};
/*
diff --git a/arch/mips/include/asm/sgi/ioc.h b/arch/mips/include/asm/sgi/ioc.h
index 380347b648e..53c6b1ca686 100644
--- a/arch/mips/include/asm/sgi/ioc.h
+++ b/arch/mips/include/asm/sgi/ioc.h
@@ -138,7 +138,7 @@ struct sgioc_regs {
u8 _sysid[3];
volatile u8 sysid;
#define SGIOC_SYSID_FULLHOUSE 0x01
-#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1)
+#define SGIOC_SYSID_BOARDREV(x) (((x) & 0x1e) >> 1)
#define SGIOC_SYSID_CHIPREV(x) (((x) & 0xe0) >> 5)
u32 _unused2;
u8 _read[3];
@@ -150,7 +150,7 @@ struct sgioc_regs {
#define SGIOC_DMASEL_ISDNB 0x01 /* enable isdn B */
#define SGIOC_DMASEL_ISDNA 0x02 /* enable isdn A */
#define SGIOC_DMASEL_PPORT 0x04 /* use parallel DMA */
-#define SGIOC_DMASEL_SCLK667MHZ 0x10 /* use 6.67MHZ serial clock */
+#define SGIOC_DMASEL_SCLK667MHZ 0x10 /* use 6.67MHZ serial clock */
#define SGIOC_DMASEL_SCLKEXT 0x20 /* use external serial clock */
u32 _unused4;
u8 _reset[3];
diff --git a/arch/mips/include/asm/sgi/ip22.h b/arch/mips/include/asm/sgi/ip22.h
index c0501f91719..8db1a3588cf 100644
--- a/arch/mips/include/asm/sgi/ip22.h
+++ b/arch/mips/include/asm/sgi/ip22.h
@@ -38,8 +38,8 @@
#define SGI_SOFT_0_IRQ SGINT_CPU + 0
#define SGI_SOFT_1_IRQ SGINT_CPU + 1
-#define SGI_LOCAL_0_IRQ SGINT_CPU + 2
-#define SGI_LOCAL_1_IRQ SGINT_CPU + 3
+#define SGI_LOCAL_0_IRQ SGINT_CPU + 2
+#define SGI_LOCAL_1_IRQ SGINT_CPU + 3
#define SGI_8254_0_IRQ SGINT_CPU + 4
#define SGI_8254_1_IRQ SGINT_CPU + 5
#define SGI_BUSERR_IRQ SGINT_CPU + 6
@@ -51,7 +51,7 @@
#define SGI_WD93_1_IRQ SGINT_LOCAL0 + 2 /* 2nd onboard WD93 */
#define SGI_ENET_IRQ SGINT_LOCAL0 + 3 /* onboard ethernet */
#define SGI_MCDMA_IRQ SGINT_LOCAL0 + 4 /* MC DMA done */
-#define SGI_PARPORT_IRQ SGINT_LOCAL0 + 5 /* Parallel port */
+#define SGI_PARPORT_IRQ SGINT_LOCAL0 + 5 /* Parallel port */
#define SGI_GIO_1_IRQ SGINT_LOCAL0 + 6 /* GE / GIO-1 / 2nd-HPC */
#define SGI_MAP_0_IRQ SGINT_LOCAL0 + 7 /* Mappable interrupt 0 */
diff --git a/arch/mips/include/asm/sgi/mc.h b/arch/mips/include/asm/sgi/mc.h
index 1576c2394de..3a070cec97e 100644
--- a/arch/mips/include/asm/sgi/mc.h
+++ b/arch/mips/include/asm/sgi/mc.h
@@ -29,10 +29,10 @@ struct sgimc_regs {
#define SGIMC_CCTRL0_IENAB 0x00002000 /* Allow interrupts from MC */
#define SGIMC_CCTRL0_ESNOOP 0x00004000 /* Snooping I/O enable */
#define SGIMC_CCTRL0_EPROMWR 0x00008000 /* Prom writes from cpu enable */
-#define SGIMC_CCTRL0_WRESETPMEM 0x00010000 /* Perform warm reset, preserves mem */
+#define SGIMC_CCTRL0_WRESETPMEM 0x00010000 /* Perform warm reset, preserves mem */
#define SGIMC_CCTRL0_LENDIAN 0x00020000 /* Put MC in little-endian mode */
-#define SGIMC_CCTRL0_WRESETDMEM 0x00040000 /* Warm reset, destroys mem contents */
-#define SGIMC_CCTRL0_CMEMBADPAR 0x02000000 /* Generate bad perr from cpu to mem */
+#define SGIMC_CCTRL0_WRESETDMEM 0x00040000 /* Warm reset, destroys mem contents */
+#define SGIMC_CCTRL0_CMEMBADPAR 0x02000000 /* Generate bad perr from cpu to mem */
#define SGIMC_CCTRL0_R4KNOCHKPARR 0x04000000 /* Don't chk parity on mem data reads */
#define SGIMC_CCTRL0_GIOBTOB 0x08000000 /* Allow GIO back to back writes */
u32 _unused1;
@@ -40,13 +40,13 @@ struct sgimc_regs {
#define SGIMC_CCTRL1_EGIOTIMEO 0x00000010 /* GIO bus timeout enable */
#define SGIMC_CCTRL1_FIXEDEHPC 0x00001000 /* Fixed HPC endianness */
#define SGIMC_CCTRL1_LITTLEHPC 0x00002000 /* Little endian HPC */
-#define SGIMC_CCTRL1_FIXEDEEXP0 0x00004000 /* Fixed EXP0 endianness */
-#define SGIMC_CCTRL1_LITTLEEXP0 0x00008000 /* Little endian EXP0 */
-#define SGIMC_CCTRL1_FIXEDEEXP1 0x00010000 /* Fixed EXP1 endianness */
-#define SGIMC_CCTRL1_LITTLEEXP1 0x00020000 /* Little endian EXP1 */
+#define SGIMC_CCTRL1_FIXEDEEXP0 0x00004000 /* Fixed EXP0 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP0 0x00008000 /* Little endian EXP0 */
+#define SGIMC_CCTRL1_FIXEDEEXP1 0x00010000 /* Fixed EXP1 endianness */
+#define SGIMC_CCTRL1_LITTLEEXP1 0x00020000 /* Little endian EXP1 */
u32 _unused2;
- volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
+ volatile u32 watchdogt; /* Watchdog reg rdonly, write clears */
u32 _unused3;
volatile u32 systemid; /* MC system ID register, readonly */
@@ -81,11 +81,11 @@ struct sgimc_regs {
#define SGIMC_GIOPAR_RTIMEGFX 0x00000040 /* GFX device has realtime attr */
#define SGIMC_GIOPAR_RTIMEEXP0 0x00000080 /* EXP(slot0) has realtime attr */
#define SGIMC_GIOPAR_RTIMEEXP1 0x00000100 /* EXP(slot1) has realtime attr */
-#define SGIMC_GIOPAR_MASTEREISA 0x00000200 /* EISA bus can act as bus master */
+#define SGIMC_GIOPAR_MASTEREISA 0x00000200 /* EISA bus can act as bus master */
#define SGIMC_GIOPAR_ONEBUS 0x00000400 /* Exists one GIO64 pipelined bus */
#define SGIMC_GIOPAR_MASTERGFX 0x00000800 /* GFX can act as a bus master */
-#define SGIMC_GIOPAR_MASTEREXP0 0x00001000 /* EXP(slot0) can bus master */
-#define SGIMC_GIOPAR_MASTEREXP1 0x00002000 /* EXP(slot1) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP0 0x00001000 /* EXP(slot0) can bus master */
+#define SGIMC_GIOPAR_MASTEREXP1 0x00002000 /* EXP(slot1) can bus master */
#define SGIMC_GIOPAR_PLINEEXP0 0x00004000 /* EXP(slot0) has pipeline attr */
#define SGIMC_GIOPAR_PLINEEXP1 0x00008000 /* EXP(slot1) has pipeline attr */
@@ -107,9 +107,9 @@ struct sgimc_regs {
#define SGIMC_MCONFIG_SBANKS 0x00004000 /* Number of subbanks */
u32 _unused13;
- volatile u32 cmacc; /* Mem access config for CPU */
+ volatile u32 cmacc; /* Mem access config for CPU */
u32 _unused14;
- volatile u32 gmacc; /* Mem access config for GIO */
+ volatile u32 gmacc; /* Mem access config for GIO */
/* This define applies to both cmacc and gmacc registers above. */
#define SGIMC_MACC_ALIASBIG 0x20000000 /* 512MB home for alias */
diff --git a/arch/mips/include/asm/sgi/pi1.h b/arch/mips/include/asm/sgi/pi1.h
index c9506915dc5..96b1a0771ec 100644
--- a/arch/mips/include/asm/sgi/pi1.h
+++ b/arch/mips/include/asm/sgi/pi1.h
@@ -28,16 +28,16 @@ struct pi1_regs {
#define PI1_STAT_BUSY 0x80
u8 _dmactrl[3];
volatile u8 dmactrl;
-#define PI1_DMACTRL_FIFO_EMPTY 0x01 /* fifo empty R/O */
-#define PI1_DMACTRL_ABORT 0x02 /* reset DMA and internal fifo W/O */
-#define PI1_DMACTRL_STDMODE 0x00 /* bits 2-3 */
-#define PI1_DMACTRL_SGIMODE 0x04 /* bits 2-3 */
-#define PI1_DMACTRL_RICOHMODE 0x08 /* bits 2-3 */
-#define PI1_DMACTRL_HPMODE 0x0c /* bits 2-3 */
-#define PI1_DMACTRL_BLKMODE 0x10 /* block mode */
-#define PI1_DMACTRL_FIFO_CLEAR 0x20 /* clear fifo W/O */
-#define PI1_DMACTRL_READ 0x40 /* read */
-#define PI1_DMACTRL_RUN 0x80 /* pedal to the metal */
+#define PI1_DMACTRL_FIFO_EMPTY 0x01 /* fifo empty R/O */
+#define PI1_DMACTRL_ABORT 0x02 /* reset DMA and internal fifo W/O */
+#define PI1_DMACTRL_STDMODE 0x00 /* bits 2-3 */
+#define PI1_DMACTRL_SGIMODE 0x04 /* bits 2-3 */
+#define PI1_DMACTRL_RICOHMODE 0x08 /* bits 2-3 */
+#define PI1_DMACTRL_HPMODE 0x0c /* bits 2-3 */
+#define PI1_DMACTRL_BLKMODE 0x10 /* block mode */
+#define PI1_DMACTRL_FIFO_CLEAR 0x20 /* clear fifo W/O */
+#define PI1_DMACTRL_READ 0x40 /* read */
+#define PI1_DMACTRL_RUN 0x80 /* pedal to the metal */
u8 _intstat[3];
volatile u8 intstat;
#define PI1_INTSTAT_ACK 0x04
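
A minimal sketch of how the PI1_DMACTRL_* bits above would be combined to start a transfer. It is illustrative only and not part of this patch; the particular flag combination and the transfer setup around it are assumptions, not taken from a driver.

	#include <linux/types.h>
	#include <asm/sgi/pi1.h>

	/* Illustrative only: clear the fifo (write-only bit), then start a
	 * DMA read in SGI mode.  "pi1" is assumed to point at the mapped
	 * register block.
	 */
	static void pi1_start_read(struct pi1_regs *pi1)
	{
		pi1->dmactrl = PI1_DMACTRL_FIFO_CLEAR;		/* clear fifo W/O */
		pi1->dmactrl = PI1_DMACTRL_SGIMODE |
			       PI1_DMACTRL_READ |
			       PI1_DMACTRL_RUN;			/* start transfer */
	}
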
diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h
index f5811576945..753275accd1 100644
--- a/arch/mips/include/asm/sgialib.h
+++ b/arch/mips/include/asm/sgialib.h
@@ -37,7 +37,7 @@ extern char prom_getchar(void);
* in chain is CURR is NULL.
*/
extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
-#define PROM_NULL_MDESC ((struct linux_mdesc *) 0)
+#define PROM_NULL_MDESC ((struct linux_mdesc *) 0)
/* Called by prom_init to setup the physical memory pmemblock
* array.
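
A short sketch of how the descriptor chain mentioned above is typically walked: pass PROM_NULL_MDESC to get the first descriptor, then feed each descriptor back in to get the next. That the firmware returns PROM_NULL_MDESC at the end of the chain is an assumption of this example, not something the header spells out; the sketch is not part of this patch.

	#include <asm/sgialib.h>

	/* Illustrative only: count the memory descriptors reported by the
	 * PROM by walking the chain until it (presumably) terminates with
	 * PROM_NULL_MDESC.
	 */
	static int prom_count_mdescs(void)
	{
		struct linux_mdesc *p = prom_getmdesc(PROM_NULL_MDESC);
		int n = 0;

		while (p != PROM_NULL_MDESC) {
			n++;
			p = prom_getmdesc(p);
		}
		return n;
	}
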
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 3dce7c788b3..26ddfff28c8 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -16,33 +16,33 @@
#include <asm/fw/arc/types.h>
/* Various ARCS error codes. */
-#define PROM_ESUCCESS 0x00
-#define PROM_E2BIG 0x01
-#define PROM_EACCESS 0x02
-#define PROM_EAGAIN 0x03
-#define PROM_EBADF 0x04
-#define PROM_EBUSY 0x05
-#define PROM_EFAULT 0x06
-#define PROM_EINVAL 0x07
-#define PROM_EIO 0x08
-#define PROM_EISDIR 0x09
-#define PROM_EMFILE 0x0a
-#define PROM_EMLINK 0x0b
-#define PROM_ENAMETOOLONG 0x0c
-#define PROM_ENODEV 0x0d
-#define PROM_ENOENT 0x0e
-#define PROM_ENOEXEC 0x0f
-#define PROM_ENOMEM 0x10
-#define PROM_ENOSPC 0x11
-#define PROM_ENOTDIR 0x12
-#define PROM_ENOTTY 0x13
-#define PROM_ENXIO 0x14
-#define PROM_EROFS 0x15
+#define PROM_ESUCCESS 0x00
+#define PROM_E2BIG 0x01
+#define PROM_EACCESS 0x02
+#define PROM_EAGAIN 0x03
+#define PROM_EBADF 0x04
+#define PROM_EBUSY 0x05
+#define PROM_EFAULT 0x06
+#define PROM_EINVAL 0x07
+#define PROM_EIO 0x08
+#define PROM_EISDIR 0x09
+#define PROM_EMFILE 0x0a
+#define PROM_EMLINK 0x0b
+#define PROM_ENAMETOOLONG 0x0c
+#define PROM_ENODEV 0x0d
+#define PROM_ENOENT 0x0e
+#define PROM_ENOEXEC 0x0f
+#define PROM_ENOMEM 0x10
+#define PROM_ENOSPC 0x11
+#define PROM_ENOTDIR 0x12
+#define PROM_ENOTTY 0x13
+#define PROM_ENXIO 0x14
+#define PROM_EROFS 0x15
/* SGI ARCS specific errno's. */
-#define PROM_EADDRNOTAVAIL 0x1f
-#define PROM_ETIMEDOUT 0x20
-#define PROM_ECONNABORTED 0x21
-#define PROM_ENOCONNECT 0x22
+#define PROM_EADDRNOTAVAIL 0x1f
+#define PROM_ETIMEDOUT 0x20
+#define PROM_ECONNABORTED 0x21
+#define PROM_ENOCONNECT 0x22
/* Device classes, types, and identifiers for prom
* device inventory queries.
@@ -77,14 +77,14 @@ enum linux_identifier {
/* A prom device tree component. */
struct linux_component {
- enum linux_devclass class; /* node class */
- enum linux_devtypes type; /* node type */
- enum linux_identifier iflags; /* node flags */
- USHORT vers; /* node version */
- USHORT rev; /* node revision */
- ULONG key; /* completely magic */
- ULONG amask; /* XXX affinity mask??? */
- ULONG cdsize; /* size of configuration data */
+ enum linux_devclass class; /* node class */
+ enum linux_devtypes type; /* node type */
+ enum linux_identifier iflags; /* node flags */
+ USHORT vers; /* node version */
+ USHORT rev; /* node revision */
+ ULONG key; /* completely magic */
+ ULONG amask; /* XXX affinity mask??? */
+ ULONG cdsize; /* size of configuration data */
ULONG ilen; /* length of string identifier */
_PULONG iname; /* string identifier */
};
@@ -177,13 +177,13 @@ struct linux_finfo {
struct linux_bigint end;
struct linux_bigint cur;
enum linux_devtypes dtype;
- unsigned long namelen;
- unsigned char attr;
- char name[32]; /* XXX imperical, should be define */
+ unsigned long namelen;
+ unsigned char attr;
+ char name[32]; /* XXX imperical, should be define */
};
/* This describes the vector containing function pointers to the ARC
- firmware functions. */
+ firmware functions. */
struct linux_romvec {
LONG load; /* Load an executable image. */
LONG invoke; /* Invoke a standalong image. */
@@ -244,7 +244,7 @@ struct linux_romvec {
*/
typedef struct _SYSTEM_PARAMETER_BLOCK {
ULONG magic; /* magic cookie */
-#define PROMBLOCK_MAGIC 0x53435241
+#define PROMBLOCK_MAGIC 0x53435241
ULONG len; /* length of parm block */
USHORT ver; /* ARCS firmware version */
@@ -294,16 +294,16 @@ struct linux_cdata {
};
/* Common SGI ARCS firmware file descriptors. */
-#define SGIPROM_STDIN 0
-#define SGIPROM_STDOUT 1
+#define SGIPROM_STDIN 0
+#define SGIPROM_STDOUT 1
/* Common SGI ARCS firmware file types. */
-#define SGIPROM_ROFILE 0x01 /* read-only file */
-#define SGIPROM_HFILE 0x02 /* hidden file */
-#define SGIPROM_SFILE 0x04 /* System file */
-#define SGIPROM_AFILE 0x08 /* Archive file */
-#define SGIPROM_DFILE 0x10 /* Directory file */
-#define SGIPROM_DELFILE 0x20 /* Deleted file */
+#define SGIPROM_ROFILE 0x01 /* read-only file */
+#define SGIPROM_HFILE 0x02 /* hidden file */
+#define SGIPROM_SFILE 0x04 /* System file */
+#define SGIPROM_AFILE 0x08 /* Archive file */
+#define SGIPROM_DFILE 0x10 /* Directory file */
+#define SGIPROM_DELFILE 0x20 /* Deleted file */
/* SGI ARCS boot record information. */
struct sgi_partition {
@@ -318,7 +318,7 @@ struct sgi_partition {
unsigned char tsect0, tsect1, tsect2, tsect3;
};
-#define SGIBBLOCK_MAGIC 0xaa55
+#define SGIBBLOCK_MAGIC 0xaa55
#define SGIBBLOCK_MAXPART 0x0004
struct sgi_bootblock {
@@ -332,34 +332,34 @@ struct sgi_bparm_block {
unsigned short bytes_sect; /* bytes per sector */
unsigned char sect_clust; /* sectors per cluster */
unsigned short sect_resv; /* reserved sectors */
- unsigned char nfats; /* # of allocation tables */
+ unsigned char nfats; /* # of allocation tables */
unsigned short nroot_dirents; /* # of root directory entries */
unsigned short sect_volume; /* sectors in volume */
unsigned char media_type; /* media descriptor */
unsigned short sect_fat; /* sectors per allocation table */
unsigned short sect_track; /* sectors per track */
- unsigned short nheads; /* # of heads */
- unsigned short nhsects; /* # of hidden sectors */
+ unsigned short nheads; /* # of heads */
+ unsigned short nhsects; /* # of hidden sectors */
};
struct sgi_bsector {
- unsigned char jmpinfo[3];
- unsigned char manuf_name[8];
+ unsigned char jmpinfo[3];
+ unsigned char manuf_name[8];
struct sgi_bparm_block info;
};
/* Debugging block used with SGI symmon symbolic debugger. */
-#define SMB_DEBUG_MAGIC 0xfeeddead
+#define SMB_DEBUG_MAGIC 0xfeeddead
struct linux_smonblock {
- unsigned long magic;
- void (*handler)(void); /* Breakpoint routine. */
- unsigned long dtable_base; /* Base addr of dbg table. */
- int (*printf)(const char *fmt, ...);
- unsigned long btable_base; /* Breakpoint table. */
- unsigned long mpflushreqs; /* SMP cache flush request list. */
- unsigned long ntab; /* Name table. */
- unsigned long stab; /* Symbol table. */
- int smax; /* Max # of symbols. */
+ unsigned long magic;
+ void (*handler)(void); /* Breakpoint routine. */
+ unsigned long dtable_base; /* Base addr of dbg table. */
+ int (*printf)(const char *fmt, ...);
+ unsigned long btable_base; /* Breakpoint table. */
+ unsigned long mpflushreqs; /* SMP cache flush request list. */
+ unsigned long ntab; /* Name table. */
+ unsigned long stab; /* Symbol table. */
+ int smax; /* Max # of symbols. */
};
/*
@@ -369,7 +369,7 @@ struct linux_smonblock {
#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
#define __arc_clobbers \
- "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
+ "$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
"$12", "$13", "$14", "$15", "$16", "$24", "$25", "$31"
#define ARC_CALL0(dest) \
@@ -447,7 +447,7 @@ struct linux_smonblock {
"daddu\t$29, 32\n\t" \
"move\t%0, $2" \
: "=r" (__res), "=r" (__vec) \
- : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3), \
+ : "1" (__vec), "r" (__a1), "r" (__a2), "r" (__a3), \
"r" (__a4) \
: __arc_clobbers); \
__res; \
@@ -468,8 +468,8 @@ struct linux_smonblock {
"daddu\t$29, 32\n\t" \
"move\t%0, $2" \
: "=r" (__res), "=r" (__vec) \
- : "1" (__vec), \
- "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4), \
+ : "1" (__vec), \
+ "r" (__a1), "r" (__a2), "r" (__a3), "r" (__a4), \
"r" (__a5) \
: __arc_clobbers); \
__res; \
@@ -512,7 +512,7 @@ struct linux_smonblock {
long __a1 = (long) (a1); \
long __a2 = (long) (a2); \
long __a3 = (long) (a3); \
- long (*__vec)(long, long, long) = (void *) romvec->dest; \
+ long (*__vec)(long, long, long) = (void *) romvec->dest; \
\
__res = __vec(__a1, __a2, __a3); \
__res; \
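
A small sketch of how the SGIPROM_* file type flags defined above might be tested against the "attr" byte of struct linux_finfo. That "attr" carries exactly these bits is an assumption made for the example; the sketch is illustrative only and not part of this patch.

	#include <asm/sgiarcs.h>

	/* Illustrative only: report whether an ARCS file is writable by
	 * checking that the read-only attribute bit is clear.
	 */
	static int prom_file_is_writable(const struct linux_finfo *info)
	{
		return !(info->attr & SGIPROM_ROFILE);	/* read-only bit clear */
	}
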
diff --git a/arch/mips/include/asm/shmparam.h b/arch/mips/include/asm/shmparam.h
index 09290720751..324d04042bd 100644
--- a/arch/mips/include/asm/shmparam.h
+++ b/arch/mips/include/asm/shmparam.h
@@ -8,6 +8,6 @@
#define __ARCH_FORCE_SHMLBA 1
-#define SHMLBA 0x40000 /* attach addr a multiple of this */
+#define SHMLBA 0x40000 /* attach addr a multiple of this */
#endif /* _ASM_SHMPARAM_H */
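
For illustration (not part of this patch), rounding an attach address up to a multiple of SHMLBA looks like the usual power-of-two alignment trick, since SHMLBA is 0x40000 here:

	#include <asm/shmparam.h>

	/* Illustrative only: align an attach address up to the next SHMLBA
	 * boundary; SHMLBA is a power of two, so masking works.
	 */
	static unsigned long shm_align(unsigned long addr)
	{
		return (addr + SHMLBA - 1) & ~(SHMLBA - 1UL);
	}
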
diff --git a/arch/mips/include/asm/sibyte/bcm1480_int.h b/arch/mips/include/asm/sibyte/bcm1480_int.h
index fffb224d229..6b82ed3c235 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_int.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_int.h
@@ -60,253 +60,253 @@
* Interrupt sources (Table 22)
*/
-#define K_BCM1480_INT_SOURCES 128
+#define K_BCM1480_INT_SOURCES 128
#define _BCM1480_INT_HIGH(k) (k)
#define _BCM1480_INT_LOW(k) ((k)+64)
-#define K_BCM1480_INT_ADDR_TRAP _BCM1480_INT_HIGH(1)
-#define K_BCM1480_INT_GPIO_0 _BCM1480_INT_HIGH(4)
-#define K_BCM1480_INT_GPIO_1 _BCM1480_INT_HIGH(5)
-#define K_BCM1480_INT_GPIO_2 _BCM1480_INT_HIGH(6)
-#define K_BCM1480_INT_GPIO_3 _BCM1480_INT_HIGH(7)
-#define K_BCM1480_INT_PCI_INTA _BCM1480_INT_HIGH(8)
-#define K_BCM1480_INT_PCI_INTB _BCM1480_INT_HIGH(9)
-#define K_BCM1480_INT_PCI_INTC _BCM1480_INT_HIGH(10)
-#define K_BCM1480_INT_PCI_INTD _BCM1480_INT_HIGH(11)
-#define K_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_HIGH(12)
-#define K_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_HIGH(13)
-#define K_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_HIGH(14)
-#define K_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_HIGH(15)
-#define K_BCM1480_INT_TIMER_0 _BCM1480_INT_HIGH(20)
-#define K_BCM1480_INT_TIMER_1 _BCM1480_INT_HIGH(21)
-#define K_BCM1480_INT_TIMER_2 _BCM1480_INT_HIGH(22)
-#define K_BCM1480_INT_TIMER_3 _BCM1480_INT_HIGH(23)
-#define K_BCM1480_INT_DM_CH_0 _BCM1480_INT_HIGH(28)
-#define K_BCM1480_INT_DM_CH_1 _BCM1480_INT_HIGH(29)
-#define K_BCM1480_INT_DM_CH_2 _BCM1480_INT_HIGH(30)
-#define K_BCM1480_INT_DM_CH_3 _BCM1480_INT_HIGH(31)
-#define K_BCM1480_INT_MAC_0 _BCM1480_INT_HIGH(36)
-#define K_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_HIGH(37)
-#define K_BCM1480_INT_MAC_1 _BCM1480_INT_HIGH(38)
-#define K_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_HIGH(39)
-#define K_BCM1480_INT_MAC_2 _BCM1480_INT_HIGH(40)
-#define K_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_HIGH(41)
-#define K_BCM1480_INT_MAC_3 _BCM1480_INT_HIGH(42)
-#define K_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_HIGH(43)
-#define K_BCM1480_INT_PMI_LOW _BCM1480_INT_HIGH(52)
-#define K_BCM1480_INT_PMI_HIGH _BCM1480_INT_HIGH(53)
-#define K_BCM1480_INT_PMO_LOW _BCM1480_INT_HIGH(54)
-#define K_BCM1480_INT_PMO_HIGH _BCM1480_INT_HIGH(55)
-#define K_BCM1480_INT_MBOX_0_0 _BCM1480_INT_HIGH(56)
-#define K_BCM1480_INT_MBOX_0_1 _BCM1480_INT_HIGH(57)
-#define K_BCM1480_INT_MBOX_0_2 _BCM1480_INT_HIGH(58)
-#define K_BCM1480_INT_MBOX_0_3 _BCM1480_INT_HIGH(59)
-#define K_BCM1480_INT_MBOX_1_0 _BCM1480_INT_HIGH(60)
-#define K_BCM1480_INT_MBOX_1_1 _BCM1480_INT_HIGH(61)
-#define K_BCM1480_INT_MBOX_1_2 _BCM1480_INT_HIGH(62)
-#define K_BCM1480_INT_MBOX_1_3 _BCM1480_INT_HIGH(63)
+#define K_BCM1480_INT_ADDR_TRAP _BCM1480_INT_HIGH(1)
+#define K_BCM1480_INT_GPIO_0 _BCM1480_INT_HIGH(4)
+#define K_BCM1480_INT_GPIO_1 _BCM1480_INT_HIGH(5)
+#define K_BCM1480_INT_GPIO_2 _BCM1480_INT_HIGH(6)
+#define K_BCM1480_INT_GPIO_3 _BCM1480_INT_HIGH(7)
+#define K_BCM1480_INT_PCI_INTA _BCM1480_INT_HIGH(8)
+#define K_BCM1480_INT_PCI_INTB _BCM1480_INT_HIGH(9)
+#define K_BCM1480_INT_PCI_INTC _BCM1480_INT_HIGH(10)
+#define K_BCM1480_INT_PCI_INTD _BCM1480_INT_HIGH(11)
+#define K_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_HIGH(12)
+#define K_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_HIGH(13)
+#define K_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_HIGH(14)
+#define K_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_HIGH(15)
+#define K_BCM1480_INT_TIMER_0 _BCM1480_INT_HIGH(20)
+#define K_BCM1480_INT_TIMER_1 _BCM1480_INT_HIGH(21)
+#define K_BCM1480_INT_TIMER_2 _BCM1480_INT_HIGH(22)
+#define K_BCM1480_INT_TIMER_3 _BCM1480_INT_HIGH(23)
+#define K_BCM1480_INT_DM_CH_0 _BCM1480_INT_HIGH(28)
+#define K_BCM1480_INT_DM_CH_1 _BCM1480_INT_HIGH(29)
+#define K_BCM1480_INT_DM_CH_2 _BCM1480_INT_HIGH(30)
+#define K_BCM1480_INT_DM_CH_3 _BCM1480_INT_HIGH(31)
+#define K_BCM1480_INT_MAC_0 _BCM1480_INT_HIGH(36)
+#define K_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_HIGH(37)
+#define K_BCM1480_INT_MAC_1 _BCM1480_INT_HIGH(38)
+#define K_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_HIGH(39)
+#define K_BCM1480_INT_MAC_2 _BCM1480_INT_HIGH(40)
+#define K_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_HIGH(41)
+#define K_BCM1480_INT_MAC_3 _BCM1480_INT_HIGH(42)
+#define K_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_HIGH(43)
+#define K_BCM1480_INT_PMI_LOW _BCM1480_INT_HIGH(52)
+#define K_BCM1480_INT_PMI_HIGH _BCM1480_INT_HIGH(53)
+#define K_BCM1480_INT_PMO_LOW _BCM1480_INT_HIGH(54)
+#define K_BCM1480_INT_PMO_HIGH _BCM1480_INT_HIGH(55)
+#define K_BCM1480_INT_MBOX_0_0 _BCM1480_INT_HIGH(56)
+#define K_BCM1480_INT_MBOX_0_1 _BCM1480_INT_HIGH(57)
+#define K_BCM1480_INT_MBOX_0_2 _BCM1480_INT_HIGH(58)
+#define K_BCM1480_INT_MBOX_0_3 _BCM1480_INT_HIGH(59)
+#define K_BCM1480_INT_MBOX_1_0 _BCM1480_INT_HIGH(60)
+#define K_BCM1480_INT_MBOX_1_1 _BCM1480_INT_HIGH(61)
+#define K_BCM1480_INT_MBOX_1_2 _BCM1480_INT_HIGH(62)
+#define K_BCM1480_INT_MBOX_1_3 _BCM1480_INT_HIGH(63)
-#define K_BCM1480_INT_BAD_ECC _BCM1480_INT_LOW(1)
-#define K_BCM1480_INT_COR_ECC _BCM1480_INT_LOW(2)
-#define K_BCM1480_INT_IO_BUS _BCM1480_INT_LOW(3)
-#define K_BCM1480_INT_PERF_CNT _BCM1480_INT_LOW(4)
-#define K_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_LOW(5)
-#define K_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_LOW(6)
-#define K_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_LOW(7)
-#define K_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_LOW(8)
-#define K_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_LOW(9)
-#define K_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_LOW(10)
-#define K_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_LOW(11)
-#define K_BCM1480_INT_PCI_ERROR _BCM1480_INT_LOW(16)
-#define K_BCM1480_INT_PCI_RESET _BCM1480_INT_LOW(17)
-#define K_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_LOW(18)
-#define K_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_LOW(19)
-#define K_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_LOW(20)
-#define K_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_LOW(21)
-#define K_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_LOW(22)
-#define K_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_LOW(23)
-#define K_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_LOW(24)
-#define K_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_LOW(25)
-#define K_BCM1480_INT_LDT_SMI _BCM1480_INT_LOW(32)
-#define K_BCM1480_INT_LDT_NMI _BCM1480_INT_LOW(33)
-#define K_BCM1480_INT_LDT_INIT _BCM1480_INT_LOW(34)
-#define K_BCM1480_INT_LDT_STARTUP _BCM1480_INT_LOW(35)
-#define K_BCM1480_INT_LDT_EXT _BCM1480_INT_LOW(36)
-#define K_BCM1480_INT_SMB_0 _BCM1480_INT_LOW(40)
-#define K_BCM1480_INT_SMB_1 _BCM1480_INT_LOW(41)
-#define K_BCM1480_INT_PCMCIA _BCM1480_INT_LOW(42)
-#define K_BCM1480_INT_UART_0 _BCM1480_INT_LOW(44)
-#define K_BCM1480_INT_UART_1 _BCM1480_INT_LOW(45)
-#define K_BCM1480_INT_UART_2 _BCM1480_INT_LOW(46)
-#define K_BCM1480_INT_UART_3 _BCM1480_INT_LOW(47)
-#define K_BCM1480_INT_GPIO_4 _BCM1480_INT_LOW(52)
-#define K_BCM1480_INT_GPIO_5 _BCM1480_INT_LOW(53)
-#define K_BCM1480_INT_GPIO_6 _BCM1480_INT_LOW(54)
-#define K_BCM1480_INT_GPIO_7 _BCM1480_INT_LOW(55)
-#define K_BCM1480_INT_GPIO_8 _BCM1480_INT_LOW(56)
-#define K_BCM1480_INT_GPIO_9 _BCM1480_INT_LOW(57)
-#define K_BCM1480_INT_GPIO_10 _BCM1480_INT_LOW(58)
-#define K_BCM1480_INT_GPIO_11 _BCM1480_INT_LOW(59)
-#define K_BCM1480_INT_GPIO_12 _BCM1480_INT_LOW(60)
-#define K_BCM1480_INT_GPIO_13 _BCM1480_INT_LOW(61)
-#define K_BCM1480_INT_GPIO_14 _BCM1480_INT_LOW(62)
-#define K_BCM1480_INT_GPIO_15 _BCM1480_INT_LOW(63)
+#define K_BCM1480_INT_BAD_ECC _BCM1480_INT_LOW(1)
+#define K_BCM1480_INT_COR_ECC _BCM1480_INT_LOW(2)
+#define K_BCM1480_INT_IO_BUS _BCM1480_INT_LOW(3)
+#define K_BCM1480_INT_PERF_CNT _BCM1480_INT_LOW(4)
+#define K_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_LOW(5)
+#define K_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_LOW(6)
+#define K_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_LOW(7)
+#define K_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_LOW(8)
+#define K_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_LOW(9)
+#define K_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_LOW(10)
+#define K_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_LOW(11)
+#define K_BCM1480_INT_PCI_ERROR _BCM1480_INT_LOW(16)
+#define K_BCM1480_INT_PCI_RESET _BCM1480_INT_LOW(17)
+#define K_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_LOW(18)
+#define K_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_LOW(19)
+#define K_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_LOW(20)
+#define K_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_LOW(21)
+#define K_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_LOW(22)
+#define K_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_LOW(23)
+#define K_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_LOW(24)
+#define K_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_LOW(25)
+#define K_BCM1480_INT_LDT_SMI _BCM1480_INT_LOW(32)
+#define K_BCM1480_INT_LDT_NMI _BCM1480_INT_LOW(33)
+#define K_BCM1480_INT_LDT_INIT _BCM1480_INT_LOW(34)
+#define K_BCM1480_INT_LDT_STARTUP _BCM1480_INT_LOW(35)
+#define K_BCM1480_INT_LDT_EXT _BCM1480_INT_LOW(36)
+#define K_BCM1480_INT_SMB_0 _BCM1480_INT_LOW(40)
+#define K_BCM1480_INT_SMB_1 _BCM1480_INT_LOW(41)
+#define K_BCM1480_INT_PCMCIA _BCM1480_INT_LOW(42)
+#define K_BCM1480_INT_UART_0 _BCM1480_INT_LOW(44)
+#define K_BCM1480_INT_UART_1 _BCM1480_INT_LOW(45)
+#define K_BCM1480_INT_UART_2 _BCM1480_INT_LOW(46)
+#define K_BCM1480_INT_UART_3 _BCM1480_INT_LOW(47)
+#define K_BCM1480_INT_GPIO_4 _BCM1480_INT_LOW(52)
+#define K_BCM1480_INT_GPIO_5 _BCM1480_INT_LOW(53)
+#define K_BCM1480_INT_GPIO_6 _BCM1480_INT_LOW(54)
+#define K_BCM1480_INT_GPIO_7 _BCM1480_INT_LOW(55)
+#define K_BCM1480_INT_GPIO_8 _BCM1480_INT_LOW(56)
+#define K_BCM1480_INT_GPIO_9 _BCM1480_INT_LOW(57)
+#define K_BCM1480_INT_GPIO_10 _BCM1480_INT_LOW(58)
+#define K_BCM1480_INT_GPIO_11 _BCM1480_INT_LOW(59)
+#define K_BCM1480_INT_GPIO_12 _BCM1480_INT_LOW(60)
+#define K_BCM1480_INT_GPIO_13 _BCM1480_INT_LOW(61)
+#define K_BCM1480_INT_GPIO_14 _BCM1480_INT_LOW(62)
+#define K_BCM1480_INT_GPIO_15 _BCM1480_INT_LOW(63)
/*
* Mask values for each interrupt
*/
-#define _BCM1480_INT_MASK(w, n) _SB_MAKEMASK(w, ((n) & 0x3F))
-#define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F))
-#define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6)
+#define _BCM1480_INT_MASK(w, n) _SB_MAKEMASK(w, ((n) & 0x3F))
+#define _BCM1480_INT_MASK1(n) _SB_MAKEMASK1(((n) & 0x3F))
+#define _BCM1480_INT_OFFSET(n) (((n) & 0x40) << 6)
-#define M_BCM1480_INT_CASCADE _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
+#define M_BCM1480_INT_CASCADE _BCM1480_INT_MASK1(_BCM1480_INT_HIGH(0))
-#define M_BCM1480_INT_ADDR_TRAP _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
-#define M_BCM1480_INT_GPIO_0 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
-#define M_BCM1480_INT_GPIO_1 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
-#define M_BCM1480_INT_GPIO_2 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
-#define M_BCM1480_INT_GPIO_3 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
-#define M_BCM1480_INT_PCI_INTA _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
-#define M_BCM1480_INT_PCI_INTB _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
-#define M_BCM1480_INT_PCI_INTC _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
-#define M_BCM1480_INT_PCI_INTD _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
-#define M_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
-#define M_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
-#define M_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
-#define M_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
-#define M_BCM1480_INT_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
-#define M_BCM1480_INT_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
-#define M_BCM1480_INT_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
-#define M_BCM1480_INT_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
-#define M_BCM1480_INT_DM_CH_0 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
-#define M_BCM1480_INT_DM_CH_1 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
-#define M_BCM1480_INT_DM_CH_2 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
-#define M_BCM1480_INT_DM_CH_3 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
-#define M_BCM1480_INT_MAC_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
-#define M_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
-#define M_BCM1480_INT_MAC_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
-#define M_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
-#define M_BCM1480_INT_MAC_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
-#define M_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
-#define M_BCM1480_INT_MAC_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
-#define M_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
-#define M_BCM1480_INT_PMI_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
-#define M_BCM1480_INT_PMI_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
-#define M_BCM1480_INT_PMO_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
-#define M_BCM1480_INT_PMO_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
-#define M_BCM1480_INT_MBOX_ALL _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
-#define M_BCM1480_INT_MBOX_0_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
-#define M_BCM1480_INT_MBOX_0_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
-#define M_BCM1480_INT_MBOX_0_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
-#define M_BCM1480_INT_MBOX_0_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
-#define M_BCM1480_INT_MBOX_1_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
-#define M_BCM1480_INT_MBOX_1_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
-#define M_BCM1480_INT_MBOX_1_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
-#define M_BCM1480_INT_MBOX_1_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
-#define M_BCM1480_INT_BAD_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
-#define M_BCM1480_INT_COR_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
-#define M_BCM1480_INT_IO_BUS _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
-#define M_BCM1480_INT_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
-#define M_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
-#define M_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
-#define M_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
-#define M_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
-#define M_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
-#define M_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
-#define M_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
-#define M_BCM1480_INT_PCI_ERROR _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
-#define M_BCM1480_INT_PCI_RESET _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
-#define M_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
-#define M_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
-#define M_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
-#define M_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
-#define M_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
-#define M_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
-#define M_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
-#define M_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
-#define M_BCM1480_INT_LDT_SMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
-#define M_BCM1480_INT_LDT_NMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
-#define M_BCM1480_INT_LDT_INIT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
-#define M_BCM1480_INT_LDT_STARTUP _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
-#define M_BCM1480_INT_LDT_EXT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
-#define M_BCM1480_INT_SMB_0 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
-#define M_BCM1480_INT_SMB_1 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
-#define M_BCM1480_INT_PCMCIA _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
-#define M_BCM1480_INT_UART_0 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
-#define M_BCM1480_INT_UART_1 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
-#define M_BCM1480_INT_UART_2 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
-#define M_BCM1480_INT_UART_3 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
-#define M_BCM1480_INT_GPIO_4 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
-#define M_BCM1480_INT_GPIO_5 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
-#define M_BCM1480_INT_GPIO_6 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
-#define M_BCM1480_INT_GPIO_7 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
-#define M_BCM1480_INT_GPIO_8 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
-#define M_BCM1480_INT_GPIO_9 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
-#define M_BCM1480_INT_GPIO_10 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
-#define M_BCM1480_INT_GPIO_11 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
-#define M_BCM1480_INT_GPIO_12 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
-#define M_BCM1480_INT_GPIO_13 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
-#define M_BCM1480_INT_GPIO_14 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
-#define M_BCM1480_INT_GPIO_15 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
+#define M_BCM1480_INT_ADDR_TRAP _BCM1480_INT_MASK1(K_BCM1480_INT_ADDR_TRAP)
+#define M_BCM1480_INT_GPIO_0 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_0)
+#define M_BCM1480_INT_GPIO_1 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_1)
+#define M_BCM1480_INT_GPIO_2 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_2)
+#define M_BCM1480_INT_GPIO_3 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_3)
+#define M_BCM1480_INT_PCI_INTA _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTA)
+#define M_BCM1480_INT_PCI_INTB _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTB)
+#define M_BCM1480_INT_PCI_INTC _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTC)
+#define M_BCM1480_INT_PCI_INTD _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_INTD)
+#define M_BCM1480_INT_CYCLE_CP0 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP0)
+#define M_BCM1480_INT_CYCLE_CP1 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP1)
+#define M_BCM1480_INT_CYCLE_CP2 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP2)
+#define M_BCM1480_INT_CYCLE_CP3 _BCM1480_INT_MASK1(K_BCM1480_INT_CYCLE_CP3)
+#define M_BCM1480_INT_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_0)
+#define M_BCM1480_INT_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_1)
+#define M_BCM1480_INT_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_2)
+#define M_BCM1480_INT_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_TIMER_3)
+#define M_BCM1480_INT_DM_CH_0 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_0)
+#define M_BCM1480_INT_DM_CH_1 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_1)
+#define M_BCM1480_INT_DM_CH_2 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_2)
+#define M_BCM1480_INT_DM_CH_3 _BCM1480_INT_MASK1(K_BCM1480_INT_DM_CH_3)
+#define M_BCM1480_INT_MAC_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0)
+#define M_BCM1480_INT_MAC_0_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_0_CH1)
+#define M_BCM1480_INT_MAC_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1)
+#define M_BCM1480_INT_MAC_1_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_1_CH1)
+#define M_BCM1480_INT_MAC_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2)
+#define M_BCM1480_INT_MAC_2_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_2_CH1)
+#define M_BCM1480_INT_MAC_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3)
+#define M_BCM1480_INT_MAC_3_CH1 _BCM1480_INT_MASK1(K_BCM1480_INT_MAC_3_CH1)
+#define M_BCM1480_INT_PMI_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_LOW)
+#define M_BCM1480_INT_PMI_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMI_HIGH)
+#define M_BCM1480_INT_PMO_LOW _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_LOW)
+#define M_BCM1480_INT_PMO_HIGH _BCM1480_INT_MASK1(K_BCM1480_INT_PMO_HIGH)
+#define M_BCM1480_INT_MBOX_ALL _BCM1480_INT_MASK(8, K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_0)
+#define M_BCM1480_INT_MBOX_0_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_1)
+#define M_BCM1480_INT_MBOX_0_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_2)
+#define M_BCM1480_INT_MBOX_0_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_0_3)
+#define M_BCM1480_INT_MBOX_1_0 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_0)
+#define M_BCM1480_INT_MBOX_1_1 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_1)
+#define M_BCM1480_INT_MBOX_1_2 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_2)
+#define M_BCM1480_INT_MBOX_1_3 _BCM1480_INT_MASK1(K_BCM1480_INT_MBOX_1_3)
+#define M_BCM1480_INT_BAD_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_BAD_ECC)
+#define M_BCM1480_INT_COR_ECC _BCM1480_INT_MASK1(K_BCM1480_INT_COR_ECC)
+#define M_BCM1480_INT_IO_BUS _BCM1480_INT_MASK1(K_BCM1480_INT_IO_BUS)
+#define M_BCM1480_INT_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_PERF_CNT)
+#define M_BCM1480_INT_SW_PERF_CNT _BCM1480_INT_MASK1(K_BCM1480_INT_SW_PERF_CNT)
+#define M_BCM1480_INT_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_TRACE_FREEZE)
+#define M_BCM1480_INT_SW_TRACE_FREEZE _BCM1480_INT_MASK1(K_BCM1480_INT_SW_TRACE_FREEZE)
+#define M_BCM1480_INT_WATCHDOG_TIMER_0 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_0)
+#define M_BCM1480_INT_WATCHDOG_TIMER_1 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_1)
+#define M_BCM1480_INT_WATCHDOG_TIMER_2 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_2)
+#define M_BCM1480_INT_WATCHDOG_TIMER_3 _BCM1480_INT_MASK1(K_BCM1480_INT_WATCHDOG_TIMER_3)
+#define M_BCM1480_INT_PCI_ERROR _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_ERROR)
+#define M_BCM1480_INT_PCI_RESET _BCM1480_INT_MASK1(K_BCM1480_INT_PCI_RESET)
+#define M_BCM1480_INT_NODE_CONTROLLER _BCM1480_INT_MASK1(K_BCM1480_INT_NODE_CONTROLLER)
+#define M_BCM1480_INT_HOST_BRIDGE _BCM1480_INT_MASK1(K_BCM1480_INT_HOST_BRIDGE)
+#define M_BCM1480_INT_PORT_0_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_FATAL)
+#define M_BCM1480_INT_PORT_0_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_0_NONFATAL)
+#define M_BCM1480_INT_PORT_1_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_FATAL)
+#define M_BCM1480_INT_PORT_1_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_1_NONFATAL)
+#define M_BCM1480_INT_PORT_2_FATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_FATAL)
+#define M_BCM1480_INT_PORT_2_NONFATAL _BCM1480_INT_MASK1(K_BCM1480_INT_PORT_2_NONFATAL)
+#define M_BCM1480_INT_LDT_SMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_SMI)
+#define M_BCM1480_INT_LDT_NMI _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_NMI)
+#define M_BCM1480_INT_LDT_INIT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_INIT)
+#define M_BCM1480_INT_LDT_STARTUP _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_STARTUP)
+#define M_BCM1480_INT_LDT_EXT _BCM1480_INT_MASK1(K_BCM1480_INT_LDT_EXT)
+#define M_BCM1480_INT_SMB_0 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_0)
+#define M_BCM1480_INT_SMB_1 _BCM1480_INT_MASK1(K_BCM1480_INT_SMB_1)
+#define M_BCM1480_INT_PCMCIA _BCM1480_INT_MASK1(K_BCM1480_INT_PCMCIA)
+#define M_BCM1480_INT_UART_0 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_0)
+#define M_BCM1480_INT_UART_1 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_1)
+#define M_BCM1480_INT_UART_2 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_2)
+#define M_BCM1480_INT_UART_3 _BCM1480_INT_MASK1(K_BCM1480_INT_UART_3)
+#define M_BCM1480_INT_GPIO_4 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_4)
+#define M_BCM1480_INT_GPIO_5 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_5)
+#define M_BCM1480_INT_GPIO_6 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_6)
+#define M_BCM1480_INT_GPIO_7 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_7)
+#define M_BCM1480_INT_GPIO_8 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_8)
+#define M_BCM1480_INT_GPIO_9 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_9)
+#define M_BCM1480_INT_GPIO_10 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_10)
+#define M_BCM1480_INT_GPIO_11 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_11)
+#define M_BCM1480_INT_GPIO_12 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_12)
+#define M_BCM1480_INT_GPIO_13 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_13)
+#define M_BCM1480_INT_GPIO_14 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_14)
+#define M_BCM1480_INT_GPIO_15 _BCM1480_INT_MASK1(K_BCM1480_INT_GPIO_15)
/*
* Interrupt mappings (Table 18)
*/
-#define K_BCM1480_INT_MAP_I0 0 /* interrupt pins on processor */
-#define K_BCM1480_INT_MAP_I1 1
-#define K_BCM1480_INT_MAP_I2 2
-#define K_BCM1480_INT_MAP_I3 3
-#define K_BCM1480_INT_MAP_I4 4
-#define K_BCM1480_INT_MAP_I5 5
-#define K_BCM1480_INT_MAP_NMI 6 /* nonmaskable */
-#define K_BCM1480_INT_MAP_DINT 7 /* debug interrupt */
+#define K_BCM1480_INT_MAP_I0 0 /* interrupt pins on processor */
+#define K_BCM1480_INT_MAP_I1 1
+#define K_BCM1480_INT_MAP_I2 2
+#define K_BCM1480_INT_MAP_I3 3
+#define K_BCM1480_INT_MAP_I4 4
+#define K_BCM1480_INT_MAP_I5 5
+#define K_BCM1480_INT_MAP_NMI 6 /* nonmaskable */
+#define K_BCM1480_INT_MAP_DINT 7 /* debug interrupt */
/*
* Interrupt LDT Set Register (Table 19)
*/
-#define S_BCM1480_INT_HT_INTMSG 0
-#define M_BCM1480_INT_HT_INTMSG _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
-#define V_BCM1480_INT_HT_INTMSG(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
-#define G_BCM1480_INT_HT_INTMSG(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
+#define S_BCM1480_INT_HT_INTMSG 0
+#define M_BCM1480_INT_HT_INTMSG _SB_MAKEMASK(3, S_BCM1480_INT_HT_INTMSG)
+#define V_BCM1480_INT_HT_INTMSG(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTMSG)
+#define G_BCM1480_INT_HT_INTMSG(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTMSG, M_BCM1480_INT_HT_INTMSG)
-#define K_BCM1480_INT_HT_INTMSG_FIXED 0
+#define K_BCM1480_INT_HT_INTMSG_FIXED 0
#define K_BCM1480_INT_HT_INTMSG_ARBITRATED 1
-#define K_BCM1480_INT_HT_INTMSG_SMI 2
-#define K_BCM1480_INT_HT_INTMSG_NMI 3
-#define K_BCM1480_INT_HT_INTMSG_INIT 4
-#define K_BCM1480_INT_HT_INTMSG_STARTUP 5
-#define K_BCM1480_INT_HT_INTMSG_EXTINT 6
+#define K_BCM1480_INT_HT_INTMSG_SMI 2
+#define K_BCM1480_INT_HT_INTMSG_NMI 3
+#define K_BCM1480_INT_HT_INTMSG_INIT 4
+#define K_BCM1480_INT_HT_INTMSG_STARTUP 5
+#define K_BCM1480_INT_HT_INTMSG_EXTINT 6
#define K_BCM1480_INT_HT_INTMSG_RESERVED 7
-#define M_BCM1480_INT_HT_TRIGGERMODE _SB_MAKEMASK1(3)
-#define V_BCM1480_INT_HT_EDGETRIGGER 0
-#define V_BCM1480_INT_HT_LEVELTRIGGER M_BCM1480_INT_HT_TRIGGERMODE
+#define M_BCM1480_INT_HT_TRIGGERMODE _SB_MAKEMASK1(3)
+#define V_BCM1480_INT_HT_EDGETRIGGER 0
+#define V_BCM1480_INT_HT_LEVELTRIGGER M_BCM1480_INT_HT_TRIGGERMODE
-#define M_BCM1480_INT_HT_DESTMODE _SB_MAKEMASK1(4)
-#define V_BCM1480_INT_HT_PHYSICALDEST 0
-#define V_BCM1480_INT_HT_LOGICALDEST M_BCM1480_INT_HT_DESTMODE
+#define M_BCM1480_INT_HT_DESTMODE _SB_MAKEMASK1(4)
+#define V_BCM1480_INT_HT_PHYSICALDEST 0
+#define V_BCM1480_INT_HT_LOGICALDEST M_BCM1480_INT_HT_DESTMODE
-#define S_BCM1480_INT_HT_INTDEST 5
-#define M_BCM1480_INT_HT_INTDEST _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
-#define V_BCM1480_INT_HT_INTDEST(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
-#define G_BCM1480_INT_HT_INTDEST(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
+#define S_BCM1480_INT_HT_INTDEST 5
+#define M_BCM1480_INT_HT_INTDEST _SB_MAKEMASK(8, S_BCM1480_INT_HT_INTDEST)
+#define V_BCM1480_INT_HT_INTDEST(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_INTDEST)
+#define G_BCM1480_INT_HT_INTDEST(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_INTDEST, M_BCM1480_INT_HT_INTDEST)
-#define S_BCM1480_INT_HT_VECTOR 13
-#define M_BCM1480_INT_HT_VECTOR _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
-#define V_BCM1480_INT_HT_VECTOR(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
-#define G_BCM1480_INT_HT_VECTOR(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
+#define S_BCM1480_INT_HT_VECTOR 13
+#define M_BCM1480_INT_HT_VECTOR _SB_MAKEMASK(8, S_BCM1480_INT_HT_VECTOR)
+#define V_BCM1480_INT_HT_VECTOR(x) _SB_MAKEVALUE(x, S_BCM1480_INT_HT_VECTOR)
+#define G_BCM1480_INT_HT_VECTOR(x) _SB_GETVALUE(x, S_BCM1480_INT_HT_VECTOR, M_BCM1480_INT_HT_VECTOR)
/*
* Vector prefix (Table 4-7)
*/
#define M_BCM1480_HTVECT_RAISE_INTLDT_HIGH 0x00
-#define M_BCM1480_HTVECT_RAISE_MBOX_0 0x40
+#define M_BCM1480_HTVECT_RAISE_MBOX_0 0x40
#define M_BCM1480_HTVECT_RAISE_INTLDT_LO 0x80
-#define M_BCM1480_HTVECT_RAISE_MBOX_1 0xC0
+#define M_BCM1480_HTVECT_RAISE_MBOX_1 0xC0
#endif /* _BCM1480_INT_H */
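
A brief sketch of how the definitions above fit together: the K_* values are interrupt source numbers (split into a high and a low group of 64), and the matching M_* macros turn a source number into a single-bit mask for the corresponding 64-bit interrupt register. The sketch below ORs masks for sources that all sit in the low group, so they target the same register; it is illustrative only, not part of this patch, and assumes the header pulls in the _SB_MAKEMASK helpers it relies on.

	#include <linux/types.h>
	#include <asm/sibyte/bcm1480_int.h>

	/* Illustrative only: build a combined mask for the two UARTs and the
	 * first SMBus controller, all of which are "low" group sources.
	 */
	static u64 bcm1480_uart_smb_mask(void)
	{
		return M_BCM1480_INT_UART_0 |
		       M_BCM1480_INT_UART_1 |
		       M_BCM1480_INT_SMB_0;
	}
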
diff --git a/arch/mips/include/asm/sibyte/bcm1480_l2c.h b/arch/mips/include/asm/sibyte/bcm1480_l2c.h
index 725d38cb9d1..910e5c7e1b0 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_l2c.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_l2c.h
@@ -39,120 +39,120 @@
* Format of level 2 cache management address (Table 55)
*/
-#define S_BCM1480_L2C_MGMT_INDEX 5
-#define M_BCM1480_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
-#define V_BCM1480_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
-#define G_BCM1480_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
+#define S_BCM1480_L2C_MGMT_INDEX 5
+#define M_BCM1480_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_MGMT_INDEX)
+#define V_BCM1480_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_INDEX)
+#define G_BCM1480_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_INDEX, M_BCM1480_L2C_MGMT_INDEX)
-#define S_BCM1480_L2C_MGMT_WAY 17
-#define M_BCM1480_L2C_MGMT_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
-#define V_BCM1480_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
-#define G_BCM1480_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
+#define S_BCM1480_L2C_MGMT_WAY 17
+#define M_BCM1480_L2C_MGMT_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_MGMT_WAY)
+#define V_BCM1480_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_WAY)
+#define G_BCM1480_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_WAY, M_BCM1480_L2C_MGMT_WAY)
-#define M_BCM1480_L2C_MGMT_DIRTY _SB_MAKEMASK1(20)
-#define M_BCM1480_L2C_MGMT_VALID _SB_MAKEMASK1(21)
+#define M_BCM1480_L2C_MGMT_DIRTY _SB_MAKEMASK1(20)
+#define M_BCM1480_L2C_MGMT_VALID _SB_MAKEMASK1(21)
-#define S_BCM1480_L2C_MGMT_ECC_DIAG 22
-#define M_BCM1480_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
-#define V_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
-#define G_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
+#define S_BCM1480_L2C_MGMT_ECC_DIAG 22
+#define M_BCM1480_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define V_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG)
+#define G_BCM1480_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_MGMT_ECC_DIAG, M_BCM1480_L2C_MGMT_ECC_DIAG)
-#define A_BCM1480_L2C_MGMT_TAG_BASE 0x00D0000000
+#define A_BCM1480_L2C_MGMT_TAG_BASE 0x00D0000000
-#define BCM1480_L2C_ENTRIES_PER_WAY 4096
-#define BCM1480_L2C_NUM_WAYS 8
+#define BCM1480_L2C_ENTRIES_PER_WAY 4096
+#define BCM1480_L2C_NUM_WAYS 8
/*
* Level 2 Cache Tag register (Table 59)
*/
-#define S_BCM1480_L2C_TAG_MBZ 0
-#define M_BCM1480_L2C_TAG_MBZ _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
+#define S_BCM1480_L2C_TAG_MBZ 0
+#define M_BCM1480_L2C_TAG_MBZ _SB_MAKEMASK(5, S_BCM1480_L2C_TAG_MBZ)
-#define S_BCM1480_L2C_TAG_INDEX 5
-#define M_BCM1480_L2C_TAG_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
-#define V_BCM1480_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
-#define G_BCM1480_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
+#define S_BCM1480_L2C_TAG_INDEX 5
+#define M_BCM1480_L2C_TAG_INDEX _SB_MAKEMASK(12, S_BCM1480_L2C_TAG_INDEX)
+#define V_BCM1480_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_INDEX)
+#define G_BCM1480_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_INDEX, M_BCM1480_L2C_TAG_INDEX)
/* Note that index bit 16 is also tag bit 40 */
-#define S_BCM1480_L2C_TAG_TAG 17
-#define M_BCM1480_L2C_TAG_TAG _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
-#define V_BCM1480_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
-#define G_BCM1480_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
+#define S_BCM1480_L2C_TAG_TAG 17
+#define M_BCM1480_L2C_TAG_TAG _SB_MAKEMASK(23, S_BCM1480_L2C_TAG_TAG)
+#define V_BCM1480_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_TAG)
+#define G_BCM1480_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_TAG, M_BCM1480_L2C_TAG_TAG)
-#define S_BCM1480_L2C_TAG_ECC 40
-#define M_BCM1480_L2C_TAG_ECC _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
-#define V_BCM1480_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
-#define G_BCM1480_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
+#define S_BCM1480_L2C_TAG_ECC 40
+#define M_BCM1480_L2C_TAG_ECC _SB_MAKEMASK(6, S_BCM1480_L2C_TAG_ECC)
+#define V_BCM1480_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_ECC)
+#define G_BCM1480_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_ECC, M_BCM1480_L2C_TAG_ECC)
-#define S_BCM1480_L2C_TAG_WAY 46
-#define M_BCM1480_L2C_TAG_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
-#define V_BCM1480_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
-#define G_BCM1480_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
+#define S_BCM1480_L2C_TAG_WAY 46
+#define M_BCM1480_L2C_TAG_WAY _SB_MAKEMASK(3, S_BCM1480_L2C_TAG_WAY)
+#define V_BCM1480_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_TAG_WAY)
+#define G_BCM1480_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_BCM1480_L2C_TAG_WAY, M_BCM1480_L2C_TAG_WAY)
-#define M_BCM1480_L2C_TAG_DIRTY _SB_MAKEMASK1(49)
-#define M_BCM1480_L2C_TAG_VALID _SB_MAKEMASK1(50)
+#define M_BCM1480_L2C_TAG_DIRTY _SB_MAKEMASK1(49)
+#define M_BCM1480_L2C_TAG_VALID _SB_MAKEMASK1(50)
-#define S_BCM1480_L2C_DATA_ECC 51
-#define M_BCM1480_L2C_DATA_ECC _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
-#define V_BCM1480_L2C_DATA_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
-#define G_BCM1480_L2C_DATA_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
+#define S_BCM1480_L2C_DATA_ECC 51
+#define M_BCM1480_L2C_DATA_ECC _SB_MAKEMASK(10, S_BCM1480_L2C_DATA_ECC)
+#define V_BCM1480_L2C_DATA_ECC(x) _SB_MAKEVALUE(x, S_BCM1480_L2C_DATA_ECC)
+#define G_BCM1480_L2C_DATA_ECC(x) _SB_GETVALUE(x, S_BCM1480_L2C_DATA_ECC, M_BCM1480_L2C_DATA_ECC)
/*
* L2 Misc0 Value Register (Table 60)
*/
-#define S_BCM1480_L2C_MISC0_WAY_REMOTE 0
-#define M_BCM1480_L2C_MISC0_WAY_REMOTE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
+#define S_BCM1480_L2C_MISC0_WAY_REMOTE 0
+#define M_BCM1480_L2C_MISC0_WAY_REMOTE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_REMOTE)
#define G_BCM1480_L2C_MISC0_WAY_REMOTE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_REMOTE, M_BCM1480_L2C_MISC0_WAY_REMOTE)
-#define S_BCM1480_L2C_MISC0_WAY_LOCAL 8
-#define M_BCM1480_L2C_MISC0_WAY_LOCAL _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
+#define S_BCM1480_L2C_MISC0_WAY_LOCAL 8
+#define M_BCM1480_L2C_MISC0_WAY_LOCAL _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_LOCAL)
#define G_BCM1480_L2C_MISC0_WAY_LOCAL(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_LOCAL, M_BCM1480_L2C_MISC0_WAY_LOCAL)
-#define S_BCM1480_L2C_MISC0_WAY_ENABLE 16
-#define M_BCM1480_L2C_MISC0_WAY_ENABLE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
+#define S_BCM1480_L2C_MISC0_WAY_ENABLE 16
+#define M_BCM1480_L2C_MISC0_WAY_ENABLE _SB_MAKEMASK(8, S_BCM1480_L2C_MISC0_WAY_ENABLE)
#define G_BCM1480_L2C_MISC0_WAY_ENABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_WAY_ENABLE, M_BCM1480_L2C_MISC0_WAY_ENABLE)
#define S_BCM1480_L2C_MISC0_CACHE_DISABLE 24
#define M_BCM1480_L2C_MISC0_CACHE_DISABLE _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_DISABLE)
#define G_BCM1480_L2C_MISC0_CACHE_DISABLE(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_DISABLE, M_BCM1480_L2C_MISC0_CACHE_DISABLE)
-#define S_BCM1480_L2C_MISC0_CACHE_QUAD 26
-#define M_BCM1480_L2C_MISC0_CACHE_QUAD _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
+#define S_BCM1480_L2C_MISC0_CACHE_QUAD 26
+#define M_BCM1480_L2C_MISC0_CACHE_QUAD _SB_MAKEMASK(2, S_BCM1480_L2C_MISC0_CACHE_QUAD)
#define G_BCM1480_L2C_MISC0_CACHE_QUAD(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC0_CACHE_QUAD, M_BCM1480_L2C_MISC0_CACHE_QUAD)
-#define S_BCM1480_L2C_MISC0_MC_PRIORITY 30
-#define M_BCM1480_L2C_MISC0_MC_PRIORITY _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
+#define S_BCM1480_L2C_MISC0_MC_PRIORITY 30
+#define M_BCM1480_L2C_MISC0_MC_PRIORITY _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_MC_PRIORITY)
-#define S_BCM1480_L2C_MISC0_ECC_CLEANUP 31
-#define M_BCM1480_L2C_MISC0_ECC_CLEANUP _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
+#define S_BCM1480_L2C_MISC0_ECC_CLEANUP 31
+#define M_BCM1480_L2C_MISC0_ECC_CLEANUP _SB_MAKEMASK1(S_BCM1480_L2C_MISC0_ECC_CLEANUP)
/*
* L2 Misc1 Value Register (Table 60)
*/
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_0 0
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_0 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_0 0
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_0 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_0)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_0(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_0, M_BCM1480_L2C_MISC1_WAY_AGENT_0)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_1 8
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_1 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_1 8
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_1 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_1)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_1(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_1, M_BCM1480_L2C_MISC1_WAY_AGENT_1)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_2 16
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_2 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_2 16
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_2 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_2)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_2(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_2, M_BCM1480_L2C_MISC1_WAY_AGENT_2)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_3 24
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_3 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_3 24
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_3 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_3)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_3(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_3, M_BCM1480_L2C_MISC1_WAY_AGENT_3)
-#define S_BCM1480_L2C_MISC1_WAY_AGENT_4 32
-#define M_BCM1480_L2C_MISC1_WAY_AGENT_4 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
+#define S_BCM1480_L2C_MISC1_WAY_AGENT_4 32
+#define M_BCM1480_L2C_MISC1_WAY_AGENT_4 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC1_WAY_AGENT_4)
#define G_BCM1480_L2C_MISC1_WAY_AGENT_4(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC1_WAY_AGENT_4, M_BCM1480_L2C_MISC1_WAY_AGENT_4)
@@ -160,16 +160,16 @@
* L2 Misc2 Value Register (Table 60)
*/
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_8 0
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_8 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_8 0
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_8 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_8)
#define G_BCM1480_L2C_MISC2_WAY_AGENT_8(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_8, M_BCM1480_L2C_MISC2_WAY_AGENT_8)
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_9 8
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_9 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_9 8
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_9 _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_9)
#define G_BCM1480_L2C_MISC2_WAY_AGENT_9(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_9, M_BCM1480_L2C_MISC2_WAY_AGENT_9)
-#define S_BCM1480_L2C_MISC2_WAY_AGENT_A 16
-#define M_BCM1480_L2C_MISC2_WAY_AGENT_A _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
+#define S_BCM1480_L2C_MISC2_WAY_AGENT_A 16
+#define M_BCM1480_L2C_MISC2_WAY_AGENT_A _SB_MAKEMASK(8, S_BCM1480_L2C_MISC2_WAY_AGENT_A)
#define G_BCM1480_L2C_MISC2_WAY_AGENT_A(x) _SB_GETVALUE(x, S_BCM1480_L2C_MISC2_WAY_AGENT_A, M_BCM1480_L2C_MISC2_WAY_AGENT_A)
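
A minimal sketch of the S_/M_/V_/G_ convention used throughout this header: G_* extracts a field from a raw register value and M_* single-bit masks test flags. It is illustrative only, not part of this patch; that "tagval" was read from the L2 tag register, and the pr_info formatting, are assumptions of the example.

	#include <linux/kernel.h>
	#include <linux/types.h>
	#include <asm/sibyte/bcm1480_l2c.h>

	/* Illustrative only: decode the fields of a raw L2 cache tag value
	 * using the accessors defined above.
	 */
	static void bcm1480_l2c_decode_tag(u64 tagval)
	{
		unsigned int index = G_BCM1480_L2C_TAG_INDEX(tagval);
		unsigned int way   = G_BCM1480_L2C_TAG_WAY(tagval);
		u64 tag            = G_BCM1480_L2C_TAG_TAG(tagval);
		int dirty          = !!(tagval & M_BCM1480_L2C_TAG_DIRTY);
		int valid          = !!(tagval & M_BCM1480_L2C_TAG_VALID);

		pr_info("L2 tag: index=%u way=%u tag=%#llx dirty=%d valid=%d\n",
			index, way, (unsigned long long)tag, dirty, valid);
	}
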
diff --git a/arch/mips/include/asm/sibyte/bcm1480_mc.h b/arch/mips/include/asm/sibyte/bcm1480_mc.h
index 4307a758e3b..86908fdb403 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_mc.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_mc.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* BCM1280/BCM1480 Board Support Package
*
- * Memory Controller constants File: bcm1480_mc.h
+ * Memory Controller constants File: bcm1480_mc.h
*
* This module contains constants and macros useful for
* programming the memory controller.
@@ -39,33 +39,33 @@
* Memory Channel Configuration Register (Table 81)
*/
-#define S_BCM1480_MC_INTLV0 0
-#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
-#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0_DEFAULT V_BCM1480_MC_INTLV0(0)
-
-#define S_BCM1480_MC_INTLV1 8
-#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
-#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1_DEFAULT V_BCM1480_MC_INTLV1(0)
-
-#define S_BCM1480_MC_INTLV2 16
-#define M_BCM1480_MC_INTLV2 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
-#define V_BCM1480_MC_INTLV2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
-#define G_BCM1480_MC_INTLV2(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
-#define V_BCM1480_MC_INTLV2_DEFAULT V_BCM1480_MC_INTLV2(0)
-
-#define S_BCM1480_MC_CS_MODE 32
-#define M_BCM1480_MC_CS_MODE _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
-#define V_BCM1480_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
-#define G_BCM1480_MC_CS_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
-#define V_BCM1480_MC_CS_MODE_DEFAULT V_BCM1480_MC_CS_MODE(0)
-
-#define V_BCM1480_MC_CONFIG_DEFAULT (V_BCM1480_MC_INTLV0_DEFAULT | \
- V_BCM1480_MC_INTLV1_DEFAULT | \
- V_BCM1480_MC_INTLV2_DEFAULT | \
+#define S_BCM1480_MC_INTLV0 0
+#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0_DEFAULT V_BCM1480_MC_INTLV0(0)
+
+#define S_BCM1480_MC_INTLV1 8
+#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1_DEFAULT V_BCM1480_MC_INTLV1(0)
+
+#define S_BCM1480_MC_INTLV2 16
+#define M_BCM1480_MC_INTLV2 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV2)
+#define G_BCM1480_MC_INTLV2(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV2, M_BCM1480_MC_INTLV2)
+#define V_BCM1480_MC_INTLV2_DEFAULT V_BCM1480_MC_INTLV2(0)
+
+#define S_BCM1480_MC_CS_MODE 32
+#define M_BCM1480_MC_CS_MODE _SB_MAKEMASK(8, S_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS_MODE)
+#define G_BCM1480_MC_CS_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_CS_MODE, M_BCM1480_MC_CS_MODE)
+#define V_BCM1480_MC_CS_MODE_DEFAULT V_BCM1480_MC_CS_MODE(0)
+
+#define V_BCM1480_MC_CONFIG_DEFAULT (V_BCM1480_MC_INTLV0_DEFAULT | \
+ V_BCM1480_MC_INTLV1_DEFAULT | \
+ V_BCM1480_MC_INTLV2_DEFAULT | \
V_BCM1480_MC_CS_MODE_DEFAULT)
#define K_BCM1480_MC_CS01_MODE 0x03
@@ -80,254 +80,254 @@
* Chip Select Start Address Register (Table 82)
*/
-#define S_BCM1480_MC_CS0_START 0
-#define M_BCM1480_MC_CS0_START _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
-#define V_BCM1480_MC_CS0_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
-#define G_BCM1480_MC_CS0_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
+#define S_BCM1480_MC_CS0_START 0
+#define M_BCM1480_MC_CS0_START _SB_MAKEMASK(12, S_BCM1480_MC_CS0_START)
+#define V_BCM1480_MC_CS0_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_START)
+#define G_BCM1480_MC_CS0_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_START, M_BCM1480_MC_CS0_START)
-#define S_BCM1480_MC_CS1_START 16
-#define M_BCM1480_MC_CS1_START _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
-#define V_BCM1480_MC_CS1_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
-#define G_BCM1480_MC_CS1_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
+#define S_BCM1480_MC_CS1_START 16
+#define M_BCM1480_MC_CS1_START _SB_MAKEMASK(12, S_BCM1480_MC_CS1_START)
+#define V_BCM1480_MC_CS1_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_START)
+#define G_BCM1480_MC_CS1_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_START, M_BCM1480_MC_CS1_START)
-#define S_BCM1480_MC_CS2_START 32
-#define M_BCM1480_MC_CS2_START _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
-#define V_BCM1480_MC_CS2_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
-#define G_BCM1480_MC_CS2_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
+#define S_BCM1480_MC_CS2_START 32
+#define M_BCM1480_MC_CS2_START _SB_MAKEMASK(12, S_BCM1480_MC_CS2_START)
+#define V_BCM1480_MC_CS2_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_START)
+#define G_BCM1480_MC_CS2_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_START, M_BCM1480_MC_CS2_START)
-#define S_BCM1480_MC_CS3_START 48
-#define M_BCM1480_MC_CS3_START _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
-#define V_BCM1480_MC_CS3_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
-#define G_BCM1480_MC_CS3_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
+#define S_BCM1480_MC_CS3_START 48
+#define M_BCM1480_MC_CS3_START _SB_MAKEMASK(12, S_BCM1480_MC_CS3_START)
+#define V_BCM1480_MC_CS3_START(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_START)
+#define G_BCM1480_MC_CS3_START(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_START, M_BCM1480_MC_CS3_START)
/*
* Chip Select End Address Register (Table 83)
*/
-#define S_BCM1480_MC_CS0_END 0
-#define M_BCM1480_MC_CS0_END _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
-#define V_BCM1480_MC_CS0_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
-#define G_BCM1480_MC_CS0_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
+#define S_BCM1480_MC_CS0_END 0
+#define M_BCM1480_MC_CS0_END _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
+#define V_BCM1480_MC_CS0_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
+#define G_BCM1480_MC_CS0_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)
-#define S_BCM1480_MC_CS1_END 16
-#define M_BCM1480_MC_CS1_END _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
-#define V_BCM1480_MC_CS1_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
-#define G_BCM1480_MC_CS1_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
+#define S_BCM1480_MC_CS1_END 16
+#define M_BCM1480_MC_CS1_END _SB_MAKEMASK(12, S_BCM1480_MC_CS1_END)
+#define V_BCM1480_MC_CS1_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS1_END)
+#define G_BCM1480_MC_CS1_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS1_END, M_BCM1480_MC_CS1_END)
-#define S_BCM1480_MC_CS2_END 32
-#define M_BCM1480_MC_CS2_END _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
-#define V_BCM1480_MC_CS2_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
-#define G_BCM1480_MC_CS2_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
+#define S_BCM1480_MC_CS2_END 32
+#define M_BCM1480_MC_CS2_END _SB_MAKEMASK(12, S_BCM1480_MC_CS2_END)
+#define V_BCM1480_MC_CS2_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS2_END)
+#define G_BCM1480_MC_CS2_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS2_END, M_BCM1480_MC_CS2_END)
-#define S_BCM1480_MC_CS3_END 48
-#define M_BCM1480_MC_CS3_END _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
-#define V_BCM1480_MC_CS3_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
-#define G_BCM1480_MC_CS3_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
+#define S_BCM1480_MC_CS3_END 48
+#define M_BCM1480_MC_CS3_END _SB_MAKEMASK(12, S_BCM1480_MC_CS3_END)
+#define V_BCM1480_MC_CS3_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS3_END)
+#define G_BCM1480_MC_CS3_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS3_END, M_BCM1480_MC_CS3_END)
/*
* Row Address Bit Select Register 0 (Table 84)
*/
-#define S_BCM1480_MC_ROW00 0
-#define M_BCM1480_MC_ROW00 _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
-#define V_BCM1480_MC_ROW00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
-#define G_BCM1480_MC_ROW00(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
-
-#define S_BCM1480_MC_ROW01 8
-#define M_BCM1480_MC_ROW01 _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
-#define V_BCM1480_MC_ROW01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
-#define G_BCM1480_MC_ROW01(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
-
-#define S_BCM1480_MC_ROW02 16
-#define M_BCM1480_MC_ROW02 _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
-#define V_BCM1480_MC_ROW02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
-#define G_BCM1480_MC_ROW02(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
-
-#define S_BCM1480_MC_ROW03 24
-#define M_BCM1480_MC_ROW03 _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
-#define V_BCM1480_MC_ROW03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
-#define G_BCM1480_MC_ROW03(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
-
-#define S_BCM1480_MC_ROW04 32
-#define M_BCM1480_MC_ROW04 _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
-#define V_BCM1480_MC_ROW04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
-#define G_BCM1480_MC_ROW04(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
-
-#define S_BCM1480_MC_ROW05 40
-#define M_BCM1480_MC_ROW05 _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
-#define V_BCM1480_MC_ROW05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
-#define G_BCM1480_MC_ROW05(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
-
-#define S_BCM1480_MC_ROW06 48
-#define M_BCM1480_MC_ROW06 _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
-#define V_BCM1480_MC_ROW06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
-#define G_BCM1480_MC_ROW06(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
-
-#define S_BCM1480_MC_ROW07 56
-#define M_BCM1480_MC_ROW07 _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
-#define V_BCM1480_MC_ROW07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
-#define G_BCM1480_MC_ROW07(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
+#define S_BCM1480_MC_ROW00 0
+#define M_BCM1480_MC_ROW00 _SB_MAKEMASK(6, S_BCM1480_MC_ROW00)
+#define V_BCM1480_MC_ROW00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW00)
+#define G_BCM1480_MC_ROW00(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW00, M_BCM1480_MC_ROW00)
+
+#define S_BCM1480_MC_ROW01 8
+#define M_BCM1480_MC_ROW01 _SB_MAKEMASK(6, S_BCM1480_MC_ROW01)
+#define V_BCM1480_MC_ROW01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW01)
+#define G_BCM1480_MC_ROW01(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW01, M_BCM1480_MC_ROW01)
+
+#define S_BCM1480_MC_ROW02 16
+#define M_BCM1480_MC_ROW02 _SB_MAKEMASK(6, S_BCM1480_MC_ROW02)
+#define V_BCM1480_MC_ROW02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW02)
+#define G_BCM1480_MC_ROW02(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW02, M_BCM1480_MC_ROW02)
+
+#define S_BCM1480_MC_ROW03 24
+#define M_BCM1480_MC_ROW03 _SB_MAKEMASK(6, S_BCM1480_MC_ROW03)
+#define V_BCM1480_MC_ROW03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW03)
+#define G_BCM1480_MC_ROW03(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW03, M_BCM1480_MC_ROW03)
+
+#define S_BCM1480_MC_ROW04 32
+#define M_BCM1480_MC_ROW04 _SB_MAKEMASK(6, S_BCM1480_MC_ROW04)
+#define V_BCM1480_MC_ROW04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW04)
+#define G_BCM1480_MC_ROW04(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW04, M_BCM1480_MC_ROW04)
+
+#define S_BCM1480_MC_ROW05 40
+#define M_BCM1480_MC_ROW05 _SB_MAKEMASK(6, S_BCM1480_MC_ROW05)
+#define V_BCM1480_MC_ROW05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW05)
+#define G_BCM1480_MC_ROW05(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW05, M_BCM1480_MC_ROW05)
+
+#define S_BCM1480_MC_ROW06 48
+#define M_BCM1480_MC_ROW06 _SB_MAKEMASK(6, S_BCM1480_MC_ROW06)
+#define V_BCM1480_MC_ROW06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW06)
+#define G_BCM1480_MC_ROW06(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW06, M_BCM1480_MC_ROW06)
+
+#define S_BCM1480_MC_ROW07 56
+#define M_BCM1480_MC_ROW07 _SB_MAKEMASK(6, S_BCM1480_MC_ROW07)
+#define V_BCM1480_MC_ROW07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW07)
+#define G_BCM1480_MC_ROW07(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW07, M_BCM1480_MC_ROW07)
/*
* Row Address Bit Select Register 1 (Table 85)
*/
-#define S_BCM1480_MC_ROW08 0
-#define M_BCM1480_MC_ROW08 _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
-#define V_BCM1480_MC_ROW08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
-#define G_BCM1480_MC_ROW08(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
+#define S_BCM1480_MC_ROW08 0
+#define M_BCM1480_MC_ROW08 _SB_MAKEMASK(6, S_BCM1480_MC_ROW08)
+#define V_BCM1480_MC_ROW08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW08)
+#define G_BCM1480_MC_ROW08(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW08, M_BCM1480_MC_ROW08)
-#define S_BCM1480_MC_ROW09 8
-#define M_BCM1480_MC_ROW09 _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
-#define V_BCM1480_MC_ROW09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
-#define G_BCM1480_MC_ROW09(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
+#define S_BCM1480_MC_ROW09 8
+#define M_BCM1480_MC_ROW09 _SB_MAKEMASK(6, S_BCM1480_MC_ROW09)
+#define V_BCM1480_MC_ROW09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW09)
+#define G_BCM1480_MC_ROW09(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW09, M_BCM1480_MC_ROW09)
-#define S_BCM1480_MC_ROW10 16
-#define M_BCM1480_MC_ROW10 _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
-#define V_BCM1480_MC_ROW10(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
-#define G_BCM1480_MC_ROW10(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
+#define S_BCM1480_MC_ROW10 16
+#define M_BCM1480_MC_ROW10 _SB_MAKEMASK(6, S_BCM1480_MC_ROW10)
+#define V_BCM1480_MC_ROW10(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW10)
+#define G_BCM1480_MC_ROW10(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW10, M_BCM1480_MC_ROW10)
-#define S_BCM1480_MC_ROW11 24
-#define M_BCM1480_MC_ROW11 _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
-#define V_BCM1480_MC_ROW11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
-#define G_BCM1480_MC_ROW11(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
+#define S_BCM1480_MC_ROW11 24
+#define M_BCM1480_MC_ROW11 _SB_MAKEMASK(6, S_BCM1480_MC_ROW11)
+#define V_BCM1480_MC_ROW11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW11)
+#define G_BCM1480_MC_ROW11(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW11, M_BCM1480_MC_ROW11)
-#define S_BCM1480_MC_ROW12 32
-#define M_BCM1480_MC_ROW12 _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
-#define V_BCM1480_MC_ROW12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
-#define G_BCM1480_MC_ROW12(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
+#define S_BCM1480_MC_ROW12 32
+#define M_BCM1480_MC_ROW12 _SB_MAKEMASK(6, S_BCM1480_MC_ROW12)
+#define V_BCM1480_MC_ROW12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW12)
+#define G_BCM1480_MC_ROW12(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW12, M_BCM1480_MC_ROW12)
-#define S_BCM1480_MC_ROW13 40
-#define M_BCM1480_MC_ROW13 _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
-#define V_BCM1480_MC_ROW13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
-#define G_BCM1480_MC_ROW13(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
+#define S_BCM1480_MC_ROW13 40
+#define M_BCM1480_MC_ROW13 _SB_MAKEMASK(6, S_BCM1480_MC_ROW13)
+#define V_BCM1480_MC_ROW13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW13)
+#define G_BCM1480_MC_ROW13(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW13, M_BCM1480_MC_ROW13)
-#define S_BCM1480_MC_ROW14 48
-#define M_BCM1480_MC_ROW14 _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
-#define V_BCM1480_MC_ROW14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
-#define G_BCM1480_MC_ROW14(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
+#define S_BCM1480_MC_ROW14 48
+#define M_BCM1480_MC_ROW14 _SB_MAKEMASK(6, S_BCM1480_MC_ROW14)
+#define V_BCM1480_MC_ROW14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ROW14)
+#define G_BCM1480_MC_ROW14(x) _SB_GETVALUE(x, S_BCM1480_MC_ROW14, M_BCM1480_MC_ROW14)
-#define K_BCM1480_MC_ROWX_BIT_SPACING 8
+#define K_BCM1480_MC_ROWX_BIT_SPACING 8
/*
* Column Address Bit Select Register 0 (Table 86)
*/
-#define S_BCM1480_MC_COL00 0
-#define M_BCM1480_MC_COL00 _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
-#define V_BCM1480_MC_COL00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
-#define G_BCM1480_MC_COL00(x) _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
-
-#define S_BCM1480_MC_COL01 8
-#define M_BCM1480_MC_COL01 _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
-#define V_BCM1480_MC_COL01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
-#define G_BCM1480_MC_COL01(x) _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
-
-#define S_BCM1480_MC_COL02 16
-#define M_BCM1480_MC_COL02 _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
-#define V_BCM1480_MC_COL02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
-#define G_BCM1480_MC_COL02(x) _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
-
-#define S_BCM1480_MC_COL03 24
-#define M_BCM1480_MC_COL03 _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
-#define V_BCM1480_MC_COL03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
-#define G_BCM1480_MC_COL03(x) _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
-
-#define S_BCM1480_MC_COL04 32
-#define M_BCM1480_MC_COL04 _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
-#define V_BCM1480_MC_COL04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
-#define G_BCM1480_MC_COL04(x) _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
-
-#define S_BCM1480_MC_COL05 40
-#define M_BCM1480_MC_COL05 _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
-#define V_BCM1480_MC_COL05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
-#define G_BCM1480_MC_COL05(x) _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
-
-#define S_BCM1480_MC_COL06 48
-#define M_BCM1480_MC_COL06 _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
-#define V_BCM1480_MC_COL06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
-#define G_BCM1480_MC_COL06(x) _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
-
-#define S_BCM1480_MC_COL07 56
-#define M_BCM1480_MC_COL07 _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
-#define V_BCM1480_MC_COL07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
-#define G_BCM1480_MC_COL07(x) _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
+#define S_BCM1480_MC_COL00 0
+#define M_BCM1480_MC_COL00 _SB_MAKEMASK(6, S_BCM1480_MC_COL00)
+#define V_BCM1480_MC_COL00(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL00)
+#define G_BCM1480_MC_COL00(x) _SB_GETVALUE(x, S_BCM1480_MC_COL00, M_BCM1480_MC_COL00)
+
+#define S_BCM1480_MC_COL01 8
+#define M_BCM1480_MC_COL01 _SB_MAKEMASK(6, S_BCM1480_MC_COL01)
+#define V_BCM1480_MC_COL01(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL01)
+#define G_BCM1480_MC_COL01(x) _SB_GETVALUE(x, S_BCM1480_MC_COL01, M_BCM1480_MC_COL01)
+
+#define S_BCM1480_MC_COL02 16
+#define M_BCM1480_MC_COL02 _SB_MAKEMASK(6, S_BCM1480_MC_COL02)
+#define V_BCM1480_MC_COL02(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL02)
+#define G_BCM1480_MC_COL02(x) _SB_GETVALUE(x, S_BCM1480_MC_COL02, M_BCM1480_MC_COL02)
+
+#define S_BCM1480_MC_COL03 24
+#define M_BCM1480_MC_COL03 _SB_MAKEMASK(6, S_BCM1480_MC_COL03)
+#define V_BCM1480_MC_COL03(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL03)
+#define G_BCM1480_MC_COL03(x) _SB_GETVALUE(x, S_BCM1480_MC_COL03, M_BCM1480_MC_COL03)
+
+#define S_BCM1480_MC_COL04 32
+#define M_BCM1480_MC_COL04 _SB_MAKEMASK(6, S_BCM1480_MC_COL04)
+#define V_BCM1480_MC_COL04(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL04)
+#define G_BCM1480_MC_COL04(x) _SB_GETVALUE(x, S_BCM1480_MC_COL04, M_BCM1480_MC_COL04)
+
+#define S_BCM1480_MC_COL05 40
+#define M_BCM1480_MC_COL05 _SB_MAKEMASK(6, S_BCM1480_MC_COL05)
+#define V_BCM1480_MC_COL05(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL05)
+#define G_BCM1480_MC_COL05(x) _SB_GETVALUE(x, S_BCM1480_MC_COL05, M_BCM1480_MC_COL05)
+
+#define S_BCM1480_MC_COL06 48
+#define M_BCM1480_MC_COL06 _SB_MAKEMASK(6, S_BCM1480_MC_COL06)
+#define V_BCM1480_MC_COL06(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL06)
+#define G_BCM1480_MC_COL06(x) _SB_GETVALUE(x, S_BCM1480_MC_COL06, M_BCM1480_MC_COL06)
+
+#define S_BCM1480_MC_COL07 56
+#define M_BCM1480_MC_COL07 _SB_MAKEMASK(6, S_BCM1480_MC_COL07)
+#define V_BCM1480_MC_COL07(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL07)
+#define G_BCM1480_MC_COL07(x) _SB_GETVALUE(x, S_BCM1480_MC_COL07, M_BCM1480_MC_COL07)
/*
* Column Address Bit Select Register 1 (Table 87)
*/
-#define S_BCM1480_MC_COL08 0
-#define M_BCM1480_MC_COL08 _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
-#define V_BCM1480_MC_COL08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
-#define G_BCM1480_MC_COL08(x) _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
+#define S_BCM1480_MC_COL08 0
+#define M_BCM1480_MC_COL08 _SB_MAKEMASK(6, S_BCM1480_MC_COL08)
+#define V_BCM1480_MC_COL08(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL08)
+#define G_BCM1480_MC_COL08(x) _SB_GETVALUE(x, S_BCM1480_MC_COL08, M_BCM1480_MC_COL08)
-#define S_BCM1480_MC_COL09 8
-#define M_BCM1480_MC_COL09 _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
-#define V_BCM1480_MC_COL09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
-#define G_BCM1480_MC_COL09(x) _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
+#define S_BCM1480_MC_COL09 8
+#define M_BCM1480_MC_COL09 _SB_MAKEMASK(6, S_BCM1480_MC_COL09)
+#define V_BCM1480_MC_COL09(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL09)
+#define G_BCM1480_MC_COL09(x) _SB_GETVALUE(x, S_BCM1480_MC_COL09, M_BCM1480_MC_COL09)
-#define S_BCM1480_MC_COL10 16 /* not a valid position, must be prog as 0 */
+#define S_BCM1480_MC_COL10 16 /* not a valid position, must be prog as 0 */
-#define S_BCM1480_MC_COL11 24
-#define M_BCM1480_MC_COL11 _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
-#define V_BCM1480_MC_COL11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
-#define G_BCM1480_MC_COL11(x) _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
+#define S_BCM1480_MC_COL11 24
+#define M_BCM1480_MC_COL11 _SB_MAKEMASK(6, S_BCM1480_MC_COL11)
+#define V_BCM1480_MC_COL11(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL11)
+#define G_BCM1480_MC_COL11(x) _SB_GETVALUE(x, S_BCM1480_MC_COL11, M_BCM1480_MC_COL11)
-#define S_BCM1480_MC_COL12 32
-#define M_BCM1480_MC_COL12 _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
-#define V_BCM1480_MC_COL12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
-#define G_BCM1480_MC_COL12(x) _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
+#define S_BCM1480_MC_COL12 32
+#define M_BCM1480_MC_COL12 _SB_MAKEMASK(6, S_BCM1480_MC_COL12)
+#define V_BCM1480_MC_COL12(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL12)
+#define G_BCM1480_MC_COL12(x) _SB_GETVALUE(x, S_BCM1480_MC_COL12, M_BCM1480_MC_COL12)
-#define S_BCM1480_MC_COL13 40
-#define M_BCM1480_MC_COL13 _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
-#define V_BCM1480_MC_COL13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
-#define G_BCM1480_MC_COL13(x) _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
+#define S_BCM1480_MC_COL13 40
+#define M_BCM1480_MC_COL13 _SB_MAKEMASK(6, S_BCM1480_MC_COL13)
+#define V_BCM1480_MC_COL13(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL13)
+#define G_BCM1480_MC_COL13(x) _SB_GETVALUE(x, S_BCM1480_MC_COL13, M_BCM1480_MC_COL13)
-#define S_BCM1480_MC_COL14 48
-#define M_BCM1480_MC_COL14 _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
-#define V_BCM1480_MC_COL14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
-#define G_BCM1480_MC_COL14(x) _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
+#define S_BCM1480_MC_COL14 48
+#define M_BCM1480_MC_COL14 _SB_MAKEMASK(6, S_BCM1480_MC_COL14)
+#define V_BCM1480_MC_COL14(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COL14)
+#define G_BCM1480_MC_COL14(x) _SB_GETVALUE(x, S_BCM1480_MC_COL14, M_BCM1480_MC_COL14)
-#define K_BCM1480_MC_COLX_BIT_SPACING 8
+#define K_BCM1480_MC_COLX_BIT_SPACING 8
/*
* CS0 and CS1 Bank Address Bit Select Register (Table 88)
*/
-#define S_BCM1480_MC_CS01_BANK0 0
-#define M_BCM1480_MC_CS01_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
-#define V_BCM1480_MC_CS01_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
-#define G_BCM1480_MC_CS01_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
+#define S_BCM1480_MC_CS01_BANK0 0
+#define M_BCM1480_MC_CS01_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK0)
+#define V_BCM1480_MC_CS01_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK0)
+#define G_BCM1480_MC_CS01_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK0, M_BCM1480_MC_CS01_BANK0)
-#define S_BCM1480_MC_CS01_BANK1 8
-#define M_BCM1480_MC_CS01_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
-#define V_BCM1480_MC_CS01_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
-#define G_BCM1480_MC_CS01_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
+#define S_BCM1480_MC_CS01_BANK1 8
+#define M_BCM1480_MC_CS01_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK1)
+#define V_BCM1480_MC_CS01_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK1)
+#define G_BCM1480_MC_CS01_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK1, M_BCM1480_MC_CS01_BANK1)
-#define S_BCM1480_MC_CS01_BANK2 16
-#define M_BCM1480_MC_CS01_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
-#define V_BCM1480_MC_CS01_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
-#define G_BCM1480_MC_CS01_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
+#define S_BCM1480_MC_CS01_BANK2 16
+#define M_BCM1480_MC_CS01_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS01_BANK2)
+#define V_BCM1480_MC_CS01_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS01_BANK2)
+#define G_BCM1480_MC_CS01_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS01_BANK2, M_BCM1480_MC_CS01_BANK2)
/*
* CS2 and CS3 Bank Address Bit Select Register (Table 89)
*/
-#define S_BCM1480_MC_CS23_BANK0 0
-#define M_BCM1480_MC_CS23_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
-#define V_BCM1480_MC_CS23_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
-#define G_BCM1480_MC_CS23_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
+#define S_BCM1480_MC_CS23_BANK0 0
+#define M_BCM1480_MC_CS23_BANK0 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK0)
+#define V_BCM1480_MC_CS23_BANK0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK0)
+#define G_BCM1480_MC_CS23_BANK0(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK0, M_BCM1480_MC_CS23_BANK0)
-#define S_BCM1480_MC_CS23_BANK1 8
-#define M_BCM1480_MC_CS23_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
-#define V_BCM1480_MC_CS23_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
-#define G_BCM1480_MC_CS23_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
+#define S_BCM1480_MC_CS23_BANK1 8
+#define M_BCM1480_MC_CS23_BANK1 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK1)
+#define V_BCM1480_MC_CS23_BANK1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK1)
+#define G_BCM1480_MC_CS23_BANK1(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK1, M_BCM1480_MC_CS23_BANK1)
-#define S_BCM1480_MC_CS23_BANK2 16
-#define M_BCM1480_MC_CS23_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
-#define V_BCM1480_MC_CS23_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
-#define G_BCM1480_MC_CS23_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
+#define S_BCM1480_MC_CS23_BANK2 16
+#define M_BCM1480_MC_CS23_BANK2 _SB_MAKEMASK(6, S_BCM1480_MC_CS23_BANK2)
+#define V_BCM1480_MC_CS23_BANK2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS23_BANK2)
+#define G_BCM1480_MC_CS23_BANK2(x) _SB_GETVALUE(x, S_BCM1480_MC_CS23_BANK2, M_BCM1480_MC_CS23_BANK2)
#define K_BCM1480_MC_CSXX_BANKX_BIT_SPACING 8
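
Aside (illustrative, not part of the patch): every memory-controller field in this header follows the same four-macro convention -- S_* gives the field's bit offset, M_* the mask built with _SB_MAKEMASK(width, shift), V_*(x) shifts a value into place with _SB_MAKEVALUE, and G_*(x) extracts it with _SB_GETVALUE. Below is a minimal C sketch of that pattern, using simplified re-statements of the Sibyte helpers (the real ones live in the shared sibyte definitions header and differ in detail) and the 12-bit CS0_END field shown above.

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified helpers for illustration only; not the exact kernel definitions. */
    #define _SB_MAKEMASK(v, n)     ((uint64_t)(((uint64_t)1 << (v)) - 1) << (n))
    #define _SB_MAKEVALUE(v, n)    ((uint64_t)(v) << (n))
    #define _SB_GETVALUE(v, n, m)  (((uint64_t)(v) & (uint64_t)(m)) >> (n))

    #define S_BCM1480_MC_CS0_END    0
    #define M_BCM1480_MC_CS0_END    _SB_MAKEMASK(12, S_BCM1480_MC_CS0_END)
    #define V_BCM1480_MC_CS0_END(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0_END)
    #define G_BCM1480_MC_CS0_END(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0_END, M_BCM1480_MC_CS0_END)

    int main(void)
    {
            uint64_t reg = 0;

            reg |= V_BCM1480_MC_CS0_END(0x040);        /* pack a 12-bit end-address field */
            printf("CS0_END = 0x%03llx\n",
                   (unsigned long long)G_BCM1480_MC_CS0_END(reg));   /* prints 0x040 */
            return 0;
    }

The whitespace changes in this patch do not alter any of these values; they only retab the definitions.
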
@@ -335,19 +335,19 @@
* DRAM Command Register (Table 90)
*/
-#define S_BCM1480_MC_COMMAND 0
-#define M_BCM1480_MC_COMMAND _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
-#define V_BCM1480_MC_COMMAND(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
-#define G_BCM1480_MC_COMMAND(x) _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
+#define S_BCM1480_MC_COMMAND 0
+#define M_BCM1480_MC_COMMAND _SB_MAKEMASK(4, S_BCM1480_MC_COMMAND)
+#define V_BCM1480_MC_COMMAND(x) _SB_MAKEVALUE(x, S_BCM1480_MC_COMMAND)
+#define G_BCM1480_MC_COMMAND(x) _SB_GETVALUE(x, S_BCM1480_MC_COMMAND, M_BCM1480_MC_COMMAND)
-#define K_BCM1480_MC_COMMAND_EMRS 0
-#define K_BCM1480_MC_COMMAND_MRS 1
-#define K_BCM1480_MC_COMMAND_PRE 2
-#define K_BCM1480_MC_COMMAND_AR 3
-#define K_BCM1480_MC_COMMAND_SETRFSH 4
-#define K_BCM1480_MC_COMMAND_CLRRFSH 5
-#define K_BCM1480_MC_COMMAND_SETPWRDN 6
-#define K_BCM1480_MC_COMMAND_CLRPWRDN 7
+#define K_BCM1480_MC_COMMAND_EMRS 0
+#define K_BCM1480_MC_COMMAND_MRS 1
+#define K_BCM1480_MC_COMMAND_PRE 2
+#define K_BCM1480_MC_COMMAND_AR 3
+#define K_BCM1480_MC_COMMAND_SETRFSH 4
+#define K_BCM1480_MC_COMMAND_CLRRFSH 5
+#define K_BCM1480_MC_COMMAND_SETPWRDN 6
+#define K_BCM1480_MC_COMMAND_CLRPWRDN 7
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define K_BCM1480_MC_COMMAND_EMRS2 8
@@ -356,61 +356,61 @@
#define K_BCM1480_MC_COMMAND_DISABLE_MCLK 11
#endif
-#define V_BCM1480_MC_COMMAND_EMRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
-#define V_BCM1480_MC_COMMAND_MRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
-#define V_BCM1480_MC_COMMAND_PRE V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
-#define V_BCM1480_MC_COMMAND_AR V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
-#define V_BCM1480_MC_COMMAND_SETRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
-#define V_BCM1480_MC_COMMAND_CLRRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
-#define V_BCM1480_MC_COMMAND_SETPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
-#define V_BCM1480_MC_COMMAND_CLRPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
+#define V_BCM1480_MC_COMMAND_EMRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS)
+#define V_BCM1480_MC_COMMAND_MRS V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_MRS)
+#define V_BCM1480_MC_COMMAND_PRE V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_PRE)
+#define V_BCM1480_MC_COMMAND_AR V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_AR)
+#define V_BCM1480_MC_COMMAND_SETRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETRFSH)
+#define V_BCM1480_MC_COMMAND_CLRRFSH V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRRFSH)
+#define V_BCM1480_MC_COMMAND_SETPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_SETPWRDN)
+#define V_BCM1480_MC_COMMAND_CLRPWRDN V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_CLRPWRDN)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define V_BCM1480_MC_COMMAND_EMRS2 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
-#define V_BCM1480_MC_COMMAND_EMRS3 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
+#define V_BCM1480_MC_COMMAND_EMRS2 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS2)
+#define V_BCM1480_MC_COMMAND_EMRS3 V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_EMRS3)
#define V_BCM1480_MC_COMMAND_ENABLE_MCLK V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_ENABLE_MCLK)
#define V_BCM1480_MC_COMMAND_DISABLE_MCLK V_BCM1480_MC_COMMAND(K_BCM1480_MC_COMMAND_DISABLE_MCLK)
#endif
#define S_BCM1480_MC_CS0 4
-#define M_BCM1480_MC_CS0 _SB_MAKEMASK1(4)
-#define M_BCM1480_MC_CS1 _SB_MAKEMASK1(5)
-#define M_BCM1480_MC_CS2 _SB_MAKEMASK1(6)
-#define M_BCM1480_MC_CS3 _SB_MAKEMASK1(7)
-#define M_BCM1480_MC_CS4 _SB_MAKEMASK1(8)
-#define M_BCM1480_MC_CS5 _SB_MAKEMASK1(9)
-#define M_BCM1480_MC_CS6 _SB_MAKEMASK1(10)
-#define M_BCM1480_MC_CS7 _SB_MAKEMASK1(11)
+#define M_BCM1480_MC_CS0 _SB_MAKEMASK1(4)
+#define M_BCM1480_MC_CS1 _SB_MAKEMASK1(5)
+#define M_BCM1480_MC_CS2 _SB_MAKEMASK1(6)
+#define M_BCM1480_MC_CS3 _SB_MAKEMASK1(7)
+#define M_BCM1480_MC_CS4 _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_CS5 _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_CS6 _SB_MAKEMASK1(10)
+#define M_BCM1480_MC_CS7 _SB_MAKEMASK1(11)
-#define M_BCM1480_MC_CS _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
-#define V_BCM1480_MC_CS(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
-#define G_BCM1480_MC_CS(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS0)
+#define M_BCM1480_MC_CS _SB_MAKEMASK(8, S_BCM1480_MC_CS0)
+#define V_BCM1480_MC_CS(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CS0)
+#define G_BCM1480_MC_CS(x) _SB_GETVALUE(x, S_BCM1480_MC_CS0, M_BCM1480_MC_CS0)
-#define M_BCM1480_MC_CMD_ACTIVE _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_CMD_ACTIVE _SB_MAKEMASK1(16)
/*
* DRAM Mode Register (Table 91)
*/
-#define S_BCM1480_MC_EMODE 0
-#define M_BCM1480_MC_EMODE _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
-#define V_BCM1480_MC_EMODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
-#define G_BCM1480_MC_EMODE(x) _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
-#define V_BCM1480_MC_EMODE_DEFAULT V_BCM1480_MC_EMODE(0)
+#define S_BCM1480_MC_EMODE 0
+#define M_BCM1480_MC_EMODE _SB_MAKEMASK(15, S_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_EMODE)
+#define G_BCM1480_MC_EMODE(x) _SB_GETVALUE(x, S_BCM1480_MC_EMODE, M_BCM1480_MC_EMODE)
+#define V_BCM1480_MC_EMODE_DEFAULT V_BCM1480_MC_EMODE(0)
-#define S_BCM1480_MC_MODE 16
-#define M_BCM1480_MC_MODE _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
-#define V_BCM1480_MC_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
-#define G_BCM1480_MC_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
-#define V_BCM1480_MC_MODE_DEFAULT V_BCM1480_MC_MODE(0)
+#define S_BCM1480_MC_MODE 16
+#define M_BCM1480_MC_MODE _SB_MAKEMASK(15, S_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MODE)
+#define G_BCM1480_MC_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_MODE, M_BCM1480_MC_MODE)
+#define V_BCM1480_MC_MODE_DEFAULT V_BCM1480_MC_MODE(0)
-#define S_BCM1480_MC_DRAM_TYPE 32
-#define M_BCM1480_MC_DRAM_TYPE _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
-#define V_BCM1480_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
-#define G_BCM1480_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
+#define S_BCM1480_MC_DRAM_TYPE 32
+#define M_BCM1480_MC_DRAM_TYPE _SB_MAKEMASK(4, S_BCM1480_MC_DRAM_TYPE)
+#define V_BCM1480_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DRAM_TYPE)
+#define G_BCM1480_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_BCM1480_MC_DRAM_TYPE, M_BCM1480_MC_DRAM_TYPE)
-#define K_BCM1480_MC_DRAM_TYPE_JEDEC 0
-#define K_BCM1480_MC_DRAM_TYPE_FCRAM 1
+#define K_BCM1480_MC_DRAM_TYPE_JEDEC 0
+#define K_BCM1480_MC_DRAM_TYPE_FCRAM 1
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define K_BCM1480_MC_DRAM_TYPE_DDR2 2
@@ -418,27 +418,27 @@
#define K_BCM1480_MC_DRAM_TYPE_DDR2_PASS1 0
-#define V_BCM1480_MC_DRAM_TYPE_JEDEC V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
-#define V_BCM1480_MC_DRAM_TYPE_FCRAM V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
+#define V_BCM1480_MC_DRAM_TYPE_JEDEC V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_JEDEC)
+#define V_BCM1480_MC_DRAM_TYPE_FCRAM V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_FCRAM)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define V_BCM1480_MC_DRAM_TYPE_DDR2 V_BCM1480_MC_DRAM_TYPE(K_BCM1480_MC_DRAM_TYPE_DDR2)
#endif
-#define M_BCM1480_MC_GANGED _SB_MAKEMASK1(36)
-#define M_BCM1480_MC_BY9_INTF _SB_MAKEMASK1(37)
-#define M_BCM1480_MC_FORCE_ECC64 _SB_MAKEMASK1(38)
-#define M_BCM1480_MC_ECC_DISABLE _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_GANGED _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_BY9_INTF _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_FORCE_ECC64 _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_ECC_DISABLE _SB_MAKEMASK1(39)
-#define S_BCM1480_MC_PG_POLICY 40
-#define M_BCM1480_MC_PG_POLICY _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
-#define V_BCM1480_MC_PG_POLICY(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
-#define G_BCM1480_MC_PG_POLICY(x) _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
+#define S_BCM1480_MC_PG_POLICY 40
+#define M_BCM1480_MC_PG_POLICY _SB_MAKEMASK(2, S_BCM1480_MC_PG_POLICY)
+#define V_BCM1480_MC_PG_POLICY(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PG_POLICY)
+#define G_BCM1480_MC_PG_POLICY(x) _SB_GETVALUE(x, S_BCM1480_MC_PG_POLICY, M_BCM1480_MC_PG_POLICY)
-#define K_BCM1480_MC_PG_POLICY_CLOSED 0
+#define K_BCM1480_MC_PG_POLICY_CLOSED 0
#define K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK 1
-#define V_BCM1480_MC_PG_POLICY_CLOSED V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
+#define V_BCM1480_MC_PG_POLICY_CLOSED V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CLOSED)
#define V_BCM1480_MC_PG_POLICY_CAS_TIME_CHK V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
@@ -447,32 +447,32 @@
#endif
#define V_BCM1480_MC_DRAMMODE_DEFAULT V_BCM1480_MC_EMODE_DEFAULT | V_BCM1480_MC_MODE_DEFAULT | V_BCM1480_MC_DRAM_TYPE_JEDEC | \
- V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
+ V_BCM1480_MC_PG_POLICY(K_BCM1480_MC_PG_POLICY_CAS_TIME_CHK)
/*
* Memory Clock Configuration Register (Table 92)
*/
-#define S_BCM1480_MC_CLK_RATIO 0
-#define M_BCM1480_MC_CLK_RATIO _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
-#define V_BCM1480_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
-#define G_BCM1480_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
+#define S_BCM1480_MC_CLK_RATIO 0
+#define M_BCM1480_MC_CLK_RATIO _SB_MAKEMASK(6, S_BCM1480_MC_CLK_RATIO)
+#define V_BCM1480_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CLK_RATIO)
+#define G_BCM1480_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_BCM1480_MC_CLK_RATIO, M_BCM1480_MC_CLK_RATIO)
-#define V_BCM1480_MC_CLK_RATIO_DEFAULT V_BCM1480_MC_CLK_RATIO(10)
+#define V_BCM1480_MC_CLK_RATIO_DEFAULT V_BCM1480_MC_CLK_RATIO(10)
-#define S_BCM1480_MC_REF_RATE 8
-#define M_BCM1480_MC_REF_RATE _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
-#define V_BCM1480_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
-#define G_BCM1480_MC_REF_RATE(x) _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
+#define S_BCM1480_MC_REF_RATE 8
+#define M_BCM1480_MC_REF_RATE _SB_MAKEMASK(8, S_BCM1480_MC_REF_RATE)
+#define V_BCM1480_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_REF_RATE)
+#define G_BCM1480_MC_REF_RATE(x) _SB_GETVALUE(x, S_BCM1480_MC_REF_RATE, M_BCM1480_MC_REF_RATE)
-#define K_BCM1480_MC_REF_RATE_100MHz 0x31
-#define K_BCM1480_MC_REF_RATE_200MHz 0x62
-#define K_BCM1480_MC_REF_RATE_400MHz 0xC4
+#define K_BCM1480_MC_REF_RATE_100MHz 0x31
+#define K_BCM1480_MC_REF_RATE_200MHz 0x62
+#define K_BCM1480_MC_REF_RATE_400MHz 0xC4
-#define V_BCM1480_MC_REF_RATE_100MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
-#define V_BCM1480_MC_REF_RATE_200MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
-#define V_BCM1480_MC_REF_RATE_400MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
-#define V_BCM1480_MC_REF_RATE_DEFAULT V_BCM1480_MC_REF_RATE_400MHz
+#define V_BCM1480_MC_REF_RATE_100MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_100MHz)
+#define V_BCM1480_MC_REF_RATE_200MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_200MHz)
+#define V_BCM1480_MC_REF_RATE_400MHz V_BCM1480_MC_REF_RATE(K_BCM1480_MC_REF_RATE_400MHz)
+#define V_BCM1480_MC_REF_RATE_DEFAULT V_BCM1480_MC_REF_RATE_400MHz
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define M_BCM1480_MC_AUTO_REF_DIS _SB_MAKEMASK1(16)
@@ -518,19 +518,19 @@
#define M_BCM1480_MC_CS_ODD_ODT_EN _SB_MAKEMASK1(32)
-#define S_BCM1480_MC_ODT0 0
+#define S_BCM1480_MC_ODT0 0
#define M_BCM1480_MC_ODT0 _SB_MAKEMASK(8, S_BCM1480_MC_ODT0)
#define V_BCM1480_MC_ODT0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT0)
-#define S_BCM1480_MC_ODT2 8
+#define S_BCM1480_MC_ODT2 8
#define M_BCM1480_MC_ODT2 _SB_MAKEMASK(8, S_BCM1480_MC_ODT2)
#define V_BCM1480_MC_ODT2(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT2)
-#define S_BCM1480_MC_ODT4 16
+#define S_BCM1480_MC_ODT4 16
#define M_BCM1480_MC_ODT4 _SB_MAKEMASK(8, S_BCM1480_MC_ODT4)
#define V_BCM1480_MC_ODT4(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT4)
-#define S_BCM1480_MC_ODT6 24
+#define S_BCM1480_MC_ODT6 24
#define M_BCM1480_MC_ODT6 _SB_MAKEMASK(8, S_BCM1480_MC_ODT6)
#define V_BCM1480_MC_ODT6(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ODT6)
#endif
@@ -539,139 +539,139 @@
* Memory DLL Configuration Register (Table 93)
*/
-#define S_BCM1480_MC_ADDR_COARSE_ADJ 0
-#define M_BCM1480_MC_ADDR_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
-#define V_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
-#define G_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
+#define S_BCM1480_MC_ADDR_COARSE_ADJ 0
+#define M_BCM1480_MC_ADDR_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define V_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ)
+#define G_BCM1480_MC_ADDR_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_COARSE_ADJ, M_BCM1480_MC_ADDR_COARSE_ADJ)
#define V_BCM1480_MC_ADDR_COARSE_ADJ_DEFAULT V_BCM1480_MC_ADDR_COARSE_ADJ(0x0)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_ADDR_FREQ_RANGE 8
-#define M_BCM1480_MC_ADDR_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
-#define V_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
-#define G_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
-#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_ADDR_FREQ_RANGE 8
+#define M_BCM1480_MC_ADDR_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE)
+#define G_BCM1480_MC_ADDR_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FREQ_RANGE, M_BCM1480_MC_ADDR_FREQ_RANGE)
+#define V_BCM1480_MC_ADDR_FREQ_RANGE_DEFAULT V_BCM1480_MC_ADDR_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_ADDR_FINE_ADJ 8
-#define M_BCM1480_MC_ADDR_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
-#define V_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
-#define G_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
+#define S_BCM1480_MC_ADDR_FINE_ADJ 8
+#define M_BCM1480_MC_ADDR_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define V_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ)
+#define G_BCM1480_MC_ADDR_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_ADDR_FINE_ADJ, M_BCM1480_MC_ADDR_FINE_ADJ)
#define V_BCM1480_MC_ADDR_FINE_ADJ_DEFAULT V_BCM1480_MC_ADDR_FINE_ADJ(0x8)
-#define S_BCM1480_MC_DQI_COARSE_ADJ 16
-#define M_BCM1480_MC_DQI_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
-#define V_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
-#define G_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
+#define S_BCM1480_MC_DQI_COARSE_ADJ 16
+#define M_BCM1480_MC_DQI_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define V_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ)
+#define G_BCM1480_MC_DQI_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_COARSE_ADJ, M_BCM1480_MC_DQI_COARSE_ADJ)
#define V_BCM1480_MC_DQI_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQI_COARSE_ADJ(0x0)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DQI_FREQ_RANGE 24
-#define M_BCM1480_MC_DQI_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
-#define V_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
-#define G_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
-#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DQI_FREQ_RANGE 24
+#define M_BCM1480_MC_DQI_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE)
+#define G_BCM1480_MC_DQI_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FREQ_RANGE, M_BCM1480_MC_DQI_FREQ_RANGE)
+#define V_BCM1480_MC_DQI_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQI_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_DQI_FINE_ADJ 24
-#define M_BCM1480_MC_DQI_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
-#define V_BCM1480_MC_DQI_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
-#define G_BCM1480_MC_DQI_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
+#define S_BCM1480_MC_DQI_FINE_ADJ 24
+#define M_BCM1480_MC_DQI_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQI_FINE_ADJ)
+#define V_BCM1480_MC_DQI_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ)
+#define G_BCM1480_MC_DQI_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQI_FINE_ADJ, M_BCM1480_MC_DQI_FINE_ADJ)
#define V_BCM1480_MC_DQI_FINE_ADJ_DEFAULT V_BCM1480_MC_DQI_FINE_ADJ(0x8)
-#define S_BCM1480_MC_DQO_COARSE_ADJ 32
-#define M_BCM1480_MC_DQO_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
-#define V_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
-#define G_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
+#define S_BCM1480_MC_DQO_COARSE_ADJ 32
+#define M_BCM1480_MC_DQO_COARSE_ADJ _SB_MAKEMASK(6, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define V_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ)
+#define G_BCM1480_MC_DQO_COARSE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_COARSE_ADJ, M_BCM1480_MC_DQO_COARSE_ADJ)
#define V_BCM1480_MC_DQO_COARSE_ADJ_DEFAULT V_BCM1480_MC_DQO_COARSE_ADJ(0x0)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DQO_FREQ_RANGE 40
-#define M_BCM1480_MC_DQO_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
-#define V_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
-#define G_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
-#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DQO_FREQ_RANGE 40
+#define M_BCM1480_MC_DQO_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE)
+#define G_BCM1480_MC_DQO_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FREQ_RANGE, M_BCM1480_MC_DQO_FREQ_RANGE)
+#define V_BCM1480_MC_DQO_FREQ_RANGE_DEFAULT V_BCM1480_MC_DQO_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_DQO_FINE_ADJ 40
-#define M_BCM1480_MC_DQO_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
-#define V_BCM1480_MC_DQO_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
-#define G_BCM1480_MC_DQO_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
+#define S_BCM1480_MC_DQO_FINE_ADJ 40
+#define M_BCM1480_MC_DQO_FINE_ADJ _SB_MAKEMASK(4, S_BCM1480_MC_DQO_FINE_ADJ)
+#define V_BCM1480_MC_DQO_FINE_ADJ(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ)
+#define G_BCM1480_MC_DQO_FINE_ADJ(x) _SB_GETVALUE(x, S_BCM1480_MC_DQO_FINE_ADJ, M_BCM1480_MC_DQO_FINE_ADJ)
#define V_BCM1480_MC_DQO_FINE_ADJ_DEFAULT V_BCM1480_MC_DQO_FINE_ADJ(0x8)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DLL_PDSEL 44
-#define M_BCM1480_MC_DLL_PDSEL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
-#define V_BCM1480_MC_DLL_PDSEL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
-#define G_BCM1480_MC_DLL_PDSEL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
-#define V_BCM1480_MC_DLL_DEFAULT_PDSEL V_BCM1480_MC_DLL_PDSEL(0x0)
-
-#define M_BCM1480_MC_DLL_REGBYPASS _SB_MAKEMASK1(46)
-#define M_BCM1480_MC_DQO_SHIFT _SB_MAKEMASK1(47)
+#define S_BCM1480_MC_DLL_PDSEL 44
+#define M_BCM1480_MC_DLL_PDSEL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_PDSEL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_PDSEL)
+#define G_BCM1480_MC_DLL_PDSEL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_PDSEL, M_BCM1480_MC_DLL_PDSEL)
+#define V_BCM1480_MC_DLL_DEFAULT_PDSEL V_BCM1480_MC_DLL_PDSEL(0x0)
+
+#define M_BCM1480_MC_DLL_REGBYPASS _SB_MAKEMASK1(46)
+#define M_BCM1480_MC_DQO_SHIFT _SB_MAKEMASK1(47)
#endif
-#define S_BCM1480_MC_DLL_DEFAULT 48
-#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
-#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
-#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
+#define S_BCM1480_MC_DLL_DEFAULT 48
+#define M_BCM1480_MC_DLL_DEFAULT _SB_MAKEMASK(6, S_BCM1480_MC_DLL_DEFAULT)
+#define V_BCM1480_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_DEFAULT)
+#define G_BCM1480_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_DEFAULT, M_BCM1480_MC_DLL_DEFAULT)
#define V_BCM1480_MC_DLL_DEFAULT_DEFAULT V_BCM1480_MC_DLL_DEFAULT(0x10)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define S_BCM1480_MC_DLL_REGCTRL 54
-#define M_BCM1480_MC_DLL_REGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
-#define V_BCM1480_MC_DLL_REGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
-#define G_BCM1480_MC_DLL_REGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
+#define M_BCM1480_MC_DLL_REGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_REGCTRL)
+#define V_BCM1480_MC_DLL_REGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_REGCTRL)
+#define G_BCM1480_MC_DLL_REGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_REGCTRL, M_BCM1480_MC_DLL_REGCTRL)
#define V_BCM1480_MC_DLL_DEFAULT_REGCTRL V_BCM1480_MC_DLL_REGCTRL(0x0)
#endif
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_DLL_FREQ_RANGE 56
-#define M_BCM1480_MC_DLL_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
-#define V_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
-#define G_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
-#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
+#define S_BCM1480_MC_DLL_FREQ_RANGE 56
+#define M_BCM1480_MC_DLL_FREQ_RANGE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE)
+#define G_BCM1480_MC_DLL_FREQ_RANGE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_FREQ_RANGE, M_BCM1480_MC_DLL_FREQ_RANGE)
+#define V_BCM1480_MC_DLL_FREQ_RANGE_DEFAULT V_BCM1480_MC_DLL_FREQ_RANGE(0x4)
#endif
-#define S_BCM1480_MC_DLL_STEP_SIZE 56
-#define M_BCM1480_MC_DLL_STEP_SIZE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
-#define V_BCM1480_MC_DLL_STEP_SIZE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
-#define G_BCM1480_MC_DLL_STEP_SIZE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
+#define S_BCM1480_MC_DLL_STEP_SIZE 56
+#define M_BCM1480_MC_DLL_STEP_SIZE _SB_MAKEMASK(4, S_BCM1480_MC_DLL_STEP_SIZE)
+#define V_BCM1480_MC_DLL_STEP_SIZE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE)
+#define G_BCM1480_MC_DLL_STEP_SIZE(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_STEP_SIZE, M_BCM1480_MC_DLL_STEP_SIZE)
#define V_BCM1480_MC_DLL_STEP_SIZE_DEFAULT V_BCM1480_MC_DLL_STEP_SIZE(0x8)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define S_BCM1480_MC_DLL_BGCTRL 60
-#define M_BCM1480_MC_DLL_BGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
-#define V_BCM1480_MC_DLL_BGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
-#define G_BCM1480_MC_DLL_BGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
-#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL V_BCM1480_MC_DLL_BGCTRL(0x0)
+#define M_BCM1480_MC_DLL_BGCTRL _SB_MAKEMASK(2, S_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_BGCTRL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_DLL_BGCTRL)
+#define G_BCM1480_MC_DLL_BGCTRL(x) _SB_GETVALUE(x, S_BCM1480_MC_DLL_BGCTRL, M_BCM1480_MC_DLL_BGCTRL)
+#define V_BCM1480_MC_DLL_DEFAULT_BGCTRL V_BCM1480_MC_DLL_BGCTRL(0x0)
#endif
-#define M_BCM1480_MC_DLL_BYPASS _SB_MAKEMASK1(63)
+#define M_BCM1480_MC_DLL_BYPASS _SB_MAKEMASK1(63)
/*
* Memory Drive Configuration Register (Table 94)
*/
-#define S_BCM1480_MC_RTT_BYP_PULLDOWN 0
-#define M_BCM1480_MC_RTT_BYP_PULLDOWN _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
+#define S_BCM1480_MC_RTT_BYP_PULLDOWN 0
+#define M_BCM1480_MC_RTT_BYP_PULLDOWN _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLDOWN)
#define V_BCM1480_MC_RTT_BYP_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN)
#define G_BCM1480_MC_RTT_BYP_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLDOWN, M_BCM1480_MC_RTT_BYP_PULLDOWN)
-#define S_BCM1480_MC_RTT_BYP_PULLUP 6
-#define M_BCM1480_MC_RTT_BYP_PULLUP _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
-#define V_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
-#define G_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
+#define S_BCM1480_MC_RTT_BYP_PULLUP 6
+#define M_BCM1480_MC_RTT_BYP_PULLUP _SB_MAKEMASK(3, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define V_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP)
+#define G_BCM1480_MC_RTT_BYP_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_RTT_BYP_PULLUP, M_BCM1480_MC_RTT_BYP_PULLUP)
-#define M_BCM1480_MC_RTT_BYPASS _SB_MAKEMASK1(8)
-#define M_BCM1480_MC_RTT_COMP_MOV_AVG _SB_MAKEMASK1(9)
+#define M_BCM1480_MC_RTT_BYPASS _SB_MAKEMASK1(8)
+#define M_BCM1480_MC_RTT_COMP_MOV_AVG _SB_MAKEMASK1(9)
#define S_BCM1480_MC_PVT_BYP_C1_PULLDOWN 10
#define M_BCM1480_MC_PVT_BYP_C1_PULLDOWN _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
#define V_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
#define G_BCM1480_MC_PVT_BYP_C1_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLDOWN, M_BCM1480_MC_PVT_BYP_C1_PULLDOWN)
-#define S_BCM1480_MC_PVT_BYP_C1_PULLUP 15
-#define M_BCM1480_MC_PVT_BYP_C1_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
+#define S_BCM1480_MC_PVT_BYP_C1_PULLUP 15
+#define M_BCM1480_MC_PVT_BYP_C1_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
#define V_BCM1480_MC_PVT_BYP_C1_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP)
#define G_BCM1480_MC_PVT_BYP_C1_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C1_PULLUP, M_BCM1480_MC_PVT_BYP_C1_PULLUP)
@@ -680,153 +680,153 @@
#define V_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
#define G_BCM1480_MC_PVT_BYP_C2_PULLDOWN(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLDOWN, M_BCM1480_MC_PVT_BYP_C2_PULLDOWN)
-#define S_BCM1480_MC_PVT_BYP_C2_PULLUP 25
-#define M_BCM1480_MC_PVT_BYP_C2_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
+#define S_BCM1480_MC_PVT_BYP_C2_PULLUP 25
+#define M_BCM1480_MC_PVT_BYP_C2_PULLUP _SB_MAKEMASK(4, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
#define V_BCM1480_MC_PVT_BYP_C2_PULLUP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP)
#define G_BCM1480_MC_PVT_BYP_C2_PULLUP(x) _SB_GETVALUE(x, S_BCM1480_MC_PVT_BYP_C2_PULLUP, M_BCM1480_MC_PVT_BYP_C2_PULLUP)
-#define M_BCM1480_MC_PVT_BYPASS _SB_MAKEMASK1(30)
-#define M_BCM1480_MC_PVT_COMP_MOV_AVG _SB_MAKEMASK1(31)
+#define M_BCM1480_MC_PVT_BYPASS _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_PVT_COMP_MOV_AVG _SB_MAKEMASK1(31)
-#define M_BCM1480_MC_CLK_CLASS _SB_MAKEMASK1(34)
-#define M_BCM1480_MC_DATA_CLASS _SB_MAKEMASK1(35)
-#define M_BCM1480_MC_ADDR_CLASS _SB_MAKEMASK1(36)
+#define M_BCM1480_MC_CLK_CLASS _SB_MAKEMASK1(34)
+#define M_BCM1480_MC_DATA_CLASS _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_ADDR_CLASS _SB_MAKEMASK1(36)
-#define M_BCM1480_MC_DQ_ODT_75 _SB_MAKEMASK1(37)
-#define M_BCM1480_MC_DQ_ODT_150 _SB_MAKEMASK1(38)
-#define M_BCM1480_MC_DQS_ODT_75 _SB_MAKEMASK1(39)
-#define M_BCM1480_MC_DQS_ODT_150 _SB_MAKEMASK1(40)
-#define M_BCM1480_MC_DQS_DIFF _SB_MAKEMASK1(41)
+#define M_BCM1480_MC_DQ_ODT_75 _SB_MAKEMASK1(37)
+#define M_BCM1480_MC_DQ_ODT_150 _SB_MAKEMASK1(38)
+#define M_BCM1480_MC_DQS_ODT_75 _SB_MAKEMASK1(39)
+#define M_BCM1480_MC_DQS_ODT_150 _SB_MAKEMASK1(40)
+#define M_BCM1480_MC_DQS_DIFF _SB_MAKEMASK1(41)
/*
* ECC Test Data Register (Table 95)
*/
-#define S_BCM1480_MC_DATA_INVERT 0
-#define M_DATA_ECC_INVERT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_INVERT)
+#define S_BCM1480_MC_DATA_INVERT 0
+#define M_DATA_ECC_INVERT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_INVERT)
/*
* ECC Test ECC Register (Table 96)
*/
-#define S_BCM1480_MC_ECC_INVERT 0
-#define M_BCM1480_MC_ECC_INVERT _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
+#define S_BCM1480_MC_ECC_INVERT 0
+#define M_BCM1480_MC_ECC_INVERT _SB_MAKEMASK(8, S_BCM1480_MC_ECC_INVERT)
/*
* SDRAM Timing Register (Table 97)
*/
-#define S_BCM1480_MC_tRCD 0
-#define M_BCM1480_MC_tRCD _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
-#define V_BCM1480_MC_tRCD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
-#define G_BCM1480_MC_tRCD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
-#define K_BCM1480_MC_tRCD_DEFAULT 3
-#define V_BCM1480_MC_tRCD_DEFAULT V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
-
-#define S_BCM1480_MC_tCL 4
-#define M_BCM1480_MC_tCL _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
-#define V_BCM1480_MC_tCL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
-#define G_BCM1480_MC_tCL(x) _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
-#define K_BCM1480_MC_tCL_DEFAULT 2
-#define V_BCM1480_MC_tCL_DEFAULT V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
-
-#define M_BCM1480_MC_tCrDh _SB_MAKEMASK1(8)
-
-#define S_BCM1480_MC_tWR 9
-#define M_BCM1480_MC_tWR _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
-#define V_BCM1480_MC_tWR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
-#define G_BCM1480_MC_tWR(x) _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
-#define K_BCM1480_MC_tWR_DEFAULT 2
-#define V_BCM1480_MC_tWR_DEFAULT V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
-
-#define S_BCM1480_MC_tCwD 12
-#define M_BCM1480_MC_tCwD _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
-#define V_BCM1480_MC_tCwD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
-#define G_BCM1480_MC_tCwD(x) _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
-#define K_BCM1480_MC_tCwD_DEFAULT 1
-#define V_BCM1480_MC_tCwD_DEFAULT V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
-
-#define S_BCM1480_MC_tRP 16
-#define M_BCM1480_MC_tRP _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
-#define V_BCM1480_MC_tRP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
-#define G_BCM1480_MC_tRP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
-#define K_BCM1480_MC_tRP_DEFAULT 4
-#define V_BCM1480_MC_tRP_DEFAULT V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
-
-#define S_BCM1480_MC_tRRD 20
-#define M_BCM1480_MC_tRRD _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
-#define V_BCM1480_MC_tRRD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
-#define G_BCM1480_MC_tRRD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
-#define K_BCM1480_MC_tRRD_DEFAULT 2
-#define V_BCM1480_MC_tRRD_DEFAULT V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
-
-#define S_BCM1480_MC_tRCw 24
-#define M_BCM1480_MC_tRCw _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
-#define V_BCM1480_MC_tRCw(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
-#define G_BCM1480_MC_tRCw(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
-#define K_BCM1480_MC_tRCw_DEFAULT 10
-#define V_BCM1480_MC_tRCw_DEFAULT V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
-
-#define S_BCM1480_MC_tRCr 32
-#define M_BCM1480_MC_tRCr _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
-#define V_BCM1480_MC_tRCr(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
-#define G_BCM1480_MC_tRCr(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
-#define K_BCM1480_MC_tRCr_DEFAULT 9
-#define V_BCM1480_MC_tRCr_DEFAULT V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
+#define S_BCM1480_MC_tRCD 0
+#define M_BCM1480_MC_tRCD _SB_MAKEMASK(4, S_BCM1480_MC_tRCD)
+#define V_BCM1480_MC_tRCD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCD)
+#define G_BCM1480_MC_tRCD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCD, M_BCM1480_MC_tRCD)
+#define K_BCM1480_MC_tRCD_DEFAULT 3
+#define V_BCM1480_MC_tRCD_DEFAULT V_BCM1480_MC_tRCD(K_BCM1480_MC_tRCD_DEFAULT)
+
+#define S_BCM1480_MC_tCL 4
+#define M_BCM1480_MC_tCL _SB_MAKEMASK(4, S_BCM1480_MC_tCL)
+#define V_BCM1480_MC_tCL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCL)
+#define G_BCM1480_MC_tCL(x) _SB_GETVALUE(x, S_BCM1480_MC_tCL, M_BCM1480_MC_tCL)
+#define K_BCM1480_MC_tCL_DEFAULT 2
+#define V_BCM1480_MC_tCL_DEFAULT V_BCM1480_MC_tCL(K_BCM1480_MC_tCL_DEFAULT)
+
+#define M_BCM1480_MC_tCrDh _SB_MAKEMASK1(8)
+
+#define S_BCM1480_MC_tWR 9
+#define M_BCM1480_MC_tWR _SB_MAKEMASK(3, S_BCM1480_MC_tWR)
+#define V_BCM1480_MC_tWR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tWR)
+#define G_BCM1480_MC_tWR(x) _SB_GETVALUE(x, S_BCM1480_MC_tWR, M_BCM1480_MC_tWR)
+#define K_BCM1480_MC_tWR_DEFAULT 2
+#define V_BCM1480_MC_tWR_DEFAULT V_BCM1480_MC_tWR(K_BCM1480_MC_tWR_DEFAULT)
+
+#define S_BCM1480_MC_tCwD 12
+#define M_BCM1480_MC_tCwD _SB_MAKEMASK(4, S_BCM1480_MC_tCwD)
+#define V_BCM1480_MC_tCwD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tCwD)
+#define G_BCM1480_MC_tCwD(x) _SB_GETVALUE(x, S_BCM1480_MC_tCwD, M_BCM1480_MC_tCwD)
+#define K_BCM1480_MC_tCwD_DEFAULT 1
+#define V_BCM1480_MC_tCwD_DEFAULT V_BCM1480_MC_tCwD(K_BCM1480_MC_tCwD_DEFAULT)
+
+#define S_BCM1480_MC_tRP 16
+#define M_BCM1480_MC_tRP _SB_MAKEMASK(4, S_BCM1480_MC_tRP)
+#define V_BCM1480_MC_tRP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRP)
+#define G_BCM1480_MC_tRP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRP, M_BCM1480_MC_tRP)
+#define K_BCM1480_MC_tRP_DEFAULT 4
+#define V_BCM1480_MC_tRP_DEFAULT V_BCM1480_MC_tRP(K_BCM1480_MC_tRP_DEFAULT)
+
+#define S_BCM1480_MC_tRRD 20
+#define M_BCM1480_MC_tRRD _SB_MAKEMASK(4, S_BCM1480_MC_tRRD)
+#define V_BCM1480_MC_tRRD(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRRD)
+#define G_BCM1480_MC_tRRD(x) _SB_GETVALUE(x, S_BCM1480_MC_tRRD, M_BCM1480_MC_tRRD)
+#define K_BCM1480_MC_tRRD_DEFAULT 2
+#define V_BCM1480_MC_tRRD_DEFAULT V_BCM1480_MC_tRRD(K_BCM1480_MC_tRRD_DEFAULT)
+
+#define S_BCM1480_MC_tRCw 24
+#define M_BCM1480_MC_tRCw _SB_MAKEMASK(5, S_BCM1480_MC_tRCw)
+#define V_BCM1480_MC_tRCw(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCw)
+#define G_BCM1480_MC_tRCw(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCw, M_BCM1480_MC_tRCw)
+#define K_BCM1480_MC_tRCw_DEFAULT 10
+#define V_BCM1480_MC_tRCw_DEFAULT V_BCM1480_MC_tRCw(K_BCM1480_MC_tRCw_DEFAULT)
+
+#define S_BCM1480_MC_tRCr 32
+#define M_BCM1480_MC_tRCr _SB_MAKEMASK(5, S_BCM1480_MC_tRCr)
+#define V_BCM1480_MC_tRCr(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRCr)
+#define G_BCM1480_MC_tRCr(x) _SB_GETVALUE(x, S_BCM1480_MC_tRCr, M_BCM1480_MC_tRCr)
+#define K_BCM1480_MC_tRCr_DEFAULT 9
+#define V_BCM1480_MC_tRCr_DEFAULT V_BCM1480_MC_tRCr(K_BCM1480_MC_tRCr_DEFAULT)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_tFAW 40
-#define M_BCM1480_MC_tFAW _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
-#define V_BCM1480_MC_tFAW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
-#define G_BCM1480_MC_tFAW(x) _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
-#define K_BCM1480_MC_tFAW_DEFAULT 0
-#define V_BCM1480_MC_tFAW_DEFAULT V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
+#define S_BCM1480_MC_tFAW 40
+#define M_BCM1480_MC_tFAW _SB_MAKEMASK(6, S_BCM1480_MC_tFAW)
+#define V_BCM1480_MC_tFAW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFAW)
+#define G_BCM1480_MC_tFAW(x) _SB_GETVALUE(x, S_BCM1480_MC_tFAW, M_BCM1480_MC_tFAW)
+#define K_BCM1480_MC_tFAW_DEFAULT 0
+#define V_BCM1480_MC_tFAW_DEFAULT V_BCM1480_MC_tFAW(K_BCM1480_MC_tFAW_DEFAULT)
#endif
-#define S_BCM1480_MC_tRFC 48
-#define M_BCM1480_MC_tRFC _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
-#define V_BCM1480_MC_tRFC(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
-#define G_BCM1480_MC_tRFC(x) _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
-#define K_BCM1480_MC_tRFC_DEFAULT 12
-#define V_BCM1480_MC_tRFC_DEFAULT V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
-
-#define S_BCM1480_MC_tFIFO 56
-#define M_BCM1480_MC_tFIFO _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
-#define V_BCM1480_MC_tFIFO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
-#define G_BCM1480_MC_tFIFO(x) _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
-#define K_BCM1480_MC_tFIFO_DEFAULT 0
-#define V_BCM1480_MC_tFIFO_DEFAULT V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
-
-#define S_BCM1480_MC_tW2R 58
-#define M_BCM1480_MC_tW2R _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
-#define V_BCM1480_MC_tW2R(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
-#define G_BCM1480_MC_tW2R(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
-#define K_BCM1480_MC_tW2R_DEFAULT 1
-#define V_BCM1480_MC_tW2R_DEFAULT V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
-
-#define S_BCM1480_MC_tR2W 60
-#define M_BCM1480_MC_tR2W _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
-#define V_BCM1480_MC_tR2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
-#define G_BCM1480_MC_tR2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
-#define K_BCM1480_MC_tR2W_DEFAULT 0
-#define V_BCM1480_MC_tR2W_DEFAULT V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
+#define S_BCM1480_MC_tRFC 48
+#define M_BCM1480_MC_tRFC _SB_MAKEMASK(7, S_BCM1480_MC_tRFC)
+#define V_BCM1480_MC_tRFC(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRFC)
+#define G_BCM1480_MC_tRFC(x) _SB_GETVALUE(x, S_BCM1480_MC_tRFC, M_BCM1480_MC_tRFC)
+#define K_BCM1480_MC_tRFC_DEFAULT 12
+#define V_BCM1480_MC_tRFC_DEFAULT V_BCM1480_MC_tRFC(K_BCM1480_MC_tRFC_DEFAULT)
+
+#define S_BCM1480_MC_tFIFO 56
+#define M_BCM1480_MC_tFIFO _SB_MAKEMASK(2, S_BCM1480_MC_tFIFO)
+#define V_BCM1480_MC_tFIFO(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tFIFO)
+#define G_BCM1480_MC_tFIFO(x) _SB_GETVALUE(x, S_BCM1480_MC_tFIFO, M_BCM1480_MC_tFIFO)
+#define K_BCM1480_MC_tFIFO_DEFAULT 0
+#define V_BCM1480_MC_tFIFO_DEFAULT V_BCM1480_MC_tFIFO(K_BCM1480_MC_tFIFO_DEFAULT)
+
+#define S_BCM1480_MC_tW2R 58
+#define M_BCM1480_MC_tW2R _SB_MAKEMASK(2, S_BCM1480_MC_tW2R)
+#define V_BCM1480_MC_tW2R(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2R)
+#define G_BCM1480_MC_tW2R(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2R, M_BCM1480_MC_tW2R)
+#define K_BCM1480_MC_tW2R_DEFAULT 1
+#define V_BCM1480_MC_tW2R_DEFAULT V_BCM1480_MC_tW2R(K_BCM1480_MC_tW2R_DEFAULT)
+
+#define S_BCM1480_MC_tR2W 60
+#define M_BCM1480_MC_tR2W _SB_MAKEMASK(2, S_BCM1480_MC_tR2W)
+#define V_BCM1480_MC_tR2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tR2W)
+#define G_BCM1480_MC_tR2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tR2W, M_BCM1480_MC_tR2W)
+#define K_BCM1480_MC_tR2W_DEFAULT 0
+#define V_BCM1480_MC_tR2W_DEFAULT V_BCM1480_MC_tR2W(K_BCM1480_MC_tR2W_DEFAULT)
#define M_BCM1480_MC_tR2R _SB_MAKEMASK1(62)
-#define V_BCM1480_MC_TIMING_DEFAULT (M_BCM1480_MC_tR2R | \
- V_BCM1480_MC_tFIFO_DEFAULT | \
- V_BCM1480_MC_tR2W_DEFAULT | \
- V_BCM1480_MC_tW2R_DEFAULT | \
- V_BCM1480_MC_tRFC_DEFAULT | \
- V_BCM1480_MC_tRCr_DEFAULT | \
- V_BCM1480_MC_tRCw_DEFAULT | \
- V_BCM1480_MC_tRRD_DEFAULT | \
- V_BCM1480_MC_tRP_DEFAULT | \
- V_BCM1480_MC_tCwD_DEFAULT | \
- V_BCM1480_MC_tWR_DEFAULT | \
- M_BCM1480_MC_tCrDh | \
- V_BCM1480_MC_tCL_DEFAULT | \
- V_BCM1480_MC_tRCD_DEFAULT)
+#define V_BCM1480_MC_TIMING_DEFAULT (M_BCM1480_MC_tR2R | \
+ V_BCM1480_MC_tFIFO_DEFAULT | \
+ V_BCM1480_MC_tR2W_DEFAULT | \
+ V_BCM1480_MC_tW2R_DEFAULT | \
+ V_BCM1480_MC_tRFC_DEFAULT | \
+ V_BCM1480_MC_tRCr_DEFAULT | \
+ V_BCM1480_MC_tRCw_DEFAULT | \
+ V_BCM1480_MC_tRRD_DEFAULT | \
+ V_BCM1480_MC_tRP_DEFAULT | \
+ V_BCM1480_MC_tCwD_DEFAULT | \
+ V_BCM1480_MC_tWR_DEFAULT | \
+ M_BCM1480_MC_tCrDh | \
+ V_BCM1480_MC_tCL_DEFAULT | \
+ V_BCM1480_MC_tRCD_DEFAULT)
/*
* SDRAM Timing Register 2
@@ -834,33 +834,33 @@
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define S_BCM1480_MC_tAL 0
-#define M_BCM1480_MC_tAL _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
-#define V_BCM1480_MC_tAL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
-#define G_BCM1480_MC_tAL(x) _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
-#define K_BCM1480_MC_tAL_DEFAULT 0
-#define V_BCM1480_MC_tAL_DEFAULT V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
-
-#define S_BCM1480_MC_tRTP 4
-#define M_BCM1480_MC_tRTP _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
-#define V_BCM1480_MC_tRTP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
-#define G_BCM1480_MC_tRTP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
-#define K_BCM1480_MC_tRTP_DEFAULT 2
-#define V_BCM1480_MC_tRTP_DEFAULT V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
-
-#define S_BCM1480_MC_tW2W 8
-#define M_BCM1480_MC_tW2W _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
-#define V_BCM1480_MC_tW2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
-#define G_BCM1480_MC_tW2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
-#define K_BCM1480_MC_tW2W_DEFAULT 0
-#define V_BCM1480_MC_tW2W_DEFAULT V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
-
-#define S_BCM1480_MC_tRAP 12
-#define M_BCM1480_MC_tRAP _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
-#define V_BCM1480_MC_tRAP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
-#define G_BCM1480_MC_tRAP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
-#define K_BCM1480_MC_tRAP_DEFAULT 0
-#define V_BCM1480_MC_tRAP_DEFAULT V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
+#define S_BCM1480_MC_tAL 0
+#define M_BCM1480_MC_tAL _SB_MAKEMASK(4, S_BCM1480_MC_tAL)
+#define V_BCM1480_MC_tAL(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tAL)
+#define G_BCM1480_MC_tAL(x) _SB_GETVALUE(x, S_BCM1480_MC_tAL, M_BCM1480_MC_tAL)
+#define K_BCM1480_MC_tAL_DEFAULT 0
+#define V_BCM1480_MC_tAL_DEFAULT V_BCM1480_MC_tAL(K_BCM1480_MC_tAL_DEFAULT)
+
+#define S_BCM1480_MC_tRTP 4
+#define M_BCM1480_MC_tRTP _SB_MAKEMASK(3, S_BCM1480_MC_tRTP)
+#define V_BCM1480_MC_tRTP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRTP)
+#define G_BCM1480_MC_tRTP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRTP, M_BCM1480_MC_tRTP)
+#define K_BCM1480_MC_tRTP_DEFAULT 2
+#define V_BCM1480_MC_tRTP_DEFAULT V_BCM1480_MC_tRTP(K_BCM1480_MC_tRTP_DEFAULT)
+
+#define S_BCM1480_MC_tW2W 8
+#define M_BCM1480_MC_tW2W _SB_MAKEMASK(2, S_BCM1480_MC_tW2W)
+#define V_BCM1480_MC_tW2W(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tW2W)
+#define G_BCM1480_MC_tW2W(x) _SB_GETVALUE(x, S_BCM1480_MC_tW2W, M_BCM1480_MC_tW2W)
+#define K_BCM1480_MC_tW2W_DEFAULT 0
+#define V_BCM1480_MC_tW2W_DEFAULT V_BCM1480_MC_tW2W(K_BCM1480_MC_tW2W_DEFAULT)
+
+#define S_BCM1480_MC_tRAP 12
+#define M_BCM1480_MC_tRAP _SB_MAKEMASK(4, S_BCM1480_MC_tRAP)
+#define V_BCM1480_MC_tRAP(x) _SB_MAKEVALUE(x, S_BCM1480_MC_tRAP)
+#define G_BCM1480_MC_tRAP(x) _SB_GETVALUE(x, S_BCM1480_MC_tRAP, M_BCM1480_MC_tRAP)
+#define K_BCM1480_MC_tRAP_DEFAULT 0
+#define V_BCM1480_MC_tRAP_DEFAULT V_BCM1480_MC_tRAP(K_BCM1480_MC_tRAP_DEFAULT)
#endif
@@ -874,111 +874,111 @@
* Global Configuration Register (Table 99)
*/
-#define S_BCM1480_MC_BLK_SET_MARK 8
-#define M_BCM1480_MC_BLK_SET_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
-#define V_BCM1480_MC_BLK_SET_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
-#define G_BCM1480_MC_BLK_SET_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
+#define S_BCM1480_MC_BLK_SET_MARK 8
+#define M_BCM1480_MC_BLK_SET_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_SET_MARK)
+#define V_BCM1480_MC_BLK_SET_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_SET_MARK)
+#define G_BCM1480_MC_BLK_SET_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_SET_MARK, M_BCM1480_MC_BLK_SET_MARK)
-#define S_BCM1480_MC_BLK_CLR_MARK 12
-#define M_BCM1480_MC_BLK_CLR_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
-#define V_BCM1480_MC_BLK_CLR_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
-#define G_BCM1480_MC_BLK_CLR_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
+#define S_BCM1480_MC_BLK_CLR_MARK 12
+#define M_BCM1480_MC_BLK_CLR_MARK _SB_MAKEMASK(4, S_BCM1480_MC_BLK_CLR_MARK)
+#define V_BCM1480_MC_BLK_CLR_MARK(x) _SB_MAKEVALUE(x, S_BCM1480_MC_BLK_CLR_MARK)
+#define G_BCM1480_MC_BLK_CLR_MARK(x) _SB_GETVALUE(x, S_BCM1480_MC_BLK_CLR_MARK, M_BCM1480_MC_BLK_CLR_MARK)
-#define M_BCM1480_MC_PKT_PRIORITY _SB_MAKEMASK1(16)
+#define M_BCM1480_MC_PKT_PRIORITY _SB_MAKEMASK1(16)
-#define S_BCM1480_MC_MAX_AGE 20
-#define M_BCM1480_MC_MAX_AGE _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
-#define V_BCM1480_MC_MAX_AGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
-#define G_BCM1480_MC_MAX_AGE(x) _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
+#define S_BCM1480_MC_MAX_AGE 20
+#define M_BCM1480_MC_MAX_AGE _SB_MAKEMASK(4, S_BCM1480_MC_MAX_AGE)
+#define V_BCM1480_MC_MAX_AGE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_MAX_AGE)
+#define G_BCM1480_MC_MAX_AGE(x) _SB_GETVALUE(x, S_BCM1480_MC_MAX_AGE, M_BCM1480_MC_MAX_AGE)
-#define M_BCM1480_MC_BERR_DISABLE _SB_MAKEMASK1(29)
-#define M_BCM1480_MC_FORCE_SEQ _SB_MAKEMASK1(30)
-#define M_BCM1480_MC_VGEN _SB_MAKEMASK1(32)
+#define M_BCM1480_MC_BERR_DISABLE _SB_MAKEMASK1(29)
+#define M_BCM1480_MC_FORCE_SEQ _SB_MAKEMASK1(30)
+#define M_BCM1480_MC_VGEN _SB_MAKEMASK1(32)
-#define S_BCM1480_MC_SLEW 33
-#define M_BCM1480_MC_SLEW _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
-#define V_BCM1480_MC_SLEW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
-#define G_BCM1480_MC_SLEW(x) _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
+#define S_BCM1480_MC_SLEW 33
+#define M_BCM1480_MC_SLEW _SB_MAKEMASK(2, S_BCM1480_MC_SLEW)
+#define V_BCM1480_MC_SLEW(x) _SB_MAKEVALUE(x, S_BCM1480_MC_SLEW)
+#define G_BCM1480_MC_SLEW(x) _SB_GETVALUE(x, S_BCM1480_MC_SLEW, M_BCM1480_MC_SLEW)
-#define M_BCM1480_MC_SSTL_VOLTAGE _SB_MAKEMASK1(35)
+#define M_BCM1480_MC_SSTL_VOLTAGE _SB_MAKEMASK1(35)
/*
* Global Channel Interleave Register (Table 100)
*/
-#define S_BCM1480_MC_INTLV0 0
-#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
-#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
-#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
-
-#define S_BCM1480_MC_INTLV1 8
-#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
-#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
-#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
-
-#define S_BCM1480_MC_INTLV_MODE 16
-#define M_BCM1480_MC_INTLV_MODE _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
-#define V_BCM1480_MC_INTLV_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
-#define G_BCM1480_MC_INTLV_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
-
-#define K_BCM1480_MC_INTLV_MODE_NONE 0x0
-#define K_BCM1480_MC_INTLV_MODE_01 0x1
-#define K_BCM1480_MC_INTLV_MODE_23 0x2
-#define K_BCM1480_MC_INTLV_MODE_01_23 0x3
-#define K_BCM1480_MC_INTLV_MODE_0123 0x4
-
-#define V_BCM1480_MC_INTLV_MODE_NONE V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
-#define V_BCM1480_MC_INTLV_MODE_01 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
-#define V_BCM1480_MC_INTLV_MODE_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
-#define V_BCM1480_MC_INTLV_MODE_01_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
-#define V_BCM1480_MC_INTLV_MODE_0123 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
+#define S_BCM1480_MC_INTLV0 0
+#define M_BCM1480_MC_INTLV0 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV0)
+#define V_BCM1480_MC_INTLV0(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV0)
+#define G_BCM1480_MC_INTLV0(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV0, M_BCM1480_MC_INTLV0)
+
+#define S_BCM1480_MC_INTLV1 8
+#define M_BCM1480_MC_INTLV1 _SB_MAKEMASK(6, S_BCM1480_MC_INTLV1)
+#define V_BCM1480_MC_INTLV1(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV1)
+#define G_BCM1480_MC_INTLV1(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV1, M_BCM1480_MC_INTLV1)
+
+#define S_BCM1480_MC_INTLV_MODE 16
+#define M_BCM1480_MC_INTLV_MODE _SB_MAKEMASK(3, S_BCM1480_MC_INTLV_MODE)
+#define V_BCM1480_MC_INTLV_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_MC_INTLV_MODE)
+#define G_BCM1480_MC_INTLV_MODE(x) _SB_GETVALUE(x, S_BCM1480_MC_INTLV_MODE, M_BCM1480_MC_INTLV_MODE)
+
+#define K_BCM1480_MC_INTLV_MODE_NONE 0x0
+#define K_BCM1480_MC_INTLV_MODE_01 0x1
+#define K_BCM1480_MC_INTLV_MODE_23 0x2
+#define K_BCM1480_MC_INTLV_MODE_01_23 0x3
+#define K_BCM1480_MC_INTLV_MODE_0123 0x4
+
+#define V_BCM1480_MC_INTLV_MODE_NONE V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_NONE)
+#define V_BCM1480_MC_INTLV_MODE_01 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01)
+#define V_BCM1480_MC_INTLV_MODE_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_23)
+#define V_BCM1480_MC_INTLV_MODE_01_23 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_01_23)
+#define V_BCM1480_MC_INTLV_MODE_0123 V_BCM1480_MC_INTLV_MODE(K_BCM1480_MC_INTLV_MODE_0123)
/*
* ECC Status Register
*/
-#define S_BCM1480_MC_ECC_ERR_ADDR 0
-#define M_BCM1480_MC_ECC_ERR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
-#define V_BCM1480_MC_ECC_ERR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
-#define G_BCM1480_MC_ECC_ERR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
+#define S_BCM1480_MC_ECC_ERR_ADDR 0
+#define M_BCM1480_MC_ECC_ERR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_ERR_ADDR)
+#define V_BCM1480_MC_ECC_ERR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR)
+#define G_BCM1480_MC_ECC_ERR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_ERR_ADDR, M_BCM1480_MC_ECC_ERR_ADDR)
#if SIBYTE_HDR_FEATURE(1480, PASS2)
-#define M_BCM1480_MC_ECC_ERR_RMW _SB_MAKEMASK1(60)
+#define M_BCM1480_MC_ECC_ERR_RMW _SB_MAKEMASK1(60)
#endif
-#define M_BCM1480_MC_ECC_MULT_ERR_DET _SB_MAKEMASK1(61)
-#define M_BCM1480_MC_ECC_UERR_DET _SB_MAKEMASK1(62)
-#define M_BCM1480_MC_ECC_CERR_DET _SB_MAKEMASK1(63)
+#define M_BCM1480_MC_ECC_MULT_ERR_DET _SB_MAKEMASK1(61)
+#define M_BCM1480_MC_ECC_UERR_DET _SB_MAKEMASK1(62)
+#define M_BCM1480_MC_ECC_CERR_DET _SB_MAKEMASK1(63)
/*
* Global ECC Address Register (Table 102)
*/
-#define S_BCM1480_MC_ECC_CORR_ADDR 0
-#define M_BCM1480_MC_ECC_CORR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
-#define V_BCM1480_MC_ECC_CORR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
-#define G_BCM1480_MC_ECC_CORR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
+#define S_BCM1480_MC_ECC_CORR_ADDR 0
+#define M_BCM1480_MC_ECC_CORR_ADDR _SB_MAKEMASK(37, S_BCM1480_MC_ECC_CORR_ADDR)
+#define V_BCM1480_MC_ECC_CORR_ADDR(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR)
+#define G_BCM1480_MC_ECC_CORR_ADDR(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORR_ADDR, M_BCM1480_MC_ECC_CORR_ADDR)
/*
* Global ECC Correction Register (Table 103)
*/
-#define S_BCM1480_MC_ECC_CORRECT 0
-#define M_BCM1480_MC_ECC_CORRECT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
-#define V_BCM1480_MC_ECC_CORRECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
-#define G_BCM1480_MC_ECC_CORRECT(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
+#define S_BCM1480_MC_ECC_CORRECT 0
+#define M_BCM1480_MC_ECC_CORRECT _SB_MAKEMASK(64, S_BCM1480_MC_ECC_CORRECT)
+#define V_BCM1480_MC_ECC_CORRECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_ECC_CORRECT)
+#define G_BCM1480_MC_ECC_CORRECT(x) _SB_GETVALUE(x, S_BCM1480_MC_ECC_CORRECT, M_BCM1480_MC_ECC_CORRECT)
/*
* Global ECC Performance Counters Control Register (Table 104)
*/
-#define S_BCM1480_MC_CHANNEL_SELECT 0
-#define M_BCM1480_MC_CHANNEL_SELECT _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
-#define V_BCM1480_MC_CHANNEL_SELECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
-#define G_BCM1480_MC_CHANNEL_SELECT(x) _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
-#define K_BCM1480_MC_CHANNEL_SELECT_0 0x1
-#define K_BCM1480_MC_CHANNEL_SELECT_1 0x2
-#define K_BCM1480_MC_CHANNEL_SELECT_2 0x4
-#define K_BCM1480_MC_CHANNEL_SELECT_3 0x8
+#define S_BCM1480_MC_CHANNEL_SELECT 0
+#define M_BCM1480_MC_CHANNEL_SELECT _SB_MAKEMASK(4, S_BCM1480_MC_CHANNEL_SELECT)
+#define V_BCM1480_MC_CHANNEL_SELECT(x) _SB_MAKEVALUE(x, S_BCM1480_MC_CHANNEL_SELECT)
+#define G_BCM1480_MC_CHANNEL_SELECT(x) _SB_GETVALUE(x, S_BCM1480_MC_CHANNEL_SELECT, M_BCM1480_MC_CHANNEL_SELECT)
+#define K_BCM1480_MC_CHANNEL_SELECT_0 0x1
+#define K_BCM1480_MC_CHANNEL_SELECT_1 0x2
+#define K_BCM1480_MC_CHANNEL_SELECT_2 0x4
+#define K_BCM1480_MC_CHANNEL_SELECT_3 0x8
#endif /* _BCM1480_MC_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_regs.h b/arch/mips/include/asm/sibyte/bcm1480_regs.h
index 84d168ddfeb..ec0dacf6f0c 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_regs.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_regs.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* BCM1255/BCM1280/BCM1455/BCM1480 Board Support Package
*
- * Register Definitions File: bcm1480_regs.h
+ * Register Definitions File: bcm1480_regs.h
*
* This module contains the addresses of the on-chip peripherals
* on the BCM1280 and BCM1480.
@@ -80,48 +80,48 @@
* Memory Controller Registers (Section 6)
********************************************************************* */
-#define A_BCM1480_MC_BASE_0 0x0010050000
-#define A_BCM1480_MC_BASE_1 0x0010051000
-#define A_BCM1480_MC_BASE_2 0x0010052000
-#define A_BCM1480_MC_BASE_3 0x0010053000
-#define BCM1480_MC_REGISTER_SPACING 0x1000
+#define A_BCM1480_MC_BASE_0 0x0010050000
+#define A_BCM1480_MC_BASE_1 0x0010051000
+#define A_BCM1480_MC_BASE_2 0x0010052000
+#define A_BCM1480_MC_BASE_3 0x0010053000
+#define BCM1480_MC_REGISTER_SPACING 0x1000
-#define A_BCM1480_MC_BASE(ctlid) (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
+#define A_BCM1480_MC_BASE(ctlid) (A_BCM1480_MC_BASE_0+(ctlid)*BCM1480_MC_REGISTER_SPACING)
#define A_BCM1480_MC_REGISTER(ctlid, reg) (A_BCM1480_MC_BASE(ctlid)+(reg))
-#define R_BCM1480_MC_CONFIG 0x0000000100
-#define R_BCM1480_MC_CS_START 0x0000000120
-#define R_BCM1480_MC_CS_END 0x0000000140
-#define S_BCM1480_MC_CS_STARTEND 24
-
-#define R_BCM1480_MC_CS01_ROW0 0x0000000180
-#define R_BCM1480_MC_CS01_ROW1 0x00000001A0
-#define R_BCM1480_MC_CS23_ROW0 0x0000000200
-#define R_BCM1480_MC_CS23_ROW1 0x0000000220
-#define R_BCM1480_MC_CS01_COL0 0x0000000280
-#define R_BCM1480_MC_CS01_COL1 0x00000002A0
-#define R_BCM1480_MC_CS23_COL0 0x0000000300
-#define R_BCM1480_MC_CS23_COL1 0x0000000320
-
-#define R_BCM1480_MC_CSX_BASE 0x0000000180
-#define R_BCM1480_MC_CSX_ROW0 0x0000000000 /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_ROW1 0x0000000020 /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_COL0 0x0000000100 /* relative to CSX_BASE */
-#define R_BCM1480_MC_CSX_COL1 0x0000000120 /* relative to CSX_BASE */
-#define BCM1480_MC_CSX_SPACING 0x0000000080 /* CS23 relative to CS01 */
-
-#define R_BCM1480_MC_CS01_BA 0x0000000380
-#define R_BCM1480_MC_CS23_BA 0x00000003A0
-#define R_BCM1480_MC_DRAMCMD 0x0000000400
-#define R_BCM1480_MC_DRAMMODE 0x0000000420
-#define R_BCM1480_MC_CLOCK_CFG 0x0000000440
-#define R_BCM1480_MC_MCLK_CFG R_BCM1480_MC_CLOCK_CFG
-#define R_BCM1480_MC_TEST_DATA 0x0000000480
-#define R_BCM1480_MC_TEST_ECC 0x00000004A0
-#define R_BCM1480_MC_TIMING1 0x00000004C0
-#define R_BCM1480_MC_TIMING2 0x00000004E0
-#define R_BCM1480_MC_DLL_CFG 0x0000000500
-#define R_BCM1480_MC_DRIVE_CFG 0x0000000520
+#define R_BCM1480_MC_CONFIG 0x0000000100
+#define R_BCM1480_MC_CS_START 0x0000000120
+#define R_BCM1480_MC_CS_END 0x0000000140
+#define S_BCM1480_MC_CS_STARTEND 24
+
+#define R_BCM1480_MC_CS01_ROW0 0x0000000180
+#define R_BCM1480_MC_CS01_ROW1 0x00000001A0
+#define R_BCM1480_MC_CS23_ROW0 0x0000000200
+#define R_BCM1480_MC_CS23_ROW1 0x0000000220
+#define R_BCM1480_MC_CS01_COL0 0x0000000280
+#define R_BCM1480_MC_CS01_COL1 0x00000002A0
+#define R_BCM1480_MC_CS23_COL0 0x0000000300
+#define R_BCM1480_MC_CS23_COL1 0x0000000320
+
+#define R_BCM1480_MC_CSX_BASE 0x0000000180
+#define R_BCM1480_MC_CSX_ROW0 0x0000000000 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_ROW1 0x0000000020 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL0 0x0000000100 /* relative to CSX_BASE */
+#define R_BCM1480_MC_CSX_COL1 0x0000000120 /* relative to CSX_BASE */
+#define BCM1480_MC_CSX_SPACING 0x0000000080 /* CS23 relative to CS01 */
+
+#define R_BCM1480_MC_CS01_BA 0x0000000380
+#define R_BCM1480_MC_CS23_BA 0x00000003A0
+#define R_BCM1480_MC_DRAMCMD 0x0000000400
+#define R_BCM1480_MC_DRAMMODE 0x0000000420
+#define R_BCM1480_MC_CLOCK_CFG 0x0000000440
+#define R_BCM1480_MC_MCLK_CFG R_BCM1480_MC_CLOCK_CFG
+#define R_BCM1480_MC_TEST_DATA 0x0000000480
+#define R_BCM1480_MC_TEST_ECC 0x00000004A0
+#define R_BCM1480_MC_TIMING1 0x00000004C0
+#define R_BCM1480_MC_TIMING2 0x00000004E0
+#define R_BCM1480_MC_DLL_CFG 0x0000000500
+#define R_BCM1480_MC_DRIVE_CFG 0x0000000520
#if SIBYTE_HDR_FEATURE(1480, PASS2)
#define R_BCM1480_MC_ODT 0x0000000460
@@ -129,55 +129,55 @@
#endif
/* Global registers (single instance) */
-#define A_BCM1480_MC_GLB_CONFIG 0x0010054100
-#define A_BCM1480_MC_GLB_INTLV 0x0010054120
-#define A_BCM1480_MC_GLB_ECC_STATUS 0x0010054140
-#define A_BCM1480_MC_GLB_ECC_ADDR 0x0010054160
-#define A_BCM1480_MC_GLB_ECC_CORRECT 0x0010054180
+#define A_BCM1480_MC_GLB_CONFIG 0x0010054100
+#define A_BCM1480_MC_GLB_INTLV 0x0010054120
+#define A_BCM1480_MC_GLB_ECC_STATUS 0x0010054140
+#define A_BCM1480_MC_GLB_ECC_ADDR 0x0010054160
+#define A_BCM1480_MC_GLB_ECC_CORRECT 0x0010054180
#define A_BCM1480_MC_GLB_PERF_CNT_CONTROL 0x00100541A0
/* *********************************************************************
* L2 Cache Control Registers (Section 5)
********************************************************************* */
-#define A_BCM1480_L2_BASE 0x0010040000
+#define A_BCM1480_L2_BASE 0x0010040000
-#define A_BCM1480_L2_READ_TAG 0x0010040018
-#define A_BCM1480_L2_ECC_TAG 0x0010040038
-#define A_BCM1480_L2_MISC0_VALUE 0x0010040058
-#define A_BCM1480_L2_MISC1_VALUE 0x0010040078
-#define A_BCM1480_L2_MISC2_VALUE 0x0010040098
-#define A_BCM1480_L2_MISC_CONFIG 0x0010040040 /* x040 */
-#define A_BCM1480_L2_CACHE_DISABLE 0x0010040060 /* x060 */
+#define A_BCM1480_L2_READ_TAG 0x0010040018
+#define A_BCM1480_L2_ECC_TAG 0x0010040038
+#define A_BCM1480_L2_MISC0_VALUE 0x0010040058
+#define A_BCM1480_L2_MISC1_VALUE 0x0010040078
+#define A_BCM1480_L2_MISC2_VALUE 0x0010040098
+#define A_BCM1480_L2_MISC_CONFIG 0x0010040040 /* x040 */
+#define A_BCM1480_L2_CACHE_DISABLE 0x0010040060 /* x060 */
#define A_BCM1480_L2_MAKECACHEDISABLE(x) (A_BCM1480_L2_CACHE_DISABLE | (((x)&0xF) << 12))
-#define A_BCM1480_L2_WAY_ENABLE_3_0 0x0010040080 /* x080 */
-#define A_BCM1480_L2_WAY_ENABLE_7_4 0x00100400A0 /* x0A0 */
+#define A_BCM1480_L2_WAY_ENABLE_3_0 0x0010040080 /* x080 */
+#define A_BCM1480_L2_WAY_ENABLE_7_4 0x00100400A0 /* x0A0 */
#define A_BCM1480_L2_MAKE_WAY_ENABLE_LO(x) (A_BCM1480_L2_WAY_ENABLE_3_0 | (((x)&0xF) << 12))
#define A_BCM1480_L2_MAKE_WAY_ENABLE_HI(x) (A_BCM1480_L2_WAY_ENABLE_7_4 | (((x)&0xF) << 12))
#define A_BCM1480_L2_MAKE_WAY_DISABLE_LO(x) (A_BCM1480_L2_WAY_ENABLE_3_0 | (((~x)&0xF) << 12))
#define A_BCM1480_L2_MAKE_WAY_DISABLE_HI(x) (A_BCM1480_L2_WAY_ENABLE_7_4 | (((~x)&0xF) << 12))
-#define A_BCM1480_L2_WAY_LOCAL_3_0 0x0010040100 /* x100 */
-#define A_BCM1480_L2_WAY_LOCAL_7_4 0x0010040120 /* x120 */
-#define A_BCM1480_L2_WAY_REMOTE_3_0 0x0010040140 /* x140 */
-#define A_BCM1480_L2_WAY_REMOTE_7_4 0x0010040160 /* x160 */
-#define A_BCM1480_L2_WAY_AGENT_3_0 0x00100400C0 /* xxC0 */
-#define A_BCM1480_L2_WAY_AGENT_7_4 0x00100400E0 /* xxE0 */
+#define A_BCM1480_L2_WAY_LOCAL_3_0 0x0010040100 /* x100 */
+#define A_BCM1480_L2_WAY_LOCAL_7_4 0x0010040120 /* x120 */
+#define A_BCM1480_L2_WAY_REMOTE_3_0 0x0010040140 /* x140 */
+#define A_BCM1480_L2_WAY_REMOTE_7_4 0x0010040160 /* x160 */
+#define A_BCM1480_L2_WAY_AGENT_3_0 0x00100400C0 /* xxC0 */
+#define A_BCM1480_L2_WAY_AGENT_7_4 0x00100400E0 /* xxE0 */
#define A_BCM1480_L2_WAY_ENABLE(A, banks) (A | (((~(banks))&0x0F) << 8))
-#define A_BCM1480_L2_BANK_BASE 0x00D0300000
-#define A_BCM1480_L2_BANK_ADDRESS(b) (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
-#define A_BCM1480_L2_MGMT_TAG_BASE 0x00D0000000
+#define A_BCM1480_L2_BANK_BASE 0x00D0300000
+#define A_BCM1480_L2_BANK_ADDRESS(b) (A_BCM1480_L2_BANK_BASE | (((b)&0x7)<<17))
+#define A_BCM1480_L2_MGMT_TAG_BASE 0x00D0000000
/* *********************************************************************
* PCI-X Interface Registers (Section 7)
********************************************************************* */
-#define A_BCM1480_PCI_BASE 0x0010061400
+#define A_BCM1480_PCI_BASE 0x0010061400
-#define A_BCM1480_PCI_RESET 0x0010061400
-#define A_BCM1480_PCI_DLL 0x0010061500
+#define A_BCM1480_PCI_RESET 0x0010061400
+#define A_BCM1480_PCI_DLL 0x0010061500
-#define A_BCM1480_PCI_TYPE00_HEADER 0x002E000000
+#define A_BCM1480_PCI_TYPE00_HEADER 0x002E000000
/* *********************************************************************
* Ethernet MAC Registers (Section 11) and DMA Registers (Section 10.6)
@@ -185,19 +185,19 @@
/* No register changes with Rev.C BCM1250, but one additional MAC */
-#define A_BCM1480_MAC_BASE_2 0x0010066000
+#define A_BCM1480_MAC_BASE_2 0x0010066000
#ifndef A_MAC_BASE_2
-#define A_MAC_BASE_2 A_BCM1480_MAC_BASE_2
+#define A_MAC_BASE_2 A_BCM1480_MAC_BASE_2
#endif
-#define A_BCM1480_MAC_BASE_3 0x0010067000
-#define A_MAC_BASE_3 A_BCM1480_MAC_BASE_3
+#define A_BCM1480_MAC_BASE_3 0x0010067000
+#define A_MAC_BASE_3 A_BCM1480_MAC_BASE_3
-#define R_BCM1480_MAC_DMA_OODPKTLOST 0x00000038
+#define R_BCM1480_MAC_DMA_OODPKTLOST 0x00000038
#ifndef R_MAC_DMA_OODPKTLOST
-#define R_MAC_DMA_OODPKTLOST R_BCM1480_MAC_DMA_OODPKTLOST
+#define R_MAC_DMA_OODPKTLOST R_BCM1480_MAC_DMA_OODPKTLOST
#endif
@@ -208,18 +208,18 @@
/* No significant differences from BCM1250, two DUARTs */
/* Conventions, per user manual:
- * DUART generic, channels A,B,C,D
- * DUART0 implementing channels A,B
- * DUART1 implementing channels C,D
+ * DUART generic, channels A,B,C,D
+ * DUART0 implementing channels A,B
+ * DUART1 implementing channels C,D
*/
-#define BCM1480_DUART_NUM_PORTS 4
+#define BCM1480_DUART_NUM_PORTS 4
-#define A_BCM1480_DUART0 0x0010060000
-#define A_BCM1480_DUART1 0x0010060400
-#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
+#define A_BCM1480_DUART0 0x0010060000
+#define A_BCM1480_DUART1 0x0010060400
+#define A_BCM1480_DUART(chan) ((((chan)&2) == 0)? A_BCM1480_DUART0 : A_BCM1480_DUART1)
-#define BCM1480_DUART_CHANREG_SPACING 0x100
+#define BCM1480_DUART_CHANREG_SPACING 0x100
#define A_BCM1480_DUART_CHANREG(chan, reg) \
(A_BCM1480_DUART(chan) + \
BCM1480_DUART_CHANREG_SPACING * (((chan) & 1) + 1) + (reg))
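As a quick check of the convention above, the (chan & 2) test in A_BCM1480_DUART() sends generic channels 0/1 (A,B) to DUART0 and channels 2/3 (C,D) to DUART1. A minimal, self-contained sketch built only from the base addresses defined in this hunk (illustrative; the DUART0/DUART1 values are copied from the defines above):

    #include <stdio.h>
    #include <stdint.h>

    #define A_BCM1480_DUART0 0x0010060000ULL
    #define A_BCM1480_DUART1 0x0010060400ULL
    #define A_BCM1480_DUART(chan) ((((chan) & 2) == 0) ? A_BCM1480_DUART0 : A_BCM1480_DUART1)

    int main(void)
    {
        /* Channels 0,1 (A,B) resolve to DUART0; channels 2,3 (C,D) to DUART1. */
        for (int chan = 0; chan < 4; chan++)
            printf("channel %d -> DUART base 0x%010llx\n", chan,
                   (unsigned long long)A_BCM1480_DUART(chan));
        return 0;
    }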
@@ -249,43 +249,43 @@
* These constants are the absolute addresses.
*/
-#define A_BCM1480_DUART_MODE_REG_1_C 0x0010060400
-#define A_BCM1480_DUART_MODE_REG_2_C 0x0010060410
-#define A_BCM1480_DUART_STATUS_C 0x0010060420
-#define A_BCM1480_DUART_CLK_SEL_C 0x0010060430
-#define A_BCM1480_DUART_FULL_CTL_C 0x0010060440
-#define A_BCM1480_DUART_CMD_C 0x0010060450
-#define A_BCM1480_DUART_RX_HOLD_C 0x0010060460
-#define A_BCM1480_DUART_TX_HOLD_C 0x0010060470
-#define A_BCM1480_DUART_OPCR_C 0x0010060480
-#define A_BCM1480_DUART_AUX_CTRL_C 0x0010060490
-
-#define A_BCM1480_DUART_MODE_REG_1_D 0x0010060500
-#define A_BCM1480_DUART_MODE_REG_2_D 0x0010060510
-#define A_BCM1480_DUART_STATUS_D 0x0010060520
-#define A_BCM1480_DUART_CLK_SEL_D 0x0010060530
-#define A_BCM1480_DUART_FULL_CTL_D 0x0010060540
-#define A_BCM1480_DUART_CMD_D 0x0010060550
-#define A_BCM1480_DUART_RX_HOLD_D 0x0010060560
-#define A_BCM1480_DUART_TX_HOLD_D 0x0010060570
-#define A_BCM1480_DUART_OPCR_D 0x0010060580
-#define A_BCM1480_DUART_AUX_CTRL_D 0x0010060590
-
-#define A_BCM1480_DUART_INPORT_CHNG_CD 0x0010060600
-#define A_BCM1480_DUART_AUX_CTRL_CD 0x0010060610
-#define A_BCM1480_DUART_ISR_C 0x0010060620
-#define A_BCM1480_DUART_IMR_C 0x0010060630
-#define A_BCM1480_DUART_ISR_D 0x0010060640
-#define A_BCM1480_DUART_IMR_D 0x0010060650
-#define A_BCM1480_DUART_OUT_PORT_CD 0x0010060660
-#define A_BCM1480_DUART_OPCR_CD 0x0010060670
-#define A_BCM1480_DUART_IN_PORT_CD 0x0010060680
-#define A_BCM1480_DUART_ISR_CD 0x0010060690
-#define A_BCM1480_DUART_IMR_CD 0x00100606A0
-#define A_BCM1480_DUART_SET_OPR_CD 0x00100606B0
-#define A_BCM1480_DUART_CLEAR_OPR_CD 0x00100606C0
-#define A_BCM1480_DUART_INPORT_CHNG_C 0x00100606D0
-#define A_BCM1480_DUART_INPORT_CHNG_D 0x00100606E0
+#define A_BCM1480_DUART_MODE_REG_1_C 0x0010060400
+#define A_BCM1480_DUART_MODE_REG_2_C 0x0010060410
+#define A_BCM1480_DUART_STATUS_C 0x0010060420
+#define A_BCM1480_DUART_CLK_SEL_C 0x0010060430
+#define A_BCM1480_DUART_FULL_CTL_C 0x0010060440
+#define A_BCM1480_DUART_CMD_C 0x0010060450
+#define A_BCM1480_DUART_RX_HOLD_C 0x0010060460
+#define A_BCM1480_DUART_TX_HOLD_C 0x0010060470
+#define A_BCM1480_DUART_OPCR_C 0x0010060480
+#define A_BCM1480_DUART_AUX_CTRL_C 0x0010060490
+
+#define A_BCM1480_DUART_MODE_REG_1_D 0x0010060500
+#define A_BCM1480_DUART_MODE_REG_2_D 0x0010060510
+#define A_BCM1480_DUART_STATUS_D 0x0010060520
+#define A_BCM1480_DUART_CLK_SEL_D 0x0010060530
+#define A_BCM1480_DUART_FULL_CTL_D 0x0010060540
+#define A_BCM1480_DUART_CMD_D 0x0010060550
+#define A_BCM1480_DUART_RX_HOLD_D 0x0010060560
+#define A_BCM1480_DUART_TX_HOLD_D 0x0010060570
+#define A_BCM1480_DUART_OPCR_D 0x0010060580
+#define A_BCM1480_DUART_AUX_CTRL_D 0x0010060590
+
+#define A_BCM1480_DUART_INPORT_CHNG_CD 0x0010060600
+#define A_BCM1480_DUART_AUX_CTRL_CD 0x0010060610
+#define A_BCM1480_DUART_ISR_C 0x0010060620
+#define A_BCM1480_DUART_IMR_C 0x0010060630
+#define A_BCM1480_DUART_ISR_D 0x0010060640
+#define A_BCM1480_DUART_IMR_D 0x0010060650
+#define A_BCM1480_DUART_OUT_PORT_CD 0x0010060660
+#define A_BCM1480_DUART_OPCR_CD 0x0010060670
+#define A_BCM1480_DUART_IN_PORT_CD 0x0010060680
+#define A_BCM1480_DUART_ISR_CD 0x0010060690
+#define A_BCM1480_DUART_IMR_CD 0x00100606A0
+#define A_BCM1480_DUART_SET_OPR_CD 0x00100606B0
+#define A_BCM1480_DUART_CLEAR_OPR_CD 0x00100606C0
+#define A_BCM1480_DUART_INPORT_CHNG_C 0x00100606D0
+#define A_BCM1480_DUART_INPORT_CHNG_D 0x00100606E0
/* *********************************************************************
@@ -301,8 +301,8 @@
/* One additional GPIO register, placed _before_ the BCM1250's GPIO block base */
-#define A_BCM1480_GPIO_INT_ADD_TYPE 0x0010061A78
-#define R_BCM1480_GPIO_INT_ADD_TYPE (-8)
+#define A_BCM1480_GPIO_INT_ADD_TYPE 0x0010061A78
+#define R_BCM1480_GPIO_INT_ADD_TYPE (-8)
#define A_GPIO_INT_ADD_TYPE A_BCM1480_GPIO_INT_ADD_TYPE
#define R_GPIO_INT_ADD_TYPE R_BCM1480_GPIO_INT_ADD_TYPE
@@ -321,30 +321,30 @@
/* Watchdog timers */
-#define A_BCM1480_SCD_WDOG_2 0x0010022050
-#define A_BCM1480_SCD_WDOG_3 0x0010022150
+#define A_BCM1480_SCD_WDOG_2 0x0010022050
+#define A_BCM1480_SCD_WDOG_3 0x0010022150
-#define BCM1480_SCD_NUM_WDOGS 4
+#define BCM1480_SCD_NUM_WDOGS 4
-#define A_BCM1480_SCD_WDOG_BASE(w) (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
+#define A_BCM1480_SCD_WDOG_BASE(w) (A_BCM1480_SCD_WDOG_0+((w)&2)*0x1000 + ((w)&1)*0x100)
#define A_BCM1480_SCD_WDOG_REGISTER(w, r) (A_BCM1480_SCD_WDOG_BASE(w) + (r))
-#define A_BCM1480_SCD_WDOG_INIT_2 0x0010022050
-#define A_BCM1480_SCD_WDOG_CNT_2 0x0010022058
-#define A_BCM1480_SCD_WDOG_CFG_2 0x0010022060
+#define A_BCM1480_SCD_WDOG_INIT_2 0x0010022050
+#define A_BCM1480_SCD_WDOG_CNT_2 0x0010022058
+#define A_BCM1480_SCD_WDOG_CFG_2 0x0010022060
-#define A_BCM1480_SCD_WDOG_INIT_3 0x0010022150
-#define A_BCM1480_SCD_WDOG_CNT_3 0x0010022158
-#define A_BCM1480_SCD_WDOG_CFG_3 0x0010022160
+#define A_BCM1480_SCD_WDOG_INIT_3 0x0010022150
+#define A_BCM1480_SCD_WDOG_CNT_3 0x0010022158
+#define A_BCM1480_SCD_WDOG_CFG_3 0x0010022160
/* BCM1480 has two additional compare registers */
#define A_BCM1480_SCD_ZBBUS_CYCLE_COUNT A_SCD_ZBBUS_CYCLE_COUNT
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE 0x0010020C00
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0 A_SCD_ZBBUS_CYCLE_CP0
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1 A_SCD_ZBBUS_CYCLE_CP1
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2 0x0010020C10
-#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3 0x0010020C18
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP_BASE 0x0010020C00
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP0 A_SCD_ZBBUS_CYCLE_CP0
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP1 A_SCD_ZBBUS_CYCLE_CP1
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP2 0x0010020C10
+#define A_BCM1480_SCD_ZBBUS_CYCLE_CP3 0x0010020C18
/* *********************************************************************
* System Control Registers (Section 4.2)
@@ -352,7 +352,7 @@
/* Scratch register in different place */
-#define A_BCM1480_SCD_SCRATCH 0x100200A0
+#define A_BCM1480_SCD_SCRATCH 0x100200A0
/* *********************************************************************
* System Address Trap Registers (Section 4.9)
@@ -364,68 +364,68 @@
* System Interrupt Mapper Registers (Sections 4.3-4.5)
********************************************************************* */
-#define A_BCM1480_IMR_CPU0_BASE 0x0010020000
-#define A_BCM1480_IMR_CPU1_BASE 0x0010022000
-#define A_BCM1480_IMR_CPU2_BASE 0x0010024000
-#define A_BCM1480_IMR_CPU3_BASE 0x0010026000
-#define BCM1480_IMR_REGISTER_SPACING 0x2000
+#define A_BCM1480_IMR_CPU0_BASE 0x0010020000
+#define A_BCM1480_IMR_CPU1_BASE 0x0010022000
+#define A_BCM1480_IMR_CPU2_BASE 0x0010024000
+#define A_BCM1480_IMR_CPU3_BASE 0x0010026000
+#define BCM1480_IMR_REGISTER_SPACING 0x2000
#define BCM1480_IMR_REGISTER_SPACING_SHIFT 13
-#define A_BCM1480_IMR_MAPPER(cpu) (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
+#define A_BCM1480_IMR_MAPPER(cpu) (A_BCM1480_IMR_CPU0_BASE+(cpu)*BCM1480_IMR_REGISTER_SPACING)
#define A_BCM1480_IMR_REGISTER(cpu, reg) (A_BCM1480_IMR_MAPPER(cpu)+(reg))
/* Most IMR registers are 128 bits, implemented as non-contiguous
64-bit registers high (_H) and low (_L) */
-#define BCM1480_IMR_HL_SPACING 0x1000
+#define BCM1480_IMR_HL_SPACING 0x1000
-#define R_BCM1480_IMR_INTERRUPT_DIAG_H 0x0010
-#define R_BCM1480_IMR_LDT_INTERRUPT_H 0x0018
-#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H 0x0020
-#define R_BCM1480_IMR_INTERRUPT_MASK_H 0x0028
-#define R_BCM1480_IMR_INTERRUPT_TRACE_H 0x0038
+#define R_BCM1480_IMR_INTERRUPT_DIAG_H 0x0010
+#define R_BCM1480_IMR_LDT_INTERRUPT_H 0x0018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_H 0x0020
+#define R_BCM1480_IMR_INTERRUPT_MASK_H 0x0028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_H 0x0038
#define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_H 0x0040
-#define R_BCM1480_IMR_LDT_INTERRUPT_SET 0x0048
-#define R_BCM1480_IMR_MAILBOX_0_CPU 0x00C0
-#define R_BCM1480_IMR_MAILBOX_0_SET_CPU 0x00C8
-#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU 0x00D0
-#define R_BCM1480_IMR_MAILBOX_1_CPU 0x00E0
-#define R_BCM1480_IMR_MAILBOX_1_SET_CPU 0x00E8
-#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU 0x00F0
-#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H 0x0100
-#define BCM1480_IMR_INTERRUPT_STATUS_COUNT 8
-#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H 0x0200
-#define BCM1480_IMR_INTERRUPT_MAP_COUNT 64
-
-#define R_BCM1480_IMR_INTERRUPT_DIAG_L 0x1010
-#define R_BCM1480_IMR_LDT_INTERRUPT_L 0x1018
-#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L 0x1020
-#define R_BCM1480_IMR_INTERRUPT_MASK_L 0x1028
-#define R_BCM1480_IMR_INTERRUPT_TRACE_L 0x1038
+#define R_BCM1480_IMR_LDT_INTERRUPT_SET 0x0048
+#define R_BCM1480_IMR_MAILBOX_0_CPU 0x00C0
+#define R_BCM1480_IMR_MAILBOX_0_SET_CPU 0x00C8
+#define R_BCM1480_IMR_MAILBOX_0_CLR_CPU 0x00D0
+#define R_BCM1480_IMR_MAILBOX_1_CPU 0x00E0
+#define R_BCM1480_IMR_MAILBOX_1_SET_CPU 0x00E8
+#define R_BCM1480_IMR_MAILBOX_1_CLR_CPU 0x00F0
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_H 0x0100
+#define BCM1480_IMR_INTERRUPT_STATUS_COUNT 8
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_H 0x0200
+#define BCM1480_IMR_INTERRUPT_MAP_COUNT 64
+
+#define R_BCM1480_IMR_INTERRUPT_DIAG_L 0x1010
+#define R_BCM1480_IMR_LDT_INTERRUPT_L 0x1018
+#define R_BCM1480_IMR_LDT_INTERRUPT_CLR_L 0x1020
+#define R_BCM1480_IMR_INTERRUPT_MASK_L 0x1028
+#define R_BCM1480_IMR_INTERRUPT_TRACE_L 0x1038
#define R_BCM1480_IMR_INTERRUPT_SOURCE_STATUS_L 0x1040
-#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L 0x1100
-#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L 0x1200
+#define R_BCM1480_IMR_INTERRUPT_STATUS_BASE_L 0x1100
+#define R_BCM1480_IMR_INTERRUPT_MAP_BASE_L 0x1200
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE 0x0010028000
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE 0x0010028100
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE 0x0010028200
-#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE 0x0010028300
-#define BCM1480_IMR_ALIAS_MAILBOX_SPACING 0100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE 0x0010028000
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU1_BASE 0x0010028100
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU2_BASE 0x0010028200
+#define A_BCM1480_IMR_ALIAS_MAILBOX_CPU3_BASE 0x0010028300
+#define BCM1480_IMR_ALIAS_MAILBOX_SPACING 0100
#define A_BCM1480_IMR_ALIAS_MAILBOX(cpu) (A_BCM1480_IMR_ALIAS_MAILBOX_CPU0_BASE + \
- (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
+ (cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
#define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000 /* 0x0x0 */
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008 /* 0x0x8 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000 /* 0x0x0 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008 /* 0x0x8 */
/*
* these macros work together to build the address of a mailbox
* register, e.g., A_BCM1480_MAILBOX_REGISTER(0,R_BCM1480_IMR_MAILBOX_SET,2)
* for mbox_0_set_cpu2 returns 0x00100240C8
*/
-#define R_BCM1480_IMR_MAILBOX_CPU 0x00
-#define R_BCM1480_IMR_MAILBOX_SET 0x08
-#define R_BCM1480_IMR_MAILBOX_CLR 0x10
+#define R_BCM1480_IMR_MAILBOX_CPU 0x00
+#define R_BCM1480_IMR_MAILBOX_SET 0x08
+#define R_BCM1480_IMR_MAILBOX_CLR 0x10
#define R_BCM1480_IMR_MAILBOX_NUM_SPACING 0x20
#define A_BCM1480_MAILBOX_REGISTER(num, reg, cpu) \
(A_BCM1480_IMR_CPU0_BASE + \
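The worked example in the comment above can be reproduced directly from the per-CPU mapper bases and mailbox offsets defined earlier in this hunk. A minimal sketch, assuming only the constants shown here (the arithmetic mirrors the visible start of A_BCM1480_MAILBOX_REGISTER rather than quoting its full body):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t cpu0_base   = 0x0010020000ULL; /* A_BCM1480_IMR_CPU0_BASE */
        const uint64_t cpu_spacing = 0x2000ULL;       /* BCM1480_IMR_REGISTER_SPACING */
        const uint64_t mbox0_cpu   = 0x00C0ULL;       /* R_BCM1480_IMR_MAILBOX_0_CPU */
        const uint64_t mbox_set    = 0x08ULL;         /* R_BCM1480_IMR_MAILBOX_SET */

        /* mbox_0_set_cpu2: CPU2's interrupt mapper base plus the mailbox-0 "set" register. */
        uint64_t addr = cpu0_base + 2 * cpu_spacing + mbox0_cpu + mbox_set;
        assert(addr == 0x00100240C8ULL); /* value quoted in the comment above */
        return 0;
    }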
@@ -440,22 +440,22 @@
/* BCM1480 has four more performance counter registers, and two control
registers. */
-#define A_BCM1480_SCD_PERF_CNT_BASE 0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_BASE 0x00100204C0
-#define A_BCM1480_SCD_PERF_CNT_CFG0 0x00100204C0
-#define A_BCM1480_SCD_PERF_CNT_CFG_0 A_BCM1480_SCD_PERF_CNT_CFG0
-#define A_BCM1480_SCD_PERF_CNT_CFG1 0x00100204C8
-#define A_BCM1480_SCD_PERF_CNT_CFG_1 A_BCM1480_SCD_PERF_CNT_CFG1
+#define A_BCM1480_SCD_PERF_CNT_CFG0 0x00100204C0
+#define A_BCM1480_SCD_PERF_CNT_CFG_0 A_BCM1480_SCD_PERF_CNT_CFG0
+#define A_BCM1480_SCD_PERF_CNT_CFG1 0x00100204C8
+#define A_BCM1480_SCD_PERF_CNT_CFG_1 A_BCM1480_SCD_PERF_CNT_CFG1
-#define A_BCM1480_SCD_PERF_CNT_0 A_SCD_PERF_CNT_0
-#define A_BCM1480_SCD_PERF_CNT_1 A_SCD_PERF_CNT_1
-#define A_BCM1480_SCD_PERF_CNT_2 A_SCD_PERF_CNT_2
-#define A_BCM1480_SCD_PERF_CNT_3 A_SCD_PERF_CNT_3
+#define A_BCM1480_SCD_PERF_CNT_0 A_SCD_PERF_CNT_0
+#define A_BCM1480_SCD_PERF_CNT_1 A_SCD_PERF_CNT_1
+#define A_BCM1480_SCD_PERF_CNT_2 A_SCD_PERF_CNT_2
+#define A_BCM1480_SCD_PERF_CNT_3 A_SCD_PERF_CNT_3
-#define A_BCM1480_SCD_PERF_CNT_4 0x00100204F0
-#define A_BCM1480_SCD_PERF_CNT_5 0x00100204F8
-#define A_BCM1480_SCD_PERF_CNT_6 0x0010020500
-#define A_BCM1480_SCD_PERF_CNT_7 0x0010020508
+#define A_BCM1480_SCD_PERF_CNT_4 0x00100204F0
+#define A_BCM1480_SCD_PERF_CNT_5 0x00100204F8
+#define A_BCM1480_SCD_PERF_CNT_6 0x0010020500
+#define A_BCM1480_SCD_PERF_CNT_7 0x0010020508
#define BCM1480_SCD_NUM_PERF_CNT 8
#define BCM1480_SCD_PERF_CNT_SPACING 8
@@ -468,7 +468,7 @@
/* Same as 1250 except BUS_ERR_STATUS_DEBUG is in a different place. */
-#define A_BCM1480_BUS_ERR_STATUS_DEBUG 0x00100208D8
+#define A_BCM1480_BUS_ERR_STATUS_DEBUG 0x00100208D8
/* *********************************************************************
* System Debug Controller Registers (Section 19)
@@ -497,46 +497,46 @@
#define BCM1480_HT_PORT_SPACING 0x800
#define A_BCM1480_HT_PORT_HEADER(x) (A_BCM1480_HT_PORT0_HEADER + ((x)*BCM1480_HT_PORT_SPACING))
-#define A_BCM1480_HT_PORT0_HEADER 0x00FE000000
-#define A_BCM1480_HT_PORT1_HEADER 0x00FE000800
-#define A_BCM1480_HT_PORT2_HEADER 0x00FE001000
-#define A_BCM1480_HT_TYPE00_HEADER 0x00FE002000
+#define A_BCM1480_HT_PORT0_HEADER 0x00FE000000
+#define A_BCM1480_HT_PORT1_HEADER 0x00FE000800
+#define A_BCM1480_HT_PORT2_HEADER 0x00FE001000
+#define A_BCM1480_HT_TYPE00_HEADER 0x00FE002000
/* *********************************************************************
* Node Controller Registers (Section 9)
********************************************************************* */
-#define A_BCM1480_NC_BASE 0x00DFBD0000
+#define A_BCM1480_NC_BASE 0x00DFBD0000
-#define A_BCM1480_NC_RLD_FIELD 0x00DFBD0000
-#define A_BCM1480_NC_RLD_TRIGGER 0x00DFBD0020
-#define A_BCM1480_NC_RLD_BAD_ERROR 0x00DFBD0040
-#define A_BCM1480_NC_RLD_COR_ERROR 0x00DFBD0060
-#define A_BCM1480_NC_RLD_ECC_STATUS 0x00DFBD0080
-#define A_BCM1480_NC_RLD_WAY_ENABLE 0x00DFBD00A0
-#define A_BCM1480_NC_RLD_RANDOM_LFSR 0x00DFBD00C0
+#define A_BCM1480_NC_RLD_FIELD 0x00DFBD0000
+#define A_BCM1480_NC_RLD_TRIGGER 0x00DFBD0020
+#define A_BCM1480_NC_RLD_BAD_ERROR 0x00DFBD0040
+#define A_BCM1480_NC_RLD_COR_ERROR 0x00DFBD0060
+#define A_BCM1480_NC_RLD_ECC_STATUS 0x00DFBD0080
+#define A_BCM1480_NC_RLD_WAY_ENABLE 0x00DFBD00A0
+#define A_BCM1480_NC_RLD_RANDOM_LFSR 0x00DFBD00C0
-#define A_BCM1480_NC_INTERRUPT_STATUS 0x00DFBD00E0
-#define A_BCM1480_NC_INTERRUPT_ENABLE 0x00DFBD0100
-#define A_BCM1480_NC_TIMEOUT_COUNTER 0x00DFBD0120
+#define A_BCM1480_NC_INTERRUPT_STATUS 0x00DFBD00E0
+#define A_BCM1480_NC_INTERRUPT_ENABLE 0x00DFBD0100
+#define A_BCM1480_NC_TIMEOUT_COUNTER 0x00DFBD0120
#define A_BCM1480_NC_TIMEOUT_COUNTER_SEL 0x00DFBD0140
-#define A_BCM1480_NC_CREDIT_STATUS_REG0 0x00DFBD0200
-#define A_BCM1480_NC_CREDIT_STATUS_REG1 0x00DFBD0220
-#define A_BCM1480_NC_CREDIT_STATUS_REG2 0x00DFBD0240
-#define A_BCM1480_NC_CREDIT_STATUS_REG3 0x00DFBD0260
-#define A_BCM1480_NC_CREDIT_STATUS_REG4 0x00DFBD0280
-#define A_BCM1480_NC_CREDIT_STATUS_REG5 0x00DFBD02A0
-#define A_BCM1480_NC_CREDIT_STATUS_REG6 0x00DFBD02C0
-#define A_BCM1480_NC_CREDIT_STATUS_REG7 0x00DFBD02E0
-#define A_BCM1480_NC_CREDIT_STATUS_REG8 0x00DFBD0300
-#define A_BCM1480_NC_CREDIT_STATUS_REG9 0x00DFBD0320
+#define A_BCM1480_NC_CREDIT_STATUS_REG0 0x00DFBD0200
+#define A_BCM1480_NC_CREDIT_STATUS_REG1 0x00DFBD0220
+#define A_BCM1480_NC_CREDIT_STATUS_REG2 0x00DFBD0240
+#define A_BCM1480_NC_CREDIT_STATUS_REG3 0x00DFBD0260
+#define A_BCM1480_NC_CREDIT_STATUS_REG4 0x00DFBD0280
+#define A_BCM1480_NC_CREDIT_STATUS_REG5 0x00DFBD02A0
+#define A_BCM1480_NC_CREDIT_STATUS_REG6 0x00DFBD02C0
+#define A_BCM1480_NC_CREDIT_STATUS_REG7 0x00DFBD02E0
+#define A_BCM1480_NC_CREDIT_STATUS_REG8 0x00DFBD0300
+#define A_BCM1480_NC_CREDIT_STATUS_REG9 0x00DFBD0320
#define A_BCM1480_NC_CREDIT_STATUS_REG10 0x00DFBE0000
#define A_BCM1480_NC_CREDIT_STATUS_REG11 0x00DFBE0020
#define A_BCM1480_NC_CREDIT_STATUS_REG12 0x00DFBE0040
-#define A_BCM1480_NC_SR_TIMEOUT_COUNTER 0x00DFBE0060
+#define A_BCM1480_NC_SR_TIMEOUT_COUNTER 0x00DFBE0060
#define A_BCM1480_NC_SR_TIMEOUT_COUNTER_SEL 0x00DFBE0080
@@ -544,43 +544,43 @@
* H&R Block Configuration Registers (Section 12.4)
********************************************************************* */
-#define A_BCM1480_HR_BASE_0 0x00DF820000
-#define A_BCM1480_HR_BASE_1 0x00DF8A0000
-#define A_BCM1480_HR_BASE_2 0x00DF920000
-#define BCM1480_HR_REGISTER_SPACING 0x80000
+#define A_BCM1480_HR_BASE_0 0x00DF820000
+#define A_BCM1480_HR_BASE_1 0x00DF8A0000
+#define A_BCM1480_HR_BASE_2 0x00DF920000
+#define BCM1480_HR_REGISTER_SPACING 0x80000
-#define A_BCM1480_HR_BASE(idx) (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
-#define A_BCM1480_HR_REGISTER(idx, reg) (A_BCM1480_HR_BASE(idx) + (reg))
+#define A_BCM1480_HR_BASE(idx) (A_BCM1480_HR_BASE_0 + ((idx)*BCM1480_HR_REGISTER_SPACING))
+#define A_BCM1480_HR_REGISTER(idx, reg) (A_BCM1480_HR_BASE(idx) + (reg))
-#define R_BCM1480_HR_CFG 0x0000000000
+#define R_BCM1480_HR_CFG 0x0000000000
#define R_BCM1480_HR_MAPPING 0x0000010010
-#define BCM1480_HR_RULE_SPACING 0x0000000010
-#define BCM1480_HR_NUM_RULES 16
-#define BCM1480_HR_OP_OFFSET 0x0000000100
-#define BCM1480_HR_TYPE_OFFSET 0x0000000108
-#define R_BCM1480_HR_RULE_OP(idx) (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
-#define R_BCM1480_HR_RULE_TYPE(idx) (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define BCM1480_HR_RULE_SPACING 0x0000000010
+#define BCM1480_HR_NUM_RULES 16
+#define BCM1480_HR_OP_OFFSET 0x0000000100
+#define BCM1480_HR_TYPE_OFFSET 0x0000000108
+#define R_BCM1480_HR_RULE_OP(idx) (BCM1480_HR_OP_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
+#define R_BCM1480_HR_RULE_TYPE(idx) (BCM1480_HR_TYPE_OFFSET + ((idx)*BCM1480_HR_RULE_SPACING))
-#define BCM1480_HR_LEAF_SPACING 0x0000000010
-#define BCM1480_HR_NUM_LEAVES 10
-#define BCM1480_HR_LEAF_OFFSET 0x0000000300
-#define R_BCM1480_HR_HA_LEAF0(idx) (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
+#define BCM1480_HR_LEAF_SPACING 0x0000000010
+#define BCM1480_HR_NUM_LEAVES 10
+#define BCM1480_HR_LEAF_OFFSET 0x0000000300
+#define R_BCM1480_HR_HA_LEAF0(idx) (BCM1480_HR_LEAF_OFFSET + ((idx)*BCM1480_HR_LEAF_SPACING))
-#define R_BCM1480_HR_EX_LEAF0 0x00000003A0
+#define R_BCM1480_HR_EX_LEAF0 0x00000003A0
-#define BCM1480_HR_PATH_SPACING 0x0000000010
-#define BCM1480_HR_NUM_PATHS 16
-#define BCM1480_HR_PATH_OFFSET 0x0000000600
-#define R_BCM1480_HR_PATH(idx) (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
+#define BCM1480_HR_PATH_SPACING 0x0000000010
+#define BCM1480_HR_NUM_PATHS 16
+#define BCM1480_HR_PATH_OFFSET 0x0000000600
+#define R_BCM1480_HR_PATH(idx) (BCM1480_HR_PATH_OFFSET + ((idx)*BCM1480_HR_PATH_SPACING))
-#define R_BCM1480_HR_PATH_DEFAULT 0x0000000700
+#define R_BCM1480_HR_PATH_DEFAULT 0x0000000700
-#define BCM1480_HR_ROUTE_SPACING 8
-#define BCM1480_HR_NUM_ROUTES 512
-#define BCM1480_HR_ROUTE_OFFSET 0x0000001000
-#define R_BCM1480_HR_RT_WORD(idx) (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
+#define BCM1480_HR_ROUTE_SPACING 8
+#define BCM1480_HR_NUM_ROUTES 512
+#define BCM1480_HR_ROUTE_OFFSET 0x0000001000
+#define R_BCM1480_HR_RT_WORD(idx) (BCM1480_HR_ROUTE_OFFSET + ((idx)*BCM1480_HR_ROUTE_SPACING))
/* checked to here - ehs */
@@ -588,55 +588,55 @@
* Packet Manager DMA Registers (Section 12.5)
********************************************************************* */
-#define A_BCM1480_PM_BASE 0x0010056000
+#define A_BCM1480_PM_BASE 0x0010056000
-#define A_BCM1480_PMI_LCL_0 0x0010058000
-#define A_BCM1480_PMO_LCL_0 0x001005C000
-#define A_BCM1480_PMI_OFFSET_0 (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
-#define A_BCM1480_PMO_OFFSET_0 (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMI_LCL_0 0x0010058000
+#define A_BCM1480_PMO_LCL_0 0x001005C000
+#define A_BCM1480_PMI_OFFSET_0 (A_BCM1480_PMI_LCL_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_OFFSET_0 (A_BCM1480_PMO_LCL_0 - A_BCM1480_PM_BASE)
-#define BCM1480_PM_LCL_REGISTER_SPACING 0x100
-#define BCM1480_PM_NUM_CHANNELS 32
+#define BCM1480_PM_LCL_REGISTER_SPACING 0x100
+#define BCM1480_PM_NUM_CHANNELS 32
-#define A_BCM1480_PMI_LCL_BASE(idx) (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
-#define A_BCM1480_PMI_LCL_REGISTER(idx, reg) (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
-#define A_BCM1480_PMO_LCL_BASE(idx) (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
-#define A_BCM1480_PMO_LCL_REGISTER(idx, reg) (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMI_LCL_BASE(idx) (A_BCM1480_PMI_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMI_LCL_REGISTER(idx, reg) (A_BCM1480_PMI_LCL_BASE(idx) + (reg))
+#define A_BCM1480_PMO_LCL_BASE(idx) (A_BCM1480_PMO_LCL_0 + ((idx)*BCM1480_PM_LCL_REGISTER_SPACING))
+#define A_BCM1480_PMO_LCL_REGISTER(idx, reg) (A_BCM1480_PMO_LCL_BASE(idx) + (reg))
-#define BCM1480_PM_INT_PACKING 8
-#define BCM1480_PM_INT_FUNCTION_SPACING 0x40
-#define BCM1480_PM_INT_NUM_FUNCTIONS 3
+#define BCM1480_PM_INT_PACKING 8
+#define BCM1480_PM_INT_FUNCTION_SPACING 0x40
+#define BCM1480_PM_INT_NUM_FUNCTIONS 3
/*
* DMA channel registers relative to A_BCM1480_PMI_LCL_BASE(n) and A_BCM1480_PMO_LCL_BASE(n)
*/
-#define R_BCM1480_PM_BASE_SIZE 0x0000000000
-#define R_BCM1480_PM_CNT 0x0000000008
-#define R_BCM1480_PM_PFCNT 0x0000000010
-#define R_BCM1480_PM_LAST 0x0000000018
-#define R_BCM1480_PM_PFINDX 0x0000000020
-#define R_BCM1480_PM_INT_WMK 0x0000000028
-#define R_BCM1480_PM_CONFIG0 0x0000000030
-#define R_BCM1480_PM_LOCALDEBUG 0x0000000078
-#define R_BCM1480_PM_CACHEABILITY 0x0000000080 /* PMI only */
-#define R_BCM1480_PM_INT_CNFG 0x0000000088
-#define R_BCM1480_PM_DESC_MERGE_TIMER 0x0000000090
-#define R_BCM1480_PM_LOCALDEBUG_PIB 0x00000000F8 /* PMI only */
-#define R_BCM1480_PM_LOCALDEBUG_POB 0x00000000F8 /* PMO only */
+#define R_BCM1480_PM_BASE_SIZE 0x0000000000
+#define R_BCM1480_PM_CNT 0x0000000008
+#define R_BCM1480_PM_PFCNT 0x0000000010
+#define R_BCM1480_PM_LAST 0x0000000018
+#define R_BCM1480_PM_PFINDX 0x0000000020
+#define R_BCM1480_PM_INT_WMK 0x0000000028
+#define R_BCM1480_PM_CONFIG0 0x0000000030
+#define R_BCM1480_PM_LOCALDEBUG 0x0000000078
+#define R_BCM1480_PM_CACHEABILITY 0x0000000080 /* PMI only */
+#define R_BCM1480_PM_INT_CNFG 0x0000000088
+#define R_BCM1480_PM_DESC_MERGE_TIMER 0x0000000090
+#define R_BCM1480_PM_LOCALDEBUG_PIB 0x00000000F8 /* PMI only */
+#define R_BCM1480_PM_LOCALDEBUG_POB 0x00000000F8 /* PMO only */
/*
* Global Registers (Not Channelized)
*/
-#define A_BCM1480_PMI_GLB_0 0x0010056000
-#define A_BCM1480_PMO_GLB_0 0x0010057000
+#define A_BCM1480_PMI_GLB_0 0x0010056000
+#define A_BCM1480_PMO_GLB_0 0x0010057000
/*
* PM to TX Mapping Register relative to A_BCM1480_PMI_GLB_0 and A_BCM1480_PMO_GLB_0
*/
-#define R_BCM1480_PM_PMO_MAPPING 0x00000008C8 /* PMO only */
+#define R_BCM1480_PM_PMO_MAPPING 0x00000008C8 /* PMO only */
#define A_BCM1480_PM_PMO_MAPPING (A_BCM1480_PMO_GLB_0 + R_BCM1480_PM_PMO_MAPPING)
@@ -645,32 +645,32 @@
*/
-#define A_BCM1480_PMI_INT_0 0x0010056800
-#define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
-#define A_BCM1480_PMI_INT_OFFSET_0 (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
-#define A_BCM1480_PMO_INT_0 0x0010057800
-#define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
-#define A_BCM1480_PMO_INT_OFFSET_0 (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMI_INT_0 0x0010056800
+#define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMI_INT_OFFSET_0 (A_BCM1480_PMI_INT_0 - A_BCM1480_PM_BASE)
+#define A_BCM1480_PMO_INT_0 0x0010057800
+#define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
+#define A_BCM1480_PMO_INT_OFFSET_0 (A_BCM1480_PMO_INT_0 - A_BCM1480_PM_BASE)
/*
* Interrupt registers relative to A_BCM1480_PMI_INT_0 and A_BCM1480_PMO_INT_0
*/
-#define R_BCM1480_PM_INT_ST 0x0000000000
-#define R_BCM1480_PM_INT_MSK 0x0000000040
-#define R_BCM1480_PM_INT_CLR 0x0000000080
-#define R_BCM1480_PM_MRGD_INT 0x00000000C0
+#define R_BCM1480_PM_INT_ST 0x0000000000
+#define R_BCM1480_PM_INT_MSK 0x0000000040
+#define R_BCM1480_PM_INT_CLR 0x0000000080
+#define R_BCM1480_PM_MRGD_INT 0x00000000C0
/*
* Debug registers (global)
*/
#define A_BCM1480_PM_GLOBALDEBUGMODE_PMI 0x0010056000
-#define A_BCM1480_PM_GLOBALDEBUG_PID 0x00100567F8
-#define A_BCM1480_PM_GLOBALDEBUG_PIB 0x0010056FF8
+#define A_BCM1480_PM_GLOBALDEBUG_PID 0x00100567F8
+#define A_BCM1480_PM_GLOBALDEBUG_PIB 0x0010056FF8
#define A_BCM1480_PM_GLOBALDEBUGMODE_PMO 0x0010057000
-#define A_BCM1480_PM_GLOBALDEBUG_POD 0x00100577F8
-#define A_BCM1480_PM_GLOBALDEBUG_POB 0x0010057FF8
+#define A_BCM1480_PM_GLOBALDEBUG_POD 0x00100577F8
+#define A_BCM1480_PM_GLOBALDEBUG_POB 0x0010057FF8
/* *********************************************************************
* Switch performance counters
@@ -715,16 +715,16 @@
* High-Speed Port Registers (Section 13)
********************************************************************* */
-#define A_BCM1480_HSP_BASE_0 0x00DF810000
-#define A_BCM1480_HSP_BASE_1 0x00DF890000
-#define A_BCM1480_HSP_BASE_2 0x00DF910000
-#define BCM1480_HSP_REGISTER_SPACING 0x80000
+#define A_BCM1480_HSP_BASE_0 0x00DF810000
+#define A_BCM1480_HSP_BASE_1 0x00DF890000
+#define A_BCM1480_HSP_BASE_2 0x00DF910000
+#define BCM1480_HSP_REGISTER_SPACING 0x80000
-#define A_BCM1480_HSP_BASE(idx) (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
+#define A_BCM1480_HSP_BASE(idx) (A_BCM1480_HSP_BASE_0 + ((idx)*BCM1480_HSP_REGISTER_SPACING))
#define A_BCM1480_HSP_REGISTER(idx, reg) (A_BCM1480_HSP_BASE(idx) + (reg))
-#define R_BCM1480_HSP_RX_SPI4_CFG_0 0x0000000000
-#define R_BCM1480_HSP_RX_SPI4_CFG_1 0x0000000008
+#define R_BCM1480_HSP_RX_SPI4_CFG_0 0x0000000000
+#define R_BCM1480_HSP_RX_SPI4_CFG_1 0x0000000008
#define R_BCM1480_HSP_RX_SPI4_DESKEW_OVERRIDE 0x0000000010
#define R_BCM1480_HSP_RX_SPI4_DESKEW_DATAPATH 0x0000000018
#define R_BCM1480_HSP_RX_SPI4_PORT_INT_EN 0x0000000020
@@ -733,34 +733,34 @@
#define R_BCM1480_HSP_RX_SPI4_CALENDAR_0 0x0000000200
#define R_BCM1480_HSP_RX_SPI4_CALENDAR_1 0x0000000208
-#define R_BCM1480_HSP_RX_PLL_CNFG 0x0000000800
-#define R_BCM1480_HSP_RX_CALIBRATION 0x0000000808
-#define R_BCM1480_HSP_RX_TEST 0x0000000810
-#define R_BCM1480_HSP_RX_DIAG_DETAILS 0x0000000818
-#define R_BCM1480_HSP_RX_DIAG_CRC_0 0x0000000820
-#define R_BCM1480_HSP_RX_DIAG_CRC_1 0x0000000828
-#define R_BCM1480_HSP_RX_DIAG_HTCMD 0x0000000830
-#define R_BCM1480_HSP_RX_DIAG_PKTCTL 0x0000000838
+#define R_BCM1480_HSP_RX_PLL_CNFG 0x0000000800
+#define R_BCM1480_HSP_RX_CALIBRATION 0x0000000808
+#define R_BCM1480_HSP_RX_TEST 0x0000000810
+#define R_BCM1480_HSP_RX_DIAG_DETAILS 0x0000000818
+#define R_BCM1480_HSP_RX_DIAG_CRC_0 0x0000000820
+#define R_BCM1480_HSP_RX_DIAG_CRC_1 0x0000000828
+#define R_BCM1480_HSP_RX_DIAG_HTCMD 0x0000000830
+#define R_BCM1480_HSP_RX_DIAG_PKTCTL 0x0000000838
#define R_BCM1480_HSP_RX_VIS_FLCTRL_COUNTER 0x0000000870
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0 0x0000020020
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1 0x0000020028
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2 0x0000020030
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3 0x0000020038
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4 0x0000020040
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5 0x0000020048
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6 0x0000020050
-#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7 0x0000020058
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_0 0x0000020020
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_1 0x0000020028
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_2 0x0000020030
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_3 0x0000020038
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_4 0x0000020040
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_5 0x0000020048
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_6 0x0000020050
+#define R_BCM1480_HSP_RX_PKT_RAMALLOC_7 0x0000020058
#define R_BCM1480_HSP_RX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_RX_PKT_RAMALLOC_0 + 8*(idx))
/* XXX Following registers were shuffled. Renamed/renumbered per errata. */
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_0 0x0000020078
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_1 0x0000020080
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_2 0x0000020088
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_3 0x0000020090
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_4 0x0000020098
-#define R_BCM1480_HSP_RX_HT_RAMALLOC_5 0x00000200A0
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_0 0x0000020078
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_1 0x0000020080
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_2 0x0000020088
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_3 0x0000020090
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_4 0x0000020098
+#define R_BCM1480_HSP_RX_HT_RAMALLOC_5 0x00000200A0
#define R_BCM1480_HSP_RX_SPI_WATERMARK_0 0x00000200B0
#define R_BCM1480_HSP_RX_SPI_WATERMARK_1 0x00000200B8
@@ -772,30 +772,30 @@
#define R_BCM1480_HSP_RX_SPI_WATERMARK_7 0x00000200E8
#define R_BCM1480_HSP_RX_SPI_WATERMARK(idx) (R_BCM1480_HSP_RX_SPI_WATERMARK_0 + 8*(idx))
-#define R_BCM1480_HSP_RX_VIS_CMDQ_0 0x00000200F0
-#define R_BCM1480_HSP_RX_VIS_CMDQ_1 0x00000200F8
-#define R_BCM1480_HSP_RX_VIS_CMDQ_2 0x0000020100
-#define R_BCM1480_HSP_RX_RAM_READCTL 0x0000020108
-#define R_BCM1480_HSP_RX_RAM_READWINDOW 0x0000020110
-#define R_BCM1480_HSP_RX_RF_READCTL 0x0000020118
-#define R_BCM1480_HSP_RX_RF_READWINDOW 0x0000020120
+#define R_BCM1480_HSP_RX_VIS_CMDQ_0 0x00000200F0
+#define R_BCM1480_HSP_RX_VIS_CMDQ_1 0x00000200F8
+#define R_BCM1480_HSP_RX_VIS_CMDQ_2 0x0000020100
+#define R_BCM1480_HSP_RX_RAM_READCTL 0x0000020108
+#define R_BCM1480_HSP_RX_RAM_READWINDOW 0x0000020110
+#define R_BCM1480_HSP_RX_RF_READCTL 0x0000020118
+#define R_BCM1480_HSP_RX_RF_READWINDOW 0x0000020120
-#define R_BCM1480_HSP_TX_SPI4_CFG_0 0x0000040000
-#define R_BCM1480_HSP_TX_SPI4_CFG_1 0x0000040008
+#define R_BCM1480_HSP_TX_SPI4_CFG_0 0x0000040000
+#define R_BCM1480_HSP_TX_SPI4_CFG_1 0x0000040008
#define R_BCM1480_HSP_TX_SPI4_TRAINING_FMT 0x0000040010
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0 0x0000040020
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1 0x0000040028
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2 0x0000040030
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3 0x0000040038
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4 0x0000040040
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5 0x0000040048
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6 0x0000040050
-#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7 0x0000040058
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_0 0x0000040020
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_1 0x0000040028
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_2 0x0000040030
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_3 0x0000040038
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_4 0x0000040040
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_5 0x0000040048
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_6 0x0000040050
+#define R_BCM1480_HSP_TX_PKT_RAMALLOC_7 0x0000040058
#define R_BCM1480_HSP_TX_PKT_RAMALLOC(idx) (R_BCM1480_HSP_TX_PKT_RAMALLOC_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_NPC_RAMALLOC 0x0000040078
-#define R_BCM1480_HSP_TX_RSP_RAMALLOC 0x0000040080
-#define R_BCM1480_HSP_TX_PC_RAMALLOC 0x0000040088
+#define R_BCM1480_HSP_TX_NPC_RAMALLOC 0x0000040078
+#define R_BCM1480_HSP_TX_RSP_RAMALLOC 0x0000040080
+#define R_BCM1480_HSP_TX_PC_RAMALLOC 0x0000040088
#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_0 0x0000040090
#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_1 0x0000040098
#define R_BCM1480_HSP_TX_HTCC_RAMALLOC_2 0x00000400A0
@@ -805,37 +805,37 @@
#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_2 0x00000400C0
#define R_BCM1480_HSP_TX_PKT_RXPHITCNT_3 0x00000400C8
#define R_BCM1480_HSP_TX_PKT_RXPHITCNT(idx) (R_BCM1480_HSP_TX_PKT_RXPHITCNT_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT 0x00000400D0
-#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT 0x00000400D8
+#define R_BCM1480_HSP_TX_HTIO_RXPHITCNT 0x00000400D0
+#define R_BCM1480_HSP_TX_HTCC_RXPHITCNT 0x00000400D8
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 0x00000400E0
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_1 0x00000400E8
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_2 0x00000400F0
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT_3 0x00000400F8
#define R_BCM1480_HSP_TX_PKT_TXPHITCNT(idx) (R_BCM1480_HSP_TX_PKT_TXPHITCNT_0 + 8*(idx))
-#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT 0x0000040100
-#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT 0x0000040108
+#define R_BCM1480_HSP_TX_HTIO_TXPHITCNT 0x0000040100
+#define R_BCM1480_HSP_TX_HTCC_TXPHITCNT 0x0000040108
#define R_BCM1480_HSP_TX_SPI4_CALENDAR_0 0x0000040200
#define R_BCM1480_HSP_TX_SPI4_CALENDAR_1 0x0000040208
-#define R_BCM1480_HSP_TX_PLL_CNFG 0x0000040800
-#define R_BCM1480_HSP_TX_CALIBRATION 0x0000040808
-#define R_BCM1480_HSP_TX_TEST 0x0000040810
+#define R_BCM1480_HSP_TX_PLL_CNFG 0x0000040800
+#define R_BCM1480_HSP_TX_CALIBRATION 0x0000040808
+#define R_BCM1480_HSP_TX_TEST 0x0000040810
-#define R_BCM1480_HSP_TX_VIS_CMDQ_0 0x0000040840
-#define R_BCM1480_HSP_TX_VIS_CMDQ_1 0x0000040848
-#define R_BCM1480_HSP_TX_VIS_CMDQ_2 0x0000040850
-#define R_BCM1480_HSP_TX_RAM_READCTL 0x0000040860
-#define R_BCM1480_HSP_TX_RAM_READWINDOW 0x0000040868
-#define R_BCM1480_HSP_TX_RF_READCTL 0x0000040870
-#define R_BCM1480_HSP_TX_RF_READWINDOW 0x0000040878
+#define R_BCM1480_HSP_TX_VIS_CMDQ_0 0x0000040840
+#define R_BCM1480_HSP_TX_VIS_CMDQ_1 0x0000040848
+#define R_BCM1480_HSP_TX_VIS_CMDQ_2 0x0000040850
+#define R_BCM1480_HSP_TX_RAM_READCTL 0x0000040860
+#define R_BCM1480_HSP_TX_RAM_READWINDOW 0x0000040868
+#define R_BCM1480_HSP_TX_RF_READCTL 0x0000040870
+#define R_BCM1480_HSP_TX_RF_READWINDOW 0x0000040878
#define R_BCM1480_HSP_TX_SPI4_PORT_INT_STATUS 0x0000040880
#define R_BCM1480_HSP_TX_SPI4_PORT_INT_EN 0x0000040888
#define R_BCM1480_HSP_TX_NEXT_ADDR_BASE 0x000040400
-#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x) (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
+#define R_BCM1480_HSP_TX_NEXT_ADDR_REGISTER(x) (R_BCM1480_HSP_TX_NEXT_ADDR_BASE+ 8*(x))
@@ -843,60 +843,60 @@
* Physical Address Map (Table 10 and Figure 7)
********************************************************************* */
-#define A_BCM1480_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
-#define A_BCM1480_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
-#define A_BCM1480_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
-#define A_BCM1480_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
-#define A_BCM1480_PHYS_GENBUS _SB_MAKE64(0x0010090000)
-#define A_BCM1480_PHYS_GENBUS_END _SB_MAKE64(0x0028000000)
-#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES _SB_MAKE64(0x0028000000)
-#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES _SB_MAKE64(0x0029000000)
-#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES _SB_MAKE64(0x002C000000)
-#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES _SB_MAKE64(0x002E000000)
-#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES _SB_MAKE64(0x002F000000)
-#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES _SB_MAKE64(0x0030000000)
-#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES _SB_MAKE64(0x0040000000)
-#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS _SB_MAKE64(0x0060000000)
-#define A_BCM1480_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
-#define A_BCM1480_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
-#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS _SB_MAKE64(0x00A8000000)
-#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS _SB_MAKE64(0x00A9000000)
-#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS _SB_MAKE64(0x00AC000000)
-#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS _SB_MAKE64(0x00AE000000)
-#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS _SB_MAKE64(0x00AF000000)
-#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS _SB_MAKE64(0x00B0000000)
-#define A_BCM1480_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
-#define A_BCM1480_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
-#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
-#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
-#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
-#define A_BCM1480_PHYS_HS_SUBSYS _SB_MAKE64(0x00DF000000)
-#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
-#define A_BCM1480_PHYS_HT_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
-#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
-#define A_BCM1480_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
-#define A_BCM1480_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
-#define A_BCM1480_PHYS_PCI_UPPER _SB_MAKE64(0x1000000000)
-#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES _SB_MAKE64(0x2000000000)
-#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS _SB_MAKE64(0x3000000000)
-#define A_BCM1480_PHYS_HT_NODE_ALIAS _SB_MAKE64(0x4000000000)
-#define A_BCM1480_PHYS_HT_FULLACCESS _SB_MAKE64(0xF000000000)
+#define A_BCM1480_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
+#define A_BCM1480_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
+#define A_BCM1480_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
+#define A_BCM1480_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
+#define A_BCM1480_PHYS_GENBUS _SB_MAKE64(0x0010090000)
+#define A_BCM1480_PHYS_GENBUS_END _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BYTES _SB_MAKE64(0x0028000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BYTES _SB_MAKE64(0x0029000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BYTES _SB_MAKE64(0x002C000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BYTES _SB_MAKE64(0x002E000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BYTES _SB_MAKE64(0x002F000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BYTES _SB_MAKE64(0x0030000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BYTES _SB_MAKE64(0x0040000000)
+#define A_BCM1480_PHYS_HT_MEM_MATCH_BITS _SB_MAKE64(0x0060000000)
+#define A_BCM1480_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
+#define A_BCM1480_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
+#define A_BCM1480_PHYS_PCI_MISC_MATCH_BITS _SB_MAKE64(0x00A8000000)
+#define A_BCM1480_PHYS_PCI_IACK_MATCH_BITS _SB_MAKE64(0x00A9000000)
+#define A_BCM1480_PHYS_PCI_IO_MATCH_BITS _SB_MAKE64(0x00AC000000)
+#define A_BCM1480_PHYS_PCI_CFG_MATCH_BITS _SB_MAKE64(0x00AE000000)
+#define A_BCM1480_PHYS_PCI_OMAP_MATCH_BITS _SB_MAKE64(0x00AF000000)
+#define A_BCM1480_PHYS_PCI_MEM_MATCH_BITS _SB_MAKE64(0x00B0000000)
+#define A_BCM1480_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
+#define A_BCM1480_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
+#define A_BCM1480_PHYS_HS_SUBSYS _SB_MAKE64(0x00DF000000)
+#define A_BCM1480_PHYS_HT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
+#define A_BCM1480_PHYS_HT_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
+#define A_BCM1480_PHYS_HT_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
+#define A_BCM1480_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
+#define A_BCM1480_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
+#define A_BCM1480_PHYS_PCI_UPPER _SB_MAKE64(0x1000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BYTES _SB_MAKE64(0x2000000000)
+#define A_BCM1480_PHYS_HT_UPPER_MATCH_BITS _SB_MAKE64(0x3000000000)
+#define A_BCM1480_PHYS_HT_NODE_ALIAS _SB_MAKE64(0x4000000000)
+#define A_BCM1480_PHYS_HT_FULLACCESS _SB_MAKE64(0xF000000000)
/* *********************************************************************
* L2 Cache as RAM (Table 54)
********************************************************************* */
-#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
-#define BCM1480_PHYS_L2CACHE_NUM_WAYS 8
-#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000100000)
-#define A_BCM1480_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0300000)
-#define A_BCM1480_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D0320000)
-#define A_BCM1480_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D0340000)
-#define A_BCM1480_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D0360000)
-#define A_BCM1480_PHYS_L2CACHE_WAY4 _SB_MAKE64(0x00D0380000)
-#define A_BCM1480_PHYS_L2CACHE_WAY5 _SB_MAKE64(0x00D03A0000)
-#define A_BCM1480_PHYS_L2CACHE_WAY6 _SB_MAKE64(0x00D03C0000)
-#define A_BCM1480_PHYS_L2CACHE_WAY7 _SB_MAKE64(0x00D03E0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
+#define BCM1480_PHYS_L2CACHE_NUM_WAYS 8
+#define A_BCM1480_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000100000)
+#define A_BCM1480_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0300000)
+#define A_BCM1480_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D0320000)
+#define A_BCM1480_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D0340000)
+#define A_BCM1480_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D0360000)
+#define A_BCM1480_PHYS_L2CACHE_WAY4 _SB_MAKE64(0x00D0380000)
+#define A_BCM1480_PHYS_L2CACHE_WAY5 _SB_MAKE64(0x00D03A0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY6 _SB_MAKE64(0x00D03C0000)
+#define A_BCM1480_PHYS_L2CACHE_WAY7 _SB_MAKE64(0x00D03E0000)
#endif /* _BCM1480_REGS_H */
diff --git a/arch/mips/include/asm/sibyte/bcm1480_scd.h b/arch/mips/include/asm/sibyte/bcm1480_scd.h
index 2af3706b964..8a1e2b05a62 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_scd.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_scd.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* BCM1280/BCM1400 Board Support Package
*
- * SCD Constants and Macros File: bcm1480_scd.h
+ * SCD Constants and Macros File: bcm1480_scd.h
*
* This module contains constants and macros useful for
* manipulating the System Control and Debug module.
@@ -74,11 +74,11 @@
* New part definitions
*/
-#define K_SYS_PART_BCM1480 0x1406
-#define K_SYS_PART_BCM1280 0x1206
-#define K_SYS_PART_BCM1455 0x1407
-#define K_SYS_PART_BCM1255 0x1257
-#define K_SYS_PART_BCM1158 0x1156
+#define K_SYS_PART_BCM1480 0x1406
+#define K_SYS_PART_BCM1280 0x1206
+#define K_SYS_PART_BCM1455 0x1407
+#define K_SYS_PART_BCM1255 0x1257
+#define K_SYS_PART_BCM1158 0x1156
/*
* Manufacturing Information Register (Table 14)
@@ -91,73 +91,73 @@
* Entire register is different from 1250, all new constants below
*/
-#define M_BCM1480_SYS_RESERVED0 _SB_MAKEMASK1(0)
-#define M_BCM1480_SYS_HT_MINRSTCNT _SB_MAKEMASK1(1)
-#define M_BCM1480_SYS_RESERVED2 _SB_MAKEMASK1(2)
-#define M_BCM1480_SYS_RESERVED3 _SB_MAKEMASK1(3)
-#define M_BCM1480_SYS_RESERVED4 _SB_MAKEMASK1(4)
-#define M_BCM1480_SYS_IOB_DIV _SB_MAKEMASK1(5)
-
-#define S_BCM1480_SYS_PLL_DIV _SB_MAKE64(6)
-#define M_BCM1480_SYS_PLL_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
-#define V_BCM1480_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
-#define G_BCM1480_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
-
-#define S_BCM1480_SYS_SW_DIV _SB_MAKE64(11)
-#define M_BCM1480_SYS_SW_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
-#define V_BCM1480_SYS_SW_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
-#define G_BCM1480_SYS_SW_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
-
-#define M_BCM1480_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
-#define M_BCM1480_SYS_DUART1_ENABLE _SB_MAKEMASK1(17)
-
-#define S_BCM1480_SYS_BOOT_MODE _SB_MAKE64(18)
-#define M_BCM1480_SYS_BOOT_MODE _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
-#define V_BCM1480_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
-#define G_BCM1480_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
-#define K_BCM1480_SYS_BOOT_MODE_ROM32 0
-#define K_BCM1480_SYS_BOOT_MODE_ROM8 1
+#define M_BCM1480_SYS_RESERVED0 _SB_MAKEMASK1(0)
+#define M_BCM1480_SYS_HT_MINRSTCNT _SB_MAKEMASK1(1)
+#define M_BCM1480_SYS_RESERVED2 _SB_MAKEMASK1(2)
+#define M_BCM1480_SYS_RESERVED3 _SB_MAKEMASK1(3)
+#define M_BCM1480_SYS_RESERVED4 _SB_MAKEMASK1(4)
+#define M_BCM1480_SYS_IOB_DIV _SB_MAKEMASK1(5)
+
+#define S_BCM1480_SYS_PLL_DIV _SB_MAKE64(6)
+#define M_BCM1480_SYS_PLL_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_PLL_DIV)
+#define V_BCM1480_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_PLL_DIV)
+#define G_BCM1480_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_PLL_DIV, M_BCM1480_SYS_PLL_DIV)
+
+#define S_BCM1480_SYS_SW_DIV _SB_MAKE64(11)
+#define M_BCM1480_SYS_SW_DIV _SB_MAKEMASK(5, S_BCM1480_SYS_SW_DIV)
+#define V_BCM1480_SYS_SW_DIV(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_SW_DIV)
+#define G_BCM1480_SYS_SW_DIV(x) _SB_GETVALUE(x, S_BCM1480_SYS_SW_DIV, M_BCM1480_SYS_SW_DIV)
+
+#define M_BCM1480_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
+#define M_BCM1480_SYS_DUART1_ENABLE _SB_MAKEMASK1(17)
+
+#define S_BCM1480_SYS_BOOT_MODE _SB_MAKE64(18)
+#define M_BCM1480_SYS_BOOT_MODE _SB_MAKEMASK(2, S_BCM1480_SYS_BOOT_MODE)
+#define V_BCM1480_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_BOOT_MODE)
+#define G_BCM1480_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_BCM1480_SYS_BOOT_MODE, M_BCM1480_SYS_BOOT_MODE)
+#define K_BCM1480_SYS_BOOT_MODE_ROM32 0
+#define K_BCM1480_SYS_BOOT_MODE_ROM8 1
#define K_BCM1480_SYS_BOOT_MODE_SMBUS_SMALL 2
#define K_BCM1480_SYS_BOOT_MODE_SMBUS_BIG 3
-#define M_BCM1480_SYS_BOOT_MODE_SMBUS _SB_MAKEMASK1(19)
-
-#define M_BCM1480_SYS_PCI_HOST _SB_MAKEMASK1(20)
-#define M_BCM1480_SYS_PCI_ARBITER _SB_MAKEMASK1(21)
-#define M_BCM1480_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
-#define M_BCM1480_SYS_GENCLK_EN _SB_MAKEMASK1(23)
-#define M_BCM1480_SYS_GEN_PARITY_EN _SB_MAKEMASK1(24)
-#define M_BCM1480_SYS_RESERVED25 _SB_MAKEMASK1(25)
-
-#define S_BCM1480_SYS_CONFIG 26
-#define M_BCM1480_SYS_CONFIG _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
-#define V_BCM1480_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
-#define G_BCM1480_SYS_CONFIG(x) _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
-
-#define M_BCM1480_SYS_RESERVED32 _SB_MAKEMASK(32, 15)
-
-#define S_BCM1480_SYS_NODEID 47
-#define M_BCM1480_SYS_NODEID _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
-#define V_BCM1480_SYS_NODEID(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
-#define G_BCM1480_SYS_NODEID(x) _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
-
-#define M_BCM1480_SYS_CCNUMA_EN _SB_MAKEMASK1(51)
-#define M_BCM1480_SYS_CPU_RESET_0 _SB_MAKEMASK1(52)
-#define M_BCM1480_SYS_CPU_RESET_1 _SB_MAKEMASK1(53)
-#define M_BCM1480_SYS_CPU_RESET_2 _SB_MAKEMASK1(54)
-#define M_BCM1480_SYS_CPU_RESET_3 _SB_MAKEMASK1(55)
-#define S_BCM1480_SYS_DISABLECPU0 56
-#define M_BCM1480_SYS_DISABLECPU0 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
-#define S_BCM1480_SYS_DISABLECPU1 57
-#define M_BCM1480_SYS_DISABLECPU1 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
-#define S_BCM1480_SYS_DISABLECPU2 58
-#define M_BCM1480_SYS_DISABLECPU2 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
-#define S_BCM1480_SYS_DISABLECPU3 59
-#define M_BCM1480_SYS_DISABLECPU3 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
-
-#define M_BCM1480_SYS_SB_SOFTRES _SB_MAKEMASK1(60)
-#define M_BCM1480_SYS_EXT_RESET _SB_MAKEMASK1(61)
-#define M_BCM1480_SYS_SYSTEM_RESET _SB_MAKEMASK1(62)
-#define M_BCM1480_SYS_SW_FLAG _SB_MAKEMASK1(63)
+#define M_BCM1480_SYS_BOOT_MODE_SMBUS _SB_MAKEMASK1(19)
+
+#define M_BCM1480_SYS_PCI_HOST _SB_MAKEMASK1(20)
+#define M_BCM1480_SYS_PCI_ARBITER _SB_MAKEMASK1(21)
+#define M_BCM1480_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
+#define M_BCM1480_SYS_GENCLK_EN _SB_MAKEMASK1(23)
+#define M_BCM1480_SYS_GEN_PARITY_EN _SB_MAKEMASK1(24)
+#define M_BCM1480_SYS_RESERVED25 _SB_MAKEMASK1(25)
+
+#define S_BCM1480_SYS_CONFIG 26
+#define M_BCM1480_SYS_CONFIG _SB_MAKEMASK(6, S_BCM1480_SYS_CONFIG)
+#define V_BCM1480_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_CONFIG)
+#define G_BCM1480_SYS_CONFIG(x) _SB_GETVALUE(x, S_BCM1480_SYS_CONFIG, M_BCM1480_SYS_CONFIG)
+
+#define M_BCM1480_SYS_RESERVED32 _SB_MAKEMASK(32, 15)
+
+#define S_BCM1480_SYS_NODEID 47
+#define M_BCM1480_SYS_NODEID _SB_MAKEMASK(4, S_BCM1480_SYS_NODEID)
+#define V_BCM1480_SYS_NODEID(x) _SB_MAKEVALUE(x, S_BCM1480_SYS_NODEID)
+#define G_BCM1480_SYS_NODEID(x) _SB_GETVALUE(x, S_BCM1480_SYS_NODEID, M_BCM1480_SYS_NODEID)
+
+#define M_BCM1480_SYS_CCNUMA_EN _SB_MAKEMASK1(51)
+#define M_BCM1480_SYS_CPU_RESET_0 _SB_MAKEMASK1(52)
+#define M_BCM1480_SYS_CPU_RESET_1 _SB_MAKEMASK1(53)
+#define M_BCM1480_SYS_CPU_RESET_2 _SB_MAKEMASK1(54)
+#define M_BCM1480_SYS_CPU_RESET_3 _SB_MAKEMASK1(55)
+#define S_BCM1480_SYS_DISABLECPU0 56
+#define M_BCM1480_SYS_DISABLECPU0 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU0)
+#define S_BCM1480_SYS_DISABLECPU1 57
+#define M_BCM1480_SYS_DISABLECPU1 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU1)
+#define S_BCM1480_SYS_DISABLECPU2 58
+#define M_BCM1480_SYS_DISABLECPU2 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU2)
+#define S_BCM1480_SYS_DISABLECPU3 59
+#define M_BCM1480_SYS_DISABLECPU3 _SB_MAKEMASK1(S_BCM1480_SYS_DISABLECPU3)
+
+#define M_BCM1480_SYS_SB_SOFTRES _SB_MAKEMASK1(60)
+#define M_BCM1480_SYS_EXT_RESET _SB_MAKEMASK1(61)
+#define M_BCM1480_SYS_SYSTEM_RESET _SB_MAKEMASK1(62)
+#define M_BCM1480_SYS_SW_FLAG _SB_MAKEMASK1(63)
/*
* Scratch Register (Table 16)
@@ -193,23 +193,23 @@
* Registers: SCD_WDOG_CFG_x
*/
-#define M_BCM1480_SCD_WDOG_ENABLE _SB_MAKEMASK1(0)
+#define M_BCM1480_SCD_WDOG_ENABLE _SB_MAKEMASK1(0)
-#define S_BCM1480_SCD_WDOG_RESET_TYPE 2
-#define M_BCM1480_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
+#define S_BCM1480_SCD_WDOG_RESET_TYPE 2
+#define M_BCM1480_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(5, S_BCM1480_SCD_WDOG_RESET_TYPE)
#define V_BCM1480_SCD_WDOG_RESET_TYPE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE)
#define G_BCM1480_SCD_WDOG_RESET_TYPE(x) _SB_GETVALUE(x, S_BCM1480_SCD_WDOG_RESET_TYPE, M_BCM1480_SCD_WDOG_RESET_TYPE)
-#define K_BCM1480_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
-#define K_BCM1480_SCD_WDOG_RESET_SOFT 1
-#define K_BCM1480_SCD_WDOG_RESET_CPU0 3
-#define K_BCM1480_SCD_WDOG_RESET_CPU1 5
-#define K_BCM1480_SCD_WDOG_RESET_CPU2 9
-#define K_BCM1480_SCD_WDOG_RESET_CPU3 17
+#define K_BCM1480_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
+#define K_BCM1480_SCD_WDOG_RESET_SOFT 1
+#define K_BCM1480_SCD_WDOG_RESET_CPU0 3
+#define K_BCM1480_SCD_WDOG_RESET_CPU1 5
+#define K_BCM1480_SCD_WDOG_RESET_CPU2 9
+#define K_BCM1480_SCD_WDOG_RESET_CPU3 17
#define K_BCM1480_SCD_WDOG_RESET_ALL_CPUS 31
-#define M_BCM1480_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(8)
+#define M_BCM1480_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(8)
/*
* General Timer Initial Count Registers (Table 26)
@@ -243,32 +243,32 @@
* The clear/enable bits are in different locations on the 1250 and 1480.
*/
-#define S_SPC_CFG_SRC4 32
-#define M_SPC_CFG_SRC4 _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
-#define V_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
-#define G_SPC_CFG_SRC4(x) _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
+#define S_SPC_CFG_SRC4 32
+#define M_SPC_CFG_SRC4 _SB_MAKEMASK(8, S_SPC_CFG_SRC4)
+#define V_SPC_CFG_SRC4(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC4)
+#define G_SPC_CFG_SRC4(x) _SB_GETVALUE(x, S_SPC_CFG_SRC4, M_SPC_CFG_SRC4)
-#define S_SPC_CFG_SRC5 40
-#define M_SPC_CFG_SRC5 _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
-#define V_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
-#define G_SPC_CFG_SRC5(x) _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
+#define S_SPC_CFG_SRC5 40
+#define M_SPC_CFG_SRC5 _SB_MAKEMASK(8, S_SPC_CFG_SRC5)
+#define V_SPC_CFG_SRC5(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC5)
+#define G_SPC_CFG_SRC5(x) _SB_GETVALUE(x, S_SPC_CFG_SRC5, M_SPC_CFG_SRC5)
-#define S_SPC_CFG_SRC6 48
-#define M_SPC_CFG_SRC6 _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
-#define V_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
-#define G_SPC_CFG_SRC6(x) _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
+#define S_SPC_CFG_SRC6 48
+#define M_SPC_CFG_SRC6 _SB_MAKEMASK(8, S_SPC_CFG_SRC6)
+#define V_SPC_CFG_SRC6(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC6)
+#define G_SPC_CFG_SRC6(x) _SB_GETVALUE(x, S_SPC_CFG_SRC6, M_SPC_CFG_SRC6)
-#define S_SPC_CFG_SRC7 56
-#define M_SPC_CFG_SRC7 _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
-#define V_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
-#define G_SPC_CFG_SRC7(x) _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
+#define S_SPC_CFG_SRC7 56
+#define M_SPC_CFG_SRC7 _SB_MAKEMASK(8, S_SPC_CFG_SRC7)
+#define V_SPC_CFG_SRC7(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC7)
+#define G_SPC_CFG_SRC7(x) _SB_GETVALUE(x, S_SPC_CFG_SRC7, M_SPC_CFG_SRC7)
/*
* System Performance Counter Control Register (Table 32)
* Register: PERF_CNT_CFG_1
* BCM1480 specific
*/
-#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0)
+#define M_BCM1480_SPC_CFG_CLEAR _SB_MAKEMASK1(0)
#define M_BCM1480_SPC_CFG_ENABLE _SB_MAKEMASK1(1)
#if SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_SPC_CFG_CLEAR M_BCM1480_SPC_CFG_CLEAR
@@ -280,12 +280,12 @@
* Registers: PERF_CNT_x
*/
-#define S_BCM1480_SPC_CNT_COUNT 0
-#define M_BCM1480_SPC_CNT_COUNT _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
-#define V_BCM1480_SPC_CNT_COUNT(x) _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
-#define G_BCM1480_SPC_CNT_COUNT(x) _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
+#define S_BCM1480_SPC_CNT_COUNT 0
+#define M_BCM1480_SPC_CNT_COUNT _SB_MAKEMASK(40, S_BCM1480_SPC_CNT_COUNT)
+#define V_BCM1480_SPC_CNT_COUNT(x) _SB_MAKEVALUE(x, S_BCM1480_SPC_CNT_COUNT)
+#define G_BCM1480_SPC_CNT_COUNT(x) _SB_GETVALUE(x, S_BCM1480_SPC_CNT_COUNT, M_BCM1480_SPC_CNT_COUNT)
-#define M_BCM1480_SPC_CNT_OFLOW _SB_MAKEMASK1(40)
+#define M_BCM1480_SPC_CNT_OFLOW _SB_MAKEMASK1(40)
/*
@@ -325,45 +325,45 @@
#define M_BCM1480_ATRAP_INDEX _SB_MAKEMASK(4, 0)
#define M_BCM1480_ATRAP_ADDRESS _SB_MAKEMASK(40, 0)
-#define S_BCM1480_ATRAP_CFG_CNT 0
-#define M_BCM1480_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
-#define V_BCM1480_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
-#define G_BCM1480_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
+#define S_BCM1480_ATRAP_CFG_CNT 0
+#define M_BCM1480_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_BCM1480_ATRAP_CFG_CNT)
+#define V_BCM1480_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CNT)
+#define G_BCM1480_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CNT, M_BCM1480_ATRAP_CFG_CNT)
#define M_BCM1480_ATRAP_CFG_WRITE _SB_MAKEMASK1(3)
-#define M_BCM1480_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
-#define M_BCM1480_ATRAP_CFG_INV _SB_MAKEMASK1(5)
+#define M_BCM1480_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
+#define M_BCM1480_ATRAP_CFG_INV _SB_MAKEMASK1(5)
#define M_BCM1480_ATRAP_CFG_USESRC _SB_MAKEMASK1(6)
#define M_BCM1480_ATRAP_CFG_SRCINV _SB_MAKEMASK1(7)
-#define S_BCM1480_ATRAP_CFG_AGENTID 8
-#define M_BCM1480_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
-#define V_BCM1480_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
-#define G_BCM1480_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
+#define S_BCM1480_ATRAP_CFG_AGENTID 8
+#define M_BCM1480_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_BCM1480_ATRAP_CFG_AGENTID)
+#define V_BCM1480_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID)
+#define G_BCM1480_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_AGENTID, M_BCM1480_ATRAP_CFG_AGENTID)
-#define K_BCM1480_BUS_AGENT_CPU0 0
-#define K_BCM1480_BUS_AGENT_CPU1 1
-#define K_BCM1480_BUS_AGENT_NC 2
-#define K_BCM1480_BUS_AGENT_IOB 3
-#define K_BCM1480_BUS_AGENT_SCD 4
-#define K_BCM1480_BUS_AGENT_L2C 6
-#define K_BCM1480_BUS_AGENT_MC 7
-#define K_BCM1480_BUS_AGENT_CPU2 8
-#define K_BCM1480_BUS_AGENT_CPU3 9
-#define K_BCM1480_BUS_AGENT_PM 10
+#define K_BCM1480_BUS_AGENT_CPU0 0
+#define K_BCM1480_BUS_AGENT_CPU1 1
+#define K_BCM1480_BUS_AGENT_NC 2
+#define K_BCM1480_BUS_AGENT_IOB 3
+#define K_BCM1480_BUS_AGENT_SCD 4
+#define K_BCM1480_BUS_AGENT_L2C 6
+#define K_BCM1480_BUS_AGENT_MC 7
+#define K_BCM1480_BUS_AGENT_CPU2 8
+#define K_BCM1480_BUS_AGENT_CPU3 9
+#define K_BCM1480_BUS_AGENT_PM 10
-#define S_BCM1480_ATRAP_CFG_CATTR 12
-#define M_BCM1480_ATRAP_CFG_CATTR _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
-#define V_BCM1480_ATRAP_CFG_CATTR(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
-#define G_BCM1480_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
+#define S_BCM1480_ATRAP_CFG_CATTR 12
+#define M_BCM1480_ATRAP_CFG_CATTR _SB_MAKEMASK(2, S_BCM1480_ATRAP_CFG_CATTR)
+#define V_BCM1480_ATRAP_CFG_CATTR(x) _SB_MAKEVALUE(x, S_BCM1480_ATRAP_CFG_CATTR)
+#define G_BCM1480_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_BCM1480_ATRAP_CFG_CATTR, M_BCM1480_ATRAP_CFG_CATTR)
#define K_BCM1480_ATRAP_CFG_CATTR_IGNORE 0
-#define K_BCM1480_ATRAP_CFG_CATTR_UNC 1
+#define K_BCM1480_ATRAP_CFG_CATTR_UNC 1
#define K_BCM1480_ATRAP_CFG_CATTR_NONCOH 2
#define K_BCM1480_ATRAP_CFG_CATTR_COHERENT 3
-#define M_BCM1480_ATRAP_CFG_CATTRINV _SB_MAKEMASK1(14)
+#define M_BCM1480_ATRAP_CFG_CATTRINV _SB_MAKEMASK1(14)
/*
@@ -381,10 +381,10 @@
#define M_BCM1480_SCD_TRSEQ_TID_MATCH_EN _SB_MAKEMASK1(25)
-#define S_BCM1480_SCD_TRSEQ_SWFUNC 26
-#define M_BCM1480_SCD_TRSEQ_SWFUNC _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
-#define V_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
-#define G_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
+#define S_BCM1480_SCD_TRSEQ_SWFUNC 26
+#define M_BCM1480_SCD_TRSEQ_SWFUNC _SB_MAKEMASK(2, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define V_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC)
+#define G_BCM1480_SCD_TRSEQ_SWFUNC(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRSEQ_SWFUNC, M_BCM1480_SCD_TRSEQ_SWFUNC)
/*
* Trace Control Register (Table 49)
@@ -394,13 +394,13 @@
* are defined below.
*/
-#define S_BCM1480_SCD_TRACE_CFG_MODE 16
-#define M_BCM1480_SCD_TRACE_CFG_MODE _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
-#define V_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
-#define G_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
+#define S_BCM1480_SCD_TRACE_CFG_MODE 16
+#define M_BCM1480_SCD_TRACE_CFG_MODE _SB_MAKEMASK(2, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define V_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_MAKEVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE)
+#define G_BCM1480_SCD_TRACE_CFG_MODE(x) _SB_GETVALUE(x, S_BCM1480_SCD_TRACE_CFG_MODE, M_BCM1480_SCD_TRACE_CFG_MODE)
#define K_BCM1480_SCD_TRACE_CFG_MODE_BLOCKERS 0
-#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1
+#define K_BCM1480_SCD_TRACE_CFG_MODE_BYTEEN_INT 1
#define K_BCM1480_SCD_TRACE_CFG_MODE_FLOW_ID 2
#endif /* _BCM1480_SCD_H */
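
The watchdog configuration constants touched above compose in the usual mask/value/code style. A minimal illustrative sketch, not part of the patch: it assumes the header below is on the include path, and the helper name is hypothetical.

#include <stdint.h>
#include <asm/sibyte/bcm1480_scd.h>

/* Enable the watchdog and direct its expiry at CPU0 only (reset-type code 3). */
static inline uint64_t bcm1480_wdog_cfg_cpu0(void)
{
	return M_BCM1480_SCD_WDOG_ENABLE |
	       V_BCM1480_SCD_WDOG_RESET_TYPE(K_BCM1480_SCD_WDOG_RESET_CPU0);
}
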
diff --git a/arch/mips/include/asm/sibyte/bigsur.h b/arch/mips/include/asm/sibyte/bigsur.h
index 2d1a26d3436..ae29dae4155 100644
--- a/arch/mips/include/asm/sibyte/bigsur.h
+++ b/arch/mips/include/asm/sibyte/bigsur.h
@@ -24,25 +24,25 @@
#ifdef CONFIG_SIBYTE_BIGSUR
#define SIBYTE_BOARD_NAME "BCM91x80A/B (BigSur)"
#define SIBYTE_HAVE_PCMCIA 1
-#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_HAVE_IDE 1
#endif
/* Generic bus chip selects */
-#define LEDS_CS 3
-#define LEDS_PHYS 0x100a0000
+#define LEDS_CS 3
+#define LEDS_PHYS 0x100a0000
#ifdef SIBYTE_HAVE_IDE
-#define IDE_CS 4
-#define IDE_PHYS 0x100b0000
-#define K_GPIO_GB_IDE 4
-#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define IDE_CS 4
+#define IDE_PHYS 0x100b0000
+#define K_GPIO_GB_IDE 4
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
#endif
#ifdef SIBYTE_HAVE_PCMCIA
-#define PCMCIA_CS 6
-#define PCMCIA_PHYS 0x11000000
+#define PCMCIA_CS 6
+#define PCMCIA_PHYS 0x11000000
#define K_GPIO_PC_READY 9
-#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
#endif
#endif /* __ASM_SIBYTE_BIGSUR_H */
diff --git a/arch/mips/include/asm/sibyte/carmel.h b/arch/mips/include/asm/sibyte/carmel.h
index 11cad71323e..793edba73aa 100644
--- a/arch/mips/include/asm/sibyte/carmel.h
+++ b/arch/mips/include/asm/sibyte/carmel.h
@@ -23,35 +23,35 @@
#define SIBYTE_BOARD_NAME "Carmel"
-#define GPIO_PHY_INTERRUPT 2
-#define GPIO_NONMASKABLE_INT 3
-#define GPIO_CF_INSERTED 6
-#define GPIO_MONTEREY_RESET 7
-#define GPIO_QUADUART_INT 8
-#define GPIO_CF_INT 9
-#define GPIO_FPGA_CCLK 10
-#define GPIO_FPGA_DOUT 11
-#define GPIO_FPGA_DIN 12
-#define GPIO_FPGA_PGM 13
-#define GPIO_FPGA_DONE 14
-#define GPIO_FPGA_INIT 15
+#define GPIO_PHY_INTERRUPT 2
+#define GPIO_NONMASKABLE_INT 3
+#define GPIO_CF_INSERTED 6
+#define GPIO_MONTEREY_RESET 7
+#define GPIO_QUADUART_INT 8
+#define GPIO_CF_INT 9
+#define GPIO_FPGA_CCLK 10
+#define GPIO_FPGA_DOUT 11
+#define GPIO_FPGA_DIN 12
+#define GPIO_FPGA_PGM 13
+#define GPIO_FPGA_DONE 14
+#define GPIO_FPGA_INIT 15
-#define LEDS_CS 2
-#define LEDS_PHYS 0x100C0000
-#define MLEDS_CS 3
-#define MLEDS_PHYS 0x100A0000
-#define UART_CS 4
-#define UART_PHYS 0x100D0000
-#define ARAVALI_CS 5
-#define ARAVALI_PHYS 0x11000000
-#define IDE_CS 6
-#define IDE_PHYS 0x100B0000
-#define ARAVALI2_CS 7
-#define ARAVALI2_PHYS 0x100E0000
+#define LEDS_CS 2
+#define LEDS_PHYS 0x100C0000
+#define MLEDS_CS 3
+#define MLEDS_PHYS 0x100A0000
+#define UART_CS 4
+#define UART_PHYS 0x100D0000
+#define ARAVALI_CS 5
+#define ARAVALI_PHYS 0x11000000
+#define IDE_CS 6
+#define IDE_PHYS 0x100B0000
+#define ARAVALI2_CS 7
+#define ARAVALI2_PHYS 0x100E0000
#if defined(CONFIG_SIBYTE_CARMEL)
-#define K_GPIO_GB_IDE 9
-#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define K_GPIO_GB_IDE 9
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250.h b/arch/mips/include/asm/sibyte/sb1250.h
index 80c1a052662..d45dff9753d 100644
--- a/arch/mips/include/asm/sibyte/sb1250.h
+++ b/arch/mips/include/asm/sibyte/sb1250.h
@@ -27,8 +27,8 @@
#define SB1250_NR_IRQS 64
-#define BCM1480_NR_IRQS 128
-#define BCM1480_NR_IRQS_HALF 64
+#define BCM1480_NR_IRQS 128
+#define BCM1480_NR_IRQS_HALF 64
#define SB1250_DUART_MINOR_BASE 64
diff --git a/arch/mips/include/asm/sibyte/sb1250_defs.h b/arch/mips/include/asm/sibyte/sb1250_defs.h
index 09365f9111f..4364eb8d22a 100644
--- a/arch/mips/include/asm/sibyte/sb1250_defs.h
+++ b/arch/mips/include/asm/sibyte/sb1250_defs.h
@@ -51,15 +51,15 @@
*
* Use like:
*
- * #define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_112x_PASS1
+ * #define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_112x_PASS1
*
* Generate defines only for that revision of chip.
*
- * #if SIBYTE_HDR_FEATURE(chip,pass)
+ * #if SIBYTE_HDR_FEATURE(chip,pass)
*
* True if header features for that revision or later of
- * that particular chip type are enabled in SIBYTE_HDR_FEATURES.
- * (Use this to bracket #defines for features present in a given
+ * that particular chip type are enabled in SIBYTE_HDR_FEATURES.
+ * (Use this to bracket #defines for features present in a given
* revision and later.)
*
* Note that there is no implied ordering between chip types.
@@ -69,12 +69,12 @@
* SIBYTE_HDR_FEATURE(112x, PASS1) is OK, but
* SIBYTE_HDR_FEATURE(1120, pass1) is not (for two reasons).
*
- * #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
+ * #if SIBYTE_HDR_FEATURE_UP_TO(chip,pass)
*
* Same as SIBYTE_HDR_FEATURE, but true for the named revision
* and earlier revisions of the named chip type.
*
- * #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
+ * #if SIBYTE_HDR_FEATURE_EXACT(chip,pass)
*
* Same as SIBYTE_HDR_FEATURE, but only true for the named
* revision of the named chip type. (Note that this CANNOT
@@ -82,7 +82,7 @@
* particular chip/revision. It will be true any time this
* chip/revision is included in SIBYTE_HDR_FEATURES.)
*
- * #if SIBYTE_HDR_FEATURE_CHIP(chip)
+ * #if SIBYTE_HDR_FEATURE_CHIP(chip)
*
* True if header features for (any revision of) that chip type
* are enabled in SIBYTE_HDR_FEATURES. (Use this to bracket
@@ -95,47 +95,47 @@
* ordering, so be careful when adding support for new minor revs.
********************************************************************* */
-#define SIBYTE_HDR_FMASK_1250_ALL 0x000000ff
-#define SIBYTE_HDR_FMASK_1250_PASS1 0x00000001
-#define SIBYTE_HDR_FMASK_1250_PASS2 0x00000002
-#define SIBYTE_HDR_FMASK_1250_PASS3 0x00000004
+#define SIBYTE_HDR_FMASK_1250_ALL 0x000000ff
+#define SIBYTE_HDR_FMASK_1250_PASS1 0x00000001
+#define SIBYTE_HDR_FMASK_1250_PASS2 0x00000002
+#define SIBYTE_HDR_FMASK_1250_PASS3 0x00000004
-#define SIBYTE_HDR_FMASK_112x_ALL 0x00000f00
-#define SIBYTE_HDR_FMASK_112x_PASS1 0x00000100
+#define SIBYTE_HDR_FMASK_112x_ALL 0x00000f00
+#define SIBYTE_HDR_FMASK_112x_PASS1 0x00000100
#define SIBYTE_HDR_FMASK_1480_ALL 0x0000f000
#define SIBYTE_HDR_FMASK_1480_PASS1 0x00001000
#define SIBYTE_HDR_FMASK_1480_PASS2 0x00002000
-/* Bit mask for chip/revision. (use _ALL for all revisions of a chip). */
-#define SIBYTE_HDR_FMASK(chip, pass) \
+/* Bit mask for chip/revision. (use _ALL for all revisions of a chip). */
+#define SIBYTE_HDR_FMASK(chip, pass) \
(SIBYTE_HDR_FMASK_ ## chip ## _ ## pass)
-#define SIBYTE_HDR_FMASK_ALLREVS(chip) \
+#define SIBYTE_HDR_FMASK_ALLREVS(chip) \
(SIBYTE_HDR_FMASK_ ## chip ## _ALL)
/* Default constant value for all chips, all revisions */
-#define SIBYTE_HDR_FMASK_ALL \
+#define SIBYTE_HDR_FMASK_ALL \
(SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL \
| SIBYTE_HDR_FMASK_1480_ALL)
/* This one is used for the "original" BCM1250/BCM112x chips. We use this
to weed out constants and macros that do not exist on later chips like
- the BCM1480 */
+ the BCM1480 */
#define SIBYTE_HDR_FMASK_1250_112x_ALL \
(SIBYTE_HDR_FMASK_1250_ALL | SIBYTE_HDR_FMASK_112x_ALL)
#define SIBYTE_HDR_FMASK_1250_112x SIBYTE_HDR_FMASK_1250_112x_ALL
#ifndef SIBYTE_HDR_FEATURES
-#define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_ALL
+#define SIBYTE_HDR_FEATURES SIBYTE_HDR_FMASK_ALL
#endif
/* Bit mask for revisions of chip exclusively before the named revision. */
-#define SIBYTE_HDR_FMASK_BEFORE(chip, pass) \
+#define SIBYTE_HDR_FMASK_BEFORE(chip, pass) \
((SIBYTE_HDR_FMASK(chip, pass) - 1) & SIBYTE_HDR_FMASK_ALLREVS(chip))
-/* Bit mask for revisions of chip exclusively after the named revision. */
-#define SIBYTE_HDR_FMASK_AFTER(chip, pass) \
+/* Bit mask for revisions of chip exclusively after the named revision. */
+#define SIBYTE_HDR_FMASK_AFTER(chip, pass) \
(~(SIBYTE_HDR_FMASK(chip, pass) \
| (SIBYTE_HDR_FMASK(chip, pass) - 1)) & SIBYTE_HDR_FMASK_ALLREVS(chip))
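
As the usage comment above describes, SIBYTE_HDR_FEATURES is defined before including this header, and the SIBYTE_HDR_FEATURE*() predicates then bracket chip- or revision-specific definitions. A minimal sketch of that pattern follows; the bracketed constant is hypothetical and not part of the patch, and it assumes the _SB_MAKEMASK1() helper provided by these headers.

/* Generate defines only for BCM1480 revisions. */
#define SIBYTE_HDR_FEATURES	SIBYTE_HDR_FMASK_1480_ALL
#include <asm/sibyte/sb1250_defs.h>

/* Present on 1250 pass3 and later, and on any 1480 revision. */
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_EXAMPLE_LATER_ONLY	_SB_MAKEMASK1(7)
#endif
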
@@ -168,38 +168,38 @@
/* *********************************************************************
* Naming schemes for constants in these files:
*
- * M_xxx MASK constant (identifies bits in a register).
- * For multi-bit fields, all bits in the field will
- * be set.
+ * M_xxx MASK constant (identifies bits in a register).
+ * For multi-bit fields, all bits in the field will
+ * be set.
*
- * K_xxx "Code" constant (value for data in a multi-bit
- * field). The value is right justified.
+ * K_xxx "Code" constant (value for data in a multi-bit
+ * field). The value is right justified.
*
- * V_xxx "Value" constant. This is the same as the
- * corresponding "K_xxx" constant, except it is
- * shifted to the correct position in the register.
+ * V_xxx "Value" constant. This is the same as the
+ * corresponding "K_xxx" constant, except it is
+ * shifted to the correct position in the register.
*
- * S_xxx SHIFT constant. This is the number of bits that
- * a field value (code) needs to be shifted
- * (towards the left) to put the value in the right
- * position for the register.
+ * S_xxx SHIFT constant. This is the number of bits that
+ * a field value (code) needs to be shifted
+ * (towards the left) to put the value in the right
+ * position for the register.
*
- * A_xxx ADDRESS constant. This will be a physical
- * address. Use the PHYS_TO_K1 macro to generate
- * a K1SEG address.
+ * A_xxx ADDRESS constant. This will be a physical
+ * address. Use the PHYS_TO_K1 macro to generate
+ * a K1SEG address.
*
- * R_xxx RELATIVE offset constant. This is an offset from
- * an A_xxx constant (usually the first register in
- * a group).
+ * R_xxx RELATIVE offset constant. This is an offset from
+ * an A_xxx constant (usually the first register in
+ * a group).
*
- * G_xxx(X) GET value. This macro obtains a multi-bit field
- * from a register, masks it, and shifts it to
- * the bottom of the register (retrieving a K_xxx
- * value, for example).
+ * G_xxx(X) GET value. This macro obtains a multi-bit field
+ * from a register, masks it, and shifts it to
+ * the bottom of the register (retrieving a K_xxx
+ * value, for example).
*
- * V_xxx(X) VALUE. This macro computes the value of a
- * K_xxx constant shifted to the correct position
- * in the register.
+ * V_xxx(X) VALUE. This macro computes the value of a
+ * K_xxx constant shifted to the correct position
+ * in the register.
********************************************************************* */
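
The naming scheme documented above is mechanical: S_xxx is the shift, M_xxx the in-place mask, V_xxx(x) places a code into its field, and G_xxx(x) masks and shifts it back out. A minimal sketch using the BCM1480 PLL-divider field that appears earlier in this diff; illustrative only, not part of the patch, and the helper names are made up.

#include <stdint.h>
#include <asm/sibyte/bcm1480_scd.h>

/* Replace the 5-bit PLL divide code in a system-config value. */
static inline uint64_t set_pll_div(uint64_t syscfg, unsigned int div)
{
	syscfg &= ~M_BCM1480_SYS_PLL_DIV;	/* clear the field via its mask */
	syscfg |= V_BCM1480_SYS_PLL_DIV(div);	/* shift the new code into place */
	return syscfg;
}

/* Read the same field back out, masked and shifted down. */
static inline unsigned int get_pll_div(uint64_t syscfg)
{
	return G_BCM1480_SYS_PLL_DIV(syscfg);
}
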
diff --git a/arch/mips/include/asm/sibyte/sb1250_dma.h b/arch/mips/include/asm/sibyte/sb1250_dma.h
index 6c44dfb5287..ea81713b78d 100644
--- a/arch/mips/include/asm/sibyte/sb1250_dma.h
+++ b/arch/mips/include/asm/sibyte/sb1250_dma.h
@@ -51,15 +51,15 @@
*/
-#define M_DMA_DROP _SB_MAKEMASK1(0)
+#define M_DMA_DROP _SB_MAKEMASK1(0)
-#define M_DMA_CHAIN_SEL _SB_MAKEMASK1(1)
-#define M_DMA_RESERVED1 _SB_MAKEMASK1(2)
+#define M_DMA_CHAIN_SEL _SB_MAKEMASK1(1)
+#define M_DMA_RESERVED1 _SB_MAKEMASK1(2)
#define S_DMA_DESC_TYPE _SB_MAKE64(1)
#define M_DMA_DESC_TYPE _SB_MAKEMASK(2, S_DMA_DESC_TYPE)
-#define V_DMA_DESC_TYPE(x) _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
-#define G_DMA_DESC_TYPE(x) _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
+#define V_DMA_DESC_TYPE(x) _SB_MAKEVALUE(x, S_DMA_DESC_TYPE)
+#define G_DMA_DESC_TYPE(x) _SB_GETVALUE(x, S_DMA_DESC_TYPE, M_DMA_DESC_TYPE)
#define K_DMA_DESC_TYPE_RING_AL 0
#define K_DMA_DESC_TYPE_CHAIN_AL 1
@@ -69,31 +69,31 @@
#define K_DMA_DESC_TYPE_RING_UAL_RMW 3
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DMA_EOP_INT_EN _SB_MAKEMASK1(3)
-#define M_DMA_HWM_INT_EN _SB_MAKEMASK1(4)
-#define M_DMA_LWM_INT_EN _SB_MAKEMASK1(5)
-#define M_DMA_TBX_EN _SB_MAKEMASK1(6)
-#define M_DMA_TDX_EN _SB_MAKEMASK1(7)
+#define M_DMA_EOP_INT_EN _SB_MAKEMASK1(3)
+#define M_DMA_HWM_INT_EN _SB_MAKEMASK1(4)
+#define M_DMA_LWM_INT_EN _SB_MAKEMASK1(5)
+#define M_DMA_TBX_EN _SB_MAKEMASK1(6)
+#define M_DMA_TDX_EN _SB_MAKEMASK1(7)
-#define S_DMA_INT_PKTCNT _SB_MAKE64(8)
-#define M_DMA_INT_PKTCNT _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
-#define V_DMA_INT_PKTCNT(x) _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
-#define G_DMA_INT_PKTCNT(x) _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
+#define S_DMA_INT_PKTCNT _SB_MAKE64(8)
+#define M_DMA_INT_PKTCNT _SB_MAKEMASK(8, S_DMA_INT_PKTCNT)
+#define V_DMA_INT_PKTCNT(x) _SB_MAKEVALUE(x, S_DMA_INT_PKTCNT)
+#define G_DMA_INT_PKTCNT(x) _SB_GETVALUE(x, S_DMA_INT_PKTCNT, M_DMA_INT_PKTCNT)
-#define S_DMA_RINGSZ _SB_MAKE64(16)
-#define M_DMA_RINGSZ _SB_MAKEMASK(16, S_DMA_RINGSZ)
-#define V_DMA_RINGSZ(x) _SB_MAKEVALUE(x, S_DMA_RINGSZ)
-#define G_DMA_RINGSZ(x) _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
+#define S_DMA_RINGSZ _SB_MAKE64(16)
+#define M_DMA_RINGSZ _SB_MAKEMASK(16, S_DMA_RINGSZ)
+#define V_DMA_RINGSZ(x) _SB_MAKEVALUE(x, S_DMA_RINGSZ)
+#define G_DMA_RINGSZ(x) _SB_GETVALUE(x, S_DMA_RINGSZ, M_DMA_RINGSZ)
-#define S_DMA_HIGH_WATERMARK _SB_MAKE64(32)
-#define M_DMA_HIGH_WATERMARK _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
-#define V_DMA_HIGH_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
-#define G_DMA_HIGH_WATERMARK(x) _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
+#define S_DMA_HIGH_WATERMARK _SB_MAKE64(32)
+#define M_DMA_HIGH_WATERMARK _SB_MAKEMASK(16, S_DMA_HIGH_WATERMARK)
+#define V_DMA_HIGH_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_HIGH_WATERMARK)
+#define G_DMA_HIGH_WATERMARK(x) _SB_GETVALUE(x, S_DMA_HIGH_WATERMARK, M_DMA_HIGH_WATERMARK)
-#define S_DMA_LOW_WATERMARK _SB_MAKE64(48)
-#define M_DMA_LOW_WATERMARK _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
-#define V_DMA_LOW_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
-#define G_DMA_LOW_WATERMARK(x) _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
+#define S_DMA_LOW_WATERMARK _SB_MAKE64(48)
+#define M_DMA_LOW_WATERMARK _SB_MAKEMASK(16, S_DMA_LOW_WATERMARK)
+#define V_DMA_LOW_WATERMARK(x) _SB_MAKEVALUE(x, S_DMA_LOW_WATERMARK)
+#define G_DMA_LOW_WATERMARK(x) _SB_GETVALUE(x, S_DMA_LOW_WATERMARK, M_DMA_LOW_WATERMARK)
/*
* Ethernet and Serial DMA Configuration Register 1 (Table 7-5)
@@ -103,11 +103,11 @@
* Registers: DMA_CONFIG1_SER_x_TX
*/
-#define M_DMA_HDR_CF_EN _SB_MAKEMASK1(0)
-#define M_DMA_ASIC_XFR_EN _SB_MAKEMASK1(1)
-#define M_DMA_PRE_ADDR_EN _SB_MAKEMASK1(2)
-#define M_DMA_FLOW_CTL_EN _SB_MAKEMASK1(3)
-#define M_DMA_NO_DSCR_UPDT _SB_MAKEMASK1(4)
+#define M_DMA_HDR_CF_EN _SB_MAKEMASK1(0)
+#define M_DMA_ASIC_XFR_EN _SB_MAKEMASK1(1)
+#define M_DMA_PRE_ADDR_EN _SB_MAKEMASK1(2)
+#define M_DMA_FLOW_CTL_EN _SB_MAKEMASK1(3)
+#define M_DMA_NO_DSCR_UPDT _SB_MAKEMASK1(4)
#define M_DMA_L2CA _SB_MAKEMASK1(5)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -116,37 +116,37 @@
#define M_DMA_TX_FC_PAUSE_EN _SB_MAKEMASK1(7)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DMA_MBZ1 _SB_MAKEMASK(6, 15)
+#define M_DMA_MBZ1 _SB_MAKEMASK(6, 15)
-#define S_DMA_HDR_SIZE _SB_MAKE64(21)
-#define M_DMA_HDR_SIZE _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
-#define V_DMA_HDR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
-#define G_DMA_HDR_SIZE(x) _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
+#define S_DMA_HDR_SIZE _SB_MAKE64(21)
+#define M_DMA_HDR_SIZE _SB_MAKEMASK(9, S_DMA_HDR_SIZE)
+#define V_DMA_HDR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_HDR_SIZE)
+#define G_DMA_HDR_SIZE(x) _SB_GETVALUE(x, S_DMA_HDR_SIZE, M_DMA_HDR_SIZE)
-#define M_DMA_MBZ2 _SB_MAKEMASK(5, 32)
+#define M_DMA_MBZ2 _SB_MAKEMASK(5, 32)
-#define S_DMA_ASICXFR_SIZE _SB_MAKE64(37)
-#define M_DMA_ASICXFR_SIZE _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
-#define V_DMA_ASICXFR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
-#define G_DMA_ASICXFR_SIZE(x) _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
+#define S_DMA_ASICXFR_SIZE _SB_MAKE64(37)
+#define M_DMA_ASICXFR_SIZE _SB_MAKEMASK(9, S_DMA_ASICXFR_SIZE)
+#define V_DMA_ASICXFR_SIZE(x) _SB_MAKEVALUE(x, S_DMA_ASICXFR_SIZE)
+#define G_DMA_ASICXFR_SIZE(x) _SB_GETVALUE(x, S_DMA_ASICXFR_SIZE, M_DMA_ASICXFR_SIZE)
-#define S_DMA_INT_TIMEOUT _SB_MAKE64(48)
-#define M_DMA_INT_TIMEOUT _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
-#define V_DMA_INT_TIMEOUT(x) _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
-#define G_DMA_INT_TIMEOUT(x) _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
+#define S_DMA_INT_TIMEOUT _SB_MAKE64(48)
+#define M_DMA_INT_TIMEOUT _SB_MAKEMASK(16, S_DMA_INT_TIMEOUT)
+#define V_DMA_INT_TIMEOUT(x) _SB_MAKEVALUE(x, S_DMA_INT_TIMEOUT)
+#define G_DMA_INT_TIMEOUT(x) _SB_GETVALUE(x, S_DMA_INT_TIMEOUT, M_DMA_INT_TIMEOUT)
/*
* Ethernet and Serial DMA Descriptor base address (Table 7-6)
*/
-#define M_DMA_DSCRBASE_MBZ _SB_MAKEMASK(4, 0)
+#define M_DMA_DSCRBASE_MBZ _SB_MAKEMASK(4, 0)
/*
* ASIC Mode Base Address (Table 7-7)
*/
-#define M_DMA_ASIC_BASE_MBZ _SB_MAKEMASK(20, 0)
+#define M_DMA_ASIC_BASE_MBZ _SB_MAKEMASK(20, 0)
/*
* DMA Descriptor Count Registers (Table 7-8)
@@ -159,10 +159,10 @@
* Current Descriptor Address Register (Table 7-11)
*/
-#define S_DMA_CURDSCR_ADDR _SB_MAKE64(0)
-#define M_DMA_CURDSCR_ADDR _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
-#define S_DMA_CURDSCR_COUNT _SB_MAKE64(40)
-#define M_DMA_CURDSCR_COUNT _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
+#define S_DMA_CURDSCR_ADDR _SB_MAKE64(0)
+#define M_DMA_CURDSCR_ADDR _SB_MAKEMASK(40, S_DMA_CURDSCR_ADDR)
+#define S_DMA_CURDSCR_COUNT _SB_MAKE64(40)
+#define M_DMA_CURDSCR_COUNT _SB_MAKEMASK(16, S_DMA_CURDSCR_COUNT)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_DMA_TX_CH_PAUSE_ON _SB_MAKEMASK1(56)
@@ -172,13 +172,13 @@
* Receive Packet Drop Registers
*/
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_OODLOST_RX _SB_MAKE64(0)
-#define M_DMA_OODLOST_RX _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
-#define G_DMA_OODLOST_RX(x) _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
+#define S_DMA_OODLOST_RX _SB_MAKE64(0)
+#define M_DMA_OODLOST_RX _SB_MAKEMASK(16, S_DMA_OODLOST_RX)
+#define G_DMA_OODLOST_RX(x) _SB_GETVALUE(x, S_DMA_OODLOST_RX, M_DMA_OODLOST_RX)
-#define S_DMA_EOP_COUNT_RX _SB_MAKE64(16)
-#define M_DMA_EOP_COUNT_RX _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
-#define G_DMA_EOP_COUNT_RX(x) _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
+#define S_DMA_EOP_COUNT_RX _SB_MAKE64(16)
+#define M_DMA_EOP_COUNT_RX _SB_MAKEMASK(8, S_DMA_EOP_COUNT_RX)
+#define G_DMA_EOP_COUNT_RX(x) _SB_GETVALUE(x, S_DMA_EOP_COUNT_RX, M_DMA_EOP_COUNT_RX)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
/* *********************************************************************
@@ -189,26 +189,26 @@
* Descriptor doubleword "A" (Table 7-12)
*/
-#define S_DMA_DSCRA_OFFSET _SB_MAKE64(0)
-#define M_DMA_DSCRA_OFFSET _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
-#define V_DMA_DSCRA_OFFSET(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
-#define G_DMA_DSCRA_OFFSET(x) _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
+#define S_DMA_DSCRA_OFFSET _SB_MAKE64(0)
+#define M_DMA_DSCRA_OFFSET _SB_MAKEMASK(5, S_DMA_DSCRA_OFFSET)
+#define V_DMA_DSCRA_OFFSET(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_OFFSET)
+#define G_DMA_DSCRA_OFFSET(x) _SB_GETVALUE(x, S_DMA_DSCRA_OFFSET, M_DMA_DSCRA_OFFSET)
/* Note: Don't shift the address over, just mask it with the mask below */
-#define S_DMA_DSCRA_A_ADDR _SB_MAKE64(5)
-#define M_DMA_DSCRA_A_ADDR _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
+#define S_DMA_DSCRA_A_ADDR _SB_MAKE64(5)
+#define M_DMA_DSCRA_A_ADDR _SB_MAKEMASK(35, S_DMA_DSCRA_A_ADDR)
#define M_DMA_DSCRA_A_ADDR_OFFSET (M_DMA_DSCRA_OFFSET | M_DMA_DSCRA_A_ADDR)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_DSCRA_A_ADDR_UA _SB_MAKE64(0)
-#define M_DMA_DSCRA_A_ADDR_UA _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
+#define S_DMA_DSCRA_A_ADDR_UA _SB_MAKE64(0)
+#define M_DMA_DSCRA_A_ADDR_UA _SB_MAKEMASK(40, S_DMA_DSCRA_A_ADDR_UA)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_DMA_DSCRA_A_SIZE _SB_MAKE64(40)
-#define M_DMA_DSCRA_A_SIZE _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
-#define V_DMA_DSCRA_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
-#define G_DMA_DSCRA_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
+#define S_DMA_DSCRA_A_SIZE _SB_MAKE64(40)
+#define M_DMA_DSCRA_A_SIZE _SB_MAKEMASK(9, S_DMA_DSCRA_A_SIZE)
+#define V_DMA_DSCRA_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_A_SIZE)
+#define G_DMA_DSCRA_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRA_A_SIZE, M_DMA_DSCRA_A_SIZE)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define S_DMA_DSCRA_DSCR_CNT _SB_MAKE64(40)
@@ -216,43 +216,43 @@
#define G_DMA_DSCRA_DSCR_CNT(x) _SB_GETVALUE(x, S_DMA_DSCRA_DSCR_CNT, M_DMA_DSCRA_DSCR_CNT)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DMA_DSCRA_INTERRUPT _SB_MAKEMASK1(49)
+#define M_DMA_DSCRA_INTERRUPT _SB_MAKEMASK1(49)
#define M_DMA_DSCRA_OFFSETB _SB_MAKEMASK1(50)
-#define S_DMA_DSCRA_STATUS _SB_MAKE64(51)
-#define M_DMA_DSCRA_STATUS _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
-#define V_DMA_DSCRA_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
-#define G_DMA_DSCRA_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
+#define S_DMA_DSCRA_STATUS _SB_MAKE64(51)
+#define M_DMA_DSCRA_STATUS _SB_MAKEMASK(13, S_DMA_DSCRA_STATUS)
+#define V_DMA_DSCRA_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRA_STATUS)
+#define G_DMA_DSCRA_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRA_STATUS, M_DMA_DSCRA_STATUS)
/*
* Descriptor doubleword "B" (Table 7-13)
*/
-#define S_DMA_DSCRB_OPTIONS _SB_MAKE64(0)
-#define M_DMA_DSCRB_OPTIONS _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
-#define V_DMA_DSCRB_OPTIONS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
-#define G_DMA_DSCRB_OPTIONS(x) _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
+#define S_DMA_DSCRB_OPTIONS _SB_MAKE64(0)
+#define M_DMA_DSCRB_OPTIONS _SB_MAKEMASK(4, S_DMA_DSCRB_OPTIONS)
+#define V_DMA_DSCRB_OPTIONS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_OPTIONS)
+#define G_DMA_DSCRB_OPTIONS(x) _SB_GETVALUE(x, S_DMA_DSCRB_OPTIONS, M_DMA_DSCRB_OPTIONS)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_DMA_DSCRB_A_SIZE _SB_MAKE64(8)
-#define M_DMA_DSCRB_A_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
-#define V_DMA_DSCRB_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
-#define G_DMA_DSCRB_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
+#define S_DMA_DSCRB_A_SIZE _SB_MAKE64(8)
+#define M_DMA_DSCRB_A_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_A_SIZE)
+#define V_DMA_DSCRB_A_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_A_SIZE)
+#define G_DMA_DSCRB_A_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_A_SIZE, M_DMA_DSCRB_A_SIZE)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define R_DMA_DSCRB_ADDR _SB_MAKE64(0x10)
+#define R_DMA_DSCRB_ADDR _SB_MAKE64(0x10)
/* Note: Don't shift the address over, just mask it with the mask below */
-#define S_DMA_DSCRB_B_ADDR _SB_MAKE64(5)
-#define M_DMA_DSCRB_B_ADDR _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
+#define S_DMA_DSCRB_B_ADDR _SB_MAKE64(5)
+#define M_DMA_DSCRB_B_ADDR _SB_MAKEMASK(35, S_DMA_DSCRB_B_ADDR)
-#define S_DMA_DSCRB_B_SIZE _SB_MAKE64(40)
-#define M_DMA_DSCRB_B_SIZE _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
-#define V_DMA_DSCRB_B_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
-#define G_DMA_DSCRB_B_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
+#define S_DMA_DSCRB_B_SIZE _SB_MAKE64(40)
+#define M_DMA_DSCRB_B_SIZE _SB_MAKEMASK(9, S_DMA_DSCRB_B_SIZE)
+#define V_DMA_DSCRB_B_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_B_SIZE)
+#define G_DMA_DSCRB_B_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_B_SIZE, M_DMA_DSCRB_B_SIZE)
-#define M_DMA_DSCRB_B_VALID _SB_MAKEMASK1(49)
+#define M_DMA_DSCRB_B_VALID _SB_MAKEMASK1(49)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define S_DMA_DSCRB_PKT_SIZE_MSB _SB_MAKE64(48)
@@ -261,24 +261,24 @@
#define G_DMA_DSCRB_PKT_SIZE_MSB(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE_MSB, M_DMA_DSCRB_PKT_SIZE_MSB)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_DMA_DSCRB_PKT_SIZE _SB_MAKE64(50)
-#define M_DMA_DSCRB_PKT_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
-#define V_DMA_DSCRB_PKT_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
-#define G_DMA_DSCRB_PKT_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
+#define S_DMA_DSCRB_PKT_SIZE _SB_MAKE64(50)
+#define M_DMA_DSCRB_PKT_SIZE _SB_MAKEMASK(14, S_DMA_DSCRB_PKT_SIZE)
+#define V_DMA_DSCRB_PKT_SIZE(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_PKT_SIZE)
+#define G_DMA_DSCRB_PKT_SIZE(x) _SB_GETVALUE(x, S_DMA_DSCRB_PKT_SIZE, M_DMA_DSCRB_PKT_SIZE)
/*
* from pass2 some bits in dscr_b are also used for rx status
*/
-#define S_DMA_DSCRB_STATUS _SB_MAKE64(0)
-#define M_DMA_DSCRB_STATUS _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
-#define V_DMA_DSCRB_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
-#define G_DMA_DSCRB_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
+#define S_DMA_DSCRB_STATUS _SB_MAKE64(0)
+#define M_DMA_DSCRB_STATUS _SB_MAKEMASK(1, S_DMA_DSCRB_STATUS)
+#define V_DMA_DSCRB_STATUS(x) _SB_MAKEVALUE(x, S_DMA_DSCRB_STATUS)
+#define G_DMA_DSCRB_STATUS(x) _SB_GETVALUE(x, S_DMA_DSCRB_STATUS, M_DMA_DSCRB_STATUS)
/*
* Ethernet Descriptor Status Bits (Table 7-15)
*/
-#define M_DMA_ETHRX_BADIP4CS _SB_MAKEMASK1(51)
+#define M_DMA_ETHRX_BADIP4CS _SB_MAKEMASK1(51)
#define M_DMA_ETHRX_DSCRERR _SB_MAKEMASK1(52)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -292,70 +292,70 @@
#define M_DMA_ETH_CRC_FLAG _SB_MAKEMASK1(2)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_DMA_ETHRX_RXCH 53
-#define M_DMA_ETHRX_RXCH _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
-#define V_DMA_ETHRX_RXCH(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
-#define G_DMA_ETHRX_RXCH(x) _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
+#define S_DMA_ETHRX_RXCH 53
+#define M_DMA_ETHRX_RXCH _SB_MAKEMASK(2, S_DMA_ETHRX_RXCH)
+#define V_DMA_ETHRX_RXCH(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_RXCH)
+#define G_DMA_ETHRX_RXCH(x) _SB_GETVALUE(x, S_DMA_ETHRX_RXCH, M_DMA_ETHRX_RXCH)
-#define S_DMA_ETHRX_PKTTYPE 55
-#define M_DMA_ETHRX_PKTTYPE _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
-#define V_DMA_ETHRX_PKTTYPE(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
-#define G_DMA_ETHRX_PKTTYPE(x) _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
+#define S_DMA_ETHRX_PKTTYPE 55
+#define M_DMA_ETHRX_PKTTYPE _SB_MAKEMASK(3, S_DMA_ETHRX_PKTTYPE)
+#define V_DMA_ETHRX_PKTTYPE(x) _SB_MAKEVALUE(x, S_DMA_ETHRX_PKTTYPE)
+#define G_DMA_ETHRX_PKTTYPE(x) _SB_GETVALUE(x, S_DMA_ETHRX_PKTTYPE, M_DMA_ETHRX_PKTTYPE)
#define K_DMA_ETHRX_PKTTYPE_IPV4 0
#define K_DMA_ETHRX_PKTTYPE_ARPV4 1
-#define K_DMA_ETHRX_PKTTYPE_802 2
+#define K_DMA_ETHRX_PKTTYPE_802 2
#define K_DMA_ETHRX_PKTTYPE_OTHER 3
#define K_DMA_ETHRX_PKTTYPE_USER0 4
#define K_DMA_ETHRX_PKTTYPE_USER1 5
#define K_DMA_ETHRX_PKTTYPE_USER2 6
#define K_DMA_ETHRX_PKTTYPE_USER3 7
-#define M_DMA_ETHRX_MATCH_HASH _SB_MAKEMASK1(58)
-#define M_DMA_ETHRX_MATCH_EXACT _SB_MAKEMASK1(59)
-#define M_DMA_ETHRX_BCAST _SB_MAKEMASK1(60)
-#define M_DMA_ETHRX_MCAST _SB_MAKEMASK1(61)
-#define M_DMA_ETHRX_BAD _SB_MAKEMASK1(62)
-#define M_DMA_ETHRX_SOP _SB_MAKEMASK1(63)
+#define M_DMA_ETHRX_MATCH_HASH _SB_MAKEMASK1(58)
+#define M_DMA_ETHRX_MATCH_EXACT _SB_MAKEMASK1(59)
+#define M_DMA_ETHRX_BCAST _SB_MAKEMASK1(60)
+#define M_DMA_ETHRX_MCAST _SB_MAKEMASK1(61)
+#define M_DMA_ETHRX_BAD _SB_MAKEMASK1(62)
+#define M_DMA_ETHRX_SOP _SB_MAKEMASK1(63)
/*
* Ethernet Transmit Status Bits (Table 7-16)
*/
-#define M_DMA_ETHTX_SOP _SB_MAKEMASK1(63)
+#define M_DMA_ETHTX_SOP _SB_MAKEMASK1(63)
/*
* Ethernet Transmit Options (Table 7-17)
*/
-#define K_DMA_ETHTX_NOTSOP _SB_MAKE64(0x00)
-#define K_DMA_ETHTX_APPENDCRC _SB_MAKE64(0x01)
-#define K_DMA_ETHTX_REPLACECRC _SB_MAKE64(0x02)
+#define K_DMA_ETHTX_NOTSOP _SB_MAKE64(0x00)
+#define K_DMA_ETHTX_APPENDCRC _SB_MAKE64(0x01)
+#define K_DMA_ETHTX_REPLACECRC _SB_MAKE64(0x02)
#define K_DMA_ETHTX_APPENDCRC_APPENDPAD _SB_MAKE64(0x03)
#define K_DMA_ETHTX_APPENDVLAN_REPLACECRC _SB_MAKE64(0x04)
#define K_DMA_ETHTX_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x05)
#define K_DMA_ETHTX_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x6)
-#define K_DMA_ETHTX_NOMODS _SB_MAKE64(0x07)
-#define K_DMA_ETHTX_RESERVED1 _SB_MAKE64(0x08)
+#define K_DMA_ETHTX_NOMODS _SB_MAKE64(0x07)
+#define K_DMA_ETHTX_RESERVED1 _SB_MAKE64(0x08)
#define K_DMA_ETHTX_REPLACESADDR_APPENDCRC _SB_MAKE64(0x09)
#define K_DMA_ETHTX_REPLACESADDR_REPLACECRC _SB_MAKE64(0x0A)
#define K_DMA_ETHTX_REPLACESADDR_APPENDCRC_APPENDPAD _SB_MAKE64(0x0B)
#define K_DMA_ETHTX_REPLACESADDR_APPENDVLAN_REPLACECRC _SB_MAKE64(0x0C)
#define K_DMA_ETHTX_REPLACESADDR_REMOVEVLAN_REPLACECRC _SB_MAKE64(0x0D)
#define K_DMA_ETHTX_REPLACESADDR_REPLACEVLAN_REPLACECRC _SB_MAKE64(0x0E)
-#define K_DMA_ETHTX_RESERVED2 _SB_MAKE64(0x0F)
+#define K_DMA_ETHTX_RESERVED2 _SB_MAKE64(0x0F)
/*
* Serial Receive Options (Table 7-18)
*/
-#define M_DMA_SERRX_CRC_ERROR _SB_MAKEMASK1(56)
-#define M_DMA_SERRX_ABORT _SB_MAKEMASK1(57)
-#define M_DMA_SERRX_OCTET_ERROR _SB_MAKEMASK1(58)
+#define M_DMA_SERRX_CRC_ERROR _SB_MAKEMASK1(56)
+#define M_DMA_SERRX_ABORT _SB_MAKEMASK1(57)
+#define M_DMA_SERRX_OCTET_ERROR _SB_MAKEMASK1(58)
#define M_DMA_SERRX_LONGFRAME_ERROR _SB_MAKEMASK1(59)
#define M_DMA_SERRX_SHORTFRAME_ERROR _SB_MAKEMASK1(60)
#define M_DMA_SERRX_OVERRUN_ERROR _SB_MAKEMASK1(61)
-#define M_DMA_SERRX_GOOD _SB_MAKEMASK1(62)
-#define M_DMA_SERRX_SOP _SB_MAKEMASK1(63)
+#define M_DMA_SERRX_GOOD _SB_MAKEMASK1(62)
+#define M_DMA_SERRX_SOP _SB_MAKEMASK1(63)
/*
* Serial Transmit Status Bits (Table 7-20)
@@ -367,10 +367,10 @@
* Serial Transmit Options (Table 7-21)
*/
-#define K_DMA_SERTX_RESERVED _SB_MAKEMASK1(0)
-#define K_DMA_SERTX_APPENDCRC _SB_MAKEMASK1(1)
-#define K_DMA_SERTX_APPENDPAD _SB_MAKEMASK1(2)
-#define K_DMA_SERTX_ABORT _SB_MAKEMASK1(3)
+#define K_DMA_SERTX_RESERVED _SB_MAKEMASK1(0)
+#define K_DMA_SERTX_APPENDCRC _SB_MAKEMASK1(1)
+#define K_DMA_SERTX_APPENDPAD _SB_MAKEMASK1(2)
+#define K_DMA_SERTX_ABORT _SB_MAKEMASK1(3)
/* *********************************************************************
@@ -385,19 +385,19 @@
* Register: DM_DSCR_BASE_3
*/
-#define M_DM_DSCR_BASE_MBZ _SB_MAKEMASK(4, 0)
+#define M_DM_DSCR_BASE_MBZ _SB_MAKEMASK(4, 0)
/* Note: Just mask the base address and then OR it in. */
-#define S_DM_DSCR_BASE_ADDR _SB_MAKE64(4)
-#define M_DM_DSCR_BASE_ADDR _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
+#define S_DM_DSCR_BASE_ADDR _SB_MAKE64(4)
+#define M_DM_DSCR_BASE_ADDR _SB_MAKEMASK(36, S_DM_DSCR_BASE_ADDR)
-#define S_DM_DSCR_BASE_RINGSZ _SB_MAKE64(40)
-#define M_DM_DSCR_BASE_RINGSZ _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
+#define S_DM_DSCR_BASE_RINGSZ _SB_MAKE64(40)
+#define M_DM_DSCR_BASE_RINGSZ _SB_MAKEMASK(16, S_DM_DSCR_BASE_RINGSZ)
#define V_DM_DSCR_BASE_RINGSZ(x) _SB_MAKEVALUE(x, S_DM_DSCR_BASE_RINGSZ)
#define G_DM_DSCR_BASE_RINGSZ(x) _SB_GETVALUE(x, S_DM_DSCR_BASE_RINGSZ, M_DM_DSCR_BASE_RINGSZ)
-#define S_DM_DSCR_BASE_PRIORITY _SB_MAKE64(56)
-#define M_DM_DSCR_BASE_PRIORITY _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
+#define S_DM_DSCR_BASE_PRIORITY _SB_MAKE64(56)
+#define M_DM_DSCR_BASE_PRIORITY _SB_MAKEMASK(3, S_DM_DSCR_BASE_PRIORITY)
#define V_DM_DSCR_BASE_PRIORITY(x) _SB_MAKEVALUE(x, S_DM_DSCR_BASE_PRIORITY)
#define G_DM_DSCR_BASE_PRIORITY(x) _SB_GETVALUE(x, S_DM_DSCR_BASE_PRIORITY, M_DM_DSCR_BASE_PRIORITY)
@@ -407,12 +407,12 @@
#define K_DM_DSCR_BASE_PRIORITY_8 3
#define K_DM_DSCR_BASE_PRIORITY_16 4
-#define M_DM_DSCR_BASE_ACTIVE _SB_MAKEMASK1(59)
+#define M_DM_DSCR_BASE_ACTIVE _SB_MAKEMASK1(59)
#define M_DM_DSCR_BASE_INTERRUPT _SB_MAKEMASK1(60)
-#define M_DM_DSCR_BASE_RESET _SB_MAKEMASK1(61) /* write register */
-#define M_DM_DSCR_BASE_ERROR _SB_MAKEMASK1(61) /* read register */
-#define M_DM_DSCR_BASE_ABORT _SB_MAKEMASK1(62)
-#define M_DM_DSCR_BASE_ENABL _SB_MAKEMASK1(63)
+#define M_DM_DSCR_BASE_RESET _SB_MAKEMASK1(61) /* write register */
+#define M_DM_DSCR_BASE_ERROR _SB_MAKEMASK1(61) /* read register */
+#define M_DM_DSCR_BASE_ABORT _SB_MAKEMASK1(62)
+#define M_DM_DSCR_BASE_ENABL _SB_MAKEMASK1(63)
/*
* Data Mover Descriptor Count Register (Table 7-25)
@@ -428,14 +428,14 @@
* Register: DM_CUR_DSCR_ADDR_3
*/
-#define S_DM_CUR_DSCR_DSCR_ADDR _SB_MAKE64(0)
-#define M_DM_CUR_DSCR_DSCR_ADDR _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
+#define S_DM_CUR_DSCR_DSCR_ADDR _SB_MAKE64(0)
+#define M_DM_CUR_DSCR_DSCR_ADDR _SB_MAKEMASK(40, S_DM_CUR_DSCR_DSCR_ADDR)
#define S_DM_CUR_DSCR_DSCR_COUNT _SB_MAKE64(48)
#define M_DM_CUR_DSCR_DSCR_COUNT _SB_MAKEMASK(16, S_DM_CUR_DSCR_DSCR_COUNT)
#define V_DM_CUR_DSCR_DSCR_COUNT(r) _SB_MAKEVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT)
#define G_DM_CUR_DSCR_DSCR_COUNT(r) _SB_GETVALUE(r, S_DM_CUR_DSCR_DSCR_COUNT,\
- M_DM_CUR_DSCR_DSCR_COUNT)
+ M_DM_CUR_DSCR_DSCR_COUNT)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -450,15 +450,15 @@
#define M_DM_PARTIAL_CRC_PARTIAL _SB_MAKEMASK(32, S_DM_PARTIAL_CRC_PARTIAL)
#define V_DM_PARTIAL_CRC_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_CRC_PARTIAL)
#define G_DM_PARTIAL_CRC_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_CRC_PARTIAL,\
- M_DM_PARTIAL_CRC_PARTIAL)
+ M_DM_PARTIAL_CRC_PARTIAL)
#define S_DM_PARTIAL_TCPCS_PARTIAL _SB_MAKE64(32)
#define M_DM_PARTIAL_TCPCS_PARTIAL _SB_MAKEMASK(16, S_DM_PARTIAL_TCPCS_PARTIAL)
#define V_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_MAKEVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL)
#define G_DM_PARTIAL_TCPCS_PARTIAL(r) _SB_GETVALUE(r, S_DM_PARTIAL_TCPCS_PARTIAL,\
- M_DM_PARTIAL_TCPCS_PARTIAL)
+ M_DM_PARTIAL_TCPCS_PARTIAL)
-#define M_DM_PARTIAL_ODD_BYTE _SB_MAKEMASK1(48)
+#define M_DM_PARTIAL_ODD_BYTE _SB_MAKEMASK1(48)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
@@ -468,17 +468,17 @@
* Register: CRC_DEF_0
* Register: CRC_DEF_1
*/
-#define S_CRC_DEF_CRC_INIT _SB_MAKE64(0)
-#define M_CRC_DEF_CRC_INIT _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
-#define V_CRC_DEF_CRC_INIT(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
-#define G_CRC_DEF_CRC_INIT(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
- M_CRC_DEF_CRC_INIT)
-
-#define S_CRC_DEF_CRC_POLY _SB_MAKE64(32)
-#define M_CRC_DEF_CRC_POLY _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
-#define V_CRC_DEF_CRC_POLY(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
-#define G_CRC_DEF_CRC_POLY(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
- M_CRC_DEF_CRC_POLY)
+#define S_CRC_DEF_CRC_INIT _SB_MAKE64(0)
+#define M_CRC_DEF_CRC_INIT _SB_MAKEMASK(32, S_CRC_DEF_CRC_INIT)
+#define V_CRC_DEF_CRC_INIT(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_INIT)
+#define G_CRC_DEF_CRC_INIT(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_INIT,\
+ M_CRC_DEF_CRC_INIT)
+
+#define S_CRC_DEF_CRC_POLY _SB_MAKE64(32)
+#define M_CRC_DEF_CRC_POLY _SB_MAKEMASK(32, S_CRC_DEF_CRC_POLY)
+#define V_CRC_DEF_CRC_POLY(r) _SB_MAKEVALUE(r, S_CRC_DEF_CRC_POLY)
+#define G_CRC_DEF_CRC_POLY(r) _SB_GETVALUE(r, S_CRC_DEF_CRC_POLY,\
+ M_CRC_DEF_CRC_POLY)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
@@ -488,50 +488,50 @@
* Register: CTCP_DEF_0
* Register: CTCP_DEF_1
*/
-#define S_CTCP_DEF_CRC_TXOR _SB_MAKE64(0)
-#define M_CTCP_DEF_CRC_TXOR _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
-#define V_CTCP_DEF_CRC_TXOR(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
-#define G_CTCP_DEF_CRC_TXOR(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
- M_CTCP_DEF_CRC_TXOR)
-
-#define S_CTCP_DEF_TCPCS_INIT _SB_MAKE64(32)
-#define M_CTCP_DEF_TCPCS_INIT _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
+#define S_CTCP_DEF_CRC_TXOR _SB_MAKE64(0)
+#define M_CTCP_DEF_CRC_TXOR _SB_MAKEMASK(32, S_CTCP_DEF_CRC_TXOR)
+#define V_CTCP_DEF_CRC_TXOR(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_TXOR)
+#define G_CTCP_DEF_CRC_TXOR(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_TXOR,\
+ M_CTCP_DEF_CRC_TXOR)
+
+#define S_CTCP_DEF_TCPCS_INIT _SB_MAKE64(32)
+#define M_CTCP_DEF_TCPCS_INIT _SB_MAKEMASK(16, S_CTCP_DEF_TCPCS_INIT)
#define V_CTCP_DEF_TCPCS_INIT(r) _SB_MAKEVALUE(r, S_CTCP_DEF_TCPCS_INIT)
#define G_CTCP_DEF_TCPCS_INIT(r) _SB_GETVALUE(r, S_CTCP_DEF_TCPCS_INIT,\
- M_CTCP_DEF_TCPCS_INIT)
+ M_CTCP_DEF_TCPCS_INIT)
-#define S_CTCP_DEF_CRC_WIDTH _SB_MAKE64(48)
-#define M_CTCP_DEF_CRC_WIDTH _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
-#define V_CTCP_DEF_CRC_WIDTH(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
-#define G_CTCP_DEF_CRC_WIDTH(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
- M_CTCP_DEF_CRC_WIDTH)
+#define S_CTCP_DEF_CRC_WIDTH _SB_MAKE64(48)
+#define M_CTCP_DEF_CRC_WIDTH _SB_MAKEMASK(2, S_CTCP_DEF_CRC_WIDTH)
+#define V_CTCP_DEF_CRC_WIDTH(r) _SB_MAKEVALUE(r, S_CTCP_DEF_CRC_WIDTH)
+#define G_CTCP_DEF_CRC_WIDTH(r) _SB_GETVALUE(r, S_CTCP_DEF_CRC_WIDTH,\
+ M_CTCP_DEF_CRC_WIDTH)
-#define K_CTCP_DEF_CRC_WIDTH_4 0
-#define K_CTCP_DEF_CRC_WIDTH_2 1
-#define K_CTCP_DEF_CRC_WIDTH_1 2
+#define K_CTCP_DEF_CRC_WIDTH_4 0
+#define K_CTCP_DEF_CRC_WIDTH_2 1
+#define K_CTCP_DEF_CRC_WIDTH_1 2
#define M_CTCP_DEF_CRC_BIT_ORDER _SB_MAKEMASK1(50)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
/*
- * Data Mover Descriptor Doubleword "A" (Table 7-26)
+ * Data Mover Descriptor Doubleword "A" (Table 7-26)
*/
-#define S_DM_DSCRA_DST_ADDR _SB_MAKE64(0)
-#define M_DM_DSCRA_DST_ADDR _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
+#define S_DM_DSCRA_DST_ADDR _SB_MAKE64(0)
+#define M_DM_DSCRA_DST_ADDR _SB_MAKEMASK(40, S_DM_DSCRA_DST_ADDR)
-#define M_DM_DSCRA_UN_DEST _SB_MAKEMASK1(40)
-#define M_DM_DSCRA_UN_SRC _SB_MAKEMASK1(41)
-#define M_DM_DSCRA_INTERRUPT _SB_MAKEMASK1(42)
+#define M_DM_DSCRA_UN_DEST _SB_MAKEMASK1(40)
+#define M_DM_DSCRA_UN_SRC _SB_MAKEMASK1(41)
+#define M_DM_DSCRA_INTERRUPT _SB_MAKEMASK1(42)
#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-#define M_DM_DSCRA_THROTTLE _SB_MAKEMASK1(43)
+#define M_DM_DSCRA_THROTTLE _SB_MAKEMASK1(43)
#endif /* up to 1250 PASS1 */
-#define S_DM_DSCRA_DIR_DEST _SB_MAKE64(44)
-#define M_DM_DSCRA_DIR_DEST _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
-#define V_DM_DSCRA_DIR_DEST(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
-#define G_DM_DSCRA_DIR_DEST(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
+#define S_DM_DSCRA_DIR_DEST _SB_MAKE64(44)
+#define M_DM_DSCRA_DIR_DEST _SB_MAKEMASK(2, S_DM_DSCRA_DIR_DEST)
+#define V_DM_DSCRA_DIR_DEST(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_DEST)
+#define G_DM_DSCRA_DIR_DEST(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_DEST, M_DM_DSCRA_DIR_DEST)
#define K_DM_DSCRA_DIR_DEST_INCR 0
#define K_DM_DSCRA_DIR_DEST_DECR 1
@@ -541,24 +541,24 @@
#define V_DM_DSCRA_DIR_DEST_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_DECR, S_DM_DSCRA_DIR_DEST)
#define V_DM_DSCRA_DIR_DEST_CONST _SB_MAKEVALUE(K_DM_DSCRA_DIR_DEST_CONST, S_DM_DSCRA_DIR_DEST)
-#define S_DM_DSCRA_DIR_SRC _SB_MAKE64(46)
-#define M_DM_DSCRA_DIR_SRC _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
-#define V_DM_DSCRA_DIR_SRC(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
-#define G_DM_DSCRA_DIR_SRC(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
+#define S_DM_DSCRA_DIR_SRC _SB_MAKE64(46)
+#define M_DM_DSCRA_DIR_SRC _SB_MAKEMASK(2, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC(x) _SB_MAKEVALUE(x, S_DM_DSCRA_DIR_SRC)
+#define G_DM_DSCRA_DIR_SRC(x) _SB_GETVALUE(x, S_DM_DSCRA_DIR_SRC, M_DM_DSCRA_DIR_SRC)
-#define K_DM_DSCRA_DIR_SRC_INCR 0
-#define K_DM_DSCRA_DIR_SRC_DECR 1
+#define K_DM_DSCRA_DIR_SRC_INCR 0
+#define K_DM_DSCRA_DIR_SRC_DECR 1
#define K_DM_DSCRA_DIR_SRC_CONST 2
-#define V_DM_DSCRA_DIR_SRC_INCR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
-#define V_DM_DSCRA_DIR_SRC_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_INCR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_INCR, S_DM_DSCRA_DIR_SRC)
+#define V_DM_DSCRA_DIR_SRC_DECR _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_DECR, S_DM_DSCRA_DIR_SRC)
#define V_DM_DSCRA_DIR_SRC_CONST _SB_MAKEVALUE(K_DM_DSCRA_DIR_SRC_CONST, S_DM_DSCRA_DIR_SRC)
-#define M_DM_DSCRA_ZERO_MEM _SB_MAKEMASK1(48)
-#define M_DM_DSCRA_PREFETCH _SB_MAKEMASK1(49)
-#define M_DM_DSCRA_L2C_DEST _SB_MAKEMASK1(50)
-#define M_DM_DSCRA_L2C_SRC _SB_MAKEMASK1(51)
+#define M_DM_DSCRA_ZERO_MEM _SB_MAKEMASK1(48)
+#define M_DM_DSCRA_PREFETCH _SB_MAKEMASK1(49)
+#define M_DM_DSCRA_L2C_DEST _SB_MAKEMASK1(50)
+#define M_DM_DSCRA_L2C_SRC _SB_MAKEMASK1(51)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_DM_DSCRA_RD_BKOFF _SB_MAKEMASK1(52)
@@ -566,27 +566,27 @@
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_DM_DSCRA_TCPCS_EN _SB_MAKEMASK1(54)
-#define M_DM_DSCRA_TCPCS_RES _SB_MAKEMASK1(55)
-#define M_DM_DSCRA_TCPCS_AP _SB_MAKEMASK1(56)
-#define M_DM_DSCRA_CRC_EN _SB_MAKEMASK1(57)
-#define M_DM_DSCRA_CRC_RES _SB_MAKEMASK1(58)
-#define M_DM_DSCRA_CRC_AP _SB_MAKEMASK1(59)
-#define M_DM_DSCRA_CRC_DFN _SB_MAKEMASK1(60)
-#define M_DM_DSCRA_CRC_XBIT _SB_MAKEMASK1(61)
+#define M_DM_DSCRA_TCPCS_EN _SB_MAKEMASK1(54)
+#define M_DM_DSCRA_TCPCS_RES _SB_MAKEMASK1(55)
+#define M_DM_DSCRA_TCPCS_AP _SB_MAKEMASK1(56)
+#define M_DM_DSCRA_CRC_EN _SB_MAKEMASK1(57)
+#define M_DM_DSCRA_CRC_RES _SB_MAKEMASK1(58)
+#define M_DM_DSCRA_CRC_AP _SB_MAKEMASK1(59)
+#define M_DM_DSCRA_CRC_DFN _SB_MAKEMASK1(60)
+#define M_DM_DSCRA_CRC_XBIT _SB_MAKEMASK1(61)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define M_DM_DSCRA_RESERVED2 _SB_MAKEMASK(3, 61)
+#define M_DM_DSCRA_RESERVED2 _SB_MAKEMASK(3, 61)
/*
- * Data Mover Descriptor Doubleword "B" (Table 7-25)
+ * Data Mover Descriptor Doubleword "B" (Table 7-25)
*/
-#define S_DM_DSCRB_SRC_ADDR _SB_MAKE64(0)
-#define M_DM_DSCRB_SRC_ADDR _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
+#define S_DM_DSCRB_SRC_ADDR _SB_MAKE64(0)
+#define M_DM_DSCRB_SRC_ADDR _SB_MAKEMASK(40, S_DM_DSCRB_SRC_ADDR)
-#define S_DM_DSCRB_SRC_LENGTH _SB_MAKE64(40)
-#define M_DM_DSCRB_SRC_LENGTH _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
+#define S_DM_DSCRB_SRC_LENGTH _SB_MAKE64(40)
+#define M_DM_DSCRB_SRC_LENGTH _SB_MAKEMASK(20, S_DM_DSCRB_SRC_LENGTH)
#define V_DM_DSCRB_SRC_LENGTH(x) _SB_MAKEVALUE(x, S_DM_DSCRB_SRC_LENGTH)
#define G_DM_DSCRB_SRC_LENGTH(x) _SB_GETVALUE(x, S_DM_DSCRB_SRC_LENGTH, M_DM_DSCRB_SRC_LENGTH)
diff --git a/arch/mips/include/asm/sibyte/sb1250_genbus.h b/arch/mips/include/asm/sibyte/sb1250_genbus.h
index a96ded17bdc..04c009c3693 100644
--- a/arch/mips/include/asm/sibyte/sb1250_genbus.h
+++ b/arch/mips/include/asm/sibyte/sb1250_genbus.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Generic Bus Constants File: sb1250_genbus.h
+ * Generic Bus Constants File: sb1250_genbus.h
*
* This module contains constants and macros useful for
* manipulating the SB1250's Generic Bus interface
@@ -40,10 +40,10 @@
* Generic Bus Region Configuration Registers (Table 11-4)
*/
-#define S_IO_RDY_ACTIVE 0
+#define S_IO_RDY_ACTIVE 0
#define M_IO_RDY_ACTIVE _SB_MAKEMASK1(S_IO_RDY_ACTIVE)
-#define S_IO_ENA_RDY 1
+#define S_IO_ENA_RDY 1
#define M_IO_ENA_RDY _SB_MAKEMASK1(S_IO_ENA_RDY)
#define S_IO_WIDTH_SEL 2
@@ -52,7 +52,7 @@
#define K_IO_WIDTH_SEL_2 1
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define K_IO_WIDTH_SEL_1L 2
+#define K_IO_WIDTH_SEL_1L 2
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define K_IO_WIDTH_SEL_4 3
#define V_IO_WIDTH_SEL(x) _SB_MAKEVALUE(x, S_IO_WIDTH_SEL)
@@ -111,7 +111,7 @@
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_EARLY_CS _SB_MAKEMASK1(3)
+#define M_IO_EARLY_CS _SB_MAKEMASK1(3)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define S_IO_ALE_TO_CS 4
@@ -121,10 +121,10 @@
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_IO_BURST_WIDTH _SB_MAKE64(6)
-#define M_IO_BURST_WIDTH _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
-#define V_IO_BURST_WIDTH(x) _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
-#define G_IO_BURST_WIDTH(x) _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
+#define S_IO_BURST_WIDTH _SB_MAKE64(6)
+#define M_IO_BURST_WIDTH _SB_MAKEMASK(2, S_IO_BURST_WIDTH)
+#define V_IO_BURST_WIDTH(x) _SB_MAKEVALUE(x, S_IO_BURST_WIDTH)
+#define G_IO_BURST_WIDTH(x) _SB_GETVALUE(x, S_IO_BURST_WIDTH, M_IO_BURST_WIDTH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define S_IO_CS_WIDTH 8
@@ -149,7 +149,7 @@
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) \
|| SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_RDY_SYNC _SB_MAKEMASK1(3)
+#define M_IO_RDY_SYNC _SB_MAKEMASK1(3)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
#define S_IO_WRITE_WIDTH 4
@@ -191,7 +191,7 @@
#define M_IO_ILL_ADDR_INT _SB_MAKEMASK1(11)
#define M_IO_MULT_CS_INT _SB_MAKEMASK1(12)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_IO_COH_ERR _SB_MAKEMASK1(14)
+#define M_IO_COH_ERR _SB_MAKEMASK1(14)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
@@ -370,8 +370,8 @@
#define S_GPIO_INTR_TYPEX(n) (((n)/2)*2)
#define M_GPIO_INTR_TYPEX(n) _SB_MAKEMASK(2, S_GPIO_INTR_TYPEX(n))
-#define V_GPIO_INTR_TYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
-#define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
+#define V_GPIO_INTR_TYPEX(n, x) _SB_MAKEVALUE(x, S_GPIO_INTR_TYPEX(n))
+#define G_GPIO_INTR_TYPEX(n, x) _SB_GETVALUE(x, S_GPIO_INTR_TYPEX(n), M_GPIO_INTR_TYPEX(n))
#define S_GPIO_INTR_TYPE0 0
#define M_GPIO_INTR_TYPE0 _SB_MAKEMASK(2, S_GPIO_INTR_TYPE0)
diff --git a/arch/mips/include/asm/sibyte/sb1250_int.h b/arch/mips/include/asm/sibyte/sb1250_int.h
index dbea73ddd2f..36afcb2766c 100644
--- a/arch/mips/include/asm/sibyte/sb1250_int.h
+++ b/arch/mips/include/asm/sibyte/sb1250_int.h
@@ -45,71 +45,71 @@
* First, the interrupt numbers.
*/
-#define K_INT_SOURCES 64
-
-#define K_INT_WATCHDOG_TIMER_0 0
-#define K_INT_WATCHDOG_TIMER_1 1
-#define K_INT_TIMER_0 2
-#define K_INT_TIMER_1 3
-#define K_INT_TIMER_2 4
-#define K_INT_TIMER_3 5
-#define K_INT_SMB_0 6
-#define K_INT_SMB_1 7
-#define K_INT_UART_0 8
-#define K_INT_UART_1 9
-#define K_INT_SER_0 10
-#define K_INT_SER_1 11
-#define K_INT_PCMCIA 12
-#define K_INT_ADDR_TRAP 13
-#define K_INT_PERF_CNT 14
-#define K_INT_TRACE_FREEZE 15
-#define K_INT_BAD_ECC 16
-#define K_INT_COR_ECC 17
-#define K_INT_IO_BUS 18
-#define K_INT_MAC_0 19
-#define K_INT_MAC_1 20
-#define K_INT_MAC_2 21
-#define K_INT_DM_CH_0 22
-#define K_INT_DM_CH_1 23
-#define K_INT_DM_CH_2 24
-#define K_INT_DM_CH_3 25
-#define K_INT_MBOX_0 26
-#define K_INT_MBOX_1 27
-#define K_INT_MBOX_2 28
-#define K_INT_MBOX_3 29
+#define K_INT_SOURCES 64
+
+#define K_INT_WATCHDOG_TIMER_0 0
+#define K_INT_WATCHDOG_TIMER_1 1
+#define K_INT_TIMER_0 2
+#define K_INT_TIMER_1 3
+#define K_INT_TIMER_2 4
+#define K_INT_TIMER_3 5
+#define K_INT_SMB_0 6
+#define K_INT_SMB_1 7
+#define K_INT_UART_0 8
+#define K_INT_UART_1 9
+#define K_INT_SER_0 10
+#define K_INT_SER_1 11
+#define K_INT_PCMCIA 12
+#define K_INT_ADDR_TRAP 13
+#define K_INT_PERF_CNT 14
+#define K_INT_TRACE_FREEZE 15
+#define K_INT_BAD_ECC 16
+#define K_INT_COR_ECC 17
+#define K_INT_IO_BUS 18
+#define K_INT_MAC_0 19
+#define K_INT_MAC_1 20
+#define K_INT_MAC_2 21
+#define K_INT_DM_CH_0 22
+#define K_INT_DM_CH_1 23
+#define K_INT_DM_CH_2 24
+#define K_INT_DM_CH_3 25
+#define K_INT_MBOX_0 26
+#define K_INT_MBOX_1 27
+#define K_INT_MBOX_2 28
+#define K_INT_MBOX_3 29
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define K_INT_CYCLE_CP0_INT 30
#define K_INT_CYCLE_CP1_INT 31
#endif /* 1250 PASS2 || 112x PASS1 */
-#define K_INT_GPIO_0 32
-#define K_INT_GPIO_1 33
-#define K_INT_GPIO_2 34
-#define K_INT_GPIO_3 35
-#define K_INT_GPIO_4 36
-#define K_INT_GPIO_5 37
-#define K_INT_GPIO_6 38
-#define K_INT_GPIO_7 39
-#define K_INT_GPIO_8 40
-#define K_INT_GPIO_9 41
-#define K_INT_GPIO_10 42
-#define K_INT_GPIO_11 43
-#define K_INT_GPIO_12 44
-#define K_INT_GPIO_13 45
-#define K_INT_GPIO_14 46
-#define K_INT_GPIO_15 47
-#define K_INT_LDT_FATAL 48
-#define K_INT_LDT_NONFATAL 49
-#define K_INT_LDT_SMI 50
-#define K_INT_LDT_NMI 51
-#define K_INT_LDT_INIT 52
-#define K_INT_LDT_STARTUP 53
-#define K_INT_LDT_EXT 54
-#define K_INT_PCI_ERROR 55
-#define K_INT_PCI_INTA 56
-#define K_INT_PCI_INTB 57
-#define K_INT_PCI_INTC 58
-#define K_INT_PCI_INTD 59
-#define K_INT_SPARE_2 60
+#define K_INT_GPIO_0 32
+#define K_INT_GPIO_1 33
+#define K_INT_GPIO_2 34
+#define K_INT_GPIO_3 35
+#define K_INT_GPIO_4 36
+#define K_INT_GPIO_5 37
+#define K_INT_GPIO_6 38
+#define K_INT_GPIO_7 39
+#define K_INT_GPIO_8 40
+#define K_INT_GPIO_9 41
+#define K_INT_GPIO_10 42
+#define K_INT_GPIO_11 43
+#define K_INT_GPIO_12 44
+#define K_INT_GPIO_13 45
+#define K_INT_GPIO_14 46
+#define K_INT_GPIO_15 47
+#define K_INT_LDT_FATAL 48
+#define K_INT_LDT_NONFATAL 49
+#define K_INT_LDT_SMI 50
+#define K_INT_LDT_NMI 51
+#define K_INT_LDT_INIT 52
+#define K_INT_LDT_STARTUP 53
+#define K_INT_LDT_EXT 54
+#define K_INT_PCI_ERROR 55
+#define K_INT_PCI_INTA 56
+#define K_INT_PCI_INTB 57
+#define K_INT_PCI_INTC 58
+#define K_INT_PCI_INTD 59
+#define K_INT_SPARE_2 60
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define K_INT_MAC_0_CH1 61
#define K_INT_MAC_1_CH1 62
@@ -120,70 +120,70 @@
* Mask values for each interrupt
*/
-#define M_INT_WATCHDOG_TIMER_0 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
-#define M_INT_WATCHDOG_TIMER_1 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
-#define M_INT_TIMER_0 _SB_MAKEMASK1(K_INT_TIMER_0)
-#define M_INT_TIMER_1 _SB_MAKEMASK1(K_INT_TIMER_1)
-#define M_INT_TIMER_2 _SB_MAKEMASK1(K_INT_TIMER_2)
-#define M_INT_TIMER_3 _SB_MAKEMASK1(K_INT_TIMER_3)
-#define M_INT_SMB_0 _SB_MAKEMASK1(K_INT_SMB_0)
-#define M_INT_SMB_1 _SB_MAKEMASK1(K_INT_SMB_1)
-#define M_INT_UART_0 _SB_MAKEMASK1(K_INT_UART_0)
-#define M_INT_UART_1 _SB_MAKEMASK1(K_INT_UART_1)
-#define M_INT_SER_0 _SB_MAKEMASK1(K_INT_SER_0)
-#define M_INT_SER_1 _SB_MAKEMASK1(K_INT_SER_1)
-#define M_INT_PCMCIA _SB_MAKEMASK1(K_INT_PCMCIA)
-#define M_INT_ADDR_TRAP _SB_MAKEMASK1(K_INT_ADDR_TRAP)
-#define M_INT_PERF_CNT _SB_MAKEMASK1(K_INT_PERF_CNT)
-#define M_INT_TRACE_FREEZE _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
-#define M_INT_BAD_ECC _SB_MAKEMASK1(K_INT_BAD_ECC)
-#define M_INT_COR_ECC _SB_MAKEMASK1(K_INT_COR_ECC)
-#define M_INT_IO_BUS _SB_MAKEMASK1(K_INT_IO_BUS)
-#define M_INT_MAC_0 _SB_MAKEMASK1(K_INT_MAC_0)
-#define M_INT_MAC_1 _SB_MAKEMASK1(K_INT_MAC_1)
-#define M_INT_MAC_2 _SB_MAKEMASK1(K_INT_MAC_2)
-#define M_INT_DM_CH_0 _SB_MAKEMASK1(K_INT_DM_CH_0)
-#define M_INT_DM_CH_1 _SB_MAKEMASK1(K_INT_DM_CH_1)
-#define M_INT_DM_CH_2 _SB_MAKEMASK1(K_INT_DM_CH_2)
-#define M_INT_DM_CH_3 _SB_MAKEMASK1(K_INT_DM_CH_3)
-#define M_INT_MBOX_0 _SB_MAKEMASK1(K_INT_MBOX_0)
-#define M_INT_MBOX_1 _SB_MAKEMASK1(K_INT_MBOX_1)
-#define M_INT_MBOX_2 _SB_MAKEMASK1(K_INT_MBOX_2)
-#define M_INT_MBOX_3 _SB_MAKEMASK1(K_INT_MBOX_3)
-#define M_INT_MBOX_ALL _SB_MAKEMASK(4, K_INT_MBOX_0)
+#define M_INT_WATCHDOG_TIMER_0 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_0)
+#define M_INT_WATCHDOG_TIMER_1 _SB_MAKEMASK1(K_INT_WATCHDOG_TIMER_1)
+#define M_INT_TIMER_0 _SB_MAKEMASK1(K_INT_TIMER_0)
+#define M_INT_TIMER_1 _SB_MAKEMASK1(K_INT_TIMER_1)
+#define M_INT_TIMER_2 _SB_MAKEMASK1(K_INT_TIMER_2)
+#define M_INT_TIMER_3 _SB_MAKEMASK1(K_INT_TIMER_3)
+#define M_INT_SMB_0 _SB_MAKEMASK1(K_INT_SMB_0)
+#define M_INT_SMB_1 _SB_MAKEMASK1(K_INT_SMB_1)
+#define M_INT_UART_0 _SB_MAKEMASK1(K_INT_UART_0)
+#define M_INT_UART_1 _SB_MAKEMASK1(K_INT_UART_1)
+#define M_INT_SER_0 _SB_MAKEMASK1(K_INT_SER_0)
+#define M_INT_SER_1 _SB_MAKEMASK1(K_INT_SER_1)
+#define M_INT_PCMCIA _SB_MAKEMASK1(K_INT_PCMCIA)
+#define M_INT_ADDR_TRAP _SB_MAKEMASK1(K_INT_ADDR_TRAP)
+#define M_INT_PERF_CNT _SB_MAKEMASK1(K_INT_PERF_CNT)
+#define M_INT_TRACE_FREEZE _SB_MAKEMASK1(K_INT_TRACE_FREEZE)
+#define M_INT_BAD_ECC _SB_MAKEMASK1(K_INT_BAD_ECC)
+#define M_INT_COR_ECC _SB_MAKEMASK1(K_INT_COR_ECC)
+#define M_INT_IO_BUS _SB_MAKEMASK1(K_INT_IO_BUS)
+#define M_INT_MAC_0 _SB_MAKEMASK1(K_INT_MAC_0)
+#define M_INT_MAC_1 _SB_MAKEMASK1(K_INT_MAC_1)
+#define M_INT_MAC_2 _SB_MAKEMASK1(K_INT_MAC_2)
+#define M_INT_DM_CH_0 _SB_MAKEMASK1(K_INT_DM_CH_0)
+#define M_INT_DM_CH_1 _SB_MAKEMASK1(K_INT_DM_CH_1)
+#define M_INT_DM_CH_2 _SB_MAKEMASK1(K_INT_DM_CH_2)
+#define M_INT_DM_CH_3 _SB_MAKEMASK1(K_INT_DM_CH_3)
+#define M_INT_MBOX_0 _SB_MAKEMASK1(K_INT_MBOX_0)
+#define M_INT_MBOX_1 _SB_MAKEMASK1(K_INT_MBOX_1)
+#define M_INT_MBOX_2 _SB_MAKEMASK1(K_INT_MBOX_2)
+#define M_INT_MBOX_3 _SB_MAKEMASK1(K_INT_MBOX_3)
+#define M_INT_MBOX_ALL _SB_MAKEMASK(4, K_INT_MBOX_0)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define M_INT_CYCLE_CP0_INT _SB_MAKEMASK1(K_INT_CYCLE_CP0_INT)
#define M_INT_CYCLE_CP1_INT _SB_MAKEMASK1(K_INT_CYCLE_CP1_INT)
#endif /* 1250 PASS2 || 112x PASS1 */
-#define M_INT_GPIO_0 _SB_MAKEMASK1(K_INT_GPIO_0)
-#define M_INT_GPIO_1 _SB_MAKEMASK1(K_INT_GPIO_1)
-#define M_INT_GPIO_2 _SB_MAKEMASK1(K_INT_GPIO_2)
-#define M_INT_GPIO_3 _SB_MAKEMASK1(K_INT_GPIO_3)
-#define M_INT_GPIO_4 _SB_MAKEMASK1(K_INT_GPIO_4)
-#define M_INT_GPIO_5 _SB_MAKEMASK1(K_INT_GPIO_5)
-#define M_INT_GPIO_6 _SB_MAKEMASK1(K_INT_GPIO_6)
-#define M_INT_GPIO_7 _SB_MAKEMASK1(K_INT_GPIO_7)
-#define M_INT_GPIO_8 _SB_MAKEMASK1(K_INT_GPIO_8)
-#define M_INT_GPIO_9 _SB_MAKEMASK1(K_INT_GPIO_9)
-#define M_INT_GPIO_10 _SB_MAKEMASK1(K_INT_GPIO_10)
-#define M_INT_GPIO_11 _SB_MAKEMASK1(K_INT_GPIO_11)
-#define M_INT_GPIO_12 _SB_MAKEMASK1(K_INT_GPIO_12)
-#define M_INT_GPIO_13 _SB_MAKEMASK1(K_INT_GPIO_13)
-#define M_INT_GPIO_14 _SB_MAKEMASK1(K_INT_GPIO_14)
-#define M_INT_GPIO_15 _SB_MAKEMASK1(K_INT_GPIO_15)
-#define M_INT_LDT_FATAL _SB_MAKEMASK1(K_INT_LDT_FATAL)
-#define M_INT_LDT_NONFATAL _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
-#define M_INT_LDT_SMI _SB_MAKEMASK1(K_INT_LDT_SMI)
-#define M_INT_LDT_NMI _SB_MAKEMASK1(K_INT_LDT_NMI)
-#define M_INT_LDT_INIT _SB_MAKEMASK1(K_INT_LDT_INIT)
-#define M_INT_LDT_STARTUP _SB_MAKEMASK1(K_INT_LDT_STARTUP)
-#define M_INT_LDT_EXT _SB_MAKEMASK1(K_INT_LDT_EXT)
-#define M_INT_PCI_ERROR _SB_MAKEMASK1(K_INT_PCI_ERROR)
-#define M_INT_PCI_INTA _SB_MAKEMASK1(K_INT_PCI_INTA)
-#define M_INT_PCI_INTB _SB_MAKEMASK1(K_INT_PCI_INTB)
-#define M_INT_PCI_INTC _SB_MAKEMASK1(K_INT_PCI_INTC)
-#define M_INT_PCI_INTD _SB_MAKEMASK1(K_INT_PCI_INTD)
-#define M_INT_SPARE_2 _SB_MAKEMASK1(K_INT_SPARE_2)
+#define M_INT_GPIO_0 _SB_MAKEMASK1(K_INT_GPIO_0)
+#define M_INT_GPIO_1 _SB_MAKEMASK1(K_INT_GPIO_1)
+#define M_INT_GPIO_2 _SB_MAKEMASK1(K_INT_GPIO_2)
+#define M_INT_GPIO_3 _SB_MAKEMASK1(K_INT_GPIO_3)
+#define M_INT_GPIO_4 _SB_MAKEMASK1(K_INT_GPIO_4)
+#define M_INT_GPIO_5 _SB_MAKEMASK1(K_INT_GPIO_5)
+#define M_INT_GPIO_6 _SB_MAKEMASK1(K_INT_GPIO_6)
+#define M_INT_GPIO_7 _SB_MAKEMASK1(K_INT_GPIO_7)
+#define M_INT_GPIO_8 _SB_MAKEMASK1(K_INT_GPIO_8)
+#define M_INT_GPIO_9 _SB_MAKEMASK1(K_INT_GPIO_9)
+#define M_INT_GPIO_10 _SB_MAKEMASK1(K_INT_GPIO_10)
+#define M_INT_GPIO_11 _SB_MAKEMASK1(K_INT_GPIO_11)
+#define M_INT_GPIO_12 _SB_MAKEMASK1(K_INT_GPIO_12)
+#define M_INT_GPIO_13 _SB_MAKEMASK1(K_INT_GPIO_13)
+#define M_INT_GPIO_14 _SB_MAKEMASK1(K_INT_GPIO_14)
+#define M_INT_GPIO_15 _SB_MAKEMASK1(K_INT_GPIO_15)
+#define M_INT_LDT_FATAL _SB_MAKEMASK1(K_INT_LDT_FATAL)
+#define M_INT_LDT_NONFATAL _SB_MAKEMASK1(K_INT_LDT_NONFATAL)
+#define M_INT_LDT_SMI _SB_MAKEMASK1(K_INT_LDT_SMI)
+#define M_INT_LDT_NMI _SB_MAKEMASK1(K_INT_LDT_NMI)
+#define M_INT_LDT_INIT _SB_MAKEMASK1(K_INT_LDT_INIT)
+#define M_INT_LDT_STARTUP _SB_MAKEMASK1(K_INT_LDT_STARTUP)
+#define M_INT_LDT_EXT _SB_MAKEMASK1(K_INT_LDT_EXT)
+#define M_INT_PCI_ERROR _SB_MAKEMASK1(K_INT_PCI_ERROR)
+#define M_INT_PCI_INTA _SB_MAKEMASK1(K_INT_PCI_INTA)
+#define M_INT_PCI_INTB _SB_MAKEMASK1(K_INT_PCI_INTB)
+#define M_INT_PCI_INTC _SB_MAKEMASK1(K_INT_PCI_INTC)
+#define M_INT_PCI_INTD _SB_MAKEMASK1(K_INT_PCI_INTD)
+#define M_INT_SPARE_2 _SB_MAKEMASK1(K_INT_SPARE_2)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define M_INT_MAC_0_CH1 _SB_MAKEMASK1(K_INT_MAC_0_CH1)
#define M_INT_MAC_1_CH1 _SB_MAKEMASK1(K_INT_MAC_1_CH1)
@@ -208,9 +208,9 @@
*/
#define S_INT_LDT_INTMSG 0
-#define M_INT_LDT_INTMSG _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
-#define V_INT_LDT_INTMSG(x) _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
-#define G_INT_LDT_INTMSG(x) _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
+#define M_INT_LDT_INTMSG _SB_MAKEMASK(3, S_INT_LDT_INTMSG)
+#define V_INT_LDT_INTMSG(x) _SB_MAKEVALUE(x, S_INT_LDT_INTMSG)
+#define G_INT_LDT_INTMSG(x) _SB_GETVALUE(x, S_INT_LDT_INTMSG, M_INT_LDT_INTMSG)
#define K_INT_LDT_INTMSG_FIXED 0
#define K_INT_LDT_INTMSG_ARBITRATED 1
@@ -221,28 +221,28 @@
#define K_INT_LDT_INTMSG_EXTINT 6
#define K_INT_LDT_INTMSG_RESERVED 7
-#define M_INT_LDT_EDGETRIGGER 0
-#define M_INT_LDT_LEVELTRIGGER _SB_MAKEMASK1(3)
+#define M_INT_LDT_EDGETRIGGER 0
+#define M_INT_LDT_LEVELTRIGGER _SB_MAKEMASK1(3)
-#define M_INT_LDT_PHYSICALDEST 0
-#define M_INT_LDT_LOGICALDEST _SB_MAKEMASK1(4)
+#define M_INT_LDT_PHYSICALDEST 0
+#define M_INT_LDT_LOGICALDEST _SB_MAKEMASK1(4)
-#define S_INT_LDT_INTDEST 5
-#define M_INT_LDT_INTDEST _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
-#define V_INT_LDT_INTDEST(x) _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
-#define G_INT_LDT_INTDEST(x) _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
+#define S_INT_LDT_INTDEST 5
+#define M_INT_LDT_INTDEST _SB_MAKEMASK(10, S_INT_LDT_INTDEST)
+#define V_INT_LDT_INTDEST(x) _SB_MAKEVALUE(x, S_INT_LDT_INTDEST)
+#define G_INT_LDT_INTDEST(x) _SB_GETVALUE(x, S_INT_LDT_INTDEST, M_INT_LDT_INTDEST)
-#define S_INT_LDT_VECTOR 13
-#define M_INT_LDT_VECTOR _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
-#define V_INT_LDT_VECTOR(x) _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
-#define G_INT_LDT_VECTOR(x) _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
+#define S_INT_LDT_VECTOR 13
+#define M_INT_LDT_VECTOR _SB_MAKEMASK(8, S_INT_LDT_VECTOR)
+#define V_INT_LDT_VECTOR(x) _SB_MAKEVALUE(x, S_INT_LDT_VECTOR)
+#define G_INT_LDT_VECTOR(x) _SB_GETVALUE(x, S_INT_LDT_VECTOR, M_INT_LDT_VECTOR)
/*
* Vector format (Table 4-6)
*/
#define M_LDTVECT_RAISEINT 0x00
-#define M_LDTVECT_RAISEMBOX 0x40
+#define M_LDTVECT_RAISEMBOX 0x40
#endif /* 1250/112x */
diff --git a/arch/mips/include/asm/sibyte/sb1250_l2c.h b/arch/mips/include/asm/sibyte/sb1250_l2c.h
index b61a7491607..30092d7cfdc 100644
--- a/arch/mips/include/asm/sibyte/sb1250_l2c.h
+++ b/arch/mips/include/asm/sibyte/sb1250_l2c.h
@@ -39,71 +39,71 @@
* Level 2 Cache Tag register (Table 5-3)
*/
-#define S_L2C_TAG_MBZ 0
-#define M_L2C_TAG_MBZ _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
+#define S_L2C_TAG_MBZ 0
+#define M_L2C_TAG_MBZ _SB_MAKEMASK(5, S_L2C_TAG_MBZ)
-#define S_L2C_TAG_INDEX 5
-#define M_L2C_TAG_INDEX _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
-#define V_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
-#define G_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
+#define S_L2C_TAG_INDEX 5
+#define M_L2C_TAG_INDEX _SB_MAKEMASK(12, S_L2C_TAG_INDEX)
+#define V_L2C_TAG_INDEX(x) _SB_MAKEVALUE(x, S_L2C_TAG_INDEX)
+#define G_L2C_TAG_INDEX(x) _SB_GETVALUE(x, S_L2C_TAG_INDEX, M_L2C_TAG_INDEX)
-#define S_L2C_TAG_TAG 17
-#define M_L2C_TAG_TAG _SB_MAKEMASK(23, S_L2C_TAG_TAG)
-#define V_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
-#define G_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
+#define S_L2C_TAG_TAG 17
+#define M_L2C_TAG_TAG _SB_MAKEMASK(23, S_L2C_TAG_TAG)
+#define V_L2C_TAG_TAG(x) _SB_MAKEVALUE(x, S_L2C_TAG_TAG)
+#define G_L2C_TAG_TAG(x) _SB_GETVALUE(x, S_L2C_TAG_TAG, M_L2C_TAG_TAG)
-#define S_L2C_TAG_ECC 40
-#define M_L2C_TAG_ECC _SB_MAKEMASK(6, S_L2C_TAG_ECC)
-#define V_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
-#define G_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
+#define S_L2C_TAG_ECC 40
+#define M_L2C_TAG_ECC _SB_MAKEMASK(6, S_L2C_TAG_ECC)
+#define V_L2C_TAG_ECC(x) _SB_MAKEVALUE(x, S_L2C_TAG_ECC)
+#define G_L2C_TAG_ECC(x) _SB_GETVALUE(x, S_L2C_TAG_ECC, M_L2C_TAG_ECC)
-#define S_L2C_TAG_WAY 46
-#define M_L2C_TAG_WAY _SB_MAKEMASK(2, S_L2C_TAG_WAY)
-#define V_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
-#define G_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
+#define S_L2C_TAG_WAY 46
+#define M_L2C_TAG_WAY _SB_MAKEMASK(2, S_L2C_TAG_WAY)
+#define V_L2C_TAG_WAY(x) _SB_MAKEVALUE(x, S_L2C_TAG_WAY)
+#define G_L2C_TAG_WAY(x) _SB_GETVALUE(x, S_L2C_TAG_WAY, M_L2C_TAG_WAY)
-#define M_L2C_TAG_DIRTY _SB_MAKEMASK1(48)
-#define M_L2C_TAG_VALID _SB_MAKEMASK1(49)
+#define M_L2C_TAG_DIRTY _SB_MAKEMASK1(48)
+#define M_L2C_TAG_VALID _SB_MAKEMASK1(49)
/*
* Format of level 2 cache management address (table 5-2)
*/
-#define S_L2C_MGMT_INDEX 5
-#define M_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
-#define V_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
-#define G_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
+#define S_L2C_MGMT_INDEX 5
+#define M_L2C_MGMT_INDEX _SB_MAKEMASK(12, S_L2C_MGMT_INDEX)
+#define V_L2C_MGMT_INDEX(x) _SB_MAKEVALUE(x, S_L2C_MGMT_INDEX)
+#define G_L2C_MGMT_INDEX(x) _SB_GETVALUE(x, S_L2C_MGMT_INDEX, M_L2C_MGMT_INDEX)
-#define S_L2C_MGMT_QUADRANT 15
-#define M_L2C_MGMT_QUADRANT _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
-#define V_L2C_MGMT_QUADRANT(x) _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
-#define G_L2C_MGMT_QUADRANT(x) _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
+#define S_L2C_MGMT_QUADRANT 15
+#define M_L2C_MGMT_QUADRANT _SB_MAKEMASK(2, S_L2C_MGMT_QUADRANT)
+#define V_L2C_MGMT_QUADRANT(x) _SB_MAKEVALUE(x, S_L2C_MGMT_QUADRANT)
+#define G_L2C_MGMT_QUADRANT(x) _SB_GETVALUE(x, S_L2C_MGMT_QUADRANT, M_L2C_MGMT_QUADRANT)
#define S_L2C_MGMT_HALF 16
-#define M_L2C_MGMT_HALF _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
+#define M_L2C_MGMT_HALF _SB_MAKEMASK(1, S_L2C_MGMT_HALF)
-#define S_L2C_MGMT_WAY 17
-#define M_L2C_MGMT_WAY _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
-#define V_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
-#define G_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
+#define S_L2C_MGMT_WAY 17
+#define M_L2C_MGMT_WAY _SB_MAKEMASK(2, S_L2C_MGMT_WAY)
+#define V_L2C_MGMT_WAY(x) _SB_MAKEVALUE(x, S_L2C_MGMT_WAY)
+#define G_L2C_MGMT_WAY(x) _SB_GETVALUE(x, S_L2C_MGMT_WAY, M_L2C_MGMT_WAY)
-#define S_L2C_MGMT_ECC_DIAG 21
-#define M_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
-#define V_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
-#define G_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
+#define S_L2C_MGMT_ECC_DIAG 21
+#define M_L2C_MGMT_ECC_DIAG _SB_MAKEMASK(2, S_L2C_MGMT_ECC_DIAG)
+#define V_L2C_MGMT_ECC_DIAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_ECC_DIAG)
+#define G_L2C_MGMT_ECC_DIAG(x) _SB_GETVALUE(x, S_L2C_MGMT_ECC_DIAG, M_L2C_MGMT_ECC_DIAG)
-#define S_L2C_MGMT_TAG 23
-#define M_L2C_MGMT_TAG _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
-#define V_L2C_MGMT_TAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
-#define G_L2C_MGMT_TAG(x) _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
+#define S_L2C_MGMT_TAG 23
+#define M_L2C_MGMT_TAG _SB_MAKEMASK(4, S_L2C_MGMT_TAG)
+#define V_L2C_MGMT_TAG(x) _SB_MAKEVALUE(x, S_L2C_MGMT_TAG)
+#define G_L2C_MGMT_TAG(x) _SB_GETVALUE(x, S_L2C_MGMT_TAG, M_L2C_MGMT_TAG)
-#define M_L2C_MGMT_DIRTY _SB_MAKEMASK1(19)
-#define M_L2C_MGMT_VALID _SB_MAKEMASK1(20)
+#define M_L2C_MGMT_DIRTY _SB_MAKEMASK1(19)
+#define M_L2C_MGMT_VALID _SB_MAKEMASK1(20)
-#define A_L2C_MGMT_TAG_BASE 0x00D0000000
+#define A_L2C_MGMT_TAG_BASE 0x00D0000000
-#define L2C_ENTRIES_PER_WAY 4096
-#define L2C_NUM_WAYS 4
+#define L2C_ENTRIES_PER_WAY 4096
+#define L2C_NUM_WAYS 4
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
diff --git a/arch/mips/include/asm/sibyte/sb1250_ldt.h b/arch/mips/include/asm/sibyte/sb1250_ldt.h
index bf7f320d1a8..2340c29dc0c 100644
--- a/arch/mips/include/asm/sibyte/sb1250_ldt.h
+++ b/arch/mips/include/asm/sibyte/sb1250_ldt.h
@@ -66,7 +66,7 @@
#define R_LDT_TYPE1_SRICMD 0x0050
#define R_LDT_TYPE1_SRITXNUM 0x0054
#define R_LDT_TYPE1_SRIRXNUM 0x0058
-#define R_LDT_TYPE1_ERRSTATUS 0x0068
+#define R_LDT_TYPE1_ERRSTATUS 0x0068
#define R_LDT_TYPE1_SRICTRL 0x006C
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define R_LDT_TYPE1_ADDSTATUS 0x0070
@@ -258,7 +258,7 @@
#define M_LDT_LINKCTRL_DWFCOUT_EN _SB_MAKEMASK1_32(31)
/*
- * LDT Link frequency register (Table 8-20) offset 0x48
+ * LDT Link frequency register (Table 8-20) offset 0x48
*/
#define S_LDT_LINKFREQ_FREQ 8
@@ -301,8 +301,8 @@
#define S_LDT_SRICMD_TXINITIALOFFSET 28
#define M_LDT_SRICMD_TXINITIALOFFSET _SB_MAKEMASK_32(3, S_LDT_SRICMD_TXINITIALOFFSET)
-#define V_LDT_SRICMD_TXINITIALOFFSET(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
-#define G_LDT_SRICMD_TXINITIALOFFSET(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
+#define V_LDT_SRICMD_TXINITIALOFFSET(x) _SB_MAKEVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET)
+#define G_LDT_SRICMD_TXINITIALOFFSET(x) _SB_GETVALUE_32(x, S_LDT_SRICMD_TXINITIALOFFSET, M_LDT_SRICMD_TXINITIALOFFSET)
#define M_LDT_SRICMD_LINKFREQDIRECT _SB_MAKEMASK1_32(31)
@@ -318,16 +318,16 @@
#define M_LDT_ERRCTL_OVFSYNCFLOOD_EN _SB_MAKEMASK1_32(5)
#define M_LDT_ERRCTL_EOCNXAFATAL_EN _SB_MAKEMASK1_32(6)
#define M_LDT_ERRCTL_EOCNXANONFATAL_EN _SB_MAKEMASK1_32(7)
-#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN _SB_MAKEMASK1_32(8)
+#define M_LDT_ERRCTL_EOCNXASYNCFLOOD_EN _SB_MAKEMASK1_32(8)
#define M_LDT_ERRCTL_CRCFATAL_EN _SB_MAKEMASK1_32(9)
#define M_LDT_ERRCTL_CRCNONFATAL_EN _SB_MAKEMASK1_32(10)
#define M_LDT_ERRCTL_SERRFATAL_EN _SB_MAKEMASK1_32(11)
#define M_LDT_ERRCTL_SRCTAGFATAL_EN _SB_MAKEMASK1_32(12)
#define M_LDT_ERRCTL_SRCTAGNONFATAL_EN _SB_MAKEMASK1_32(13)
-#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN _SB_MAKEMASK1_32(14)
+#define M_LDT_ERRCTL_SRCTAGSYNCFLOOD_EN _SB_MAKEMASK1_32(14)
#define M_LDT_ERRCTL_MAPNXAFATAL_EN _SB_MAKEMASK1_32(15)
#define M_LDT_ERRCTL_MAPNXANONFATAL_EN _SB_MAKEMASK1_32(16)
-#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN _SB_MAKEMASK1_32(17)
+#define M_LDT_ERRCTL_MAPNXASYNCFLOOD_EN _SB_MAKEMASK1_32(17)
#define M_LDT_ERRCTL_PROTOERR _SB_MAKEMASK1_32(24)
#define M_LDT_ERRCTL_OVFERR _SB_MAKEMASK1_32(25)
diff --git a/arch/mips/include/asm/sibyte/sb1250_mac.h b/arch/mips/include/asm/sibyte/sb1250_mac.h
index cfc4d787088..3fa94fc7404 100644
--- a/arch/mips/include/asm/sibyte/sb1250_mac.h
+++ b/arch/mips/include/asm/sibyte/sb1250_mac.h
@@ -47,86 +47,86 @@
*/
-#define M_MAC_RESERVED0 _SB_MAKEMASK1(0)
-#define M_MAC_TX_HOLD_SOP_EN _SB_MAKEMASK1(1)
-#define M_MAC_RETRY_EN _SB_MAKEMASK1(2)
-#define M_MAC_RET_DRPREQ_EN _SB_MAKEMASK1(3)
-#define M_MAC_RET_UFL_EN _SB_MAKEMASK1(4)
-#define M_MAC_BURST_EN _SB_MAKEMASK1(5)
-
-#define S_MAC_TX_PAUSE _SB_MAKE64(6)
-#define M_MAC_TX_PAUSE_CNT _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
-#define V_MAC_TX_PAUSE_CNT(x) _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
-
-#define K_MAC_TX_PAUSE_CNT_512 0
-#define K_MAC_TX_PAUSE_CNT_1K 1
-#define K_MAC_TX_PAUSE_CNT_2K 2
-#define K_MAC_TX_PAUSE_CNT_4K 3
-#define K_MAC_TX_PAUSE_CNT_8K 4
-#define K_MAC_TX_PAUSE_CNT_16K 5
-#define K_MAC_TX_PAUSE_CNT_32K 6
-#define K_MAC_TX_PAUSE_CNT_64K 7
-
-#define V_MAC_TX_PAUSE_CNT_512 V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
-#define V_MAC_TX_PAUSE_CNT_1K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
-#define V_MAC_TX_PAUSE_CNT_2K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
-#define V_MAC_TX_PAUSE_CNT_4K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
-#define V_MAC_TX_PAUSE_CNT_8K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
-#define V_MAC_TX_PAUSE_CNT_16K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
-#define V_MAC_TX_PAUSE_CNT_32K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
-#define V_MAC_TX_PAUSE_CNT_64K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
-
-#define M_MAC_RESERVED1 _SB_MAKEMASK(8, 9)
-
-#define M_MAC_AP_STAT_EN _SB_MAKEMASK1(17)
+#define M_MAC_RESERVED0 _SB_MAKEMASK1(0)
+#define M_MAC_TX_HOLD_SOP_EN _SB_MAKEMASK1(1)
+#define M_MAC_RETRY_EN _SB_MAKEMASK1(2)
+#define M_MAC_RET_DRPREQ_EN _SB_MAKEMASK1(3)
+#define M_MAC_RET_UFL_EN _SB_MAKEMASK1(4)
+#define M_MAC_BURST_EN _SB_MAKEMASK1(5)
+
+#define S_MAC_TX_PAUSE _SB_MAKE64(6)
+#define M_MAC_TX_PAUSE_CNT _SB_MAKEMASK(3, S_MAC_TX_PAUSE)
+#define V_MAC_TX_PAUSE_CNT(x) _SB_MAKEVALUE(x, S_MAC_TX_PAUSE)
+
+#define K_MAC_TX_PAUSE_CNT_512 0
+#define K_MAC_TX_PAUSE_CNT_1K 1
+#define K_MAC_TX_PAUSE_CNT_2K 2
+#define K_MAC_TX_PAUSE_CNT_4K 3
+#define K_MAC_TX_PAUSE_CNT_8K 4
+#define K_MAC_TX_PAUSE_CNT_16K 5
+#define K_MAC_TX_PAUSE_CNT_32K 6
+#define K_MAC_TX_PAUSE_CNT_64K 7
+
+#define V_MAC_TX_PAUSE_CNT_512 V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_512)
+#define V_MAC_TX_PAUSE_CNT_1K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_1K)
+#define V_MAC_TX_PAUSE_CNT_2K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_2K)
+#define V_MAC_TX_PAUSE_CNT_4K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_4K)
+#define V_MAC_TX_PAUSE_CNT_8K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_8K)
+#define V_MAC_TX_PAUSE_CNT_16K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_16K)
+#define V_MAC_TX_PAUSE_CNT_32K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_32K)
+#define V_MAC_TX_PAUSE_CNT_64K V_MAC_TX_PAUSE_CNT(K_MAC_TX_PAUSE_CNT_64K)
+
+#define M_MAC_RESERVED1 _SB_MAKEMASK(8, 9)
+
+#define M_MAC_AP_STAT_EN _SB_MAKEMASK1(17)
#if SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_MAC_TIMESTAMP _SB_MAKEMASK1(18)
#endif
-#define M_MAC_DRP_ERRPKT_EN _SB_MAKEMASK1(19)
-#define M_MAC_DRP_FCSERRPKT_EN _SB_MAKEMASK1(20)
-#define M_MAC_DRP_CODEERRPKT_EN _SB_MAKEMASK1(21)
-#define M_MAC_DRP_DRBLERRPKT_EN _SB_MAKEMASK1(22)
-#define M_MAC_DRP_RNTPKT_EN _SB_MAKEMASK1(23)
-#define M_MAC_DRP_OSZPKT_EN _SB_MAKEMASK1(24)
-#define M_MAC_DRP_LENERRPKT_EN _SB_MAKEMASK1(25)
+#define M_MAC_DRP_ERRPKT_EN _SB_MAKEMASK1(19)
+#define M_MAC_DRP_FCSERRPKT_EN _SB_MAKEMASK1(20)
+#define M_MAC_DRP_CODEERRPKT_EN _SB_MAKEMASK1(21)
+#define M_MAC_DRP_DRBLERRPKT_EN _SB_MAKEMASK1(22)
+#define M_MAC_DRP_RNTPKT_EN _SB_MAKEMASK1(23)
+#define M_MAC_DRP_OSZPKT_EN _SB_MAKEMASK1(24)
+#define M_MAC_DRP_LENERRPKT_EN _SB_MAKEMASK1(25)
-#define M_MAC_RESERVED3 _SB_MAKEMASK(6, 26)
+#define M_MAC_RESERVED3 _SB_MAKEMASK(6, 26)
-#define M_MAC_BYPASS_SEL _SB_MAKEMASK1(32)
-#define M_MAC_HDX_EN _SB_MAKEMASK1(33)
+#define M_MAC_BYPASS_SEL _SB_MAKEMASK1(32)
+#define M_MAC_HDX_EN _SB_MAKEMASK1(33)
-#define S_MAC_SPEED_SEL _SB_MAKE64(34)
-#define M_MAC_SPEED_SEL _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
+#define S_MAC_SPEED_SEL _SB_MAKE64(34)
+#define M_MAC_SPEED_SEL _SB_MAKEMASK(2, S_MAC_SPEED_SEL)
#define V_MAC_SPEED_SEL(x) _SB_MAKEVALUE(x, S_MAC_SPEED_SEL)
#define G_MAC_SPEED_SEL(x) _SB_GETVALUE(x, S_MAC_SPEED_SEL, M_MAC_SPEED_SEL)
-#define K_MAC_SPEED_SEL_10MBPS 0
-#define K_MAC_SPEED_SEL_100MBPS 1
+#define K_MAC_SPEED_SEL_10MBPS 0
+#define K_MAC_SPEED_SEL_100MBPS 1
#define K_MAC_SPEED_SEL_1000MBPS 2
#define K_MAC_SPEED_SEL_RESERVED 3
-#define V_MAC_SPEED_SEL_10MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
-#define V_MAC_SPEED_SEL_100MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
+#define V_MAC_SPEED_SEL_10MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_10MBPS)
+#define V_MAC_SPEED_SEL_100MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_100MBPS)
#define V_MAC_SPEED_SEL_1000MBPS V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_1000MBPS)
#define V_MAC_SPEED_SEL_RESERVED V_MAC_SPEED_SEL(K_MAC_SPEED_SEL_RESERVED)
-#define M_MAC_TX_CLK_EDGE_SEL _SB_MAKEMASK1(36)
-#define M_MAC_LOOPBACK_SEL _SB_MAKEMASK1(37)
-#define M_MAC_FAST_SYNC _SB_MAKEMASK1(38)
-#define M_MAC_SS_EN _SB_MAKEMASK1(39)
+#define M_MAC_TX_CLK_EDGE_SEL _SB_MAKEMASK1(36)
+#define M_MAC_LOOPBACK_SEL _SB_MAKEMASK1(37)
+#define M_MAC_FAST_SYNC _SB_MAKEMASK1(38)
+#define M_MAC_SS_EN _SB_MAKEMASK1(39)
#define S_MAC_BYPASS_CFG _SB_MAKE64(40)
-#define M_MAC_BYPASS_CFG _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
-#define V_MAC_BYPASS_CFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
-#define G_MAC_BYPASS_CFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
+#define M_MAC_BYPASS_CFG _SB_MAKEMASK(2, S_MAC_BYPASS_CFG)
+#define V_MAC_BYPASS_CFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_CFG)
+#define G_MAC_BYPASS_CFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_CFG, M_MAC_BYPASS_CFG)
#define K_MAC_BYPASS_GMII 0
-#define K_MAC_BYPASS_ENCODED 1
-#define K_MAC_BYPASS_SOP 2
-#define K_MAC_BYPASS_EOP 3
+#define K_MAC_BYPASS_ENCODED 1
+#define K_MAC_BYPASS_SOP 2
+#define K_MAC_BYPASS_EOP 3
-#define M_MAC_BYPASS_16 _SB_MAKEMASK1(42)
+#define M_MAC_BYPASS_16 _SB_MAKEMASK1(42)
#define M_MAC_BYPASS_FCS_CHK _SB_MAKEMASK1(43)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
@@ -137,30 +137,30 @@
#define M_MAC_SPLIT_CH_SEL _SB_MAKEMASK1(45)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_MAC_BYPASS_IFG _SB_MAKE64(46)
-#define M_MAC_BYPASS_IFG _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
+#define S_MAC_BYPASS_IFG _SB_MAKE64(46)
+#define M_MAC_BYPASS_IFG _SB_MAKEMASK(8, S_MAC_BYPASS_IFG)
#define V_MAC_BYPASS_IFG(x) _SB_MAKEVALUE(x, S_MAC_BYPASS_IFG)
#define G_MAC_BYPASS_IFG(x) _SB_GETVALUE(x, S_MAC_BYPASS_IFG, M_MAC_BYPASS_IFG)
-#define K_MAC_FC_CMD_DISABLED 0
-#define K_MAC_FC_CMD_ENABLED 1
+#define K_MAC_FC_CMD_DISABLED 0
+#define K_MAC_FC_CMD_ENABLED 1
#define K_MAC_FC_CMD_ENAB_FALSECARR 2
-#define V_MAC_FC_CMD_DISABLED V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
-#define V_MAC_FC_CMD_ENABLED V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
+#define V_MAC_FC_CMD_DISABLED V_MAC_FC_CMD(K_MAC_FC_CMD_DISABLED)
+#define V_MAC_FC_CMD_ENABLED V_MAC_FC_CMD(K_MAC_FC_CMD_ENABLED)
#define V_MAC_FC_CMD_ENAB_FALSECARR V_MAC_FC_CMD(K_MAC_FC_CMD_ENAB_FALSECARR)
-#define M_MAC_FC_SEL _SB_MAKEMASK1(54)
+#define M_MAC_FC_SEL _SB_MAKEMASK1(54)
-#define S_MAC_FC_CMD _SB_MAKE64(55)
-#define M_MAC_FC_CMD _SB_MAKEMASK(2, S_MAC_FC_CMD)
-#define V_MAC_FC_CMD(x) _SB_MAKEVALUE(x, S_MAC_FC_CMD)
-#define G_MAC_FC_CMD(x) _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
+#define S_MAC_FC_CMD _SB_MAKE64(55)
+#define M_MAC_FC_CMD _SB_MAKEMASK(2, S_MAC_FC_CMD)
+#define V_MAC_FC_CMD(x) _SB_MAKEVALUE(x, S_MAC_FC_CMD)
+#define G_MAC_FC_CMD(x) _SB_GETVALUE(x, S_MAC_FC_CMD, M_MAC_FC_CMD)
-#define S_MAC_RX_CH_SEL _SB_MAKE64(57)
-#define M_MAC_RX_CH_SEL _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
-#define V_MAC_RX_CH_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
-#define G_MAC_RX_CH_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
+#define S_MAC_RX_CH_SEL _SB_MAKE64(57)
+#define M_MAC_RX_CH_SEL _SB_MAKEMASK(7, S_MAC_RX_CH_SEL)
+#define V_MAC_RX_CH_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_SEL)
+#define G_MAC_RX_CH_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_SEL, M_MAC_RX_CH_SEL)
/*
@@ -170,18 +170,18 @@
* Register: MAC_ENABLE_2
*/
-#define M_MAC_RXDMA_EN0 _SB_MAKEMASK1(0)
-#define M_MAC_RXDMA_EN1 _SB_MAKEMASK1(1)
-#define M_MAC_TXDMA_EN0 _SB_MAKEMASK1(4)
-#define M_MAC_TXDMA_EN1 _SB_MAKEMASK1(5)
+#define M_MAC_RXDMA_EN0 _SB_MAKEMASK1(0)
+#define M_MAC_RXDMA_EN1 _SB_MAKEMASK1(1)
+#define M_MAC_TXDMA_EN0 _SB_MAKEMASK1(4)
+#define M_MAC_TXDMA_EN1 _SB_MAKEMASK1(5)
-#define M_MAC_PORT_RESET _SB_MAKEMASK1(8)
+#define M_MAC_PORT_RESET _SB_MAKEMASK1(8)
#if (SIBYTE_HDR_FEATURE_CHIP(1250) || SIBYTE_HDR_FEATURE_CHIP(112x))
-#define M_MAC_RX_ENABLE _SB_MAKEMASK1(10)
-#define M_MAC_TX_ENABLE _SB_MAKEMASK1(11)
-#define M_MAC_BYP_RX_ENABLE _SB_MAKEMASK1(12)
-#define M_MAC_BYP_TX_ENABLE _SB_MAKEMASK1(13)
+#define M_MAC_RX_ENABLE _SB_MAKEMASK1(10)
+#define M_MAC_TX_ENABLE _SB_MAKEMASK1(11)
+#define M_MAC_BYP_RX_ENABLE _SB_MAKEMASK1(12)
+#define M_MAC_BYP_TX_ENABLE _SB_MAKEMASK1(13)
#endif
/*
@@ -203,13 +203,13 @@
#define S_MAC_TXD_WEIGHT0 _SB_MAKE64(0)
#define M_MAC_TXD_WEIGHT0 _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT0)
-#define V_MAC_TXD_WEIGHT0(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
-#define G_MAC_TXD_WEIGHT0(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
+#define V_MAC_TXD_WEIGHT0(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT0)
+#define G_MAC_TXD_WEIGHT0(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT0, M_MAC_TXD_WEIGHT0)
#define S_MAC_TXD_WEIGHT1 _SB_MAKE64(4)
#define M_MAC_TXD_WEIGHT1 _SB_MAKEMASK(4, S_MAC_TXD_WEIGHT1)
-#define V_MAC_TXD_WEIGHT1(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
-#define G_MAC_TXD_WEIGHT1(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
+#define V_MAC_TXD_WEIGHT1(x) _SB_MAKEVALUE(x, S_MAC_TXD_WEIGHT1)
+#define G_MAC_TXD_WEIGHT1(x) _SB_GETVALUE(x, S_MAC_TXD_WEIGHT1, M_MAC_TXD_WEIGHT1)
/*
* MAC Fifo Threshold registers (Table 9-14)
@@ -218,53 +218,53 @@
* Register: MAC_THRSH_CFG_2
*/
-#define S_MAC_TX_WR_THRSH _SB_MAKE64(0)
+#define S_MAC_TX_WR_THRSH _SB_MAKE64(0)
#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
-/* #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
+/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
+/* #define M_MAC_TX_WR_THRSH _SB_MAKEMASK(6, S_MAC_TX_WR_THRSH) */
#endif /* up to 1250 PASS1 */
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_TX_WR_THRSH _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
+#define M_MAC_TX_WR_THRSH _SB_MAKEMASK(7, S_MAC_TX_WR_THRSH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define V_MAC_TX_WR_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
-#define G_MAC_TX_WR_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
+#define V_MAC_TX_WR_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_WR_THRSH)
+#define G_MAC_TX_WR_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_WR_THRSH, M_MAC_TX_WR_THRSH)
-#define S_MAC_TX_RD_THRSH _SB_MAKE64(8)
+#define S_MAC_TX_RD_THRSH _SB_MAKE64(8)
#if SIBYTE_HDR_FEATURE_UP_TO(1250, PASS1)
-/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
-/* #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
+/* XXX: Can't enable, as it has the same name as a pass2+ define below. */
+/* #define M_MAC_TX_RD_THRSH _SB_MAKEMASK(6, S_MAC_TX_RD_THRSH) */
#endif /* up to 1250 PASS1 */
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_TX_RD_THRSH _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
+#define M_MAC_TX_RD_THRSH _SB_MAKEMASK(7, S_MAC_TX_RD_THRSH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define V_MAC_TX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
-#define G_MAC_TX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
+#define V_MAC_TX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RD_THRSH)
+#define G_MAC_TX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RD_THRSH, M_MAC_TX_RD_THRSH)
-#define S_MAC_TX_RL_THRSH _SB_MAKE64(16)
-#define M_MAC_TX_RL_THRSH _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
-#define V_MAC_TX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
-#define G_MAC_TX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
+#define S_MAC_TX_RL_THRSH _SB_MAKE64(16)
+#define M_MAC_TX_RL_THRSH _SB_MAKEMASK(4, S_MAC_TX_RL_THRSH)
+#define V_MAC_TX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_TX_RL_THRSH)
+#define G_MAC_TX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_TX_RL_THRSH, M_MAC_TX_RL_THRSH)
-#define S_MAC_RX_PL_THRSH _SB_MAKE64(24)
-#define M_MAC_RX_PL_THRSH _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
-#define V_MAC_RX_PL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
-#define G_MAC_RX_PL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
+#define S_MAC_RX_PL_THRSH _SB_MAKE64(24)
+#define M_MAC_RX_PL_THRSH _SB_MAKEMASK(6, S_MAC_RX_PL_THRSH)
+#define V_MAC_RX_PL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_PL_THRSH)
+#define G_MAC_RX_PL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_PL_THRSH, M_MAC_RX_PL_THRSH)
-#define S_MAC_RX_RD_THRSH _SB_MAKE64(32)
-#define M_MAC_RX_RD_THRSH _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
-#define V_MAC_RX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
-#define G_MAC_RX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
+#define S_MAC_RX_RD_THRSH _SB_MAKE64(32)
+#define M_MAC_RX_RD_THRSH _SB_MAKEMASK(6, S_MAC_RX_RD_THRSH)
+#define V_MAC_RX_RD_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RD_THRSH)
+#define G_MAC_RX_RD_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RD_THRSH, M_MAC_RX_RD_THRSH)
-#define S_MAC_RX_RL_THRSH _SB_MAKE64(40)
-#define M_MAC_RX_RL_THRSH _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
-#define V_MAC_RX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
-#define G_MAC_RX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
+#define S_MAC_RX_RL_THRSH _SB_MAKE64(40)
+#define M_MAC_RX_RL_THRSH _SB_MAKEMASK(6, S_MAC_RX_RL_THRSH)
+#define V_MAC_RX_RL_THRSH(x) _SB_MAKEVALUE(x, S_MAC_RX_RL_THRSH)
+#define G_MAC_RX_RL_THRSH(x) _SB_GETVALUE(x, S_MAC_RX_RL_THRSH, M_MAC_RX_RL_THRSH)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_ENC_FC_THRSH _SB_MAKE64(56)
-#define M_MAC_ENC_FC_THRSH _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
-#define V_MAC_ENC_FC_THRSH(x) _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
-#define G_MAC_ENC_FC_THRSH(x) _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
+#define S_MAC_ENC_FC_THRSH _SB_MAKE64(56)
+#define M_MAC_ENC_FC_THRSH _SB_MAKEMASK(6, S_MAC_ENC_FC_THRSH)
+#define V_MAC_ENC_FC_THRSH(x) _SB_MAKEVALUE(x, S_MAC_ENC_FC_THRSH)
+#define G_MAC_ENC_FC_THRSH(x) _SB_GETVALUE(x, S_MAC_ENC_FC_THRSH, M_MAC_ENC_FC_THRSH)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
/*
@@ -275,79 +275,79 @@
*/
/* XXXCGD: ??? Unused in pass2? */
-#define S_MAC_IFG_RX _SB_MAKE64(0)
-#define M_MAC_IFG_RX _SB_MAKEMASK(6, S_MAC_IFG_RX)
-#define V_MAC_IFG_RX(x) _SB_MAKEVALUE(x, S_MAC_IFG_RX)
-#define G_MAC_IFG_RX(x) _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
+#define S_MAC_IFG_RX _SB_MAKE64(0)
+#define M_MAC_IFG_RX _SB_MAKEMASK(6, S_MAC_IFG_RX)
+#define V_MAC_IFG_RX(x) _SB_MAKEVALUE(x, S_MAC_IFG_RX)
+#define G_MAC_IFG_RX(x) _SB_GETVALUE(x, S_MAC_IFG_RX, M_MAC_IFG_RX)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_PRE_LEN _SB_MAKE64(0)
-#define M_MAC_PRE_LEN _SB_MAKEMASK(6, S_MAC_PRE_LEN)
-#define V_MAC_PRE_LEN(x) _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
-#define G_MAC_PRE_LEN(x) _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
+#define S_MAC_PRE_LEN _SB_MAKE64(0)
+#define M_MAC_PRE_LEN _SB_MAKEMASK(6, S_MAC_PRE_LEN)
+#define V_MAC_PRE_LEN(x) _SB_MAKEVALUE(x, S_MAC_PRE_LEN)
+#define G_MAC_PRE_LEN(x) _SB_GETVALUE(x, S_MAC_PRE_LEN, M_MAC_PRE_LEN)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_MAC_IFG_TX _SB_MAKE64(6)
-#define M_MAC_IFG_TX _SB_MAKEMASK(6, S_MAC_IFG_TX)
-#define V_MAC_IFG_TX(x) _SB_MAKEVALUE(x, S_MAC_IFG_TX)
-#define G_MAC_IFG_TX(x) _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
-
-#define S_MAC_IFG_THRSH _SB_MAKE64(12)
-#define M_MAC_IFG_THRSH _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
-#define V_MAC_IFG_THRSH(x) _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
-#define G_MAC_IFG_THRSH(x) _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
-
-#define S_MAC_BACKOFF_SEL _SB_MAKE64(18)
-#define M_MAC_BACKOFF_SEL _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
-#define V_MAC_BACKOFF_SEL(x) _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
-#define G_MAC_BACKOFF_SEL(x) _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
-
-#define S_MAC_LFSR_SEED _SB_MAKE64(22)
-#define M_MAC_LFSR_SEED _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
-#define V_MAC_LFSR_SEED(x) _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
-#define G_MAC_LFSR_SEED(x) _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
-
-#define S_MAC_SLOT_SIZE _SB_MAKE64(30)
-#define M_MAC_SLOT_SIZE _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
-#define V_MAC_SLOT_SIZE(x) _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
-#define G_MAC_SLOT_SIZE(x) _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
-
-#define S_MAC_MIN_FRAMESZ _SB_MAKE64(40)
-#define M_MAC_MIN_FRAMESZ _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
-#define V_MAC_MIN_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
-#define G_MAC_MIN_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
-
-#define S_MAC_MAX_FRAMESZ _SB_MAKE64(48)
-#define M_MAC_MAX_FRAMESZ _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
-#define V_MAC_MAX_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
-#define G_MAC_MAX_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
+#define S_MAC_IFG_TX _SB_MAKE64(6)
+#define M_MAC_IFG_TX _SB_MAKEMASK(6, S_MAC_IFG_TX)
+#define V_MAC_IFG_TX(x) _SB_MAKEVALUE(x, S_MAC_IFG_TX)
+#define G_MAC_IFG_TX(x) _SB_GETVALUE(x, S_MAC_IFG_TX, M_MAC_IFG_TX)
+
+#define S_MAC_IFG_THRSH _SB_MAKE64(12)
+#define M_MAC_IFG_THRSH _SB_MAKEMASK(6, S_MAC_IFG_THRSH)
+#define V_MAC_IFG_THRSH(x) _SB_MAKEVALUE(x, S_MAC_IFG_THRSH)
+#define G_MAC_IFG_THRSH(x) _SB_GETVALUE(x, S_MAC_IFG_THRSH, M_MAC_IFG_THRSH)
+
+#define S_MAC_BACKOFF_SEL _SB_MAKE64(18)
+#define M_MAC_BACKOFF_SEL _SB_MAKEMASK(4, S_MAC_BACKOFF_SEL)
+#define V_MAC_BACKOFF_SEL(x) _SB_MAKEVALUE(x, S_MAC_BACKOFF_SEL)
+#define G_MAC_BACKOFF_SEL(x) _SB_GETVALUE(x, S_MAC_BACKOFF_SEL, M_MAC_BACKOFF_SEL)
+
+#define S_MAC_LFSR_SEED _SB_MAKE64(22)
+#define M_MAC_LFSR_SEED _SB_MAKEMASK(8, S_MAC_LFSR_SEED)
+#define V_MAC_LFSR_SEED(x) _SB_MAKEVALUE(x, S_MAC_LFSR_SEED)
+#define G_MAC_LFSR_SEED(x) _SB_GETVALUE(x, S_MAC_LFSR_SEED, M_MAC_LFSR_SEED)
+
+#define S_MAC_SLOT_SIZE _SB_MAKE64(30)
+#define M_MAC_SLOT_SIZE _SB_MAKEMASK(10, S_MAC_SLOT_SIZE)
+#define V_MAC_SLOT_SIZE(x) _SB_MAKEVALUE(x, S_MAC_SLOT_SIZE)
+#define G_MAC_SLOT_SIZE(x) _SB_GETVALUE(x, S_MAC_SLOT_SIZE, M_MAC_SLOT_SIZE)
+
+#define S_MAC_MIN_FRAMESZ _SB_MAKE64(40)
+#define M_MAC_MIN_FRAMESZ _SB_MAKEMASK(8, S_MAC_MIN_FRAMESZ)
+#define V_MAC_MIN_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MIN_FRAMESZ)
+#define G_MAC_MIN_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MIN_FRAMESZ, M_MAC_MIN_FRAMESZ)
+
+#define S_MAC_MAX_FRAMESZ _SB_MAKE64(48)
+#define M_MAC_MAX_FRAMESZ _SB_MAKEMASK(16, S_MAC_MAX_FRAMESZ)
+#define V_MAC_MAX_FRAMESZ(x) _SB_MAKEVALUE(x, S_MAC_MAX_FRAMESZ)
+#define G_MAC_MAX_FRAMESZ(x) _SB_GETVALUE(x, S_MAC_MAX_FRAMESZ, M_MAC_MAX_FRAMESZ)
/*
* These constants are used to configure the fields within the Frame
* Configuration Register.
*/
-#define K_MAC_IFG_RX_10 _SB_MAKE64(0) /* See table 176, not used */
-#define K_MAC_IFG_RX_100 _SB_MAKE64(0)
-#define K_MAC_IFG_RX_1000 _SB_MAKE64(0)
+#define K_MAC_IFG_RX_10 _SB_MAKE64(0) /* See table 176, not used */
+#define K_MAC_IFG_RX_100 _SB_MAKE64(0)
+#define K_MAC_IFG_RX_1000 _SB_MAKE64(0)
-#define K_MAC_IFG_TX_10 _SB_MAKE64(20)
-#define K_MAC_IFG_TX_100 _SB_MAKE64(20)
-#define K_MAC_IFG_TX_1000 _SB_MAKE64(8)
+#define K_MAC_IFG_TX_10 _SB_MAKE64(20)
+#define K_MAC_IFG_TX_100 _SB_MAKE64(20)
+#define K_MAC_IFG_TX_1000 _SB_MAKE64(8)
-#define K_MAC_IFG_THRSH_10 _SB_MAKE64(4)
-#define K_MAC_IFG_THRSH_100 _SB_MAKE64(4)
-#define K_MAC_IFG_THRSH_1000 _SB_MAKE64(0)
+#define K_MAC_IFG_THRSH_10 _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_100 _SB_MAKE64(4)
+#define K_MAC_IFG_THRSH_1000 _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_10 _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_100 _SB_MAKE64(0)
-#define K_MAC_SLOT_SIZE_1000 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_10 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_100 _SB_MAKE64(0)
+#define K_MAC_SLOT_SIZE_1000 _SB_MAKE64(0)
-#define V_MAC_IFG_RX_10 V_MAC_IFG_RX(K_MAC_IFG_RX_10)
+#define V_MAC_IFG_RX_10 V_MAC_IFG_RX(K_MAC_IFG_RX_10)
#define V_MAC_IFG_RX_100 V_MAC_IFG_RX(K_MAC_IFG_RX_100)
#define V_MAC_IFG_RX_1000 V_MAC_IFG_RX(K_MAC_IFG_RX_1000)
-#define V_MAC_IFG_TX_10 V_MAC_IFG_TX(K_MAC_IFG_TX_10)
+#define V_MAC_IFG_TX_10 V_MAC_IFG_TX(K_MAC_IFG_TX_10)
#define V_MAC_IFG_TX_100 V_MAC_IFG_TX(K_MAC_IFG_TX_100)
#define V_MAC_IFG_TX_1000 V_MAC_IFG_TX(K_MAC_IFG_TX_1000)
@@ -359,15 +359,15 @@
#define V_MAC_SLOT_SIZE_100 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_100)
#define V_MAC_SLOT_SIZE_1000 V_MAC_SLOT_SIZE(K_MAC_SLOT_SIZE_1000)
-#define K_MAC_MIN_FRAMESZ_FIFO _SB_MAKE64(9)
+#define K_MAC_MIN_FRAMESZ_FIFO _SB_MAKE64(9)
#define K_MAC_MIN_FRAMESZ_DEFAULT _SB_MAKE64(64)
#define K_MAC_MAX_FRAMESZ_DEFAULT _SB_MAKE64(1518)
-#define K_MAC_MAX_FRAMESZ_JUMBO _SB_MAKE64(9216)
+#define K_MAC_MAX_FRAMESZ_JUMBO _SB_MAKE64(9216)
-#define V_MAC_MIN_FRAMESZ_FIFO V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
+#define V_MAC_MIN_FRAMESZ_FIFO V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_FIFO)
#define V_MAC_MIN_FRAMESZ_DEFAULT V_MAC_MIN_FRAMESZ(K_MAC_MIN_FRAMESZ_DEFAULT)
#define V_MAC_MAX_FRAMESZ_DEFAULT V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_DEFAULT)
-#define V_MAC_MAX_FRAMESZ_JUMBO V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
+#define V_MAC_MAX_FRAMESZ_JUMBO V_MAC_MAX_FRAMESZ(K_MAC_MAX_FRAMESZ_JUMBO)
/*
* MAC VLAN Tag Registers (Table 9-16)
@@ -376,23 +376,23 @@
* Register: MAC_VLANTAG_2
*/
-#define S_MAC_VLAN_TAG _SB_MAKE64(0)
-#define M_MAC_VLAN_TAG _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
-#define V_MAC_VLAN_TAG(x) _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
-#define G_MAC_VLAN_TAG(x) _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
+#define S_MAC_VLAN_TAG _SB_MAKE64(0)
+#define M_MAC_VLAN_TAG _SB_MAKEMASK(32, S_MAC_VLAN_TAG)
+#define V_MAC_VLAN_TAG(x) _SB_MAKEVALUE(x, S_MAC_VLAN_TAG)
+#define G_MAC_VLAN_TAG(x) _SB_GETVALUE(x, S_MAC_VLAN_TAG, M_MAC_VLAN_TAG)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define S_MAC_TX_PKT_OFFSET _SB_MAKE64(32)
-#define M_MAC_TX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
-#define V_MAC_TX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
-#define G_MAC_TX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
+#define S_MAC_TX_PKT_OFFSET _SB_MAKE64(32)
+#define M_MAC_TX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_TX_PKT_OFFSET)
+#define V_MAC_TX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_PKT_OFFSET)
+#define G_MAC_TX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_PKT_OFFSET, M_MAC_TX_PKT_OFFSET)
-#define S_MAC_TX_CRC_OFFSET _SB_MAKE64(40)
-#define M_MAC_TX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
-#define V_MAC_TX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
-#define G_MAC_TX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
+#define S_MAC_TX_CRC_OFFSET _SB_MAKE64(40)
+#define M_MAC_TX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_TX_CRC_OFFSET)
+#define V_MAC_TX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_TX_CRC_OFFSET)
+#define G_MAC_TX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_TX_CRC_OFFSET, M_MAC_TX_CRC_OFFSET)
-#define M_MAC_CH_BASE_FC_EN _SB_MAKEMASK1(48)
+#define M_MAC_CH_BASE_FC_EN _SB_MAKEMASK1(48)
#endif /* 1250 PASS3 || 112x PASS1 */
/*
@@ -412,29 +412,29 @@
* on each channel.
*/
-#define S_MAC_RX_CH0 _SB_MAKE64(0)
-#define S_MAC_RX_CH1 _SB_MAKE64(8)
-#define S_MAC_TX_CH0 _SB_MAKE64(16)
-#define S_MAC_TX_CH1 _SB_MAKE64(24)
+#define S_MAC_RX_CH0 _SB_MAKE64(0)
+#define S_MAC_RX_CH1 _SB_MAKE64(8)
+#define S_MAC_TX_CH0 _SB_MAKE64(16)
+#define S_MAC_TX_CH1 _SB_MAKE64(24)
#define S_MAC_TXCHANNELS _SB_MAKE64(16) /* this is 1st TX chan */
-#define S_MAC_CHANWIDTH _SB_MAKE64(8) /* bits between channels */
+#define S_MAC_CHANWIDTH _SB_MAKE64(8) /* bits between channels */
/*
- * These are the same as RX channel 0. The idea here
+ * These are the same as RX channel 0. The idea here
* is that you'll use one of the "S_" things above
* and pass just the six bits to a DMA-channel-specific ISR
*/
-#define M_MAC_INT_CHANNEL _SB_MAKEMASK(8, 0)
-#define M_MAC_INT_EOP_COUNT _SB_MAKEMASK1(0)
-#define M_MAC_INT_EOP_TIMER _SB_MAKEMASK1(1)
-#define M_MAC_INT_EOP_SEEN _SB_MAKEMASK1(2)
-#define M_MAC_INT_HWM _SB_MAKEMASK1(3)
-#define M_MAC_INT_LWM _SB_MAKEMASK1(4)
-#define M_MAC_INT_DSCR _SB_MAKEMASK1(5)
-#define M_MAC_INT_ERR _SB_MAKEMASK1(6)
-#define M_MAC_INT_DZERO _SB_MAKEMASK1(7) /* only for TX channels */
-#define M_MAC_INT_DROP _SB_MAKEMASK1(7) /* only for RX channels */
+#define M_MAC_INT_CHANNEL _SB_MAKEMASK(8, 0)
+#define M_MAC_INT_EOP_COUNT _SB_MAKEMASK1(0)
+#define M_MAC_INT_EOP_TIMER _SB_MAKEMASK1(1)
+#define M_MAC_INT_EOP_SEEN _SB_MAKEMASK1(2)
+#define M_MAC_INT_HWM _SB_MAKEMASK1(3)
+#define M_MAC_INT_LWM _SB_MAKEMASK1(4)
+#define M_MAC_INT_DSCR _SB_MAKEMASK1(5)
+#define M_MAC_INT_ERR _SB_MAKEMASK1(6)
+#define M_MAC_INT_DZERO _SB_MAKEMASK1(7) /* only for TX channels */
+#define M_MAC_INT_DROP _SB_MAKEMASK1(7) /* only for RX channels */
/*
* In the following definitions we use ch (0/1) and txrx (TX=1, RX=0, see
@@ -442,34 +442,34 @@
*/
#define S_MAC_STATUS_CH_OFFSET(ch, txrx) _SB_MAKE64(((ch) + 2 * (txrx)) * S_MAC_CHANWIDTH)
-#define M_MAC_STATUS_CHANNEL(ch, txrx) _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_CHANNEL(ch, txrx) _SB_MAKEVALUE(_SB_MAKEMASK(8, 0), S_MAC_STATUS_CH_OFFSET(ch, txrx))
#define M_MAC_STATUS_EOP_COUNT(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_COUNT, S_MAC_STATUS_CH_OFFSET(ch, txrx))
#define M_MAC_STATUS_EOP_TIMER(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_TIMER, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_EOP_SEEN(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_HWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_LWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DSCR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_ERR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DZERO(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_DROP(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
-#define M_MAC_STATUS_OTHER_ERR _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
-
-
-#define M_MAC_RX_UNDRFL _SB_MAKEMASK1(40)
-#define M_MAC_RX_OVRFL _SB_MAKEMASK1(41)
-#define M_MAC_TX_UNDRFL _SB_MAKEMASK1(42)
-#define M_MAC_TX_OVRFL _SB_MAKEMASK1(43)
-#define M_MAC_LTCOL_ERR _SB_MAKEMASK1(44)
-#define M_MAC_EXCOL_ERR _SB_MAKEMASK1(45)
-#define M_MAC_CNTR_OVRFL_ERR _SB_MAKEMASK1(46)
+#define M_MAC_STATUS_EOP_SEEN(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_EOP_SEEN, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_HWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_HWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_LWM(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_LWM, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DSCR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DSCR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_ERR(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_ERR, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DZERO(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DZERO, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_DROP(ch, txrx) _SB_MAKEVALUE(M_MAC_INT_DROP, S_MAC_STATUS_CH_OFFSET(ch, txrx))
+#define M_MAC_STATUS_OTHER_ERR _SB_MAKEVALUE(_SB_MAKEMASK(7, 0), 40)
+
+
+#define M_MAC_RX_UNDRFL _SB_MAKEMASK1(40)
+#define M_MAC_RX_OVRFL _SB_MAKEMASK1(41)
+#define M_MAC_TX_UNDRFL _SB_MAKEMASK1(42)
+#define M_MAC_TX_OVRFL _SB_MAKEMASK1(43)
+#define M_MAC_LTCOL_ERR _SB_MAKEMASK1(44)
+#define M_MAC_EXCOL_ERR _SB_MAKEMASK1(45)
+#define M_MAC_CNTR_OVRFL_ERR _SB_MAKEMASK1(46)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_MAC_SPLIT_EN _SB_MAKEMASK1(47) /* interrupt mask only */
+#define M_MAC_SPLIT_EN _SB_MAKEMASK1(47) /* interrupt mask only */
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define S_MAC_COUNTER_ADDR _SB_MAKE64(47)
-#define M_MAC_COUNTER_ADDR _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
-#define V_MAC_COUNTER_ADDR(x) _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
-#define G_MAC_COUNTER_ADDR(x) _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
+#define S_MAC_COUNTER_ADDR _SB_MAKE64(47)
+#define M_MAC_COUNTER_ADDR _SB_MAKEMASK(5, S_MAC_COUNTER_ADDR)
+#define V_MAC_COUNTER_ADDR(x) _SB_MAKEVALUE(x, S_MAC_COUNTER_ADDR)
+#define G_MAC_COUNTER_ADDR(x) _SB_GETVALUE(x, S_MAC_COUNTER_ADDR, M_MAC_COUNTER_ADDR)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_MAC_TX_PAUSE_ON _SB_MAKEMASK1(52)
@@ -482,42 +482,42 @@
* Register: MAC_FIFO_PTRS_2
*/
-#define S_MAC_TX_WRPTR _SB_MAKE64(0)
-#define M_MAC_TX_WRPTR _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
-#define V_MAC_TX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
-#define G_MAC_TX_WRPTR(x) _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
+#define S_MAC_TX_WRPTR _SB_MAKE64(0)
+#define M_MAC_TX_WRPTR _SB_MAKEMASK(6, S_MAC_TX_WRPTR)
+#define V_MAC_TX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_WRPTR)
+#define G_MAC_TX_WRPTR(x) _SB_GETVALUE(x, S_MAC_TX_WRPTR, M_MAC_TX_WRPTR)
-#define S_MAC_TX_RDPTR _SB_MAKE64(8)
-#define M_MAC_TX_RDPTR _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
-#define V_MAC_TX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
-#define G_MAC_TX_RDPTR(x) _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
+#define S_MAC_TX_RDPTR _SB_MAKE64(8)
+#define M_MAC_TX_RDPTR _SB_MAKEMASK(6, S_MAC_TX_RDPTR)
+#define V_MAC_TX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_TX_RDPTR)
+#define G_MAC_TX_RDPTR(x) _SB_GETVALUE(x, S_MAC_TX_RDPTR, M_MAC_TX_RDPTR)
-#define S_MAC_RX_WRPTR _SB_MAKE64(16)
-#define M_MAC_RX_WRPTR _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
-#define V_MAC_RX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
-#define G_MAC_RX_WRPTR(x) _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_TX_WRPTR)
+#define S_MAC_RX_WRPTR _SB_MAKE64(16)
+#define M_MAC_RX_WRPTR _SB_MAKEMASK(6, S_MAC_RX_WRPTR)
+#define V_MAC_RX_WRPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_WRPTR)
+#define G_MAC_RX_WRPTR(x) _SB_GETVALUE(x, S_MAC_RX_WRPTR, M_MAC_TX_WRPTR)
-#define S_MAC_RX_RDPTR _SB_MAKE64(24)
-#define M_MAC_RX_RDPTR _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
-#define V_MAC_RX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
-#define G_MAC_RX_RDPTR(x) _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_TX_RDPTR)
+#define S_MAC_RX_RDPTR _SB_MAKE64(24)
+#define M_MAC_RX_RDPTR _SB_MAKEMASK(6, S_MAC_RX_RDPTR)
+#define V_MAC_RX_RDPTR(x) _SB_MAKEVALUE(x, S_MAC_RX_RDPTR)
+#define G_MAC_RX_RDPTR(x) _SB_GETVALUE(x, S_MAC_RX_RDPTR, M_MAC_TX_RDPTR)
/*
- * MAC Fifo End Of Packet Count Registers (Table 9-20) [Debug register]
+ * MAC Fifo End Of Packet Count Registers (Table 9-20) [Debug register]
* Register: MAC_EOPCNT_0
* Register: MAC_EOPCNT_1
* Register: MAC_EOPCNT_2
*/
-#define S_MAC_TX_EOP_COUNTER _SB_MAKE64(0)
-#define M_MAC_TX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
-#define V_MAC_TX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
-#define G_MAC_TX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
+#define S_MAC_TX_EOP_COUNTER _SB_MAKE64(0)
+#define M_MAC_TX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_TX_EOP_COUNTER)
+#define V_MAC_TX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_TX_EOP_COUNTER)
+#define G_MAC_TX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_TX_EOP_COUNTER, M_MAC_TX_EOP_COUNTER)
-#define S_MAC_RX_EOP_COUNTER _SB_MAKE64(8)
-#define M_MAC_RX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
-#define V_MAC_RX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
-#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
+#define S_MAC_RX_EOP_COUNTER _SB_MAKE64(8)
+#define M_MAC_RX_EOP_COUNTER _SB_MAKEMASK(6, S_MAC_RX_EOP_COUNTER)
+#define V_MAC_RX_EOP_COUNTER(x) _SB_MAKEVALUE(x, S_MAC_RX_EOP_COUNTER)
+#define G_MAC_RX_EOP_COUNTER(x) _SB_GETVALUE(x, S_MAC_RX_EOP_COUNTER, M_MAC_RX_EOP_COUNTER)
/*
* MAC Receive Address Filter Exact Match Registers (Table 9-21)
@@ -562,27 +562,27 @@
* Register: MAC_TYPE_CFG_2
*/
-#define S_TYPECFG_TYPESIZE _SB_MAKE64(16)
+#define S_TYPECFG_TYPESIZE _SB_MAKE64(16)
#define S_TYPECFG_TYPE0 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE0 _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
-#define V_TYPECFG_TYPE0(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
-#define G_TYPECFG_TYPE0(x) _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
+#define M_TYPECFG_TYPE0 _SB_MAKEMASK(16, S_TYPECFG_TYPE0)
+#define V_TYPECFG_TYPE0(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE0)
+#define G_TYPECFG_TYPE0(x) _SB_GETVALUE(x, S_TYPECFG_TYPE0, M_TYPECFG_TYPE0)
#define S_TYPECFG_TYPE1 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE1 _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
-#define V_TYPECFG_TYPE1(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
-#define G_TYPECFG_TYPE1(x) _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
+#define M_TYPECFG_TYPE1 _SB_MAKEMASK(16, S_TYPECFG_TYPE1)
+#define V_TYPECFG_TYPE1(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE1)
+#define G_TYPECFG_TYPE1(x) _SB_GETVALUE(x, S_TYPECFG_TYPE1, M_TYPECFG_TYPE1)
#define S_TYPECFG_TYPE2 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE2 _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
-#define V_TYPECFG_TYPE2(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
-#define G_TYPECFG_TYPE2(x) _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
+#define M_TYPECFG_TYPE2 _SB_MAKEMASK(16, S_TYPECFG_TYPE2)
+#define V_TYPECFG_TYPE2(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE2)
+#define G_TYPECFG_TYPE2(x) _SB_GETVALUE(x, S_TYPECFG_TYPE2, M_TYPECFG_TYPE2)
#define S_TYPECFG_TYPE3 _SB_MAKE64(0)
-#define M_TYPECFG_TYPE3 _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
-#define V_TYPECFG_TYPE3(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
-#define G_TYPECFG_TYPE3(x) _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
+#define M_TYPECFG_TYPE3 _SB_MAKEMASK(16, S_TYPECFG_TYPE3)
+#define V_TYPECFG_TYPE3(x) _SB_MAKEVALUE(x, S_TYPECFG_TYPE3)
+#define G_TYPECFG_TYPE3(x) _SB_GETVALUE(x, S_TYPECFG_TYPE3, M_TYPECFG_TYPE3)
/*
* MAC Receive Address Filter Control Registers (Table 9-24)
@@ -591,38 +591,38 @@
* Register: MAC_ADFILTER_CFG_2
*/
-#define M_MAC_ALLPKT_EN _SB_MAKEMASK1(0)
-#define M_MAC_UCAST_EN _SB_MAKEMASK1(1)
-#define M_MAC_UCAST_INV _SB_MAKEMASK1(2)
-#define M_MAC_MCAST_EN _SB_MAKEMASK1(3)
-#define M_MAC_MCAST_INV _SB_MAKEMASK1(4)
-#define M_MAC_BCAST_EN _SB_MAKEMASK1(5)
-#define M_MAC_DIRECT_INV _SB_MAKEMASK1(6)
+#define M_MAC_ALLPKT_EN _SB_MAKEMASK1(0)
+#define M_MAC_UCAST_EN _SB_MAKEMASK1(1)
+#define M_MAC_UCAST_INV _SB_MAKEMASK1(2)
+#define M_MAC_MCAST_EN _SB_MAKEMASK1(3)
+#define M_MAC_MCAST_INV _SB_MAKEMASK1(4)
+#define M_MAC_BCAST_EN _SB_MAKEMASK1(5)
+#define M_MAC_DIRECT_INV _SB_MAKEMASK1(6)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define M_MAC_ALLMCAST_EN _SB_MAKEMASK1(7)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define S_MAC_IPHDR_OFFSET _SB_MAKE64(8)
-#define M_MAC_IPHDR_OFFSET _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
+#define S_MAC_IPHDR_OFFSET _SB_MAKE64(8)
+#define M_MAC_IPHDR_OFFSET _SB_MAKEMASK(8, S_MAC_IPHDR_OFFSET)
#define V_MAC_IPHDR_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_IPHDR_OFFSET)
#define G_MAC_IPHDR_OFFSET(x) _SB_GETVALUE(x, S_MAC_IPHDR_OFFSET, M_MAC_IPHDR_OFFSET)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_MAC_RX_CRC_OFFSET _SB_MAKE64(16)
-#define M_MAC_RX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
+#define S_MAC_RX_CRC_OFFSET _SB_MAKE64(16)
+#define M_MAC_RX_CRC_OFFSET _SB_MAKEMASK(8, S_MAC_RX_CRC_OFFSET)
#define V_MAC_RX_CRC_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_CRC_OFFSET)
#define G_MAC_RX_CRC_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_CRC_OFFSET, M_MAC_RX_CRC_OFFSET)
-#define S_MAC_RX_PKT_OFFSET _SB_MAKE64(24)
-#define M_MAC_RX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
+#define S_MAC_RX_PKT_OFFSET _SB_MAKE64(24)
+#define M_MAC_RX_PKT_OFFSET _SB_MAKEMASK(8, S_MAC_RX_PKT_OFFSET)
#define V_MAC_RX_PKT_OFFSET(x) _SB_MAKEVALUE(x, S_MAC_RX_PKT_OFFSET)
#define G_MAC_RX_PKT_OFFSET(x) _SB_GETVALUE(x, S_MAC_RX_PKT_OFFSET, M_MAC_RX_PKT_OFFSET)
#define M_MAC_FWDPAUSE_EN _SB_MAKEMASK1(32)
#define M_MAC_VLAN_DET_EN _SB_MAKEMASK1(33)
-#define S_MAC_RX_CH_MSN_SEL _SB_MAKE64(34)
-#define M_MAC_RX_CH_MSN_SEL _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
+#define S_MAC_RX_CH_MSN_SEL _SB_MAKE64(34)
+#define M_MAC_RX_CH_MSN_SEL _SB_MAKEMASK(8, S_MAC_RX_CH_MSN_SEL)
#define V_MAC_RX_CH_MSN_SEL(x) _SB_MAKEVALUE(x, S_MAC_RX_CH_MSN_SEL)
#define G_MAC_RX_CH_MSN_SEL(x) _SB_GETVALUE(x, S_MAC_RX_CH_MSN_SEL, M_MAC_RX_CH_MSN_SEL)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
diff --git a/arch/mips/include/asm/sibyte/sb1250_mc.h b/arch/mips/include/asm/sibyte/sb1250_mc.h
index 15048dcaf22..8368e411131 100644
--- a/arch/mips/include/asm/sibyte/sb1250_mc.h
+++ b/arch/mips/include/asm/sibyte/sb1250_mc.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Memory Controller constants File: sb1250_mc.h
+ * Memory Controller constants File: sb1250_mc.h
*
* This module contains constants and macros useful for
* programming the memory controller.
@@ -39,96 +39,96 @@
* Memory Channel Config Register (table 6-14)
*/
-#define S_MC_RESERVED0 0
-#define M_MC_RESERVED0 _SB_MAKEMASK(8, S_MC_RESERVED0)
+#define S_MC_RESERVED0 0
+#define M_MC_RESERVED0 _SB_MAKEMASK(8, S_MC_RESERVED0)
-#define S_MC_CHANNEL_SEL 8
-#define M_MC_CHANNEL_SEL _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
-#define V_MC_CHANNEL_SEL(x) _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
-#define G_MC_CHANNEL_SEL(x) _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
+#define S_MC_CHANNEL_SEL 8
+#define M_MC_CHANNEL_SEL _SB_MAKEMASK(8, S_MC_CHANNEL_SEL)
+#define V_MC_CHANNEL_SEL(x) _SB_MAKEVALUE(x, S_MC_CHANNEL_SEL)
+#define G_MC_CHANNEL_SEL(x) _SB_GETVALUE(x, S_MC_CHANNEL_SEL, M_MC_CHANNEL_SEL)
-#define S_MC_BANK0_MAP 16
-#define M_MC_BANK0_MAP _SB_MAKEMASK(4, S_MC_BANK0_MAP)
-#define V_MC_BANK0_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
-#define G_MC_BANK0_MAP(x) _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
+#define S_MC_BANK0_MAP 16
+#define M_MC_BANK0_MAP _SB_MAKEMASK(4, S_MC_BANK0_MAP)
+#define V_MC_BANK0_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK0_MAP)
+#define G_MC_BANK0_MAP(x) _SB_GETVALUE(x, S_MC_BANK0_MAP, M_MC_BANK0_MAP)
-#define K_MC_BANK0_MAP_DEFAULT 0x00
-#define V_MC_BANK0_MAP_DEFAULT V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
+#define K_MC_BANK0_MAP_DEFAULT 0x00
+#define V_MC_BANK0_MAP_DEFAULT V_MC_BANK0_MAP(K_MC_BANK0_MAP_DEFAULT)
-#define S_MC_BANK1_MAP 20
-#define M_MC_BANK1_MAP _SB_MAKEMASK(4, S_MC_BANK1_MAP)
-#define V_MC_BANK1_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
-#define G_MC_BANK1_MAP(x) _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
+#define S_MC_BANK1_MAP 20
+#define M_MC_BANK1_MAP _SB_MAKEMASK(4, S_MC_BANK1_MAP)
+#define V_MC_BANK1_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK1_MAP)
+#define G_MC_BANK1_MAP(x) _SB_GETVALUE(x, S_MC_BANK1_MAP, M_MC_BANK1_MAP)
-#define K_MC_BANK1_MAP_DEFAULT 0x08
-#define V_MC_BANK1_MAP_DEFAULT V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
+#define K_MC_BANK1_MAP_DEFAULT 0x08
+#define V_MC_BANK1_MAP_DEFAULT V_MC_BANK1_MAP(K_MC_BANK1_MAP_DEFAULT)
-#define S_MC_BANK2_MAP 24
-#define M_MC_BANK2_MAP _SB_MAKEMASK(4, S_MC_BANK2_MAP)
-#define V_MC_BANK2_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
-#define G_MC_BANK2_MAP(x) _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
+#define S_MC_BANK2_MAP 24
+#define M_MC_BANK2_MAP _SB_MAKEMASK(4, S_MC_BANK2_MAP)
+#define V_MC_BANK2_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK2_MAP)
+#define G_MC_BANK2_MAP(x) _SB_GETVALUE(x, S_MC_BANK2_MAP, M_MC_BANK2_MAP)
-#define K_MC_BANK2_MAP_DEFAULT 0x09
-#define V_MC_BANK2_MAP_DEFAULT V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
+#define K_MC_BANK2_MAP_DEFAULT 0x09
+#define V_MC_BANK2_MAP_DEFAULT V_MC_BANK2_MAP(K_MC_BANK2_MAP_DEFAULT)
-#define S_MC_BANK3_MAP 28
-#define M_MC_BANK3_MAP _SB_MAKEMASK(4, S_MC_BANK3_MAP)
-#define V_MC_BANK3_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
-#define G_MC_BANK3_MAP(x) _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
+#define S_MC_BANK3_MAP 28
+#define M_MC_BANK3_MAP _SB_MAKEMASK(4, S_MC_BANK3_MAP)
+#define V_MC_BANK3_MAP(x) _SB_MAKEVALUE(x, S_MC_BANK3_MAP)
+#define G_MC_BANK3_MAP(x) _SB_GETVALUE(x, S_MC_BANK3_MAP, M_MC_BANK3_MAP)
-#define K_MC_BANK3_MAP_DEFAULT 0x0C
-#define V_MC_BANK3_MAP_DEFAULT V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
+#define K_MC_BANK3_MAP_DEFAULT 0x0C
+#define V_MC_BANK3_MAP_DEFAULT V_MC_BANK3_MAP(K_MC_BANK3_MAP_DEFAULT)
-#define M_MC_RESERVED1 _SB_MAKEMASK(8, 32)
+#define M_MC_RESERVED1 _SB_MAKEMASK(8, 32)
#define S_MC_QUEUE_SIZE 40
-#define M_MC_QUEUE_SIZE _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
-#define V_MC_QUEUE_SIZE(x) _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
-#define G_MC_QUEUE_SIZE(x) _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
-#define V_MC_QUEUE_SIZE_DEFAULT V_MC_QUEUE_SIZE(0x0A)
-
-#define S_MC_AGE_LIMIT 44
-#define M_MC_AGE_LIMIT _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
-#define V_MC_AGE_LIMIT(x) _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
-#define G_MC_AGE_LIMIT(x) _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
-#define V_MC_AGE_LIMIT_DEFAULT V_MC_AGE_LIMIT(8)
-
-#define S_MC_WR_LIMIT 48
-#define M_MC_WR_LIMIT _SB_MAKEMASK(4, S_MC_WR_LIMIT)
-#define V_MC_WR_LIMIT(x) _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
-#define G_MC_WR_LIMIT(x) _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
-#define V_MC_WR_LIMIT_DEFAULT V_MC_WR_LIMIT(5)
+#define M_MC_QUEUE_SIZE _SB_MAKEMASK(4, S_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE(x) _SB_MAKEVALUE(x, S_MC_QUEUE_SIZE)
+#define G_MC_QUEUE_SIZE(x) _SB_GETVALUE(x, S_MC_QUEUE_SIZE, M_MC_QUEUE_SIZE)
+#define V_MC_QUEUE_SIZE_DEFAULT V_MC_QUEUE_SIZE(0x0A)
+
+#define S_MC_AGE_LIMIT 44
+#define M_MC_AGE_LIMIT _SB_MAKEMASK(4, S_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT(x) _SB_MAKEVALUE(x, S_MC_AGE_LIMIT)
+#define G_MC_AGE_LIMIT(x) _SB_GETVALUE(x, S_MC_AGE_LIMIT, M_MC_AGE_LIMIT)
+#define V_MC_AGE_LIMIT_DEFAULT V_MC_AGE_LIMIT(8)
+
+#define S_MC_WR_LIMIT 48
+#define M_MC_WR_LIMIT _SB_MAKEMASK(4, S_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT(x) _SB_MAKEVALUE(x, S_MC_WR_LIMIT)
+#define G_MC_WR_LIMIT(x) _SB_GETVALUE(x, S_MC_WR_LIMIT, M_MC_WR_LIMIT)
+#define V_MC_WR_LIMIT_DEFAULT V_MC_WR_LIMIT(5)
#define M_MC_IOB1HIGHPRIORITY _SB_MAKEMASK1(52)
-#define M_MC_RESERVED2 _SB_MAKEMASK(3, 53)
+#define M_MC_RESERVED2 _SB_MAKEMASK(3, 53)
-#define S_MC_CS_MODE 56
-#define M_MC_CS_MODE _SB_MAKEMASK(4, S_MC_CS_MODE)
-#define V_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_MC_CS_MODE)
-#define G_MC_CS_MODE(x) _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
+#define S_MC_CS_MODE 56
+#define M_MC_CS_MODE _SB_MAKEMASK(4, S_MC_CS_MODE)
+#define V_MC_CS_MODE(x) _SB_MAKEVALUE(x, S_MC_CS_MODE)
+#define G_MC_CS_MODE(x) _SB_GETVALUE(x, S_MC_CS_MODE, M_MC_CS_MODE)
-#define K_MC_CS_MODE_MSB_CS 0
-#define K_MC_CS_MODE_INTLV_CS 15
+#define K_MC_CS_MODE_MSB_CS 0
+#define K_MC_CS_MODE_INTLV_CS 15
#define K_MC_CS_MODE_MIXED_CS_10 12
#define K_MC_CS_MODE_MIXED_CS_30 6
#define K_MC_CS_MODE_MIXED_CS_32 3
-#define V_MC_CS_MODE_MSB_CS V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
-#define V_MC_CS_MODE_INTLV_CS V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
+#define V_MC_CS_MODE_MSB_CS V_MC_CS_MODE(K_MC_CS_MODE_MSB_CS)
+#define V_MC_CS_MODE_INTLV_CS V_MC_CS_MODE(K_MC_CS_MODE_INTLV_CS)
#define V_MC_CS_MODE_MIXED_CS_10 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_10)
#define V_MC_CS_MODE_MIXED_CS_30 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_30)
#define V_MC_CS_MODE_MIXED_CS_32 V_MC_CS_MODE(K_MC_CS_MODE_MIXED_CS_32)
-#define M_MC_ECC_DISABLE _SB_MAKEMASK1(60)
-#define M_MC_BERR_DISABLE _SB_MAKEMASK1(61)
-#define M_MC_FORCE_SEQ _SB_MAKEMASK1(62)
-#define M_MC_DEBUG _SB_MAKEMASK1(63)
+#define M_MC_ECC_DISABLE _SB_MAKEMASK1(60)
+#define M_MC_BERR_DISABLE _SB_MAKEMASK1(61)
+#define M_MC_FORCE_SEQ _SB_MAKEMASK1(62)
+#define M_MC_DEBUG _SB_MAKEMASK1(63)
-#define V_MC_CONFIG_DEFAULT V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
+#define V_MC_CONFIG_DEFAULT V_MC_WR_LIMIT_DEFAULT | V_MC_AGE_LIMIT_DEFAULT | \
V_MC_BANK0_MAP_DEFAULT | V_MC_BANK1_MAP_DEFAULT | \
V_MC_BANK2_MAP_DEFAULT | V_MC_BANK3_MAP_DEFAULT | V_MC_CHANNEL_SEL(0) | \
- M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
+ M_MC_IOB1HIGHPRIORITY | V_MC_QUEUE_SIZE_DEFAULT
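Editorial note (not part of the patch): V_MC_CONFIG_DEFAULT expands to an un-parenthesized chain of ORs, so it is safest used alone on the right-hand side of an assignment and adjusted afterwards rather than combined inline with other operators. A hedged usage sketch; IOADDR(), A_MC_REGISTER(), R_MC_CONFIG and __raw_writeq() come from the companion sibyte and io headers, and the exact call sequence is an assumption of this example:

    #include <linux/types.h>
    #include <asm/io.h>
    #include <asm/sibyte/sb1250.h>
    #include <asm/sibyte/sb1250_regs.h>
    #include <asm/sibyte/sb1250_mc.h>

    static void mc0_write_default_config(void)
    {
            u64 cfg = V_MC_CONFIG_DEFAULT;  /* defaults from this header */

            cfg |= M_MC_ECC_DISABLE;        /* example tweak: run without ECC */
            __raw_writeq(cfg, IOADDR(A_MC_REGISTER(0, R_MC_CONFIG)));
    }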
/*
@@ -137,96 +137,96 @@
* Note: this field has been updated to be consistent with the errata to 0.2
*/
-#define S_MC_CLK_RATIO 0
-#define M_MC_CLK_RATIO _SB_MAKEMASK(4, S_MC_CLK_RATIO)
-#define V_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
-#define G_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
+#define S_MC_CLK_RATIO 0
+#define M_MC_CLK_RATIO _SB_MAKEMASK(4, S_MC_CLK_RATIO)
+#define V_MC_CLK_RATIO(x) _SB_MAKEVALUE(x, S_MC_CLK_RATIO)
+#define G_MC_CLK_RATIO(x) _SB_GETVALUE(x, S_MC_CLK_RATIO, M_MC_CLK_RATIO)
-#define K_MC_CLK_RATIO_2X 4
-#define K_MC_CLK_RATIO_25X 5
-#define K_MC_CLK_RATIO_3X 6
-#define K_MC_CLK_RATIO_35X 7
-#define K_MC_CLK_RATIO_4X 8
+#define K_MC_CLK_RATIO_2X 4
+#define K_MC_CLK_RATIO_25X 5
+#define K_MC_CLK_RATIO_3X 6
+#define K_MC_CLK_RATIO_35X 7
+#define K_MC_CLK_RATIO_4X 8
#define K_MC_CLK_RATIO_45X 9
#define V_MC_CLK_RATIO_2X V_MC_CLK_RATIO(K_MC_CLK_RATIO_2X)
-#define V_MC_CLK_RATIO_25X V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
-#define V_MC_CLK_RATIO_3X V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
-#define V_MC_CLK_RATIO_35X V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
-#define V_MC_CLK_RATIO_4X V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
-#define V_MC_CLK_RATIO_45X V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
-#define V_MC_CLK_RATIO_DEFAULT V_MC_CLK_RATIO_25X
-
-#define S_MC_REF_RATE 8
-#define M_MC_REF_RATE _SB_MAKEMASK(8, S_MC_REF_RATE)
-#define V_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_MC_REF_RATE)
-#define G_MC_REF_RATE(x) _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
-
-#define K_MC_REF_RATE_100MHz 0x62
-#define K_MC_REF_RATE_133MHz 0x81
-#define K_MC_REF_RATE_200MHz 0xC4
-
-#define V_MC_REF_RATE_100MHz V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
-#define V_MC_REF_RATE_133MHz V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
-#define V_MC_REF_RATE_200MHz V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
-#define V_MC_REF_RATE_DEFAULT V_MC_REF_RATE_100MHz
-
-#define S_MC_CLOCK_DRIVE 16
-#define M_MC_CLOCK_DRIVE _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
-#define V_MC_CLOCK_DRIVE(x) _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
-#define G_MC_CLOCK_DRIVE(x) _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
+#define V_MC_CLK_RATIO_25X V_MC_CLK_RATIO(K_MC_CLK_RATIO_25X)
+#define V_MC_CLK_RATIO_3X V_MC_CLK_RATIO(K_MC_CLK_RATIO_3X)
+#define V_MC_CLK_RATIO_35X V_MC_CLK_RATIO(K_MC_CLK_RATIO_35X)
+#define V_MC_CLK_RATIO_4X V_MC_CLK_RATIO(K_MC_CLK_RATIO_4X)
+#define V_MC_CLK_RATIO_45X V_MC_CLK_RATIO(K_MC_CLK_RATIO_45X)
+#define V_MC_CLK_RATIO_DEFAULT V_MC_CLK_RATIO_25X
+
+#define S_MC_REF_RATE 8
+#define M_MC_REF_RATE _SB_MAKEMASK(8, S_MC_REF_RATE)
+#define V_MC_REF_RATE(x) _SB_MAKEVALUE(x, S_MC_REF_RATE)
+#define G_MC_REF_RATE(x) _SB_GETVALUE(x, S_MC_REF_RATE, M_MC_REF_RATE)
+
+#define K_MC_REF_RATE_100MHz 0x62
+#define K_MC_REF_RATE_133MHz 0x81
+#define K_MC_REF_RATE_200MHz 0xC4
+
+#define V_MC_REF_RATE_100MHz V_MC_REF_RATE(K_MC_REF_RATE_100MHz)
+#define V_MC_REF_RATE_133MHz V_MC_REF_RATE(K_MC_REF_RATE_133MHz)
+#define V_MC_REF_RATE_200MHz V_MC_REF_RATE(K_MC_REF_RATE_200MHz)
+#define V_MC_REF_RATE_DEFAULT V_MC_REF_RATE_100MHz
+
+#define S_MC_CLOCK_DRIVE 16
+#define M_MC_CLOCK_DRIVE _SB_MAKEMASK(4, S_MC_CLOCK_DRIVE)
+#define V_MC_CLOCK_DRIVE(x) _SB_MAKEVALUE(x, S_MC_CLOCK_DRIVE)
+#define G_MC_CLOCK_DRIVE(x) _SB_GETVALUE(x, S_MC_CLOCK_DRIVE, M_MC_CLOCK_DRIVE)
#define V_MC_CLOCK_DRIVE_DEFAULT V_MC_CLOCK_DRIVE(0xF)
-#define S_MC_DATA_DRIVE 20
-#define M_MC_DATA_DRIVE _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
-#define V_MC_DATA_DRIVE(x) _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
-#define G_MC_DATA_DRIVE(x) _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
-#define V_MC_DATA_DRIVE_DEFAULT V_MC_DATA_DRIVE(0x0)
+#define S_MC_DATA_DRIVE 20
+#define M_MC_DATA_DRIVE _SB_MAKEMASK(4, S_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE(x) _SB_MAKEVALUE(x, S_MC_DATA_DRIVE)
+#define G_MC_DATA_DRIVE(x) _SB_GETVALUE(x, S_MC_DATA_DRIVE, M_MC_DATA_DRIVE)
+#define V_MC_DATA_DRIVE_DEFAULT V_MC_DATA_DRIVE(0x0)
-#define S_MC_ADDR_DRIVE 24
-#define M_MC_ADDR_DRIVE _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
-#define V_MC_ADDR_DRIVE(x) _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
-#define G_MC_ADDR_DRIVE(x) _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
-#define V_MC_ADDR_DRIVE_DEFAULT V_MC_ADDR_DRIVE(0x0)
+#define S_MC_ADDR_DRIVE 24
+#define M_MC_ADDR_DRIVE _SB_MAKEMASK(4, S_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE(x) _SB_MAKEVALUE(x, S_MC_ADDR_DRIVE)
+#define G_MC_ADDR_DRIVE(x) _SB_GETVALUE(x, S_MC_ADDR_DRIVE, M_MC_ADDR_DRIVE)
+#define V_MC_ADDR_DRIVE_DEFAULT V_MC_ADDR_DRIVE(0x0)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define M_MC_REF_DISABLE _SB_MAKEMASK1(30)
+#define M_MC_REF_DISABLE _SB_MAKEMASK1(30)
#endif /* 1250 PASS3 || 112x PASS1 */
-#define M_MC_DLL_BYPASS _SB_MAKEMASK1(31)
-
-#define S_MC_DQI_SKEW 32
-#define M_MC_DQI_SKEW _SB_MAKEMASK(8, S_MC_DQI_SKEW)
-#define V_MC_DQI_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
-#define G_MC_DQI_SKEW(x) _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
-#define V_MC_DQI_SKEW_DEFAULT V_MC_DQI_SKEW(0)
-
-#define S_MC_DQO_SKEW 40
-#define M_MC_DQO_SKEW _SB_MAKEMASK(8, S_MC_DQO_SKEW)
-#define V_MC_DQO_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
-#define G_MC_DQO_SKEW(x) _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
-#define V_MC_DQO_SKEW_DEFAULT V_MC_DQO_SKEW(0)
-
-#define S_MC_ADDR_SKEW 48
-#define M_MC_ADDR_SKEW _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
-#define V_MC_ADDR_SKEW(x) _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
-#define G_MC_ADDR_SKEW(x) _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
-#define V_MC_ADDR_SKEW_DEFAULT V_MC_ADDR_SKEW(0x0F)
-
-#define S_MC_DLL_DEFAULT 56
-#define M_MC_DLL_DEFAULT _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
-#define V_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
-#define G_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
+#define M_MC_DLL_BYPASS _SB_MAKEMASK1(31)
+
+#define S_MC_DQI_SKEW 32
+#define M_MC_DQI_SKEW _SB_MAKEMASK(8, S_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQI_SKEW)
+#define G_MC_DQI_SKEW(x) _SB_GETVALUE(x, S_MC_DQI_SKEW, M_MC_DQI_SKEW)
+#define V_MC_DQI_SKEW_DEFAULT V_MC_DQI_SKEW(0)
+
+#define S_MC_DQO_SKEW 40
+#define M_MC_DQO_SKEW _SB_MAKEMASK(8, S_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW(x) _SB_MAKEVALUE(x, S_MC_DQO_SKEW)
+#define G_MC_DQO_SKEW(x) _SB_GETVALUE(x, S_MC_DQO_SKEW, M_MC_DQO_SKEW)
+#define V_MC_DQO_SKEW_DEFAULT V_MC_DQO_SKEW(0)
+
+#define S_MC_ADDR_SKEW 48
+#define M_MC_ADDR_SKEW _SB_MAKEMASK(8, S_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW(x) _SB_MAKEVALUE(x, S_MC_ADDR_SKEW)
+#define G_MC_ADDR_SKEW(x) _SB_GETVALUE(x, S_MC_ADDR_SKEW, M_MC_ADDR_SKEW)
+#define V_MC_ADDR_SKEW_DEFAULT V_MC_ADDR_SKEW(0x0F)
+
+#define S_MC_DLL_DEFAULT 56
+#define M_MC_DLL_DEFAULT _SB_MAKEMASK(8, S_MC_DLL_DEFAULT)
+#define V_MC_DLL_DEFAULT(x) _SB_MAKEVALUE(x, S_MC_DLL_DEFAULT)
+#define G_MC_DLL_DEFAULT(x) _SB_GETVALUE(x, S_MC_DLL_DEFAULT, M_MC_DLL_DEFAULT)
#define V_MC_DLL_DEFAULT_DEFAULT V_MC_DLL_DEFAULT(0x10)
-#define V_MC_CLKCONFIG_DEFAULT V_MC_DLL_DEFAULT_DEFAULT | \
- V_MC_ADDR_SKEW_DEFAULT | \
- V_MC_DQO_SKEW_DEFAULT | \
- V_MC_DQI_SKEW_DEFAULT | \
- V_MC_ADDR_DRIVE_DEFAULT | \
- V_MC_DATA_DRIVE_DEFAULT | \
- V_MC_CLOCK_DRIVE_DEFAULT | \
- V_MC_REF_RATE_DEFAULT
+#define V_MC_CLKCONFIG_DEFAULT V_MC_DLL_DEFAULT_DEFAULT | \
+ V_MC_ADDR_SKEW_DEFAULT | \
+ V_MC_DQO_SKEW_DEFAULT | \
+ V_MC_DQI_SKEW_DEFAULT | \
+ V_MC_ADDR_DRIVE_DEFAULT | \
+ V_MC_DATA_DRIVE_DEFAULT | \
+ V_MC_CLOCK_DRIVE_DEFAULT | \
+ V_MC_REF_RATE_DEFAULT
@@ -234,68 +234,68 @@
* DRAM Command Register (Table 6-13)
*/
-#define S_MC_COMMAND 0
-#define M_MC_COMMAND _SB_MAKEMASK(4, S_MC_COMMAND)
-#define V_MC_COMMAND(x) _SB_MAKEVALUE(x, S_MC_COMMAND)
-#define G_MC_COMMAND(x) _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
-
-#define K_MC_COMMAND_EMRS 0
-#define K_MC_COMMAND_MRS 1
-#define K_MC_COMMAND_PRE 2
-#define K_MC_COMMAND_AR 3
-#define K_MC_COMMAND_SETRFSH 4
-#define K_MC_COMMAND_CLRRFSH 5
-#define K_MC_COMMAND_SETPWRDN 6
-#define K_MC_COMMAND_CLRPWRDN 7
-
-#define V_MC_COMMAND_EMRS V_MC_COMMAND(K_MC_COMMAND_EMRS)
-#define V_MC_COMMAND_MRS V_MC_COMMAND(K_MC_COMMAND_MRS)
-#define V_MC_COMMAND_PRE V_MC_COMMAND(K_MC_COMMAND_PRE)
-#define V_MC_COMMAND_AR V_MC_COMMAND(K_MC_COMMAND_AR)
-#define V_MC_COMMAND_SETRFSH V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
-#define V_MC_COMMAND_CLRRFSH V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
-#define V_MC_COMMAND_SETPWRDN V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
-#define V_MC_COMMAND_CLRPWRDN V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
-
-#define M_MC_CS0 _SB_MAKEMASK1(4)
-#define M_MC_CS1 _SB_MAKEMASK1(5)
-#define M_MC_CS2 _SB_MAKEMASK1(6)
-#define M_MC_CS3 _SB_MAKEMASK1(7)
+#define S_MC_COMMAND 0
+#define M_MC_COMMAND _SB_MAKEMASK(4, S_MC_COMMAND)
+#define V_MC_COMMAND(x) _SB_MAKEVALUE(x, S_MC_COMMAND)
+#define G_MC_COMMAND(x) _SB_GETVALUE(x, S_MC_COMMAND, M_MC_COMMAND)
+
+#define K_MC_COMMAND_EMRS 0
+#define K_MC_COMMAND_MRS 1
+#define K_MC_COMMAND_PRE 2
+#define K_MC_COMMAND_AR 3
+#define K_MC_COMMAND_SETRFSH 4
+#define K_MC_COMMAND_CLRRFSH 5
+#define K_MC_COMMAND_SETPWRDN 6
+#define K_MC_COMMAND_CLRPWRDN 7
+
+#define V_MC_COMMAND_EMRS V_MC_COMMAND(K_MC_COMMAND_EMRS)
+#define V_MC_COMMAND_MRS V_MC_COMMAND(K_MC_COMMAND_MRS)
+#define V_MC_COMMAND_PRE V_MC_COMMAND(K_MC_COMMAND_PRE)
+#define V_MC_COMMAND_AR V_MC_COMMAND(K_MC_COMMAND_AR)
+#define V_MC_COMMAND_SETRFSH V_MC_COMMAND(K_MC_COMMAND_SETRFSH)
+#define V_MC_COMMAND_CLRRFSH V_MC_COMMAND(K_MC_COMMAND_CLRRFSH)
+#define V_MC_COMMAND_SETPWRDN V_MC_COMMAND(K_MC_COMMAND_SETPWRDN)
+#define V_MC_COMMAND_CLRPWRDN V_MC_COMMAND(K_MC_COMMAND_CLRPWRDN)
+
+#define M_MC_CS0 _SB_MAKEMASK1(4)
+#define M_MC_CS1 _SB_MAKEMASK1(5)
+#define M_MC_CS2 _SB_MAKEMASK1(6)
+#define M_MC_CS3 _SB_MAKEMASK1(7)
/*
* DRAM Mode Register (Table 6-14)
*/
-#define S_MC_EMODE 0
-#define M_MC_EMODE _SB_MAKEMASK(15, S_MC_EMODE)
-#define V_MC_EMODE(x) _SB_MAKEVALUE(x, S_MC_EMODE)
-#define G_MC_EMODE(x) _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
-#define V_MC_EMODE_DEFAULT V_MC_EMODE(0)
-
-#define S_MC_MODE 16
-#define M_MC_MODE _SB_MAKEMASK(15, S_MC_MODE)
-#define V_MC_MODE(x) _SB_MAKEVALUE(x, S_MC_MODE)
-#define G_MC_MODE(x) _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
-#define V_MC_MODE_DEFAULT V_MC_MODE(0x22)
-
-#define S_MC_DRAM_TYPE 32
-#define M_MC_DRAM_TYPE _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
-#define V_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
-#define G_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
-
-#define K_MC_DRAM_TYPE_JEDEC 0
-#define K_MC_DRAM_TYPE_FCRAM 1
+#define S_MC_EMODE 0
+#define M_MC_EMODE _SB_MAKEMASK(15, S_MC_EMODE)
+#define V_MC_EMODE(x) _SB_MAKEVALUE(x, S_MC_EMODE)
+#define G_MC_EMODE(x) _SB_GETVALUE(x, S_MC_EMODE, M_MC_EMODE)
+#define V_MC_EMODE_DEFAULT V_MC_EMODE(0)
+
+#define S_MC_MODE 16
+#define M_MC_MODE _SB_MAKEMASK(15, S_MC_MODE)
+#define V_MC_MODE(x) _SB_MAKEVALUE(x, S_MC_MODE)
+#define G_MC_MODE(x) _SB_GETVALUE(x, S_MC_MODE, M_MC_MODE)
+#define V_MC_MODE_DEFAULT V_MC_MODE(0x22)
+
+#define S_MC_DRAM_TYPE 32
+#define M_MC_DRAM_TYPE _SB_MAKEMASK(3, S_MC_DRAM_TYPE)
+#define V_MC_DRAM_TYPE(x) _SB_MAKEVALUE(x, S_MC_DRAM_TYPE)
+#define G_MC_DRAM_TYPE(x) _SB_GETVALUE(x, S_MC_DRAM_TYPE, M_MC_DRAM_TYPE)
+
+#define K_MC_DRAM_TYPE_JEDEC 0
+#define K_MC_DRAM_TYPE_FCRAM 1
#define K_MC_DRAM_TYPE_SGRAM 2
-#define V_MC_DRAM_TYPE_JEDEC V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
-#define V_MC_DRAM_TYPE_FCRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
-#define V_MC_DRAM_TYPE_SGRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
+#define V_MC_DRAM_TYPE_JEDEC V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_JEDEC)
+#define V_MC_DRAM_TYPE_FCRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_FCRAM)
+#define V_MC_DRAM_TYPE_SGRAM V_MC_DRAM_TYPE(K_MC_DRAM_TYPE_SGRAM)
#define M_MC_EXTERNALDECODE _SB_MAKEMASK1(35)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define M_MC_PRE_ON_A8 _SB_MAKEMASK1(36)
-#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(37)
+#define M_MC_PRE_ON_A8 _SB_MAKEMASK1(36)
+#define M_MC_RAM_WITH_A13 _SB_MAKEMASK1(37)
#endif /* 1250 PASS3 || 112x PASS1 */
@@ -308,99 +308,99 @@
#define M_MC_r2wIDLE_TWOCYCLES _SB_MAKEMASK1(61)
#define M_MC_r2rIDLE_TWOCYCLES _SB_MAKEMASK1(62)
-#define S_MC_tFIFO 56
-#define M_MC_tFIFO _SB_MAKEMASK(4, S_MC_tFIFO)
-#define V_MC_tFIFO(x) _SB_MAKEVALUE(x, S_MC_tFIFO)
-#define G_MC_tFIFO(x) _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
-#define K_MC_tFIFO_DEFAULT 1
-#define V_MC_tFIFO_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
+#define S_MC_tFIFO 56
+#define M_MC_tFIFO _SB_MAKEMASK(4, S_MC_tFIFO)
+#define V_MC_tFIFO(x) _SB_MAKEVALUE(x, S_MC_tFIFO)
+#define G_MC_tFIFO(x) _SB_GETVALUE(x, S_MC_tFIFO, M_MC_tFIFO)
+#define K_MC_tFIFO_DEFAULT 1
+#define V_MC_tFIFO_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT)
-#define S_MC_tRFC 52
-#define M_MC_tRFC _SB_MAKEMASK(4, S_MC_tRFC)
-#define V_MC_tRFC(x) _SB_MAKEVALUE(x, S_MC_tRFC)
-#define G_MC_tRFC(x) _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
-#define K_MC_tRFC_DEFAULT 12
-#define V_MC_tRFC_DEFAULT V_MC_tRFC(K_MC_tRFC_DEFAULT)
+#define S_MC_tRFC 52
+#define M_MC_tRFC _SB_MAKEMASK(4, S_MC_tRFC)
+#define V_MC_tRFC(x) _SB_MAKEVALUE(x, S_MC_tRFC)
+#define G_MC_tRFC(x) _SB_GETVALUE(x, S_MC_tRFC, M_MC_tRFC)
+#define K_MC_tRFC_DEFAULT 12
+#define V_MC_tRFC_DEFAULT V_MC_tRFC(K_MC_tRFC_DEFAULT)
#if SIBYTE_HDR_FEATURE(1250, PASS3)
-#define M_MC_tRFC_PLUS16 _SB_MAKEMASK1(51) /* 1250C3 and later. */
+#define M_MC_tRFC_PLUS16 _SB_MAKEMASK1(51) /* 1250C3 and later. */
#endif
-#define S_MC_tCwCr 40
-#define M_MC_tCwCr _SB_MAKEMASK(4, S_MC_tCwCr)
-#define V_MC_tCwCr(x) _SB_MAKEVALUE(x, S_MC_tCwCr)
-#define G_MC_tCwCr(x) _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
-#define K_MC_tCwCr_DEFAULT 4
-#define V_MC_tCwCr_DEFAULT V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
-
-#define S_MC_tRCr 28
-#define M_MC_tRCr _SB_MAKEMASK(4, S_MC_tRCr)
-#define V_MC_tRCr(x) _SB_MAKEVALUE(x, S_MC_tRCr)
-#define G_MC_tRCr(x) _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
-#define K_MC_tRCr_DEFAULT 9
-#define V_MC_tRCr_DEFAULT V_MC_tRCr(K_MC_tRCr_DEFAULT)
-
-#define S_MC_tRCw 24
-#define M_MC_tRCw _SB_MAKEMASK(4, S_MC_tRCw)
-#define V_MC_tRCw(x) _SB_MAKEVALUE(x, S_MC_tRCw)
-#define G_MC_tRCw(x) _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
-#define K_MC_tRCw_DEFAULT 10
-#define V_MC_tRCw_DEFAULT V_MC_tRCw(K_MC_tRCw_DEFAULT)
-
-#define S_MC_tRRD 20
-#define M_MC_tRRD _SB_MAKEMASK(4, S_MC_tRRD)
-#define V_MC_tRRD(x) _SB_MAKEVALUE(x, S_MC_tRRD)
-#define G_MC_tRRD(x) _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
-#define K_MC_tRRD_DEFAULT 2
-#define V_MC_tRRD_DEFAULT V_MC_tRRD(K_MC_tRRD_DEFAULT)
-
-#define S_MC_tRP 16
-#define M_MC_tRP _SB_MAKEMASK(4, S_MC_tRP)
-#define V_MC_tRP(x) _SB_MAKEVALUE(x, S_MC_tRP)
-#define G_MC_tRP(x) _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
-#define K_MC_tRP_DEFAULT 4
-#define V_MC_tRP_DEFAULT V_MC_tRP(K_MC_tRP_DEFAULT)
-
-#define S_MC_tCwD 8
-#define M_MC_tCwD _SB_MAKEMASK(4, S_MC_tCwD)
-#define V_MC_tCwD(x) _SB_MAKEVALUE(x, S_MC_tCwD)
-#define G_MC_tCwD(x) _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
-#define K_MC_tCwD_DEFAULT 1
-#define V_MC_tCwD_DEFAULT V_MC_tCwD(K_MC_tCwD_DEFAULT)
-
-#define M_tCrDh _SB_MAKEMASK1(7)
+#define S_MC_tCwCr 40
+#define M_MC_tCwCr _SB_MAKEMASK(4, S_MC_tCwCr)
+#define V_MC_tCwCr(x) _SB_MAKEVALUE(x, S_MC_tCwCr)
+#define G_MC_tCwCr(x) _SB_GETVALUE(x, S_MC_tCwCr, M_MC_tCwCr)
+#define K_MC_tCwCr_DEFAULT 4
+#define V_MC_tCwCr_DEFAULT V_MC_tCwCr(K_MC_tCwCr_DEFAULT)
+
+#define S_MC_tRCr 28
+#define M_MC_tRCr _SB_MAKEMASK(4, S_MC_tRCr)
+#define V_MC_tRCr(x) _SB_MAKEVALUE(x, S_MC_tRCr)
+#define G_MC_tRCr(x) _SB_GETVALUE(x, S_MC_tRCr, M_MC_tRCr)
+#define K_MC_tRCr_DEFAULT 9
+#define V_MC_tRCr_DEFAULT V_MC_tRCr(K_MC_tRCr_DEFAULT)
+
+#define S_MC_tRCw 24
+#define M_MC_tRCw _SB_MAKEMASK(4, S_MC_tRCw)
+#define V_MC_tRCw(x) _SB_MAKEVALUE(x, S_MC_tRCw)
+#define G_MC_tRCw(x) _SB_GETVALUE(x, S_MC_tRCw, M_MC_tRCw)
+#define K_MC_tRCw_DEFAULT 10
+#define V_MC_tRCw_DEFAULT V_MC_tRCw(K_MC_tRCw_DEFAULT)
+
+#define S_MC_tRRD 20
+#define M_MC_tRRD _SB_MAKEMASK(4, S_MC_tRRD)
+#define V_MC_tRRD(x) _SB_MAKEVALUE(x, S_MC_tRRD)
+#define G_MC_tRRD(x) _SB_GETVALUE(x, S_MC_tRRD, M_MC_tRRD)
+#define K_MC_tRRD_DEFAULT 2
+#define V_MC_tRRD_DEFAULT V_MC_tRRD(K_MC_tRRD_DEFAULT)
+
+#define S_MC_tRP 16
+#define M_MC_tRP _SB_MAKEMASK(4, S_MC_tRP)
+#define V_MC_tRP(x) _SB_MAKEVALUE(x, S_MC_tRP)
+#define G_MC_tRP(x) _SB_GETVALUE(x, S_MC_tRP, M_MC_tRP)
+#define K_MC_tRP_DEFAULT 4
+#define V_MC_tRP_DEFAULT V_MC_tRP(K_MC_tRP_DEFAULT)
+
+#define S_MC_tCwD 8
+#define M_MC_tCwD _SB_MAKEMASK(4, S_MC_tCwD)
+#define V_MC_tCwD(x) _SB_MAKEVALUE(x, S_MC_tCwD)
+#define G_MC_tCwD(x) _SB_GETVALUE(x, S_MC_tCwD, M_MC_tCwD)
+#define K_MC_tCwD_DEFAULT 1
+#define V_MC_tCwD_DEFAULT V_MC_tCwD(K_MC_tCwD_DEFAULT)
+
+#define M_tCrDh _SB_MAKEMASK1(7)
#define M_MC_tCrDh M_tCrDh
-#define S_MC_tCrD 4
-#define M_MC_tCrD _SB_MAKEMASK(3, S_MC_tCrD)
-#define V_MC_tCrD(x) _SB_MAKEVALUE(x, S_MC_tCrD)
-#define G_MC_tCrD(x) _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
-#define K_MC_tCrD_DEFAULT 2
-#define V_MC_tCrD_DEFAULT V_MC_tCrD(K_MC_tCrD_DEFAULT)
-
-#define S_MC_tRCD 0
-#define M_MC_tRCD _SB_MAKEMASK(4, S_MC_tRCD)
-#define V_MC_tRCD(x) _SB_MAKEVALUE(x, S_MC_tRCD)
-#define G_MC_tRCD(x) _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
-#define K_MC_tRCD_DEFAULT 3
-#define V_MC_tRCD_DEFAULT V_MC_tRCD(K_MC_tRCD_DEFAULT)
-
-#define V_MC_TIMING_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
- V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
- V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
- V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
- V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
- V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
- V_MC_tRP(K_MC_tRP_DEFAULT) | \
- V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
- V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
- V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
- M_MC_r2rIDLE_TWOCYCLES
+#define S_MC_tCrD 4
+#define M_MC_tCrD _SB_MAKEMASK(3, S_MC_tCrD)
+#define V_MC_tCrD(x) _SB_MAKEVALUE(x, S_MC_tCrD)
+#define G_MC_tCrD(x) _SB_GETVALUE(x, S_MC_tCrD, M_MC_tCrD)
+#define K_MC_tCrD_DEFAULT 2
+#define V_MC_tCrD_DEFAULT V_MC_tCrD(K_MC_tCrD_DEFAULT)
+
+#define S_MC_tRCD 0
+#define M_MC_tRCD _SB_MAKEMASK(4, S_MC_tRCD)
+#define V_MC_tRCD(x) _SB_MAKEVALUE(x, S_MC_tRCD)
+#define G_MC_tRCD(x) _SB_GETVALUE(x, S_MC_tRCD, M_MC_tRCD)
+#define K_MC_tRCD_DEFAULT 3
+#define V_MC_tRCD_DEFAULT V_MC_tRCD(K_MC_tRCD_DEFAULT)
+
+#define V_MC_TIMING_DEFAULT V_MC_tFIFO(K_MC_tFIFO_DEFAULT) | \
+ V_MC_tRFC(K_MC_tRFC_DEFAULT) | \
+ V_MC_tCwCr(K_MC_tCwCr_DEFAULT) | \
+ V_MC_tRCr(K_MC_tRCr_DEFAULT) | \
+ V_MC_tRCw(K_MC_tRCw_DEFAULT) | \
+ V_MC_tRRD(K_MC_tRRD_DEFAULT) | \
+ V_MC_tRP(K_MC_tRP_DEFAULT) | \
+ V_MC_tCwD(K_MC_tCwD_DEFAULT) | \
+ V_MC_tCrD(K_MC_tCrD_DEFAULT) | \
+ V_MC_tRCD(K_MC_tRCD_DEFAULT) | \
+ M_MC_r2rIDLE_TWOCYCLES
/*
* Errata says these are not the default
- * M_MC_w2rIDLE_TWOCYCLES | \
- * M_MC_r2wIDLE_TWOCYCLES | \
+ * M_MC_w2rIDLE_TWOCYCLES | \
+ * M_MC_r2wIDLE_TWOCYCLES | \
*/
@@ -408,143 +408,143 @@
* Chip Select Start Address Register (Table 6-17)
*/
-#define S_MC_CS0_START 0
-#define M_MC_CS0_START _SB_MAKEMASK(16, S_MC_CS0_START)
-#define V_MC_CS0_START(x) _SB_MAKEVALUE(x, S_MC_CS0_START)
-#define G_MC_CS0_START(x) _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
+#define S_MC_CS0_START 0
+#define M_MC_CS0_START _SB_MAKEMASK(16, S_MC_CS0_START)
+#define V_MC_CS0_START(x) _SB_MAKEVALUE(x, S_MC_CS0_START)
+#define G_MC_CS0_START(x) _SB_GETVALUE(x, S_MC_CS0_START, M_MC_CS0_START)
-#define S_MC_CS1_START 16
-#define M_MC_CS1_START _SB_MAKEMASK(16, S_MC_CS1_START)
-#define V_MC_CS1_START(x) _SB_MAKEVALUE(x, S_MC_CS1_START)
-#define G_MC_CS1_START(x) _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
+#define S_MC_CS1_START 16
+#define M_MC_CS1_START _SB_MAKEMASK(16, S_MC_CS1_START)
+#define V_MC_CS1_START(x) _SB_MAKEVALUE(x, S_MC_CS1_START)
+#define G_MC_CS1_START(x) _SB_GETVALUE(x, S_MC_CS1_START, M_MC_CS1_START)
-#define S_MC_CS2_START 32
-#define M_MC_CS2_START _SB_MAKEMASK(16, S_MC_CS2_START)
-#define V_MC_CS2_START(x) _SB_MAKEVALUE(x, S_MC_CS2_START)
-#define G_MC_CS2_START(x) _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
+#define S_MC_CS2_START 32
+#define M_MC_CS2_START _SB_MAKEMASK(16, S_MC_CS2_START)
+#define V_MC_CS2_START(x) _SB_MAKEVALUE(x, S_MC_CS2_START)
+#define G_MC_CS2_START(x) _SB_GETVALUE(x, S_MC_CS2_START, M_MC_CS2_START)
-#define S_MC_CS3_START 48
-#define M_MC_CS3_START _SB_MAKEMASK(16, S_MC_CS3_START)
-#define V_MC_CS3_START(x) _SB_MAKEVALUE(x, S_MC_CS3_START)
-#define G_MC_CS3_START(x) _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
+#define S_MC_CS3_START 48
+#define M_MC_CS3_START _SB_MAKEMASK(16, S_MC_CS3_START)
+#define V_MC_CS3_START(x) _SB_MAKEVALUE(x, S_MC_CS3_START)
+#define G_MC_CS3_START(x) _SB_GETVALUE(x, S_MC_CS3_START, M_MC_CS3_START)
/*
* Chip Select End Address Register (Table 6-18)
*/
-#define S_MC_CS0_END 0
-#define M_MC_CS0_END _SB_MAKEMASK(16, S_MC_CS0_END)
-#define V_MC_CS0_END(x) _SB_MAKEVALUE(x, S_MC_CS0_END)
-#define G_MC_CS0_END(x) _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
+#define S_MC_CS0_END 0
+#define M_MC_CS0_END _SB_MAKEMASK(16, S_MC_CS0_END)
+#define V_MC_CS0_END(x) _SB_MAKEVALUE(x, S_MC_CS0_END)
+#define G_MC_CS0_END(x) _SB_GETVALUE(x, S_MC_CS0_END, M_MC_CS0_END)
-#define S_MC_CS1_END 16
-#define M_MC_CS1_END _SB_MAKEMASK(16, S_MC_CS1_END)
-#define V_MC_CS1_END(x) _SB_MAKEVALUE(x, S_MC_CS1_END)
-#define G_MC_CS1_END(x) _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
+#define S_MC_CS1_END 16
+#define M_MC_CS1_END _SB_MAKEMASK(16, S_MC_CS1_END)
+#define V_MC_CS1_END(x) _SB_MAKEVALUE(x, S_MC_CS1_END)
+#define G_MC_CS1_END(x) _SB_GETVALUE(x, S_MC_CS1_END, M_MC_CS1_END)
-#define S_MC_CS2_END 32
-#define M_MC_CS2_END _SB_MAKEMASK(16, S_MC_CS2_END)
-#define V_MC_CS2_END(x) _SB_MAKEVALUE(x, S_MC_CS2_END)
-#define G_MC_CS2_END(x) _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
+#define S_MC_CS2_END 32
+#define M_MC_CS2_END _SB_MAKEMASK(16, S_MC_CS2_END)
+#define V_MC_CS2_END(x) _SB_MAKEVALUE(x, S_MC_CS2_END)
+#define G_MC_CS2_END(x) _SB_GETVALUE(x, S_MC_CS2_END, M_MC_CS2_END)
-#define S_MC_CS3_END 48
-#define M_MC_CS3_END _SB_MAKEMASK(16, S_MC_CS3_END)
-#define V_MC_CS3_END(x) _SB_MAKEVALUE(x, S_MC_CS3_END)
-#define G_MC_CS3_END(x) _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
+#define S_MC_CS3_END 48
+#define M_MC_CS3_END _SB_MAKEMASK(16, S_MC_CS3_END)
+#define V_MC_CS3_END(x) _SB_MAKEVALUE(x, S_MC_CS3_END)
+#define G_MC_CS3_END(x) _SB_GETVALUE(x, S_MC_CS3_END, M_MC_CS3_END)
/*
* Chip Select Interleave Register (Table 6-19)
*/
-#define S_MC_INTLV_RESERVED 0
-#define M_MC_INTLV_RESERVED _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
+#define S_MC_INTLV_RESERVED 0
+#define M_MC_INTLV_RESERVED _SB_MAKEMASK(5, S_MC_INTLV_RESERVED)
-#define S_MC_INTERLEAVE 7
-#define M_MC_INTERLEAVE _SB_MAKEMASK(18, S_MC_INTERLEAVE)
-#define V_MC_INTERLEAVE(x) _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
+#define S_MC_INTERLEAVE 7
+#define M_MC_INTERLEAVE _SB_MAKEMASK(18, S_MC_INTERLEAVE)
+#define V_MC_INTERLEAVE(x) _SB_MAKEVALUE(x, S_MC_INTERLEAVE)
-#define S_MC_INTLV_MBZ 25
-#define M_MC_INTLV_MBZ _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
+#define S_MC_INTLV_MBZ 25
+#define M_MC_INTLV_MBZ _SB_MAKEMASK(39, S_MC_INTLV_MBZ)
/*
* Row Address Bits Register (Table 6-20)
*/
-#define S_MC_RAS_RESERVED 0
-#define M_MC_RAS_RESERVED _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
+#define S_MC_RAS_RESERVED 0
+#define M_MC_RAS_RESERVED _SB_MAKEMASK(5, S_MC_RAS_RESERVED)
-#define S_MC_RAS_SELECT 12
-#define M_MC_RAS_SELECT _SB_MAKEMASK(25, S_MC_RAS_SELECT)
-#define V_MC_RAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
+#define S_MC_RAS_SELECT 12
+#define M_MC_RAS_SELECT _SB_MAKEMASK(25, S_MC_RAS_SELECT)
+#define V_MC_RAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_RAS_SELECT)
-#define S_MC_RAS_MBZ 37
-#define M_MC_RAS_MBZ _SB_MAKEMASK(27, S_MC_RAS_MBZ)
+#define S_MC_RAS_MBZ 37
+#define M_MC_RAS_MBZ _SB_MAKEMASK(27, S_MC_RAS_MBZ)
/*
* Column Address Bits Register (Table 6-21)
*/
-#define S_MC_CAS_RESERVED 0
-#define M_MC_CAS_RESERVED _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
+#define S_MC_CAS_RESERVED 0
+#define M_MC_CAS_RESERVED _SB_MAKEMASK(5, S_MC_CAS_RESERVED)
-#define S_MC_CAS_SELECT 5
-#define M_MC_CAS_SELECT _SB_MAKEMASK(18, S_MC_CAS_SELECT)
-#define V_MC_CAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
+#define S_MC_CAS_SELECT 5
+#define M_MC_CAS_SELECT _SB_MAKEMASK(18, S_MC_CAS_SELECT)
+#define V_MC_CAS_SELECT(x) _SB_MAKEVALUE(x, S_MC_CAS_SELECT)
-#define S_MC_CAS_MBZ 23
-#define M_MC_CAS_MBZ _SB_MAKEMASK(41, S_MC_CAS_MBZ)
+#define S_MC_CAS_MBZ 23
+#define M_MC_CAS_MBZ _SB_MAKEMASK(41, S_MC_CAS_MBZ)
/*
* Bank Address Address Bits Register (Table 6-22)
*/
-#define S_MC_BA_RESERVED 0
-#define M_MC_BA_RESERVED _SB_MAKEMASK(5, S_MC_BA_RESERVED)
+#define S_MC_BA_RESERVED 0
+#define M_MC_BA_RESERVED _SB_MAKEMASK(5, S_MC_BA_RESERVED)
-#define S_MC_BA_SELECT 5
-#define M_MC_BA_SELECT _SB_MAKEMASK(20, S_MC_BA_SELECT)
-#define V_MC_BA_SELECT(x) _SB_MAKEVALUE(x, S_MC_BA_SELECT)
+#define S_MC_BA_SELECT 5
+#define M_MC_BA_SELECT _SB_MAKEMASK(20, S_MC_BA_SELECT)
+#define V_MC_BA_SELECT(x) _SB_MAKEVALUE(x, S_MC_BA_SELECT)
-#define S_MC_BA_MBZ 25
-#define M_MC_BA_MBZ _SB_MAKEMASK(39, S_MC_BA_MBZ)
+#define S_MC_BA_MBZ 25
+#define M_MC_BA_MBZ _SB_MAKEMASK(39, S_MC_BA_MBZ)
/*
* Chip Select Attribute Register (Table 6-23)
*/
-#define K_MC_CS_ATTR_CLOSED 0
-#define K_MC_CS_ATTR_CASCHECK 1
-#define K_MC_CS_ATTR_HINT 2
-#define K_MC_CS_ATTR_OPEN 3
+#define K_MC_CS_ATTR_CLOSED 0
+#define K_MC_CS_ATTR_CASCHECK 1
+#define K_MC_CS_ATTR_HINT 2
+#define K_MC_CS_ATTR_OPEN 3
-#define S_MC_CS0_PAGE 0
-#define M_MC_CS0_PAGE _SB_MAKEMASK(2, S_MC_CS0_PAGE)
-#define V_MC_CS0_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
-#define G_MC_CS0_PAGE(x) _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
+#define S_MC_CS0_PAGE 0
+#define M_MC_CS0_PAGE _SB_MAKEMASK(2, S_MC_CS0_PAGE)
+#define V_MC_CS0_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS0_PAGE)
+#define G_MC_CS0_PAGE(x) _SB_GETVALUE(x, S_MC_CS0_PAGE, M_MC_CS0_PAGE)
-#define S_MC_CS1_PAGE 16
-#define M_MC_CS1_PAGE _SB_MAKEMASK(2, S_MC_CS1_PAGE)
-#define V_MC_CS1_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
-#define G_MC_CS1_PAGE(x) _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
+#define S_MC_CS1_PAGE 16
+#define M_MC_CS1_PAGE _SB_MAKEMASK(2, S_MC_CS1_PAGE)
+#define V_MC_CS1_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS1_PAGE)
+#define G_MC_CS1_PAGE(x) _SB_GETVALUE(x, S_MC_CS1_PAGE, M_MC_CS1_PAGE)
-#define S_MC_CS2_PAGE 32
-#define M_MC_CS2_PAGE _SB_MAKEMASK(2, S_MC_CS2_PAGE)
-#define V_MC_CS2_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
-#define G_MC_CS2_PAGE(x) _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
+#define S_MC_CS2_PAGE 32
+#define M_MC_CS2_PAGE _SB_MAKEMASK(2, S_MC_CS2_PAGE)
+#define V_MC_CS2_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS2_PAGE)
+#define G_MC_CS2_PAGE(x) _SB_GETVALUE(x, S_MC_CS2_PAGE, M_MC_CS2_PAGE)
-#define S_MC_CS3_PAGE 48
-#define M_MC_CS3_PAGE _SB_MAKEMASK(2, S_MC_CS3_PAGE)
-#define V_MC_CS3_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
-#define G_MC_CS3_PAGE(x) _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
+#define S_MC_CS3_PAGE 48
+#define M_MC_CS3_PAGE _SB_MAKEMASK(2, S_MC_CS3_PAGE)
+#define V_MC_CS3_PAGE(x) _SB_MAKEVALUE(x, S_MC_CS3_PAGE)
+#define G_MC_CS3_PAGE(x) _SB_GETVALUE(x, S_MC_CS3_PAGE, M_MC_CS3_PAGE)
/*
* ECC Test ECC Register (Table 6-25)
*/
-#define S_MC_ECC_INVERT 0
-#define M_MC_ECC_INVERT _SB_MAKEMASK(8, S_MC_ECC_INVERT)
+#define S_MC_ECC_INVERT 0
+#define M_MC_ECC_INVERT _SB_MAKEMASK(8, S_MC_ECC_INVERT)
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_regs.h b/arch/mips/include/asm/sibyte/sb1250_regs.h
index 29b9f0b26b3..ee86ca0fad3 100644
--- a/arch/mips/include/asm/sibyte/sb1250_regs.h
+++ b/arch/mips/include/asm/sibyte/sb1250_regs.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Register Definitions File: sb1250_regs.h
+ * Register Definitions File: sb1250_regs.h
*
* This module contains the addresses of the on-chip peripherals
* on the SB1250.
@@ -61,45 +61,45 @@
*/
#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
-#define A_MC_BASE_0 0x0010051000
-#define A_MC_BASE_1 0x0010052000
-#define MC_REGISTER_SPACING 0x1000
+#define A_MC_BASE_0 0x0010051000
+#define A_MC_BASE_1 0x0010052000
+#define MC_REGISTER_SPACING 0x1000
-#define A_MC_BASE(ctlid) ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
+#define A_MC_BASE(ctlid) ((ctlid)*MC_REGISTER_SPACING+A_MC_BASE_0)
#define A_MC_REGISTER(ctlid, reg) (A_MC_BASE(ctlid)+(reg))
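Editorial note (not part of the patch): a quick stand-alone check of the addressing scheme. The constants are copied from the hunk above; the macro names mirror the header but are re-declared so the example compiles outside the kernel tree:

    #include <stdint.h>
    #include <stdio.h>

    #define A_MC_BASE_0               0x0010051000ULL
    #define MC_REGISTER_SPACING       0x1000
    #define A_MC_BASE(ctlid)          ((ctlid) * MC_REGISTER_SPACING + A_MC_BASE_0)
    #define A_MC_REGISTER(ctlid, reg) (A_MC_BASE(ctlid) + (reg))
    #define R_MC_CONFIG               0x0000000100

    int main(void)
    {
        /* Controller 1's config register sits one spacing above controller 0's. */
        printf("MC1 config register: 0x%010llx\n",
               (unsigned long long)A_MC_REGISTER(1, R_MC_CONFIG)); /* 0x0010052100 */
        return 0;
    }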
-#define R_MC_CONFIG 0x0000000100
-#define R_MC_DRAMCMD 0x0000000120
-#define R_MC_DRAMMODE 0x0000000140
-#define R_MC_TIMING1 0x0000000160
-#define R_MC_TIMING2 0x0000000180
-#define R_MC_CS_START 0x00000001A0
-#define R_MC_CS_END 0x00000001C0
-#define R_MC_CS_INTERLEAVE 0x00000001E0
-#define S_MC_CS_STARTEND 16
-
-#define R_MC_CSX_BASE 0x0000000200
-#define R_MC_CSX_ROW 0x0000000000 /* relative to CSX_BASE, above */
-#define R_MC_CSX_COL 0x0000000020 /* relative to CSX_BASE, above */
-#define R_MC_CSX_BA 0x0000000040 /* relative to CSX_BASE, above */
-#define MC_CSX_SPACING 0x0000000060 /* relative to CSX_BASE, above */
-
-#define R_MC_CS0_ROW 0x0000000200
-#define R_MC_CS0_COL 0x0000000220
-#define R_MC_CS0_BA 0x0000000240
-#define R_MC_CS1_ROW 0x0000000260
-#define R_MC_CS1_COL 0x0000000280
-#define R_MC_CS1_BA 0x00000002A0
-#define R_MC_CS2_ROW 0x00000002C0
-#define R_MC_CS2_COL 0x00000002E0
-#define R_MC_CS2_BA 0x0000000300
-#define R_MC_CS3_ROW 0x0000000320
-#define R_MC_CS3_COL 0x0000000340
-#define R_MC_CS3_BA 0x0000000360
-#define R_MC_CS_ATTR 0x0000000380
-#define R_MC_TEST_DATA 0x0000000400
-#define R_MC_TEST_ECC 0x0000000420
-#define R_MC_MCLK_CFG 0x0000000500
+#define R_MC_CONFIG 0x0000000100
+#define R_MC_DRAMCMD 0x0000000120
+#define R_MC_DRAMMODE 0x0000000140
+#define R_MC_TIMING1 0x0000000160
+#define R_MC_TIMING2 0x0000000180
+#define R_MC_CS_START 0x00000001A0
+#define R_MC_CS_END 0x00000001C0
+#define R_MC_CS_INTERLEAVE 0x00000001E0
+#define S_MC_CS_STARTEND 16
+
+#define R_MC_CSX_BASE 0x0000000200
+#define R_MC_CSX_ROW 0x0000000000 /* relative to CSX_BASE, above */
+#define R_MC_CSX_COL 0x0000000020 /* relative to CSX_BASE, above */
+#define R_MC_CSX_BA 0x0000000040 /* relative to CSX_BASE, above */
+#define MC_CSX_SPACING 0x0000000060 /* relative to CSX_BASE, above */
+
+#define R_MC_CS0_ROW 0x0000000200
+#define R_MC_CS0_COL 0x0000000220
+#define R_MC_CS0_BA 0x0000000240
+#define R_MC_CS1_ROW 0x0000000260
+#define R_MC_CS1_COL 0x0000000280
+#define R_MC_CS1_BA 0x00000002A0
+#define R_MC_CS2_ROW 0x00000002C0
+#define R_MC_CS2_COL 0x00000002E0
+#define R_MC_CS2_BA 0x0000000300
+#define R_MC_CS3_ROW 0x0000000320
+#define R_MC_CS3_COL 0x0000000340
+#define R_MC_CS3_BA 0x0000000360
+#define R_MC_CS_ATTR 0x0000000380
+#define R_MC_TEST_DATA 0x0000000400
+#define R_MC_TEST_ECC 0x0000000420
+#define R_MC_MCLK_CFG 0x0000000500
#endif /* 1250 & 112x */
@@ -109,14 +109,14 @@
#if SIBYTE_HDR_FEATURE_1250_112x /* This L2C only on 1250/112x */
-#define A_L2_READ_TAG 0x0010040018
-#define A_L2_ECC_TAG 0x0010040038
+#define A_L2_READ_TAG 0x0010040018
+#define A_L2_ECC_TAG 0x0010040038
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define A_L2_READ_MISC 0x0010040058
+#define A_L2_READ_MISC 0x0010040058
#endif /* 1250 PASS3 || 112x PASS1 */
-#define A_L2_WAY_DISABLE 0x0010041000
-#define A_L2_MAKEDISABLE(x) (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
-#define A_L2_MGMT_TAG_BASE 0x00D0000000
+#define A_L2_WAY_DISABLE 0x0010041000
+#define A_L2_MAKEDISABLE(x) (A_L2_WAY_DISABLE | (((~(x))&0x0F) << 8))
+#define A_L2_MGMT_TAG_BASE 0x00D0000000
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_L2_CACHE_DISABLE 0x0010042000
@@ -124,10 +124,10 @@
#define A_L2_MISC_CONFIG 0x0010043000
#endif /* 1250 PASS2 || 112x PASS1 */
-/* Backward-compatibility definitions. */
+/* Backward-compatibility definitions. */
/* XXX: discourage people from using these constants. */
-#define A_L2_READ_ADDRESS A_L2_READ_TAG
-#define A_L2_EEC_ADDRESS A_L2_ECC_TAG
+#define A_L2_READ_ADDRESS A_L2_READ_TAG
+#define A_L2_EEC_ADDRESS A_L2_ECC_TAG
#endif
@@ -137,8 +137,8 @@
********************************************************************* */
#if SIBYTE_HDR_FEATURE_1250_112x /* This PCI/HT only on 1250/112x */
-#define A_PCI_TYPE00_HEADER 0x00DE000000
-#define A_PCI_TYPE01_HEADER 0x00DE000800
+#define A_PCI_TYPE00_HEADER 0x00DE000000
+#define A_PCI_TYPE01_HEADER 0x00DE000800
#endif
@@ -146,121 +146,121 @@
* Ethernet DMA and MACs
********************************************************************* */
-#define A_MAC_BASE_0 0x0010064000
-#define A_MAC_BASE_1 0x0010065000
+#define A_MAC_BASE_0 0x0010064000
+#define A_MAC_BASE_1 0x0010065000
#if SIBYTE_HDR_FEATURE_CHIP(1250)
-#define A_MAC_BASE_2 0x0010066000
+#define A_MAC_BASE_2 0x0010066000
#endif /* 1250 */
-#define MAC_SPACING 0x1000
-#define MAC_DMA_TXRX_SPACING 0x0400
-#define MAC_DMA_CHANNEL_SPACING 0x0100
-#define DMA_RX 0
-#define DMA_TX 1
+#define MAC_SPACING 0x1000
+#define MAC_DMA_TXRX_SPACING 0x0400
+#define MAC_DMA_CHANNEL_SPACING 0x0100
+#define DMA_RX 0
+#define DMA_TX 1
#define MAC_NUM_DMACHAN 2 /* channels per direction */
/* XXX: not correct; depends on SOC type. */
-#define MAC_NUM_PORTS 3
+#define MAC_NUM_PORTS 3
-#define A_MAC_CHANNEL_BASE(macnum) \
- (A_MAC_BASE_0 + \
- MAC_SPACING*(macnum))
+#define A_MAC_CHANNEL_BASE(macnum) \
+ (A_MAC_BASE_0 + \
+ MAC_SPACING*(macnum))
-#define A_MAC_REGISTER(macnum,reg) \
- (A_MAC_BASE_0 + \
- MAC_SPACING*(macnum) + (reg))
+#define A_MAC_REGISTER(macnum,reg) \
+ (A_MAC_BASE_0 + \
+ MAC_SPACING*(macnum) + (reg))
#define R_MAC_DMA_CHANNELS 0x800 /* Relative to A_MAC_CHANNEL_BASE */
#define A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) \
- ((A_MAC_CHANNEL_BASE(macnum)) + \
- R_MAC_DMA_CHANNELS + \
- (MAC_DMA_TXRX_SPACING*(txrx)) + \
- (MAC_DMA_CHANNEL_SPACING*(chan)))
+ ((A_MAC_CHANNEL_BASE(macnum)) + \
+ R_MAC_DMA_CHANNELS + \
+ (MAC_DMA_TXRX_SPACING*(txrx)) + \
+ (MAC_DMA_CHANNEL_SPACING*(chan)))
#define R_MAC_DMA_CHANNEL_BASE(txrx, chan) \
- (R_MAC_DMA_CHANNELS + \
- (MAC_DMA_TXRX_SPACING*(txrx)) + \
- (MAC_DMA_CHANNEL_SPACING*(chan)))
+ (R_MAC_DMA_CHANNELS + \
+ (MAC_DMA_TXRX_SPACING*(txrx)) + \
+ (MAC_DMA_CHANNEL_SPACING*(chan)))
-#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg) \
- (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) + \
- (reg))
+#define A_MAC_DMA_REGISTER(macnum, txrx, chan, reg) \
+ (A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan) + \
+ (reg))
-#define R_MAC_DMA_REGISTER(txrx, chan, reg) \
- (R_MAC_DMA_CHANNEL_BASE(txrx, chan) + \
- (reg))
+#define R_MAC_DMA_REGISTER(txrx, chan, reg) \
+ (R_MAC_DMA_CHANNEL_BASE(txrx, chan) + \
+ (reg))
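Editorial note (not part of the patch): to see how the spacings compose, a stand-alone sketch that computes the CONFIG0 address of MAC 1's TX channel 0. The constants are copied from this file (R_MAC_DMA_CONFIG0, listed just below, is 0x00) and re-declared so the example stands alone:

    #include <stdint.h>
    #include <stdio.h>

    #define A_MAC_BASE_0             0x0010064000ULL
    #define MAC_SPACING              0x1000
    #define MAC_DMA_TXRX_SPACING     0x0400
    #define MAC_DMA_CHANNEL_SPACING  0x0100
    #define DMA_TX                   1
    #define R_MAC_DMA_CHANNELS       0x800
    #define R_MAC_DMA_CONFIG0        0x00000000

    #define A_MAC_CHANNEL_BASE(macnum) (A_MAC_BASE_0 + MAC_SPACING * (macnum))
    #define A_MAC_DMA_CHANNEL_BASE(macnum, txrx, chan)          \
            (A_MAC_CHANNEL_BASE(macnum) + R_MAC_DMA_CHANNELS +  \
             MAC_DMA_TXRX_SPACING * (txrx) +                    \
             MAC_DMA_CHANNEL_SPACING * (chan))

    int main(void)
    {
        /* MAC 1, TX direction, channel 0: 0x0010064000 + 0x1000 + 0x800 + 0x400 */
        uint64_t addr = A_MAC_DMA_CHANNEL_BASE(1, DMA_TX, 0) + R_MAC_DMA_CONFIG0;

        printf("MAC1 TX ch0 CONFIG0: 0x%010llx\n", (unsigned long long)addr);
        /* prints 0x0010065c00 */
        return 0;
    }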
/*
* DMA channel registers, relative to A_MAC_DMA_CHANNEL_BASE
*/
-#define R_MAC_DMA_CONFIG0 0x00000000
-#define R_MAC_DMA_CONFIG1 0x00000008
-#define R_MAC_DMA_DSCR_BASE 0x00000010
-#define R_MAC_DMA_DSCR_CNT 0x00000018
-#define R_MAC_DMA_CUR_DSCRA 0x00000020
-#define R_MAC_DMA_CUR_DSCRB 0x00000028
-#define R_MAC_DMA_CUR_DSCRADDR 0x00000030
+#define R_MAC_DMA_CONFIG0 0x00000000
+#define R_MAC_DMA_CONFIG1 0x00000008
+#define R_MAC_DMA_DSCR_BASE 0x00000010
+#define R_MAC_DMA_DSCR_CNT 0x00000018
+#define R_MAC_DMA_CUR_DSCRA 0x00000020
+#define R_MAC_DMA_CUR_DSCRB 0x00000028
+#define R_MAC_DMA_CUR_DSCRADDR 0x00000030
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1)
-#define R_MAC_DMA_OODPKTLOST_RX 0x00000038 /* rx only */
+#define R_MAC_DMA_OODPKTLOST_RX 0x00000038 /* rx only */
#endif /* 1250 PASS3 || 112x PASS1 */
/*
* RMON Counters
*/
-#define R_MAC_RMON_TX_BYTES 0x00000000
-#define R_MAC_RMON_COLLISIONS 0x00000008
-#define R_MAC_RMON_LATE_COL 0x00000010
-#define R_MAC_RMON_EX_COL 0x00000018
-#define R_MAC_RMON_FCS_ERROR 0x00000020
-#define R_MAC_RMON_TX_ABORT 0x00000028
+#define R_MAC_RMON_TX_BYTES 0x00000000
+#define R_MAC_RMON_COLLISIONS 0x00000008
+#define R_MAC_RMON_LATE_COL 0x00000010
+#define R_MAC_RMON_EX_COL 0x00000018
+#define R_MAC_RMON_FCS_ERROR 0x00000020
+#define R_MAC_RMON_TX_ABORT 0x00000028
/* Counter #6 (0x30) now reserved */
-#define R_MAC_RMON_TX_BAD 0x00000038
-#define R_MAC_RMON_TX_GOOD 0x00000040
-#define R_MAC_RMON_TX_RUNT 0x00000048
-#define R_MAC_RMON_TX_OVERSIZE 0x00000050
-#define R_MAC_RMON_RX_BYTES 0x00000080
-#define R_MAC_RMON_RX_MCAST 0x00000088
-#define R_MAC_RMON_RX_BCAST 0x00000090
-#define R_MAC_RMON_RX_BAD 0x00000098
-#define R_MAC_RMON_RX_GOOD 0x000000A0
-#define R_MAC_RMON_RX_RUNT 0x000000A8
-#define R_MAC_RMON_RX_OVERSIZE 0x000000B0
-#define R_MAC_RMON_RX_FCS_ERROR 0x000000B8
-#define R_MAC_RMON_RX_LENGTH_ERROR 0x000000C0
-#define R_MAC_RMON_RX_CODE_ERROR 0x000000C8
-#define R_MAC_RMON_RX_ALIGN_ERROR 0x000000D0
+#define R_MAC_RMON_TX_BAD 0x00000038
+#define R_MAC_RMON_TX_GOOD 0x00000040
+#define R_MAC_RMON_TX_RUNT 0x00000048
+#define R_MAC_RMON_TX_OVERSIZE 0x00000050
+#define R_MAC_RMON_RX_BYTES 0x00000080
+#define R_MAC_RMON_RX_MCAST 0x00000088
+#define R_MAC_RMON_RX_BCAST 0x00000090
+#define R_MAC_RMON_RX_BAD 0x00000098
+#define R_MAC_RMON_RX_GOOD 0x000000A0
+#define R_MAC_RMON_RX_RUNT 0x000000A8
+#define R_MAC_RMON_RX_OVERSIZE 0x000000B0
+#define R_MAC_RMON_RX_FCS_ERROR 0x000000B8
+#define R_MAC_RMON_RX_LENGTH_ERROR 0x000000C0
+#define R_MAC_RMON_RX_CODE_ERROR 0x000000C8
+#define R_MAC_RMON_RX_ALIGN_ERROR 0x000000D0
/* Updated to spec 0.2 */
-#define R_MAC_CFG 0x00000100
-#define R_MAC_THRSH_CFG 0x00000108
-#define R_MAC_VLANTAG 0x00000110
-#define R_MAC_FRAMECFG 0x00000118
-#define R_MAC_EOPCNT 0x00000120
-#define R_MAC_FIFO_PTRS 0x00000128
-#define R_MAC_ADFILTER_CFG 0x00000200
-#define R_MAC_ETHERNET_ADDR 0x00000208
-#define R_MAC_PKT_TYPE 0x00000210
+#define R_MAC_CFG 0x00000100
+#define R_MAC_THRSH_CFG 0x00000108
+#define R_MAC_VLANTAG 0x00000110
+#define R_MAC_FRAMECFG 0x00000118
+#define R_MAC_EOPCNT 0x00000120
+#define R_MAC_FIFO_PTRS 0x00000128
+#define R_MAC_ADFILTER_CFG 0x00000200
+#define R_MAC_ETHERNET_ADDR 0x00000208
+#define R_MAC_PKT_TYPE 0x00000210
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define R_MAC_ADMASK0 0x00000218
#define R_MAC_ADMASK1 0x00000220
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define R_MAC_HASH_BASE 0x00000240
-#define R_MAC_ADDR_BASE 0x00000280
-#define R_MAC_CHLO0_BASE 0x00000300
-#define R_MAC_CHUP0_BASE 0x00000320
-#define R_MAC_ENABLE 0x00000400
-#define R_MAC_STATUS 0x00000408
-#define R_MAC_INT_MASK 0x00000410
-#define R_MAC_TXD_CTL 0x00000420
-#define R_MAC_MDIO 0x00000428
+#define R_MAC_HASH_BASE 0x00000240
+#define R_MAC_ADDR_BASE 0x00000280
+#define R_MAC_CHLO0_BASE 0x00000300
+#define R_MAC_CHUP0_BASE 0x00000320
+#define R_MAC_ENABLE 0x00000400
+#define R_MAC_STATUS 0x00000408
+#define R_MAC_INT_MASK 0x00000410
+#define R_MAC_TXD_CTL 0x00000420
+#define R_MAC_MDIO 0x00000428
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define R_MAC_STATUS1 0x00000430
+#define R_MAC_STATUS1 0x00000430
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
-#define R_MAC_DEBUG_STATUS 0x00000448
+#define R_MAC_DEBUG_STATUS 0x00000448
#define MAC_HASH_COUNT 8
#define MAC_ADDR_COUNT 8
@@ -273,11 +273,11 @@
#if SIBYTE_HDR_FEATURE_1250_112x /* This MC only on 1250 & 112x */
-#define R_DUART_NUM_PORTS 2
+#define R_DUART_NUM_PORTS 2
-#define A_DUART 0x0010060000
+#define A_DUART 0x0010060000
-#define DUART_CHANREG_SPACING 0x100
+#define DUART_CHANREG_SPACING 0x100
#define A_DUART_CHANREG(chan, reg) \
(A_DUART + DUART_CHANREG_SPACING * ((chan) + 1) + (reg))
@@ -341,44 +341,44 @@
* These constants are the absolute addresses.
*/
-#define A_DUART_MODE_REG_1_A 0x0010060100
-#define A_DUART_MODE_REG_2_A 0x0010060110
-#define A_DUART_STATUS_A 0x0010060120
-#define A_DUART_CLK_SEL_A 0x0010060130
-#define A_DUART_CMD_A 0x0010060150
-#define A_DUART_RX_HOLD_A 0x0010060160
-#define A_DUART_TX_HOLD_A 0x0010060170
-
-#define A_DUART_MODE_REG_1_B 0x0010060200
-#define A_DUART_MODE_REG_2_B 0x0010060210
-#define A_DUART_STATUS_B 0x0010060220
-#define A_DUART_CLK_SEL_B 0x0010060230
-#define A_DUART_CMD_B 0x0010060250
-#define A_DUART_RX_HOLD_B 0x0010060260
-#define A_DUART_TX_HOLD_B 0x0010060270
-
-#define A_DUART_INPORT_CHNG 0x0010060300
-#define A_DUART_AUX_CTRL 0x0010060310
-#define A_DUART_ISR_A 0x0010060320
-#define A_DUART_IMR_A 0x0010060330
-#define A_DUART_ISR_B 0x0010060340
-#define A_DUART_IMR_B 0x0010060350
-#define A_DUART_OUT_PORT 0x0010060360
-#define A_DUART_OPCR 0x0010060370
-#define A_DUART_IN_PORT 0x0010060380
-#define A_DUART_ISR 0x0010060390
-#define A_DUART_IMR 0x00100603A0
-#define A_DUART_SET_OPR 0x00100603B0
-#define A_DUART_CLEAR_OPR 0x00100603C0
-#define A_DUART_INPORT_CHNG_A 0x00100603D0
-#define A_DUART_INPORT_CHNG_B 0x00100603E0
+#define A_DUART_MODE_REG_1_A 0x0010060100
+#define A_DUART_MODE_REG_2_A 0x0010060110
+#define A_DUART_STATUS_A 0x0010060120
+#define A_DUART_CLK_SEL_A 0x0010060130
+#define A_DUART_CMD_A 0x0010060150
+#define A_DUART_RX_HOLD_A 0x0010060160
+#define A_DUART_TX_HOLD_A 0x0010060170
+
+#define A_DUART_MODE_REG_1_B 0x0010060200
+#define A_DUART_MODE_REG_2_B 0x0010060210
+#define A_DUART_STATUS_B 0x0010060220
+#define A_DUART_CLK_SEL_B 0x0010060230
+#define A_DUART_CMD_B 0x0010060250
+#define A_DUART_RX_HOLD_B 0x0010060260
+#define A_DUART_TX_HOLD_B 0x0010060270
+
+#define A_DUART_INPORT_CHNG 0x0010060300
+#define A_DUART_AUX_CTRL 0x0010060310
+#define A_DUART_ISR_A 0x0010060320
+#define A_DUART_IMR_A 0x0010060330
+#define A_DUART_ISR_B 0x0010060340
+#define A_DUART_IMR_B 0x0010060350
+#define A_DUART_OUT_PORT 0x0010060360
+#define A_DUART_OPCR 0x0010060370
+#define A_DUART_IN_PORT 0x0010060380
+#define A_DUART_ISR 0x0010060390
+#define A_DUART_IMR 0x00100603A0
+#define A_DUART_SET_OPR 0x00100603B0
+#define A_DUART_CLEAR_OPR 0x00100603C0
+#define A_DUART_INPORT_CHNG_A 0x00100603D0
+#define A_DUART_INPORT_CHNG_B 0x00100603E0
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_DUART_FULL_CTL_A 0x0010060140
#define A_DUART_FULL_CTL_B 0x0010060240
-#define A_DUART_OPCR_A 0x0010060180
-#define A_DUART_OPCR_B 0x0010060280
+#define A_DUART_OPCR_A 0x0010060180
+#define A_DUART_OPCR_B 0x0010060280
#define A_DUART_INPORT_CHNG_DEBUG 0x00100603F0
#endif /* 1250 PASS2 || 112x PASS1 */
@@ -391,94 +391,94 @@
#if SIBYTE_HDR_FEATURE_1250_112x /* sync serial only on 1250/112x */
-#define A_SER_BASE_0 0x0010060400
-#define A_SER_BASE_1 0x0010060800
-#define SER_SPACING 0x400
+#define A_SER_BASE_0 0x0010060400
+#define A_SER_BASE_1 0x0010060800
+#define SER_SPACING 0x400
-#define SER_DMA_TXRX_SPACING 0x80
+#define SER_DMA_TXRX_SPACING 0x80
-#define SER_NUM_PORTS 2
+#define SER_NUM_PORTS 2
-#define A_SER_CHANNEL_BASE(sernum) \
- (A_SER_BASE_0 + \
- SER_SPACING*(sernum))
+#define A_SER_CHANNEL_BASE(sernum) \
+ (A_SER_BASE_0 + \
+ SER_SPACING*(sernum))
-#define A_SER_REGISTER(sernum,reg) \
- (A_SER_BASE_0 + \
- SER_SPACING*(sernum) + (reg))
+#define A_SER_REGISTER(sernum,reg) \
+ (A_SER_BASE_0 + \
+ SER_SPACING*(sernum) + (reg))
#define R_SER_DMA_CHANNELS 0 /* Relative to A_SER_BASE_x */
#define A_SER_DMA_CHANNEL_BASE(sernum,txrx) \
- ((A_SER_CHANNEL_BASE(sernum)) + \
- R_SER_DMA_CHANNELS + \
- (SER_DMA_TXRX_SPACING*(txrx)))
+ ((A_SER_CHANNEL_BASE(sernum)) + \
+ R_SER_DMA_CHANNELS + \
+ (SER_DMA_TXRX_SPACING*(txrx)))
-#define A_SER_DMA_REGISTER(sernum, txrx, reg) \
- (A_SER_DMA_CHANNEL_BASE(sernum, txrx) + \
- (reg))
+#define A_SER_DMA_REGISTER(sernum, txrx, reg) \
+ (A_SER_DMA_CHANNEL_BASE(sernum, txrx) + \
+ (reg))
/*
* DMA channel registers, relative to A_SER_DMA_CHANNEL_BASE
*/
-#define R_SER_DMA_CONFIG0 0x00000000
-#define R_SER_DMA_CONFIG1 0x00000008
-#define R_SER_DMA_DSCR_BASE 0x00000010
-#define R_SER_DMA_DSCR_CNT 0x00000018
-#define R_SER_DMA_CUR_DSCRA 0x00000020
-#define R_SER_DMA_CUR_DSCRB 0x00000028
-#define R_SER_DMA_CUR_DSCRADDR 0x00000030
-
-#define R_SER_DMA_CONFIG0_RX 0x00000000
-#define R_SER_DMA_CONFIG1_RX 0x00000008
-#define R_SER_DMA_DSCR_BASE_RX 0x00000010
-#define R_SER_DMA_DSCR_COUNT_RX 0x00000018
-#define R_SER_DMA_CUR_DSCR_A_RX 0x00000020
-#define R_SER_DMA_CUR_DSCR_B_RX 0x00000028
+#define R_SER_DMA_CONFIG0 0x00000000
+#define R_SER_DMA_CONFIG1 0x00000008
+#define R_SER_DMA_DSCR_BASE 0x00000010
+#define R_SER_DMA_DSCR_CNT 0x00000018
+#define R_SER_DMA_CUR_DSCRA 0x00000020
+#define R_SER_DMA_CUR_DSCRB 0x00000028
+#define R_SER_DMA_CUR_DSCRADDR 0x00000030
+
+#define R_SER_DMA_CONFIG0_RX 0x00000000
+#define R_SER_DMA_CONFIG1_RX 0x00000008
+#define R_SER_DMA_DSCR_BASE_RX 0x00000010
+#define R_SER_DMA_DSCR_COUNT_RX 0x00000018
+#define R_SER_DMA_CUR_DSCR_A_RX 0x00000020
+#define R_SER_DMA_CUR_DSCR_B_RX 0x00000028
#define R_SER_DMA_CUR_DSCR_ADDR_RX 0x00000030
-#define R_SER_DMA_CONFIG0_TX 0x00000080
-#define R_SER_DMA_CONFIG1_TX 0x00000088
-#define R_SER_DMA_DSCR_BASE_TX 0x00000090
-#define R_SER_DMA_DSCR_COUNT_TX 0x00000098
-#define R_SER_DMA_CUR_DSCR_A_TX 0x000000A0
-#define R_SER_DMA_CUR_DSCR_B_TX 0x000000A8
+#define R_SER_DMA_CONFIG0_TX 0x00000080
+#define R_SER_DMA_CONFIG1_TX 0x00000088
+#define R_SER_DMA_DSCR_BASE_TX 0x00000090
+#define R_SER_DMA_DSCR_COUNT_TX 0x00000098
+#define R_SER_DMA_CUR_DSCR_A_TX 0x000000A0
+#define R_SER_DMA_CUR_DSCR_B_TX 0x000000A8
#define R_SER_DMA_CUR_DSCR_ADDR_TX 0x000000B0
-#define R_SER_MODE 0x00000100
-#define R_SER_MINFRM_SZ 0x00000108
-#define R_SER_MAXFRM_SZ 0x00000110
-#define R_SER_ADDR 0x00000118
-#define R_SER_USR0_ADDR 0x00000120
-#define R_SER_USR1_ADDR 0x00000128
-#define R_SER_USR2_ADDR 0x00000130
-#define R_SER_USR3_ADDR 0x00000138
-#define R_SER_CMD 0x00000140
-#define R_SER_TX_RD_THRSH 0x00000160
-#define R_SER_TX_WR_THRSH 0x00000168
-#define R_SER_RX_RD_THRSH 0x00000170
+#define R_SER_MODE 0x00000100
+#define R_SER_MINFRM_SZ 0x00000108
+#define R_SER_MAXFRM_SZ 0x00000110
+#define R_SER_ADDR 0x00000118
+#define R_SER_USR0_ADDR 0x00000120
+#define R_SER_USR1_ADDR 0x00000128
+#define R_SER_USR2_ADDR 0x00000130
+#define R_SER_USR3_ADDR 0x00000138
+#define R_SER_CMD 0x00000140
+#define R_SER_TX_RD_THRSH 0x00000160
+#define R_SER_TX_WR_THRSH 0x00000168
+#define R_SER_RX_RD_THRSH 0x00000170
#define R_SER_LINE_MODE 0x00000178
-#define R_SER_DMA_ENABLE 0x00000180
-#define R_SER_INT_MASK 0x00000190
-#define R_SER_STATUS 0x00000188
-#define R_SER_STATUS_DEBUG 0x000001A8
-#define R_SER_RX_TABLE_BASE 0x00000200
-#define SER_RX_TABLE_COUNT 16
-#define R_SER_TX_TABLE_BASE 0x00000300
-#define SER_TX_TABLE_COUNT 16
+#define R_SER_DMA_ENABLE 0x00000180
+#define R_SER_INT_MASK 0x00000190
+#define R_SER_STATUS 0x00000188
+#define R_SER_STATUS_DEBUG 0x000001A8
+#define R_SER_RX_TABLE_BASE 0x00000200
+#define SER_RX_TABLE_COUNT 16
+#define R_SER_TX_TABLE_BASE 0x00000300
+#define SER_TX_TABLE_COUNT 16
/* RMON Counters */
-#define R_SER_RMON_TX_BYTE_LO 0x000001C0
-#define R_SER_RMON_TX_BYTE_HI 0x000001C8
-#define R_SER_RMON_RX_BYTE_LO 0x000001D0
-#define R_SER_RMON_RX_BYTE_HI 0x000001D8
-#define R_SER_RMON_TX_UNDERRUN 0x000001E0
-#define R_SER_RMON_RX_OVERFLOW 0x000001E8
-#define R_SER_RMON_RX_ERRORS 0x000001F0
-#define R_SER_RMON_RX_BADADDR 0x000001F8
+#define R_SER_RMON_TX_BYTE_LO 0x000001C0
+#define R_SER_RMON_TX_BYTE_HI 0x000001C8
+#define R_SER_RMON_RX_BYTE_LO 0x000001D0
+#define R_SER_RMON_RX_BYTE_HI 0x000001D8
+#define R_SER_RMON_TX_UNDERRUN 0x000001E0
+#define R_SER_RMON_RX_OVERFLOW 0x000001E8
+#define R_SER_RMON_RX_ERRORS 0x000001F0
+#define R_SER_RMON_RX_BADADDR 0x000001F8
#endif /* 1250/112x */
@@ -486,38 +486,38 @@
* Generic Bus Registers
********************************************************************* */
-#define IO_EXT_CFG_COUNT 8
+#define IO_EXT_CFG_COUNT 8
#define A_IO_EXT_BASE 0x0010061000
#define A_IO_EXT_REG(r) (A_IO_EXT_BASE + (r))
-#define A_IO_EXT_CFG_BASE 0x0010061000
-#define A_IO_EXT_MULT_SIZE_BASE 0x0010061100
+#define A_IO_EXT_CFG_BASE 0x0010061000
+#define A_IO_EXT_MULT_SIZE_BASE 0x0010061100
#define A_IO_EXT_START_ADDR_BASE 0x0010061200
-#define A_IO_EXT_TIME_CFG0_BASE 0x0010061600
-#define A_IO_EXT_TIME_CFG1_BASE 0x0010061700
+#define A_IO_EXT_TIME_CFG0_BASE 0x0010061600
+#define A_IO_EXT_TIME_CFG1_BASE 0x0010061700
#define IO_EXT_REGISTER_SPACING 8
#define A_IO_EXT_CS_BASE(cs) (A_IO_EXT_CFG_BASE+IO_EXT_REGISTER_SPACING*(cs))
#define R_IO_EXT_REG(reg, cs) ((cs)*IO_EXT_REGISTER_SPACING + (reg))
#define R_IO_EXT_CFG 0x0000
-#define R_IO_EXT_MULT_SIZE 0x0100
+#define R_IO_EXT_MULT_SIZE 0x0100
#define R_IO_EXT_START_ADDR 0x0200
-#define R_IO_EXT_TIME_CFG0 0x0600
-#define R_IO_EXT_TIME_CFG1 0x0700
-
-
-#define A_IO_INTERRUPT_STATUS 0x0010061A00
-#define A_IO_INTERRUPT_DATA0 0x0010061A10
-#define A_IO_INTERRUPT_DATA1 0x0010061A18
-#define A_IO_INTERRUPT_DATA2 0x0010061A20
-#define A_IO_INTERRUPT_DATA3 0x0010061A28
-#define A_IO_INTERRUPT_ADDR0 0x0010061A30
-#define A_IO_INTERRUPT_ADDR1 0x0010061A40
-#define A_IO_INTERRUPT_PARITY 0x0010061A50
-#define A_IO_PCMCIA_CFG 0x0010061A60
-#define A_IO_PCMCIA_STATUS 0x0010061A70
+#define R_IO_EXT_TIME_CFG0 0x0600
+#define R_IO_EXT_TIME_CFG1 0x0700
+
+
+#define A_IO_INTERRUPT_STATUS 0x0010061A00
+#define A_IO_INTERRUPT_DATA0 0x0010061A10
+#define A_IO_INTERRUPT_DATA1 0x0010061A18
+#define A_IO_INTERRUPT_DATA2 0x0010061A20
+#define A_IO_INTERRUPT_DATA3 0x0010061A28
+#define A_IO_INTERRUPT_ADDR0 0x0010061A30
+#define A_IO_INTERRUPT_ADDR1 0x0010061A40
+#define A_IO_INTERRUPT_PARITY 0x0010061A50
+#define A_IO_PCMCIA_CFG 0x0010061A60
+#define A_IO_PCMCIA_STATUS 0x0010061A70
#define A_IO_DRIVE_0 0x0010061300
#define A_IO_DRIVE_1 0x0010061308
#define A_IO_DRIVE_2 0x0010061310
@@ -527,76 +527,76 @@
#define R_IO_DRIVE(x) ((x)*IO_DRIVE_REGISTER_SPACING)
#define A_IO_DRIVE(x) (A_IO_DRIVE_BASE + R_IO_DRIVE(x))
-#define R_IO_INTERRUPT_STATUS 0x0A00
-#define R_IO_INTERRUPT_DATA0 0x0A10
-#define R_IO_INTERRUPT_DATA1 0x0A18
-#define R_IO_INTERRUPT_DATA2 0x0A20
-#define R_IO_INTERRUPT_DATA3 0x0A28
-#define R_IO_INTERRUPT_ADDR0 0x0A30
-#define R_IO_INTERRUPT_ADDR1 0x0A40
-#define R_IO_INTERRUPT_PARITY 0x0A50
-#define R_IO_PCMCIA_CFG 0x0A60
-#define R_IO_PCMCIA_STATUS 0x0A70
+#define R_IO_INTERRUPT_STATUS 0x0A00
+#define R_IO_INTERRUPT_DATA0 0x0A10
+#define R_IO_INTERRUPT_DATA1 0x0A18
+#define R_IO_INTERRUPT_DATA2 0x0A20
+#define R_IO_INTERRUPT_DATA3 0x0A28
+#define R_IO_INTERRUPT_ADDR0 0x0A30
+#define R_IO_INTERRUPT_ADDR1 0x0A40
+#define R_IO_INTERRUPT_PARITY 0x0A50
+#define R_IO_PCMCIA_CFG 0x0A60
+#define R_IO_PCMCIA_STATUS 0x0A70
/* *********************************************************************
* GPIO Registers
********************************************************************* */
-#define A_GPIO_CLR_EDGE 0x0010061A80
-#define A_GPIO_INT_TYPE 0x0010061A88
-#define A_GPIO_INPUT_INVERT 0x0010061A90
-#define A_GPIO_GLITCH 0x0010061A98
-#define A_GPIO_READ 0x0010061AA0
-#define A_GPIO_DIRECTION 0x0010061AA8
-#define A_GPIO_PIN_CLR 0x0010061AB0
-#define A_GPIO_PIN_SET 0x0010061AB8
+#define A_GPIO_CLR_EDGE 0x0010061A80
+#define A_GPIO_INT_TYPE 0x0010061A88
+#define A_GPIO_INPUT_INVERT 0x0010061A90
+#define A_GPIO_GLITCH 0x0010061A98
+#define A_GPIO_READ 0x0010061AA0
+#define A_GPIO_DIRECTION 0x0010061AA8
+#define A_GPIO_PIN_CLR 0x0010061AB0
+#define A_GPIO_PIN_SET 0x0010061AB8
#define A_GPIO_BASE 0x0010061A80
-#define R_GPIO_CLR_EDGE 0x00
-#define R_GPIO_INT_TYPE 0x08
-#define R_GPIO_INPUT_INVERT 0x10
-#define R_GPIO_GLITCH 0x18
-#define R_GPIO_READ 0x20
-#define R_GPIO_DIRECTION 0x28
-#define R_GPIO_PIN_CLR 0x30
-#define R_GPIO_PIN_SET 0x38
+#define R_GPIO_CLR_EDGE 0x00
+#define R_GPIO_INT_TYPE 0x08
+#define R_GPIO_INPUT_INVERT 0x10
+#define R_GPIO_GLITCH 0x18
+#define R_GPIO_READ 0x20
+#define R_GPIO_DIRECTION 0x28
+#define R_GPIO_PIN_CLR 0x30
+#define R_GPIO_PIN_SET 0x38
/* *********************************************************************
* SMBus Registers
********************************************************************* */
-#define A_SMB_XTRA_0 0x0010060000
-#define A_SMB_XTRA_1 0x0010060008
-#define A_SMB_FREQ_0 0x0010060010
-#define A_SMB_FREQ_1 0x0010060018
-#define A_SMB_STATUS_0 0x0010060020
-#define A_SMB_STATUS_1 0x0010060028
-#define A_SMB_CMD_0 0x0010060030
-#define A_SMB_CMD_1 0x0010060038
-#define A_SMB_START_0 0x0010060040
-#define A_SMB_START_1 0x0010060048
-#define A_SMB_DATA_0 0x0010060050
-#define A_SMB_DATA_1 0x0010060058
-#define A_SMB_CONTROL_0 0x0010060060
-#define A_SMB_CONTROL_1 0x0010060068
-#define A_SMB_PEC_0 0x0010060070
-#define A_SMB_PEC_1 0x0010060078
-
-#define A_SMB_0 0x0010060000
-#define A_SMB_1 0x0010060008
-#define SMB_REGISTER_SPACING 0x8
-#define A_SMB_BASE(idx) (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
+#define A_SMB_XTRA_0 0x0010060000
+#define A_SMB_XTRA_1 0x0010060008
+#define A_SMB_FREQ_0 0x0010060010
+#define A_SMB_FREQ_1 0x0010060018
+#define A_SMB_STATUS_0 0x0010060020
+#define A_SMB_STATUS_1 0x0010060028
+#define A_SMB_CMD_0 0x0010060030
+#define A_SMB_CMD_1 0x0010060038
+#define A_SMB_START_0 0x0010060040
+#define A_SMB_START_1 0x0010060048
+#define A_SMB_DATA_0 0x0010060050
+#define A_SMB_DATA_1 0x0010060058
+#define A_SMB_CONTROL_0 0x0010060060
+#define A_SMB_CONTROL_1 0x0010060068
+#define A_SMB_PEC_0 0x0010060070
+#define A_SMB_PEC_1 0x0010060078
+
+#define A_SMB_0 0x0010060000
+#define A_SMB_1 0x0010060008
+#define SMB_REGISTER_SPACING 0x8
+#define A_SMB_BASE(idx) (A_SMB_0+(idx)*SMB_REGISTER_SPACING)
#define A_SMB_REGISTER(idx, reg) (A_SMB_BASE(idx)+(reg))
-#define R_SMB_XTRA 0x0000000000
-#define R_SMB_FREQ 0x0000000010
-#define R_SMB_STATUS 0x0000000020
-#define R_SMB_CMD 0x0000000030
-#define R_SMB_START 0x0000000040
-#define R_SMB_DATA 0x0000000050
-#define R_SMB_CONTROL 0x0000000060
-#define R_SMB_PEC 0x0000000070
+#define R_SMB_XTRA 0x0000000000
+#define R_SMB_FREQ 0x0000000010
+#define R_SMB_STATUS 0x0000000020
+#define R_SMB_CMD 0x0000000030
+#define R_SMB_START 0x0000000040
+#define R_SMB_DATA 0x0000000050
+#define R_SMB_CONTROL 0x0000000060
+#define R_SMB_PEC 0x0000000070
/* *********************************************************************
* Timer Registers
@@ -607,55 +607,55 @@
*/
#define A_SCD_WDOG_0 0x0010020050
-#define A_SCD_WDOG_1 0x0010020150
-#define SCD_WDOG_SPACING 0x100
+#define A_SCD_WDOG_1 0x0010020150
+#define SCD_WDOG_SPACING 0x100
#define SCD_NUM_WDOGS 2
-#define A_SCD_WDOG_BASE(w) (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
+#define A_SCD_WDOG_BASE(w) (A_SCD_WDOG_0+SCD_WDOG_SPACING*(w))
#define A_SCD_WDOG_REGISTER(w, r) (A_SCD_WDOG_BASE(w) + (r))
#define R_SCD_WDOG_INIT 0x0000000000
#define R_SCD_WDOG_CNT 0x0000000008
#define R_SCD_WDOG_CFG 0x0000000010
-#define A_SCD_WDOG_INIT_0 0x0010020050
-#define A_SCD_WDOG_CNT_0 0x0010020058
-#define A_SCD_WDOG_CFG_0 0x0010020060
+#define A_SCD_WDOG_INIT_0 0x0010020050
+#define A_SCD_WDOG_CNT_0 0x0010020058
+#define A_SCD_WDOG_CFG_0 0x0010020060
-#define A_SCD_WDOG_INIT_1 0x0010020150
-#define A_SCD_WDOG_CNT_1 0x0010020158
-#define A_SCD_WDOG_CFG_1 0x0010020160
+#define A_SCD_WDOG_INIT_1 0x0010020150
+#define A_SCD_WDOG_CNT_1 0x0010020158
+#define A_SCD_WDOG_CFG_1 0x0010020160
/*
* Generic timers
*/
#define A_SCD_TIMER_0 0x0010020070
-#define A_SCD_TIMER_1 0x0010020078
+#define A_SCD_TIMER_1 0x0010020078
#define A_SCD_TIMER_2 0x0010020170
-#define A_SCD_TIMER_3 0x0010020178
+#define A_SCD_TIMER_3 0x0010020178
#define SCD_NUM_TIMERS 4
-#define A_SCD_TIMER_BASE(w) (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
+#define A_SCD_TIMER_BASE(w) (A_SCD_TIMER_0+0x08*((w)&1)+0x100*(((w)&2)>>1))
#define A_SCD_TIMER_REGISTER(w, r) (A_SCD_TIMER_BASE(w) + (r))
#define R_SCD_TIMER_INIT 0x0000000000
#define R_SCD_TIMER_CNT 0x0000000010
#define R_SCD_TIMER_CFG 0x0000000020
-#define A_SCD_TIMER_INIT_0 0x0010020070
-#define A_SCD_TIMER_CNT_0 0x0010020080
-#define A_SCD_TIMER_CFG_0 0x0010020090
+#define A_SCD_TIMER_INIT_0 0x0010020070
+#define A_SCD_TIMER_CNT_0 0x0010020080
+#define A_SCD_TIMER_CFG_0 0x0010020090
-#define A_SCD_TIMER_INIT_1 0x0010020078
-#define A_SCD_TIMER_CNT_1 0x0010020088
-#define A_SCD_TIMER_CFG_1 0x0010020098
+#define A_SCD_TIMER_INIT_1 0x0010020078
+#define A_SCD_TIMER_CNT_1 0x0010020088
+#define A_SCD_TIMER_CFG_1 0x0010020098
-#define A_SCD_TIMER_INIT_2 0x0010020170
-#define A_SCD_TIMER_CNT_2 0x0010020180
-#define A_SCD_TIMER_CFG_2 0x0010020190
+#define A_SCD_TIMER_INIT_2 0x0010020170
+#define A_SCD_TIMER_CNT_2 0x0010020180
+#define A_SCD_TIMER_CFG_2 0x0010020190
-#define A_SCD_TIMER_INIT_3 0x0010020178
-#define A_SCD_TIMER_CNT_3 0x0010020188
-#define A_SCD_TIMER_CFG_3 0x0010020198
+#define A_SCD_TIMER_INIT_3 0x0010020178
+#define A_SCD_TIMER_CNT_3 0x0010020188
+#define A_SCD_TIMER_CFG_3 0x0010020198
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_SCD_SCRATCH 0x0010020C10
@@ -671,28 +671,28 @@
* System Control Registers
********************************************************************* */
-#define A_SCD_SYSTEM_REVISION 0x0010020000
-#define A_SCD_SYSTEM_CFG 0x0010020008
-#define A_SCD_SYSTEM_MANUF 0x0010038000
+#define A_SCD_SYSTEM_REVISION 0x0010020000
+#define A_SCD_SYSTEM_CFG 0x0010020008
+#define A_SCD_SYSTEM_MANUF 0x0010038000
/* *********************************************************************
* System Address Trap Registers
********************************************************************* */
-#define A_ADDR_TRAP_INDEX 0x00100200B0
-#define A_ADDR_TRAP_REG 0x00100200B8
-#define A_ADDR_TRAP_UP_0 0x0010020400
-#define A_ADDR_TRAP_UP_1 0x0010020408
-#define A_ADDR_TRAP_UP_2 0x0010020410
-#define A_ADDR_TRAP_UP_3 0x0010020418
-#define A_ADDR_TRAP_DOWN_0 0x0010020420
-#define A_ADDR_TRAP_DOWN_1 0x0010020428
-#define A_ADDR_TRAP_DOWN_2 0x0010020430
-#define A_ADDR_TRAP_DOWN_3 0x0010020438
-#define A_ADDR_TRAP_CFG_0 0x0010020440
-#define A_ADDR_TRAP_CFG_1 0x0010020448
-#define A_ADDR_TRAP_CFG_2 0x0010020450
-#define A_ADDR_TRAP_CFG_3 0x0010020458
+#define A_ADDR_TRAP_INDEX 0x00100200B0
+#define A_ADDR_TRAP_REG 0x00100200B8
+#define A_ADDR_TRAP_UP_0 0x0010020400
+#define A_ADDR_TRAP_UP_1 0x0010020408
+#define A_ADDR_TRAP_UP_2 0x0010020410
+#define A_ADDR_TRAP_UP_3 0x0010020418
+#define A_ADDR_TRAP_DOWN_0 0x0010020420
+#define A_ADDR_TRAP_DOWN_1 0x0010020428
+#define A_ADDR_TRAP_DOWN_2 0x0010020430
+#define A_ADDR_TRAP_DOWN_3 0x0010020438
+#define A_ADDR_TRAP_CFG_0 0x0010020440
+#define A_ADDR_TRAP_CFG_1 0x0010020448
+#define A_ADDR_TRAP_CFG_2 0x0010020450
+#define A_ADDR_TRAP_CFG_3 0x0010020458
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
#define A_ADDR_TRAP_REG_DEBUG 0x0010020460
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
@@ -708,31 +708,31 @@
* System Interrupt Mapper Registers
********************************************************************* */
-#define A_IMR_CPU0_BASE 0x0010020000
-#define A_IMR_CPU1_BASE 0x0010022000
-#define IMR_REGISTER_SPACING 0x2000
-#define IMR_REGISTER_SPACING_SHIFT 13
+#define A_IMR_CPU0_BASE 0x0010020000
+#define A_IMR_CPU1_BASE 0x0010022000
+#define IMR_REGISTER_SPACING 0x2000
+#define IMR_REGISTER_SPACING_SHIFT 13
#define A_IMR_MAPPER(cpu) (A_IMR_CPU0_BASE+(cpu)*IMR_REGISTER_SPACING)
#define A_IMR_REGISTER(cpu, reg) (A_IMR_MAPPER(cpu)+(reg))
-#define R_IMR_INTERRUPT_DIAG 0x0010
-#define R_IMR_INTERRUPT_LDT 0x0018
-#define R_IMR_INTERRUPT_MASK 0x0028
-#define R_IMR_INTERRUPT_TRACE 0x0038
-#define R_IMR_INTERRUPT_SOURCE_STATUS 0x0040
-#define R_IMR_LDT_INTERRUPT_SET 0x0048
-#define R_IMR_LDT_INTERRUPT 0x0018
-#define R_IMR_LDT_INTERRUPT_CLR 0x0020
-#define R_IMR_MAILBOX_CPU 0x00c0
-#define R_IMR_ALIAS_MAILBOX_CPU 0x1000
-#define R_IMR_MAILBOX_SET_CPU 0x00C8
-#define R_IMR_ALIAS_MAILBOX_SET_CPU 0x1008
-#define R_IMR_MAILBOX_CLR_CPU 0x00D0
-#define R_IMR_INTERRUPT_STATUS_BASE 0x0100
-#define R_IMR_INTERRUPT_STATUS_COUNT 7
-#define R_IMR_INTERRUPT_MAP_BASE 0x0200
-#define R_IMR_INTERRUPT_MAP_COUNT 64
+#define R_IMR_INTERRUPT_DIAG 0x0010
+#define R_IMR_INTERRUPT_LDT 0x0018
+#define R_IMR_INTERRUPT_MASK 0x0028
+#define R_IMR_INTERRUPT_TRACE 0x0038
+#define R_IMR_INTERRUPT_SOURCE_STATUS 0x0040
+#define R_IMR_LDT_INTERRUPT_SET 0x0048
+#define R_IMR_LDT_INTERRUPT 0x0018
+#define R_IMR_LDT_INTERRUPT_CLR 0x0020
+#define R_IMR_MAILBOX_CPU 0x00c0
+#define R_IMR_ALIAS_MAILBOX_CPU 0x1000
+#define R_IMR_MAILBOX_SET_CPU 0x00C8
+#define R_IMR_ALIAS_MAILBOX_SET_CPU 0x1008
+#define R_IMR_MAILBOX_CLR_CPU 0x00D0
+#define R_IMR_INTERRUPT_STATUS_BASE 0x0100
+#define R_IMR_INTERRUPT_STATUS_COUNT 7
+#define R_IMR_INTERRUPT_MAP_BASE 0x0200
+#define R_IMR_INTERRUPT_MAP_COUNT 64
/*
* these macros work together to build the address of a mailbox
@@ -746,11 +746,11 @@
* System Performance Counter Registers
********************************************************************* */
-#define A_SCD_PERF_CNT_CFG 0x00100204C0
-#define A_SCD_PERF_CNT_0 0x00100204D0
-#define A_SCD_PERF_CNT_1 0x00100204D8
-#define A_SCD_PERF_CNT_2 0x00100204E0
-#define A_SCD_PERF_CNT_3 0x00100204E8
+#define A_SCD_PERF_CNT_CFG 0x00100204C0
+#define A_SCD_PERF_CNT_0 0x00100204D0
+#define A_SCD_PERF_CNT_1 0x00100204D8
+#define A_SCD_PERF_CNT_2 0x00100204E0
+#define A_SCD_PERF_CNT_3 0x00100204E8
#define SCD_NUM_PERF_CNT 4
#define SCD_PERF_CNT_SPACING 8
@@ -760,46 +760,46 @@
* System Bus Watcher Registers
********************************************************************* */
-#define A_SCD_BUS_ERR_STATUS 0x0010020880
+#define A_SCD_BUS_ERR_STATUS 0x0010020880
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define A_SCD_BUS_ERR_STATUS_DEBUG 0x00100208D0
-#define A_BUS_ERR_STATUS_DEBUG 0x00100208D0
+#define A_BUS_ERR_STATUS_DEBUG 0x00100208D0
#endif /* 1250 PASS2 || 112x PASS1 */
-#define A_BUS_ERR_DATA_0 0x00100208A0
-#define A_BUS_ERR_DATA_1 0x00100208A8
-#define A_BUS_ERR_DATA_2 0x00100208B0
-#define A_BUS_ERR_DATA_3 0x00100208B8
-#define A_BUS_L2_ERRORS 0x00100208C0
-#define A_BUS_MEM_IO_ERRORS 0x00100208C8
+#define A_BUS_ERR_DATA_0 0x00100208A0
+#define A_BUS_ERR_DATA_1 0x00100208A8
+#define A_BUS_ERR_DATA_2 0x00100208B0
+#define A_BUS_ERR_DATA_3 0x00100208B8
+#define A_BUS_L2_ERRORS 0x00100208C0
+#define A_BUS_MEM_IO_ERRORS 0x00100208C8
/* *********************************************************************
* System Debug Controller Registers
********************************************************************* */
-#define A_SCD_JTAG_BASE 0x0010000000
+#define A_SCD_JTAG_BASE 0x0010000000
/* *********************************************************************
* System Trace Buffer Registers
********************************************************************* */
-#define A_SCD_TRACE_CFG 0x0010020A00
-#define A_SCD_TRACE_READ 0x0010020A08
-#define A_SCD_TRACE_EVENT_0 0x0010020A20
-#define A_SCD_TRACE_EVENT_1 0x0010020A28
-#define A_SCD_TRACE_EVENT_2 0x0010020A30
-#define A_SCD_TRACE_EVENT_3 0x0010020A38
-#define A_SCD_TRACE_SEQUENCE_0 0x0010020A40
-#define A_SCD_TRACE_SEQUENCE_1 0x0010020A48
-#define A_SCD_TRACE_SEQUENCE_2 0x0010020A50
-#define A_SCD_TRACE_SEQUENCE_3 0x0010020A58
-#define A_SCD_TRACE_EVENT_4 0x0010020A60
-#define A_SCD_TRACE_EVENT_5 0x0010020A68
-#define A_SCD_TRACE_EVENT_6 0x0010020A70
-#define A_SCD_TRACE_EVENT_7 0x0010020A78
-#define A_SCD_TRACE_SEQUENCE_4 0x0010020A80
-#define A_SCD_TRACE_SEQUENCE_5 0x0010020A88
-#define A_SCD_TRACE_SEQUENCE_6 0x0010020A90
-#define A_SCD_TRACE_SEQUENCE_7 0x0010020A98
+#define A_SCD_TRACE_CFG 0x0010020A00
+#define A_SCD_TRACE_READ 0x0010020A08
+#define A_SCD_TRACE_EVENT_0 0x0010020A20
+#define A_SCD_TRACE_EVENT_1 0x0010020A28
+#define A_SCD_TRACE_EVENT_2 0x0010020A30
+#define A_SCD_TRACE_EVENT_3 0x0010020A38
+#define A_SCD_TRACE_SEQUENCE_0 0x0010020A40
+#define A_SCD_TRACE_SEQUENCE_1 0x0010020A48
+#define A_SCD_TRACE_SEQUENCE_2 0x0010020A50
+#define A_SCD_TRACE_SEQUENCE_3 0x0010020A58
+#define A_SCD_TRACE_EVENT_4 0x0010020A60
+#define A_SCD_TRACE_EVENT_5 0x0010020A68
+#define A_SCD_TRACE_EVENT_6 0x0010020A70
+#define A_SCD_TRACE_EVENT_7 0x0010020A78
+#define A_SCD_TRACE_SEQUENCE_4 0x0010020A80
+#define A_SCD_TRACE_SEQUENCE_5 0x0010020A88
+#define A_SCD_TRACE_SEQUENCE_6 0x0010020A90
+#define A_SCD_TRACE_SEQUENCE_7 0x0010020A98
#define TRACE_REGISTER_SPACING 8
#define TRACE_NUM_REGISTERS 8
@@ -814,8 +814,8 @@
* System Generic DMA Registers
********************************************************************* */
-#define A_DM_0 0x0010020B00
-#define A_DM_1 0x0010020B20
+#define A_DM_0 0x0010020B00
+#define A_DM_1 0x0010020B20
#define A_DM_2 0x0010020B40
#define A_DM_3 0x0010020B60
#define DM_REGISTER_SPACING 0x20
@@ -854,39 +854,39 @@
********************************************************************* */
#if SIBYTE_HDR_FEATURE_1250_112x
-#define A_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
-#define A_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
-#define A_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
-#define A_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
+#define A_PHYS_MEMORY_0 _SB_MAKE64(0x0000000000)
+#define A_PHYS_MEMORY_SIZE _SB_MAKE64((256*1024*1024))
+#define A_PHYS_SYSTEM_CTL _SB_MAKE64(0x0010000000)
+#define A_PHYS_IO_SYSTEM _SB_MAKE64(0x0010060000)
#define A_PHYS_GENBUS _SB_MAKE64(0x0010090000)
#define A_PHYS_GENBUS_END _SB_MAKE64(0x0040000000)
#define A_PHYS_LDTPCI_IO_MATCH_BYTES_32 _SB_MAKE64(0x0040000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BITS_32 _SB_MAKE64(0x0060000000)
-#define A_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
-#define A_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
-#define A_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
-#define A_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
-#define A_PHYS_LDT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
-#define A_PHYS_LDTPCI_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
-#define A_PHYS_LDT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
-#define A_PHYS_LDTPCI_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
-#define A_PHYS_LDTPCI_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
-#define A_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
-#define A_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
-#define A_PHYS_LDT_EXP _SB_MAKE64(0x8000000000)
-#define A_PHYS_PCI_FULLACCESS_BYTES _SB_MAKE64(0xF000000000)
-#define A_PHYS_PCI_FULLACCESS_BITS _SB_MAKE64(0xF100000000)
-#define A_PHYS_RESERVED _SB_MAKE64(0xF200000000)
-#define A_PHYS_RESERVED_SPECIAL_LDT _SB_MAKE64(0xFD00000000)
-
-#define A_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
-#define PHYS_L2CACHE_NUM_WAYS 4
-#define A_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000080000)
-#define A_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0180000)
-#define A_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D01A0000)
-#define A_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D01C0000)
-#define A_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D01E0000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS_32 _SB_MAKE64(0x0060000000)
+#define A_PHYS_MEMORY_1 _SB_MAKE64(0x0080000000)
+#define A_PHYS_MEMORY_2 _SB_MAKE64(0x0090000000)
+#define A_PHYS_MEMORY_3 _SB_MAKE64(0x00C0000000)
+#define A_PHYS_L2_CACHE_TEST _SB_MAKE64(0x00D0000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BYTES _SB_MAKE64(0x00D8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BYTES _SB_MAKE64(0x00DC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BYTES _SB_MAKE64(0x00DE000000)
+#define A_PHYS_LDT_SPECIAL_MATCH_BITS _SB_MAKE64(0x00F8000000)
+#define A_PHYS_LDTPCI_IO_MATCH_BITS _SB_MAKE64(0x00FC000000)
+#define A_PHYS_LDTPCI_CFG_MATCH_BITS _SB_MAKE64(0x00FE000000)
+#define A_PHYS_MEMORY_EXP _SB_MAKE64(0x0100000000)
+#define A_PHYS_MEMORY_EXP_SIZE _SB_MAKE64((508*1024*1024*1024))
+#define A_PHYS_LDT_EXP _SB_MAKE64(0x8000000000)
+#define A_PHYS_PCI_FULLACCESS_BYTES _SB_MAKE64(0xF000000000)
+#define A_PHYS_PCI_FULLACCESS_BITS _SB_MAKE64(0xF100000000)
+#define A_PHYS_RESERVED _SB_MAKE64(0xF200000000)
+#define A_PHYS_RESERVED_SPECIAL_LDT _SB_MAKE64(0xFD00000000)
+
+#define A_PHYS_L2CACHE_WAY_SIZE _SB_MAKE64(0x0000020000)
+#define PHYS_L2CACHE_NUM_WAYS 4
+#define A_PHYS_L2CACHE_TOTAL_SIZE _SB_MAKE64(0x0000080000)
+#define A_PHYS_L2CACHE_WAY0 _SB_MAKE64(0x00D0180000)
+#define A_PHYS_L2CACHE_WAY1 _SB_MAKE64(0x00D01A0000)
+#define A_PHYS_L2CACHE_WAY2 _SB_MAKE64(0x00D01C0000)
+#define A_PHYS_L2CACHE_WAY3 _SB_MAKE64(0x00D01E0000)
#endif
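
The hunks above all follow the same addressing convention: a flat absolute A_* address per register block, a per-instance spacing constant, R_* offsets for the individual registers, and helper macros (A_SMB_REGISTER(), A_SCD_WDOG_REGISTER(), A_DUART_CHANREG(), ...) that compose them. As a minimal standalone sketch, and not part of the patch itself, the snippet below copies a few of the constants shown in the hunks and checks that the indexed form and the flat A_* form name the same physical addresses; the main() wrapper and host-side asserts are illustrative assumptions, since the real header is consumed by kernel code rather than a userspace program.

/*
 * Illustrative sketch only -- constants copied from the sb1250_regs.h hunks
 * above; the standalone program structure is an assumption for demonstration.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* SMBus block: base, per-channel spacing, and a register offset. */
#define A_SMB_0                   0x0010060000ULL
#define SMB_REGISTER_SPACING      0x8
#define A_SMB_BASE(idx)           (A_SMB_0 + (idx) * SMB_REGISTER_SPACING)
#define A_SMB_REGISTER(idx, reg)  (A_SMB_BASE(idx) + (reg))
#define R_SMB_STATUS              0x0000000020
#define A_SMB_STATUS_1            0x0010060028ULL   /* flat form of the same register */

/* Watchdog block: same pattern with a different spacing. */
#define A_SCD_WDOG_0              0x0010020050ULL
#define SCD_WDOG_SPACING          0x100
#define A_SCD_WDOG_BASE(w)        (A_SCD_WDOG_0 + SCD_WDOG_SPACING * (w))
#define A_SCD_WDOG_REGISTER(w, r) (A_SCD_WDOG_BASE(w) + (r))
#define R_SCD_WDOG_CFG            0x0000000010
#define A_SCD_WDOG_CFG_1          0x0010020160ULL   /* flat form of the same register */

int main(void)
{
	/* The composed addresses match the flat A_* definitions. */
	assert(A_SMB_REGISTER(1, R_SMB_STATUS) == A_SMB_STATUS_1);
	assert(A_SCD_WDOG_REGISTER(1, R_SCD_WDOG_CFG) == A_SCD_WDOG_CFG_1);

	printf("SMB channel 1 status register: 0x%010llx\n",
	       (unsigned long long)A_SMB_REGISTER(1, R_SMB_STATUS));
	return 0;
}

The same composition applies throughout the file (DUART channels, serial DMA channels, generic timers), which is why the patch can realign the whitespace of these definitions without changing any computed address.
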
diff --git a/arch/mips/include/asm/sibyte/sb1250_scd.h b/arch/mips/include/asm/sibyte/sb1250_scd.h
index 615e165dbd2..d725f2f41af 100644
--- a/arch/mips/include/asm/sibyte/sb1250_scd.h
+++ b/arch/mips/include/asm/sibyte/sb1250_scd.h
@@ -44,10 +44,10 @@
#define M_SYS_RESERVED _SB_MAKEMASK(8, 0)
-#define S_SYS_REVISION _SB_MAKE64(8)
-#define M_SYS_REVISION _SB_MAKEMASK(8, S_SYS_REVISION)
-#define V_SYS_REVISION(x) _SB_MAKEVALUE(x, S_SYS_REVISION)
-#define G_SYS_REVISION(x) _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
+#define S_SYS_REVISION _SB_MAKE64(8)
+#define M_SYS_REVISION _SB_MAKEMASK(8, S_SYS_REVISION)
+#define V_SYS_REVISION(x) _SB_MAKEVALUE(x, S_SYS_REVISION)
+#define G_SYS_REVISION(x) _SB_GETVALUE(x, S_SYS_REVISION, M_SYS_REVISION)
#define K_SYS_REVISION_BCM1250_PASS1 0x01
@@ -93,10 +93,10 @@
#define K_SYS_REVISION_BCM1480_B0 0x11
/*Cache size - 23:20 of revision register*/
-#define S_SYS_L2C_SIZE _SB_MAKE64(20)
-#define M_SYS_L2C_SIZE _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
-#define V_SYS_L2C_SIZE(x) _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
-#define G_SYS_L2C_SIZE(x) _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
+#define S_SYS_L2C_SIZE _SB_MAKE64(20)
+#define M_SYS_L2C_SIZE _SB_MAKEMASK(4, S_SYS_L2C_SIZE)
+#define V_SYS_L2C_SIZE(x) _SB_MAKEVALUE(x, S_SYS_L2C_SIZE)
+#define G_SYS_L2C_SIZE(x) _SB_GETVALUE(x, S_SYS_L2C_SIZE, M_SYS_L2C_SIZE)
#define K_SYS_L2C_SIZE_1MB 0
#define K_SYS_L2C_SIZE_512KB 5
@@ -109,40 +109,40 @@
/* Number of CPU cores, bits 27:24 of revision register*/
-#define S_SYS_NUM_CPUS _SB_MAKE64(24)
-#define M_SYS_NUM_CPUS _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
-#define V_SYS_NUM_CPUS(x) _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
-#define G_SYS_NUM_CPUS(x) _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
+#define S_SYS_NUM_CPUS _SB_MAKE64(24)
+#define M_SYS_NUM_CPUS _SB_MAKEMASK(4, S_SYS_NUM_CPUS)
+#define V_SYS_NUM_CPUS(x) _SB_MAKEVALUE(x, S_SYS_NUM_CPUS)
+#define G_SYS_NUM_CPUS(x) _SB_GETVALUE(x, S_SYS_NUM_CPUS, M_SYS_NUM_CPUS)
/* XXX: discourage people from using these constants. */
-#define S_SYS_PART _SB_MAKE64(16)
-#define M_SYS_PART _SB_MAKEMASK(16, S_SYS_PART)
-#define V_SYS_PART(x) _SB_MAKEVALUE(x, S_SYS_PART)
-#define G_SYS_PART(x) _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
+#define S_SYS_PART _SB_MAKE64(16)
+#define M_SYS_PART _SB_MAKEMASK(16, S_SYS_PART)
+#define V_SYS_PART(x) _SB_MAKEVALUE(x, S_SYS_PART)
+#define G_SYS_PART(x) _SB_GETVALUE(x, S_SYS_PART, M_SYS_PART)
/* XXX: discourage people from using these constants. */
-#define K_SYS_PART_SB1250 0x1250
-#define K_SYS_PART_BCM1120 0x1121
-#define K_SYS_PART_BCM1125 0x1123
-#define K_SYS_PART_BCM1125H 0x1124
-#define K_SYS_PART_BCM1122 0x1113
+#define K_SYS_PART_SB1250 0x1250
+#define K_SYS_PART_BCM1120 0x1121
+#define K_SYS_PART_BCM1125 0x1123
+#define K_SYS_PART_BCM1125H 0x1124
+#define K_SYS_PART_BCM1122 0x1113
/* The "peripheral set" (SOC type) is the low 4 bits of the "part" field. */
-#define S_SYS_SOC_TYPE _SB_MAKE64(16)
-#define M_SYS_SOC_TYPE _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
-#define V_SYS_SOC_TYPE(x) _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
-#define G_SYS_SOC_TYPE(x) _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
-
-#define K_SYS_SOC_TYPE_BCM1250 0x0
-#define K_SYS_SOC_TYPE_BCM1120 0x1
-#define K_SYS_SOC_TYPE_BCM1250_ALT 0x2 /* 1250pass2 w/ 1/4 L2. */
-#define K_SYS_SOC_TYPE_BCM1125 0x3
-#define K_SYS_SOC_TYPE_BCM1125H 0x4
-#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5 /* 1250pass2 w/ 1/2 L2. */
-#define K_SYS_SOC_TYPE_BCM1x80 0x6
-#define K_SYS_SOC_TYPE_BCM1x55 0x7
+#define S_SYS_SOC_TYPE _SB_MAKE64(16)
+#define M_SYS_SOC_TYPE _SB_MAKEMASK(4, S_SYS_SOC_TYPE)
+#define V_SYS_SOC_TYPE(x) _SB_MAKEVALUE(x, S_SYS_SOC_TYPE)
+#define G_SYS_SOC_TYPE(x) _SB_GETVALUE(x, S_SYS_SOC_TYPE, M_SYS_SOC_TYPE)
+
+#define K_SYS_SOC_TYPE_BCM1250 0x0
+#define K_SYS_SOC_TYPE_BCM1120 0x1
+#define K_SYS_SOC_TYPE_BCM1250_ALT 0x2 /* 1250pass2 w/ 1/4 L2. */
+#define K_SYS_SOC_TYPE_BCM1125 0x3
+#define K_SYS_SOC_TYPE_BCM1125H 0x4
+#define K_SYS_SOC_TYPE_BCM1250_ALT2 0x5 /* 1250pass2 w/ 1/2 L2. */
+#define K_SYS_SOC_TYPE_BCM1x80 0x6
+#define K_SYS_SOC_TYPE_BCM1x55 0x7
/*
* Calculate correct SOC type given a copy of system revision register.
@@ -169,10 +169,10 @@
? K_SYS_SOC_TYPE_BCM1250 : G_SYS_SOC_TYPE(sysrev))
#endif
-#define S_SYS_WID _SB_MAKE64(32)
-#define M_SYS_WID _SB_MAKEMASK(32, S_SYS_WID)
-#define V_SYS_WID(x) _SB_MAKEVALUE(x, S_SYS_WID)
-#define G_SYS_WID(x) _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
+#define S_SYS_WID _SB_MAKE64(32)
+#define M_SYS_WID _SB_MAKEMASK(32, S_SYS_WID)
+#define V_SYS_WID(x) _SB_MAKEVALUE(x, S_SYS_WID)
+#define G_SYS_WID(x) _SB_GETVALUE(x, S_SYS_WID, M_SYS_WID)
/*
* System Manufacturing Register
@@ -181,37 +181,37 @@
#if SIBYTE_HDR_FEATURE_1250_112x
/* Wafer ID: bits 31:0 */
-#define S_SYS_WAFERID1_200 _SB_MAKE64(0)
-#define M_SYS_WAFERID1_200 _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
-#define V_SYS_WAFERID1_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
-#define G_SYS_WAFERID1_200(x) _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
+#define S_SYS_WAFERID1_200 _SB_MAKE64(0)
+#define M_SYS_WAFERID1_200 _SB_MAKEMASK(32, S_SYS_WAFERID1_200)
+#define V_SYS_WAFERID1_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID1_200)
+#define G_SYS_WAFERID1_200(x) _SB_GETVALUE(x, S_SYS_WAFERID1_200, M_SYS_WAFERID1_200)
-#define S_SYS_BIN _SB_MAKE64(32)
-#define M_SYS_BIN _SB_MAKEMASK(4, S_SYS_BIN)
-#define V_SYS_BIN(x) _SB_MAKEVALUE(x, S_SYS_BIN)
-#define G_SYS_BIN(x) _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
+#define S_SYS_BIN _SB_MAKE64(32)
+#define M_SYS_BIN _SB_MAKEMASK(4, S_SYS_BIN)
+#define V_SYS_BIN(x) _SB_MAKEVALUE(x, S_SYS_BIN)
+#define G_SYS_BIN(x) _SB_GETVALUE(x, S_SYS_BIN, M_SYS_BIN)
/* Wafer ID: bits 39:36 */
-#define S_SYS_WAFERID2_200 _SB_MAKE64(36)
-#define M_SYS_WAFERID2_200 _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
-#define V_SYS_WAFERID2_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
-#define G_SYS_WAFERID2_200(x) _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
+#define S_SYS_WAFERID2_200 _SB_MAKE64(36)
+#define M_SYS_WAFERID2_200 _SB_MAKEMASK(4, S_SYS_WAFERID2_200)
+#define V_SYS_WAFERID2_200(x) _SB_MAKEVALUE(x, S_SYS_WAFERID2_200)
+#define G_SYS_WAFERID2_200(x) _SB_GETVALUE(x, S_SYS_WAFERID2_200, M_SYS_WAFERID2_200)
/* Wafer ID: bits 39:0 */
-#define S_SYS_WAFERID_300 _SB_MAKE64(0)
-#define M_SYS_WAFERID_300 _SB_MAKEMASK(40, S_SYS_WAFERID_300)
-#define V_SYS_WAFERID_300(x) _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
-#define G_SYS_WAFERID_300(x) _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
-
-#define S_SYS_XPOS _SB_MAKE64(40)
-#define M_SYS_XPOS _SB_MAKEMASK(6, S_SYS_XPOS)
-#define V_SYS_XPOS(x) _SB_MAKEVALUE(x, S_SYS_XPOS)
-#define G_SYS_XPOS(x) _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
-
-#define S_SYS_YPOS _SB_MAKE64(46)
-#define M_SYS_YPOS _SB_MAKEMASK(6, S_SYS_YPOS)
-#define V_SYS_YPOS(x) _SB_MAKEVALUE(x, S_SYS_YPOS)
-#define G_SYS_YPOS(x) _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
+#define S_SYS_WAFERID_300 _SB_MAKE64(0)
+#define M_SYS_WAFERID_300 _SB_MAKEMASK(40, S_SYS_WAFERID_300)
+#define V_SYS_WAFERID_300(x) _SB_MAKEVALUE(x, S_SYS_WAFERID_300)
+#define G_SYS_WAFERID_300(x) _SB_GETVALUE(x, S_SYS_WAFERID_300, M_SYS_WAFERID_300)
+
+#define S_SYS_XPOS _SB_MAKE64(40)
+#define M_SYS_XPOS _SB_MAKEMASK(6, S_SYS_XPOS)
+#define V_SYS_XPOS(x) _SB_MAKEVALUE(x, S_SYS_XPOS)
+#define G_SYS_XPOS(x) _SB_GETVALUE(x, S_SYS_XPOS, M_SYS_XPOS)
+
+#define S_SYS_YPOS _SB_MAKE64(46)
+#define M_SYS_YPOS _SB_MAKEMASK(6, S_SYS_YPOS)
+#define V_SYS_YPOS(x) _SB_MAKEVALUE(x, S_SYS_YPOS)
+#define G_SYS_YPOS(x) _SB_GETVALUE(x, S_SYS_YPOS, M_SYS_YPOS)
#endif
@@ -221,55 +221,55 @@
*/
#if SIBYTE_HDR_FEATURE_1250_112x
-#define M_SYS_LDT_PLL_BYP _SB_MAKEMASK1(3)
+#define M_SYS_LDT_PLL_BYP _SB_MAKEMASK1(3)
#define M_SYS_PCI_SYNC_TEST_MODE _SB_MAKEMASK1(4)
-#define M_SYS_IOB0_DIV _SB_MAKEMASK1(5)
-#define M_SYS_IOB1_DIV _SB_MAKEMASK1(6)
-
-#define S_SYS_PLL_DIV _SB_MAKE64(7)
-#define M_SYS_PLL_DIV _SB_MAKEMASK(5, S_SYS_PLL_DIV)
-#define V_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
-#define G_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
-
-#define M_SYS_SER0_ENABLE _SB_MAKEMASK1(12)
-#define M_SYS_SER0_RSTB_EN _SB_MAKEMASK1(13)
-#define M_SYS_SER1_ENABLE _SB_MAKEMASK1(14)
-#define M_SYS_SER1_RSTB_EN _SB_MAKEMASK1(15)
-#define M_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
-
-#define S_SYS_BOOT_MODE _SB_MAKE64(17)
-#define M_SYS_BOOT_MODE _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
-#define V_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
-#define G_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
-#define K_SYS_BOOT_MODE_ROM32 0
-#define K_SYS_BOOT_MODE_ROM8 1
+#define M_SYS_IOB0_DIV _SB_MAKEMASK1(5)
+#define M_SYS_IOB1_DIV _SB_MAKEMASK1(6)
+
+#define S_SYS_PLL_DIV _SB_MAKE64(7)
+#define M_SYS_PLL_DIV _SB_MAKEMASK(5, S_SYS_PLL_DIV)
+#define V_SYS_PLL_DIV(x) _SB_MAKEVALUE(x, S_SYS_PLL_DIV)
+#define G_SYS_PLL_DIV(x) _SB_GETVALUE(x, S_SYS_PLL_DIV, M_SYS_PLL_DIV)
+
+#define M_SYS_SER0_ENABLE _SB_MAKEMASK1(12)
+#define M_SYS_SER0_RSTB_EN _SB_MAKEMASK1(13)
+#define M_SYS_SER1_ENABLE _SB_MAKEMASK1(14)
+#define M_SYS_SER1_RSTB_EN _SB_MAKEMASK1(15)
+#define M_SYS_PCMCIA_ENABLE _SB_MAKEMASK1(16)
+
+#define S_SYS_BOOT_MODE _SB_MAKE64(17)
+#define M_SYS_BOOT_MODE _SB_MAKEMASK(2, S_SYS_BOOT_MODE)
+#define V_SYS_BOOT_MODE(x) _SB_MAKEVALUE(x, S_SYS_BOOT_MODE)
+#define G_SYS_BOOT_MODE(x) _SB_GETVALUE(x, S_SYS_BOOT_MODE, M_SYS_BOOT_MODE)
+#define K_SYS_BOOT_MODE_ROM32 0
+#define K_SYS_BOOT_MODE_ROM8 1
#define K_SYS_BOOT_MODE_SMBUS_SMALL 2
#define K_SYS_BOOT_MODE_SMBUS_BIG 3
-#define M_SYS_PCI_HOST _SB_MAKEMASK1(19)
-#define M_SYS_PCI_ARBITER _SB_MAKEMASK1(20)
-#define M_SYS_SOUTH_ON_LDT _SB_MAKEMASK1(21)
-#define M_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
-#define M_SYS_GENCLK_EN _SB_MAKEMASK1(23)
-#define M_SYS_LDT_TEST_EN _SB_MAKEMASK1(24)
-#define M_SYS_GEN_PARITY_EN _SB_MAKEMASK1(25)
+#define M_SYS_PCI_HOST _SB_MAKEMASK1(19)
+#define M_SYS_PCI_ARBITER _SB_MAKEMASK1(20)
+#define M_SYS_SOUTH_ON_LDT _SB_MAKEMASK1(21)
+#define M_SYS_BIG_ENDIAN _SB_MAKEMASK1(22)
+#define M_SYS_GENCLK_EN _SB_MAKEMASK1(23)
+#define M_SYS_LDT_TEST_EN _SB_MAKEMASK1(24)
+#define M_SYS_GEN_PARITY_EN _SB_MAKEMASK1(25)
-#define S_SYS_CONFIG 26
-#define M_SYS_CONFIG _SB_MAKEMASK(6, S_SYS_CONFIG)
-#define V_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_SYS_CONFIG)
-#define G_SYS_CONFIG(x) _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
+#define S_SYS_CONFIG 26
+#define M_SYS_CONFIG _SB_MAKEMASK(6, S_SYS_CONFIG)
+#define V_SYS_CONFIG(x) _SB_MAKEVALUE(x, S_SYS_CONFIG)
+#define G_SYS_CONFIG(x) _SB_GETVALUE(x, S_SYS_CONFIG, M_SYS_CONFIG)
/* The following bits are writeable by JTAG only. */
-#define M_SYS_CLKSTOP _SB_MAKEMASK1(32)
-#define M_SYS_CLKSTEP _SB_MAKEMASK1(33)
+#define M_SYS_CLKSTOP _SB_MAKEMASK1(32)
+#define M_SYS_CLKSTEP _SB_MAKEMASK1(33)
-#define S_SYS_CLKCOUNT 34
-#define M_SYS_CLKCOUNT _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
-#define V_SYS_CLKCOUNT(x) _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
-#define G_SYS_CLKCOUNT(x) _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
+#define S_SYS_CLKCOUNT 34
+#define M_SYS_CLKCOUNT _SB_MAKEMASK(8, S_SYS_CLKCOUNT)
+#define V_SYS_CLKCOUNT(x) _SB_MAKEVALUE(x, S_SYS_CLKCOUNT)
+#define G_SYS_CLKCOUNT(x) _SB_GETVALUE(x, S_SYS_CLKCOUNT, M_SYS_CLKCOUNT)
-#define M_SYS_PLL_BYPASS _SB_MAKEMASK1(42)
+#define M_SYS_PLL_BYPASS _SB_MAKEMASK1(42)
#define S_SYS_PLL_IREF 43
#define M_SYS_PLL_IREF _SB_MAKEMASK(2, S_SYS_PLL_IREF)
@@ -280,26 +280,26 @@
#define S_SYS_PLL_VREG 47
#define M_SYS_PLL_VREG _SB_MAKEMASK(2, S_SYS_PLL_VREG)
-#define M_SYS_MEM_RESET _SB_MAKEMASK1(49)
-#define M_SYS_L2C_RESET _SB_MAKEMASK1(50)
-#define M_SYS_IO_RESET_0 _SB_MAKEMASK1(51)
-#define M_SYS_IO_RESET_1 _SB_MAKEMASK1(52)
-#define M_SYS_SCD_RESET _SB_MAKEMASK1(53)
+#define M_SYS_MEM_RESET _SB_MAKEMASK1(49)
+#define M_SYS_L2C_RESET _SB_MAKEMASK1(50)
+#define M_SYS_IO_RESET_0 _SB_MAKEMASK1(51)
+#define M_SYS_IO_RESET_1 _SB_MAKEMASK1(52)
+#define M_SYS_SCD_RESET _SB_MAKEMASK1(53)
/* End of bits writable by JTAG only. */
-#define M_SYS_CPU_RESET_0 _SB_MAKEMASK1(54)
-#define M_SYS_CPU_RESET_1 _SB_MAKEMASK1(55)
+#define M_SYS_CPU_RESET_0 _SB_MAKEMASK1(54)
+#define M_SYS_CPU_RESET_1 _SB_MAKEMASK1(55)
-#define M_SYS_UNICPU0 _SB_MAKEMASK1(56)
-#define M_SYS_UNICPU1 _SB_MAKEMASK1(57)
+#define M_SYS_UNICPU0 _SB_MAKEMASK1(56)
+#define M_SYS_UNICPU1 _SB_MAKEMASK1(57)
-#define M_SYS_SB_SOFTRES _SB_MAKEMASK1(58)
-#define M_SYS_EXT_RESET _SB_MAKEMASK1(59)
-#define M_SYS_SYSTEM_RESET _SB_MAKEMASK1(60)
+#define M_SYS_SB_SOFTRES _SB_MAKEMASK1(58)
+#define M_SYS_EXT_RESET _SB_MAKEMASK1(59)
+#define M_SYS_SYSTEM_RESET _SB_MAKEMASK1(60)
-#define M_SYS_MISR_MODE _SB_MAKEMASK1(61)
-#define M_SYS_MISR_RESET _SB_MAKEMASK1(62)
+#define M_SYS_MISR_MODE _SB_MAKEMASK1(61)
+#define M_SYS_MISR_RESET _SB_MAKEMASK1(62)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1)
#define M_SYS_SW_FLAG _SB_MAKEMASK1(63)
@@ -313,46 +313,46 @@
* Registers: SCD_MBOX_CPU_x
*/
-#define S_MBOX_INT_3 0
-#define M_MBOX_INT_3 _SB_MAKEMASK(16, S_MBOX_INT_3)
-#define S_MBOX_INT_2 16
-#define M_MBOX_INT_2 _SB_MAKEMASK(16, S_MBOX_INT_2)
-#define S_MBOX_INT_1 32
-#define M_MBOX_INT_1 _SB_MAKEMASK(16, S_MBOX_INT_1)
-#define S_MBOX_INT_0 48
-#define M_MBOX_INT_0 _SB_MAKEMASK(16, S_MBOX_INT_0)
+#define S_MBOX_INT_3 0
+#define M_MBOX_INT_3 _SB_MAKEMASK(16, S_MBOX_INT_3)
+#define S_MBOX_INT_2 16
+#define M_MBOX_INT_2 _SB_MAKEMASK(16, S_MBOX_INT_2)
+#define S_MBOX_INT_1 32
+#define M_MBOX_INT_1 _SB_MAKEMASK(16, S_MBOX_INT_1)
+#define S_MBOX_INT_0 48
+#define M_MBOX_INT_0 _SB_MAKEMASK(16, S_MBOX_INT_0)
/*
* Watchdog Registers (Table 4-8) (Table 4-9) (Table 4-10)
* Registers: SCD_WDOG_INIT_CNT_x
*/
-#define V_SCD_WDOG_FREQ 1000000
+#define V_SCD_WDOG_FREQ 1000000
-#define S_SCD_WDOG_INIT 0
-#define M_SCD_WDOG_INIT _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
+#define S_SCD_WDOG_INIT 0
+#define M_SCD_WDOG_INIT _SB_MAKEMASK(23, S_SCD_WDOG_INIT)
-#define S_SCD_WDOG_CNT 0
-#define M_SCD_WDOG_CNT _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
+#define S_SCD_WDOG_CNT 0
+#define M_SCD_WDOG_CNT _SB_MAKEMASK(23, S_SCD_WDOG_CNT)
-#define S_SCD_WDOG_ENABLE 0
-#define M_SCD_WDOG_ENABLE _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
+#define S_SCD_WDOG_ENABLE 0
+#define M_SCD_WDOG_ENABLE _SB_MAKEMASK1(S_SCD_WDOG_ENABLE)
-#define S_SCD_WDOG_RESET_TYPE 2
-#define M_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
+#define S_SCD_WDOG_RESET_TYPE 2
+#define M_SCD_WDOG_RESET_TYPE _SB_MAKEMASK(3, S_SCD_WDOG_RESET_TYPE)
#define V_SCD_WDOG_RESET_TYPE(x) _SB_MAKEVALUE(x, S_SCD_WDOG_RESET_TYPE)
#define G_SCD_WDOG_RESET_TYPE(x) _SB_GETVALUE(x, S_SCD_WDOG_RESET_TYPE, M_SCD_WDOG_RESET_TYPE)
-#define K_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
-#define K_SCD_WDOG_RESET_SOFT 1
-#define K_SCD_WDOG_RESET_CPU0 3
-#define K_SCD_WDOG_RESET_CPU1 5
+#define K_SCD_WDOG_RESET_FULL 0 /* actually, (x & 1) == 0 */
+#define K_SCD_WDOG_RESET_SOFT 1
+#define K_SCD_WDOG_RESET_CPU0 3
+#define K_SCD_WDOG_RESET_CPU1 5
#define K_SCD_WDOG_RESET_BOTH_CPUS 7
/* This feature is present in 1250 C0 and later, but *not* in 112x A revs. */
#if SIBYTE_HDR_FEATURE(1250, PASS3)
-#define S_SCD_WDOG_HAS_RESET 8
-#define M_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
+#define S_SCD_WDOG_HAS_RESET 8
+#define M_SCD_WDOG_HAS_RESET _SB_MAKEMASK1(S_SCD_WDOG_HAS_RESET)
#endif
@@ -360,46 +360,46 @@
* Timer Registers (Table 4-11) (Table 4-12) (Table 4-13)
*/
-#define V_SCD_TIMER_FREQ 1000000
+#define V_SCD_TIMER_FREQ 1000000
-#define S_SCD_TIMER_INIT 0
-#define M_SCD_TIMER_INIT _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
-#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
-#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
+#define S_SCD_TIMER_INIT 0
+#define M_SCD_TIMER_INIT _SB_MAKEMASK(23, S_SCD_TIMER_INIT)
+#define V_SCD_TIMER_INIT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_INIT)
+#define G_SCD_TIMER_INIT(x) _SB_GETVALUE(x, S_SCD_TIMER_INIT, M_SCD_TIMER_INIT)
#define V_SCD_TIMER_WIDTH 23
-#define S_SCD_TIMER_CNT 0
-#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
-#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
-#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
+#define S_SCD_TIMER_CNT 0
+#define M_SCD_TIMER_CNT _SB_MAKEMASK(V_SCD_TIMER_WIDTH, S_SCD_TIMER_CNT)
+#define V_SCD_TIMER_CNT(x) _SB_MAKEVALUE(x, S_SCD_TIMER_CNT)
+#define G_SCD_TIMER_CNT(x) _SB_GETVALUE(x, S_SCD_TIMER_CNT, M_SCD_TIMER_CNT)
-#define M_SCD_TIMER_ENABLE _SB_MAKEMASK1(0)
-#define M_SCD_TIMER_MODE _SB_MAKEMASK1(1)
+#define M_SCD_TIMER_ENABLE _SB_MAKEMASK1(0)
+#define M_SCD_TIMER_MODE _SB_MAKEMASK1(1)
#define M_SCD_TIMER_MODE_CONTINUOUS M_SCD_TIMER_MODE
/*
* System Performance Counters
*/
-#define S_SPC_CFG_SRC0 0
-#define M_SPC_CFG_SRC0 _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
-#define V_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
-#define G_SPC_CFG_SRC0(x) _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
+#define S_SPC_CFG_SRC0 0
+#define M_SPC_CFG_SRC0 _SB_MAKEMASK(8, S_SPC_CFG_SRC0)
+#define V_SPC_CFG_SRC0(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC0)
+#define G_SPC_CFG_SRC0(x) _SB_GETVALUE(x, S_SPC_CFG_SRC0, M_SPC_CFG_SRC0)
-#define S_SPC_CFG_SRC1 8
-#define M_SPC_CFG_SRC1 _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
-#define V_SPC_CFG_SRC1(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
-#define G_SPC_CFG_SRC1(x) _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
+#define S_SPC_CFG_SRC1 8
+#define M_SPC_CFG_SRC1 _SB_MAKEMASK(8, S_SPC_CFG_SRC1)
+#define V_SPC_CFG_SRC1(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC1)
+#define G_SPC_CFG_SRC1(x) _SB_GETVALUE(x, S_SPC_CFG_SRC1, M_SPC_CFG_SRC1)
-#define S_SPC_CFG_SRC2 16
-#define M_SPC_CFG_SRC2 _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
-#define V_SPC_CFG_SRC2(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
-#define G_SPC_CFG_SRC2(x) _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
+#define S_SPC_CFG_SRC2 16
+#define M_SPC_CFG_SRC2 _SB_MAKEMASK(8, S_SPC_CFG_SRC2)
+#define V_SPC_CFG_SRC2(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC2)
+#define G_SPC_CFG_SRC2(x) _SB_GETVALUE(x, S_SPC_CFG_SRC2, M_SPC_CFG_SRC2)
-#define S_SPC_CFG_SRC3 24
-#define M_SPC_CFG_SRC3 _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
-#define V_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
-#define G_SPC_CFG_SRC3(x) _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
+#define S_SPC_CFG_SRC3 24
+#define M_SPC_CFG_SRC3 _SB_MAKEMASK(8, S_SPC_CFG_SRC3)
+#define V_SPC_CFG_SRC3(x) _SB_MAKEVALUE(x, S_SPC_CFG_SRC3)
+#define G_SPC_CFG_SRC3(x) _SB_GETVALUE(x, S_SPC_CFG_SRC3, M_SPC_CFG_SRC3)
#if SIBYTE_HDR_FEATURE_1250_112x
#define M_SPC_CFG_CLEAR _SB_MAKEMASK1(32)
@@ -411,58 +411,58 @@
* Bus Watcher
*/
-#define S_SCD_BERR_TID 8
-#define M_SCD_BERR_TID _SB_MAKEMASK(10, S_SCD_BERR_TID)
-#define V_SCD_BERR_TID(x) _SB_MAKEVALUE(x, S_SCD_BERR_TID)
-#define G_SCD_BERR_TID(x) _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
+#define S_SCD_BERR_TID 8
+#define M_SCD_BERR_TID _SB_MAKEMASK(10, S_SCD_BERR_TID)
+#define V_SCD_BERR_TID(x) _SB_MAKEVALUE(x, S_SCD_BERR_TID)
+#define G_SCD_BERR_TID(x) _SB_GETVALUE(x, S_SCD_BERR_TID, M_SCD_BERR_TID)
-#define S_SCD_BERR_RID 18
-#define M_SCD_BERR_RID _SB_MAKEMASK(4, S_SCD_BERR_RID)
-#define V_SCD_BERR_RID(x) _SB_MAKEVALUE(x, S_SCD_BERR_RID)
-#define G_SCD_BERR_RID(x) _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
+#define S_SCD_BERR_RID 18
+#define M_SCD_BERR_RID _SB_MAKEMASK(4, S_SCD_BERR_RID)
+#define V_SCD_BERR_RID(x) _SB_MAKEVALUE(x, S_SCD_BERR_RID)
+#define G_SCD_BERR_RID(x) _SB_GETVALUE(x, S_SCD_BERR_RID, M_SCD_BERR_RID)
-#define S_SCD_BERR_DCODE 22
-#define M_SCD_BERR_DCODE _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
-#define V_SCD_BERR_DCODE(x) _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
-#define G_SCD_BERR_DCODE(x) _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
+#define S_SCD_BERR_DCODE 22
+#define M_SCD_BERR_DCODE _SB_MAKEMASK(3, S_SCD_BERR_DCODE)
+#define V_SCD_BERR_DCODE(x) _SB_MAKEVALUE(x, S_SCD_BERR_DCODE)
+#define G_SCD_BERR_DCODE(x) _SB_GETVALUE(x, S_SCD_BERR_DCODE, M_SCD_BERR_DCODE)
-#define M_SCD_BERR_MULTERRS _SB_MAKEMASK1(30)
+#define M_SCD_BERR_MULTERRS _SB_MAKEMASK1(30)
-#define S_SCD_L2ECC_CORR_D 0
-#define M_SCD_L2ECC_CORR_D _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
-#define V_SCD_L2ECC_CORR_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
-#define G_SCD_L2ECC_CORR_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
+#define S_SCD_L2ECC_CORR_D 0
+#define M_SCD_L2ECC_CORR_D _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_D)
+#define V_SCD_L2ECC_CORR_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_D)
+#define G_SCD_L2ECC_CORR_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_D, M_SCD_L2ECC_CORR_D)
-#define S_SCD_L2ECC_BAD_D 8
-#define M_SCD_L2ECC_BAD_D _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
-#define V_SCD_L2ECC_BAD_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
-#define G_SCD_L2ECC_BAD_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
+#define S_SCD_L2ECC_BAD_D 8
+#define M_SCD_L2ECC_BAD_D _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_D)
+#define V_SCD_L2ECC_BAD_D(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_D)
+#define G_SCD_L2ECC_BAD_D(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_D, M_SCD_L2ECC_BAD_D)
-#define S_SCD_L2ECC_CORR_T 16
-#define M_SCD_L2ECC_CORR_T _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
-#define V_SCD_L2ECC_CORR_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
-#define G_SCD_L2ECC_CORR_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
+#define S_SCD_L2ECC_CORR_T 16
+#define M_SCD_L2ECC_CORR_T _SB_MAKEMASK(8, S_SCD_L2ECC_CORR_T)
+#define V_SCD_L2ECC_CORR_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_CORR_T)
+#define G_SCD_L2ECC_CORR_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_CORR_T, M_SCD_L2ECC_CORR_T)
-#define S_SCD_L2ECC_BAD_T 24
-#define M_SCD_L2ECC_BAD_T _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
-#define V_SCD_L2ECC_BAD_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
-#define G_SCD_L2ECC_BAD_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
+#define S_SCD_L2ECC_BAD_T 24
+#define M_SCD_L2ECC_BAD_T _SB_MAKEMASK(8, S_SCD_L2ECC_BAD_T)
+#define V_SCD_L2ECC_BAD_T(x) _SB_MAKEVALUE(x, S_SCD_L2ECC_BAD_T)
+#define G_SCD_L2ECC_BAD_T(x) _SB_GETVALUE(x, S_SCD_L2ECC_BAD_T, M_SCD_L2ECC_BAD_T)
-#define S_SCD_MEM_ECC_CORR 0
-#define M_SCD_MEM_ECC_CORR _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
-#define V_SCD_MEM_ECC_CORR(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
-#define G_SCD_MEM_ECC_CORR(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
+#define S_SCD_MEM_ECC_CORR 0
+#define M_SCD_MEM_ECC_CORR _SB_MAKEMASK(8, S_SCD_MEM_ECC_CORR)
+#define V_SCD_MEM_ECC_CORR(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_CORR)
+#define G_SCD_MEM_ECC_CORR(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_CORR, M_SCD_MEM_ECC_CORR)
-#define S_SCD_MEM_ECC_BAD 8
-#define M_SCD_MEM_ECC_BAD _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
-#define V_SCD_MEM_ECC_BAD(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
-#define G_SCD_MEM_ECC_BAD(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
+#define S_SCD_MEM_ECC_BAD 8
+#define M_SCD_MEM_ECC_BAD _SB_MAKEMASK(8, S_SCD_MEM_ECC_BAD)
+#define V_SCD_MEM_ECC_BAD(x) _SB_MAKEVALUE(x, S_SCD_MEM_ECC_BAD)
+#define G_SCD_MEM_ECC_BAD(x) _SB_GETVALUE(x, S_SCD_MEM_ECC_BAD, M_SCD_MEM_ECC_BAD)
-#define S_SCD_MEM_BUSERR 16
-#define M_SCD_MEM_BUSERR _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
-#define V_SCD_MEM_BUSERR(x) _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
-#define G_SCD_MEM_BUSERR(x) _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
+#define S_SCD_MEM_BUSERR 16
+#define M_SCD_MEM_BUSERR _SB_MAKEMASK(8, S_SCD_MEM_BUSERR)
+#define V_SCD_MEM_BUSERR(x) _SB_MAKEVALUE(x, S_SCD_MEM_BUSERR)
+#define G_SCD_MEM_BUSERR(x) _SB_GETVALUE(x, S_SCD_MEM_BUSERR, M_SCD_MEM_BUSERR)
/*
@@ -473,28 +473,28 @@
#define M_ATRAP_INDEX _SB_MAKEMASK(4, 0)
#define M_ATRAP_ADDRESS _SB_MAKEMASK(40, 0)
-#define S_ATRAP_CFG_CNT 0
-#define M_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
-#define V_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
-#define G_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
+#define S_ATRAP_CFG_CNT 0
+#define M_ATRAP_CFG_CNT _SB_MAKEMASK(3, S_ATRAP_CFG_CNT)
+#define V_ATRAP_CFG_CNT(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_CNT)
+#define G_ATRAP_CFG_CNT(x) _SB_GETVALUE(x, S_ATRAP_CFG_CNT, M_ATRAP_CFG_CNT)
#define M_ATRAP_CFG_WRITE _SB_MAKEMASK1(3)
-#define M_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
-#define M_ATRAP_CFG_INV _SB_MAKEMASK1(5)
+#define M_ATRAP_CFG_ALL _SB_MAKEMASK1(4)
+#define M_ATRAP_CFG_INV _SB_MAKEMASK1(5)
#define M_ATRAP_CFG_USESRC _SB_MAKEMASK1(6)
#define M_ATRAP_CFG_SRCINV _SB_MAKEMASK1(7)
-#define S_ATRAP_CFG_AGENTID 8
-#define M_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
-#define V_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
-#define G_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
+#define S_ATRAP_CFG_AGENTID 8
+#define M_ATRAP_CFG_AGENTID _SB_MAKEMASK(4, S_ATRAP_CFG_AGENTID)
+#define V_ATRAP_CFG_AGENTID(x) _SB_MAKEVALUE(x, S_ATRAP_CFG_AGENTID)
+#define G_ATRAP_CFG_AGENTID(x) _SB_GETVALUE(x, S_ATRAP_CFG_AGENTID, M_ATRAP_CFG_AGENTID)
#define K_BUS_AGENT_CPU0 0
#define K_BUS_AGENT_CPU1 1
#define K_BUS_AGENT_IOB0 2
#define K_BUS_AGENT_IOB1 3
-#define K_BUS_AGENT_SCD 4
-#define K_BUS_AGENT_L2C 6
+#define K_BUS_AGENT_SCD 4
+#define K_BUS_AGENT_L2C 6
#define K_BUS_AGENT_MC 7
#define S_ATRAP_CFG_CATTR 12
@@ -503,13 +503,13 @@
#define G_ATRAP_CFG_CATTR(x) _SB_GETVALUE(x, S_ATRAP_CFG_CATTR, M_ATRAP_CFG_CATTR)
#define K_ATRAP_CFG_CATTR_IGNORE 0
-#define K_ATRAP_CFG_CATTR_UNC 1
+#define K_ATRAP_CFG_CATTR_UNC 1
#define K_ATRAP_CFG_CATTR_CACHEABLE 2
-#define K_ATRAP_CFG_CATTR_NONCOH 3
+#define K_ATRAP_CFG_CATTR_NONCOH 3
#define K_ATRAP_CFG_CATTR_COHERENT 4
#define K_ATRAP_CFG_CATTR_NOTUNC 5
#define K_ATRAP_CFG_CATTR_NOTNONCOH 6
-#define K_ATRAP_CFG_CATTR_NOTCOHERENT 7
+#define K_ATRAP_CFG_CATTR_NOTCOHERENT 7
#endif /* 1250/112x */
@@ -517,16 +517,16 @@
* Trace Buffer Config register
*/
-#define M_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0)
-#define M_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1)
-#define M_SCD_TRACE_CFG_START _SB_MAKEMASK1(2)
-#define M_SCD_TRACE_CFG_STOP _SB_MAKEMASK1(3)
-#define M_SCD_TRACE_CFG_FREEZE _SB_MAKEMASK1(4)
-#define M_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5)
-#define M_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6)
-#define M_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7)
+#define M_SCD_TRACE_CFG_RESET _SB_MAKEMASK1(0)
+#define M_SCD_TRACE_CFG_START_READ _SB_MAKEMASK1(1)
+#define M_SCD_TRACE_CFG_START _SB_MAKEMASK1(2)
+#define M_SCD_TRACE_CFG_STOP _SB_MAKEMASK1(3)
+#define M_SCD_TRACE_CFG_FREEZE _SB_MAKEMASK1(4)
+#define M_SCD_TRACE_CFG_FREEZE_FULL _SB_MAKEMASK1(5)
+#define M_SCD_TRACE_CFG_DEBUG_FULL _SB_MAKEMASK1(6)
+#define M_SCD_TRACE_CFG_FULL _SB_MAKEMASK1(7)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define M_SCD_TRACE_CFG_FORCECNT _SB_MAKEMASK1(8)
+#define M_SCD_TRACE_CFG_FORCECNT _SB_MAKEMASK1(8)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
/*
@@ -534,121 +534,121 @@
* a slightly different place in the register.
*/
#if SIBYTE_HDR_FEATURE_1250_112x
-#define S_SCD_TRACE_CFG_CUR_ADDR 10
+#define S_SCD_TRACE_CFG_CUR_ADDR 10
#else
#if SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SCD_TRACE_CFG_CUR_ADDR 24
+#define S_SCD_TRACE_CFG_CUR_ADDR 24
#endif /* 1480 */
-#endif /* 1250/112x */
+#endif /* 1250/112x */
-#define M_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
-#define V_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
-#define G_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
+#define M_SCD_TRACE_CFG_CUR_ADDR _SB_MAKEMASK(8, S_SCD_TRACE_CFG_CUR_ADDR)
+#define V_SCD_TRACE_CFG_CUR_ADDR(x) _SB_MAKEVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR)
+#define G_SCD_TRACE_CFG_CUR_ADDR(x) _SB_GETVALUE(x, S_SCD_TRACE_CFG_CUR_ADDR, M_SCD_TRACE_CFG_CUR_ADDR)
/*
* Trace Event registers
*/
-#define S_SCD_TREVT_ADDR_MATCH 0
-#define M_SCD_TREVT_ADDR_MATCH _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
-#define V_SCD_TREVT_ADDR_MATCH(x) _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
-#define G_SCD_TREVT_ADDR_MATCH(x) _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
-
-#define M_SCD_TREVT_REQID_MATCH _SB_MAKEMASK1(4)
-#define M_SCD_TREVT_DATAID_MATCH _SB_MAKEMASK1(5)
-#define M_SCD_TREVT_RESPID_MATCH _SB_MAKEMASK1(6)
-#define M_SCD_TREVT_INTERRUPT _SB_MAKEMASK1(7)
-#define M_SCD_TREVT_DEBUG_PIN _SB_MAKEMASK1(9)
-#define M_SCD_TREVT_WRITE _SB_MAKEMASK1(10)
-#define M_SCD_TREVT_READ _SB_MAKEMASK1(11)
-
-#define S_SCD_TREVT_REQID 12
-#define M_SCD_TREVT_REQID _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
-#define V_SCD_TREVT_REQID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
-#define G_SCD_TREVT_REQID(x) _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
-
-#define S_SCD_TREVT_RESPID 16
-#define M_SCD_TREVT_RESPID _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
-#define V_SCD_TREVT_RESPID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
-#define G_SCD_TREVT_RESPID(x) _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
-
-#define S_SCD_TREVT_DATAID 20
-#define M_SCD_TREVT_DATAID _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
-#define V_SCD_TREVT_DATAID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
-#define G_SCD_TREVT_DATAID(x) _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATID)
-
-#define S_SCD_TREVT_COUNT 24
-#define M_SCD_TREVT_COUNT _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
-#define V_SCD_TREVT_COUNT(x) _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
-#define G_SCD_TREVT_COUNT(x) _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
+#define S_SCD_TREVT_ADDR_MATCH 0
+#define M_SCD_TREVT_ADDR_MATCH _SB_MAKEMASK(4, S_SCD_TREVT_ADDR_MATCH)
+#define V_SCD_TREVT_ADDR_MATCH(x) _SB_MAKEVALUE(x, S_SCD_TREVT_ADDR_MATCH)
+#define G_SCD_TREVT_ADDR_MATCH(x) _SB_GETVALUE(x, S_SCD_TREVT_ADDR_MATCH, M_SCD_TREVT_ADDR_MATCH)
+
+#define M_SCD_TREVT_REQID_MATCH _SB_MAKEMASK1(4)
+#define M_SCD_TREVT_DATAID_MATCH _SB_MAKEMASK1(5)
+#define M_SCD_TREVT_RESPID_MATCH _SB_MAKEMASK1(6)
+#define M_SCD_TREVT_INTERRUPT _SB_MAKEMASK1(7)
+#define M_SCD_TREVT_DEBUG_PIN _SB_MAKEMASK1(9)
+#define M_SCD_TREVT_WRITE _SB_MAKEMASK1(10)
+#define M_SCD_TREVT_READ _SB_MAKEMASK1(11)
+
+#define S_SCD_TREVT_REQID 12
+#define M_SCD_TREVT_REQID _SB_MAKEMASK(4, S_SCD_TREVT_REQID)
+#define V_SCD_TREVT_REQID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_REQID)
+#define G_SCD_TREVT_REQID(x) _SB_GETVALUE(x, S_SCD_TREVT_REQID, M_SCD_TREVT_REQID)
+
+#define S_SCD_TREVT_RESPID 16
+#define M_SCD_TREVT_RESPID _SB_MAKEMASK(4, S_SCD_TREVT_RESPID)
+#define V_SCD_TREVT_RESPID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_RESPID)
+#define G_SCD_TREVT_RESPID(x) _SB_GETVALUE(x, S_SCD_TREVT_RESPID, M_SCD_TREVT_RESPID)
+
+#define S_SCD_TREVT_DATAID 20
+#define M_SCD_TREVT_DATAID _SB_MAKEMASK(4, S_SCD_TREVT_DATAID)
+#define V_SCD_TREVT_DATAID(x) _SB_MAKEVALUE(x, S_SCD_TREVT_DATAID)
+#define G_SCD_TREVT_DATAID(x) _SB_GETVALUE(x, S_SCD_TREVT_DATAID, M_SCD_TREVT_DATID)
+
+#define S_SCD_TREVT_COUNT 24
+#define M_SCD_TREVT_COUNT _SB_MAKEMASK(8, S_SCD_TREVT_COUNT)
+#define V_SCD_TREVT_COUNT(x) _SB_MAKEVALUE(x, S_SCD_TREVT_COUNT)
+#define G_SCD_TREVT_COUNT(x) _SB_GETVALUE(x, S_SCD_TREVT_COUNT, M_SCD_TREVT_COUNT)
/*
* Trace Sequence registers
*/
-#define S_SCD_TRSEQ_EVENT4 0
-#define M_SCD_TRSEQ_EVENT4 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
-#define V_SCD_TRSEQ_EVENT4(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
-#define G_SCD_TRSEQ_EVENT4(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
-
-#define S_SCD_TRSEQ_EVENT3 4
-#define M_SCD_TRSEQ_EVENT3 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
-#define V_SCD_TRSEQ_EVENT3(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
-#define G_SCD_TRSEQ_EVENT3(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
-
-#define S_SCD_TRSEQ_EVENT2 8
-#define M_SCD_TRSEQ_EVENT2 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
-#define V_SCD_TRSEQ_EVENT2(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
-#define G_SCD_TRSEQ_EVENT2(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
-
-#define S_SCD_TRSEQ_EVENT1 12
-#define M_SCD_TRSEQ_EVENT1 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
-#define V_SCD_TRSEQ_EVENT1(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
-#define G_SCD_TRSEQ_EVENT1(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
-
-#define K_SCD_TRSEQ_E0 0
-#define K_SCD_TRSEQ_E1 1
-#define K_SCD_TRSEQ_E2 2
-#define K_SCD_TRSEQ_E3 3
-#define K_SCD_TRSEQ_E0_E1 4
-#define K_SCD_TRSEQ_E1_E2 5
-#define K_SCD_TRSEQ_E2_E3 6
-#define K_SCD_TRSEQ_E0_E1_E2 7
-#define K_SCD_TRSEQ_E0_E1_E2_E3 8
-#define K_SCD_TRSEQ_E0E1 9
-#define K_SCD_TRSEQ_E0E1E2 10
-#define K_SCD_TRSEQ_E0E1E2E3 11
-#define K_SCD_TRSEQ_E0E1_E2 12
-#define K_SCD_TRSEQ_E0E1_E2E3 13
-#define K_SCD_TRSEQ_E0E1_E2_E3 14
-#define K_SCD_TRSEQ_IGNORED 15
-
-#define K_SCD_TRSEQ_TRIGGER_ALL (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
- V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
- V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
- V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
-
-#define S_SCD_TRSEQ_FUNCTION 16
-#define M_SCD_TRSEQ_FUNCTION _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
-#define V_SCD_TRSEQ_FUNCTION(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
-#define G_SCD_TRSEQ_FUNCTION(x) _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
-
-#define K_SCD_TRSEQ_FUNC_NOP 0
-#define K_SCD_TRSEQ_FUNC_START 1
-#define K_SCD_TRSEQ_FUNC_STOP 2
-#define K_SCD_TRSEQ_FUNC_FREEZE 3
-
-#define V_SCD_TRSEQ_FUNC_NOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
-#define V_SCD_TRSEQ_FUNC_START V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
-#define V_SCD_TRSEQ_FUNC_STOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
-#define V_SCD_TRSEQ_FUNC_FREEZE V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
-
-#define M_SCD_TRSEQ_ASAMPLE _SB_MAKEMASK1(18)
-#define M_SCD_TRSEQ_DSAMPLE _SB_MAKEMASK1(19)
-#define M_SCD_TRSEQ_DEBUGPIN _SB_MAKEMASK1(20)
-#define M_SCD_TRSEQ_DEBUGCPU _SB_MAKEMASK1(21)
-#define M_SCD_TRSEQ_CLEARUSE _SB_MAKEMASK1(22)
-#define M_SCD_TRSEQ_ALLD_A _SB_MAKEMASK1(23)
-#define M_SCD_TRSEQ_ALL_A _SB_MAKEMASK1(24)
+#define S_SCD_TRSEQ_EVENT4 0
+#define M_SCD_TRSEQ_EVENT4 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT4)
+#define V_SCD_TRSEQ_EVENT4(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT4)
+#define G_SCD_TRSEQ_EVENT4(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT4, M_SCD_TRSEQ_EVENT4)
+
+#define S_SCD_TRSEQ_EVENT3 4
+#define M_SCD_TRSEQ_EVENT3 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT3)
+#define V_SCD_TRSEQ_EVENT3(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT3)
+#define G_SCD_TRSEQ_EVENT3(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT3, M_SCD_TRSEQ_EVENT3)
+
+#define S_SCD_TRSEQ_EVENT2 8
+#define M_SCD_TRSEQ_EVENT2 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT2)
+#define V_SCD_TRSEQ_EVENT2(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT2)
+#define G_SCD_TRSEQ_EVENT2(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT2, M_SCD_TRSEQ_EVENT2)
+
+#define S_SCD_TRSEQ_EVENT1 12
+#define M_SCD_TRSEQ_EVENT1 _SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
+#define V_SCD_TRSEQ_EVENT1(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
+#define G_SCD_TRSEQ_EVENT1(x) _SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
+
+#define K_SCD_TRSEQ_E0 0
+#define K_SCD_TRSEQ_E1 1
+#define K_SCD_TRSEQ_E2 2
+#define K_SCD_TRSEQ_E3 3
+#define K_SCD_TRSEQ_E0_E1 4
+#define K_SCD_TRSEQ_E1_E2 5
+#define K_SCD_TRSEQ_E2_E3 6
+#define K_SCD_TRSEQ_E0_E1_E2 7
+#define K_SCD_TRSEQ_E0_E1_E2_E3 8
+#define K_SCD_TRSEQ_E0E1 9
+#define K_SCD_TRSEQ_E0E1E2 10
+#define K_SCD_TRSEQ_E0E1E2E3 11
+#define K_SCD_TRSEQ_E0E1_E2 12
+#define K_SCD_TRSEQ_E0E1_E2E3 13
+#define K_SCD_TRSEQ_E0E1_E2_E3 14
+#define K_SCD_TRSEQ_IGNORED 15
+
+#define K_SCD_TRSEQ_TRIGGER_ALL (V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT2(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT3(K_SCD_TRSEQ_IGNORED) | \
+ V_SCD_TRSEQ_EVENT4(K_SCD_TRSEQ_IGNORED))
+
+#define S_SCD_TRSEQ_FUNCTION 16
+#define M_SCD_TRSEQ_FUNCTION _SB_MAKEMASK(4, S_SCD_TRSEQ_FUNCTION)
+#define V_SCD_TRSEQ_FUNCTION(x) _SB_MAKEVALUE(x, S_SCD_TRSEQ_FUNCTION)
+#define G_SCD_TRSEQ_FUNCTION(x) _SB_GETVALUE(x, S_SCD_TRSEQ_FUNCTION, M_SCD_TRSEQ_FUNCTION)
+
+#define K_SCD_TRSEQ_FUNC_NOP 0
+#define K_SCD_TRSEQ_FUNC_START 1
+#define K_SCD_TRSEQ_FUNC_STOP 2
+#define K_SCD_TRSEQ_FUNC_FREEZE 3
+
+#define V_SCD_TRSEQ_FUNC_NOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_NOP)
+#define V_SCD_TRSEQ_FUNC_START V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_START)
+#define V_SCD_TRSEQ_FUNC_STOP V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_STOP)
+#define V_SCD_TRSEQ_FUNC_FREEZE V_SCD_TRSEQ_FUNCTION(K_SCD_TRSEQ_FUNC_FREEZE)
+
+#define M_SCD_TRSEQ_ASAMPLE _SB_MAKEMASK1(18)
+#define M_SCD_TRSEQ_DSAMPLE _SB_MAKEMASK1(19)
+#define M_SCD_TRSEQ_DEBUGPIN _SB_MAKEMASK1(20)
+#define M_SCD_TRSEQ_DEBUGCPU _SB_MAKEMASK1(21)
+#define M_SCD_TRSEQ_CLEARUSE _SB_MAKEMASK1(22)
+#define M_SCD_TRSEQ_ALLD_A _SB_MAKEMASK1(23)
+#define M_SCD_TRSEQ_ALL_A _SB_MAKEMASK1(24)
#endif
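These SCD headers repeat one naming convention throughout: for each field, S_<name> is the bit offset, M_<name> the in-place mask, V_<name>(x) shifts a value into the field and G_<name>(x) extracts it again. Below is a minimal standalone sketch of that pattern, using the trace-sequencer EVENT1 field from the hunk above; the _SB_* helpers are local stand-ins with assumed semantics, since their real definitions live in sb1250_defs.h and are not part of this diff.

/*
 * Illustrative sketch of the S_/M_/V_/G_ field-macro convention.
 * The _SB_* helpers below are local stand-ins with assumed semantics;
 * the kernel's real versions live in sb1250_defs.h.
 */
#include <stdint.h>
#include <stdio.h>

#define _SB_MAKE64(x)		((uint64_t)(x))
#define _SB_MAKEMASK1(n)	(_SB_MAKE64(1) << (n))
#define _SB_MAKEMASK(v, n)	(_SB_MAKE64((1ULL << (v)) - 1) << (n))
#define _SB_MAKEVALUE(v, n)	(_SB_MAKE64(v) << (n))
#define _SB_GETVALUE(v, n, m)	((_SB_MAKE64(v) & (m)) >> (n))

/* Field definitions copied from the hunk above (trace sequencer, EVENT1). */
#define S_SCD_TRSEQ_EVENT1	12
#define M_SCD_TRSEQ_EVENT1	_SB_MAKEMASK(4, S_SCD_TRSEQ_EVENT1)
#define V_SCD_TRSEQ_EVENT1(x)	_SB_MAKEVALUE(x, S_SCD_TRSEQ_EVENT1)
#define G_SCD_TRSEQ_EVENT1(x)	_SB_GETVALUE(x, S_SCD_TRSEQ_EVENT1, M_SCD_TRSEQ_EVENT1)
#define K_SCD_TRSEQ_IGNORED	15

int main(void)
{
	uint64_t reg = 0;

	reg |= V_SCD_TRSEQ_EVENT1(K_SCD_TRSEQ_IGNORED);		/* pack the field */
	printf("reg = %#llx, event1 = %llu\n",
	       (unsigned long long)reg,
	       (unsigned long long)G_SCD_TRSEQ_EVENT1(reg));	/* unpack it */
	return 0;
}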
diff --git a/arch/mips/include/asm/sibyte/sb1250_smbus.h b/arch/mips/include/asm/sibyte/sb1250_smbus.h
index 128d6b75b81..3cb73e89bbb 100644
--- a/arch/mips/include/asm/sibyte/sb1250_smbus.h
+++ b/arch/mips/include/asm/sibyte/sb1250_smbus.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * SMBUS Constants File: sb1250_smbus.h
+ * SMBUS Constants File: sb1250_smbus.h
*
* This module contains constants and macros useful for
* manipulating the SB1250's SMbus devices.
@@ -40,83 +40,83 @@
* SMBus Clock Frequency Register (Table 14-2)
*/
-#define S_SMB_FREQ_DIV 0
-#define M_SMB_FREQ_DIV _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
-#define V_SMB_FREQ_DIV(x) _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
+#define S_SMB_FREQ_DIV 0
+#define M_SMB_FREQ_DIV _SB_MAKEMASK(13, S_SMB_FREQ_DIV)
+#define V_SMB_FREQ_DIV(x) _SB_MAKEVALUE(x, S_SMB_FREQ_DIV)
#define K_SMB_FREQ_400KHZ 0x1F
#define K_SMB_FREQ_100KHZ 0x7D
#define K_SMB_FREQ_10KHZ 1250
-#define S_SMB_CMD 0
-#define M_SMB_CMD _SB_MAKEMASK(8, S_SMB_CMD)
-#define V_SMB_CMD(x) _SB_MAKEVALUE(x, S_SMB_CMD)
+#define S_SMB_CMD 0
+#define M_SMB_CMD _SB_MAKEMASK(8, S_SMB_CMD)
+#define V_SMB_CMD(x) _SB_MAKEVALUE(x, S_SMB_CMD)
/*
* SMBus control register (Table 14-4)
*/
-#define M_SMB_ERR_INTR _SB_MAKEMASK1(0)
-#define M_SMB_FINISH_INTR _SB_MAKEMASK1(1)
+#define M_SMB_ERR_INTR _SB_MAKEMASK1(0)
+#define M_SMB_FINISH_INTR _SB_MAKEMASK1(1)
-#define S_SMB_DATA_OUT 4
-#define M_SMB_DATA_OUT _SB_MAKEMASK1(S_SMB_DATA_OUT)
-#define V_SMB_DATA_OUT(x) _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
+#define S_SMB_DATA_OUT 4
+#define M_SMB_DATA_OUT _SB_MAKEMASK1(S_SMB_DATA_OUT)
+#define V_SMB_DATA_OUT(x) _SB_MAKEVALUE(x, S_SMB_DATA_OUT)
-#define M_SMB_DATA_DIR _SB_MAKEMASK1(5)
-#define M_SMB_DATA_DIR_OUTPUT M_SMB_DATA_DIR
-#define M_SMB_CLK_OUT _SB_MAKEMASK1(6)
-#define M_SMB_DIRECT_ENABLE _SB_MAKEMASK1(7)
+#define M_SMB_DATA_DIR _SB_MAKEMASK1(5)
+#define M_SMB_DATA_DIR_OUTPUT M_SMB_DATA_DIR
+#define M_SMB_CLK_OUT _SB_MAKEMASK1(6)
+#define M_SMB_DIRECT_ENABLE _SB_MAKEMASK1(7)
/*
* SMBus status registers (Table 14-5)
*/
-#define M_SMB_BUSY _SB_MAKEMASK1(0)
-#define M_SMB_ERROR _SB_MAKEMASK1(1)
-#define M_SMB_ERROR_TYPE _SB_MAKEMASK1(2)
+#define M_SMB_BUSY _SB_MAKEMASK1(0)
+#define M_SMB_ERROR _SB_MAKEMASK1(1)
+#define M_SMB_ERROR_TYPE _SB_MAKEMASK1(2)
#if SIBYTE_HDR_FEATURE(1250, PASS3) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SMB_SCL_IN 5
-#define M_SMB_SCL_IN _SB_MAKEMASK1(S_SMB_SCL_IN)
-#define V_SMB_SCL_IN(x) _SB_MAKEVALUE(x, S_SMB_SCL_IN)
-#define G_SMB_SCL_IN(x) _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
+#define S_SMB_SCL_IN 5
+#define M_SMB_SCL_IN _SB_MAKEMASK1(S_SMB_SCL_IN)
+#define V_SMB_SCL_IN(x) _SB_MAKEVALUE(x, S_SMB_SCL_IN)
+#define G_SMB_SCL_IN(x) _SB_GETVALUE(x, S_SMB_SCL_IN, M_SMB_SCL_IN)
#endif /* 1250 PASS3 || 112x PASS1 || 1480 */
-#define S_SMB_REF 6
-#define M_SMB_REF _SB_MAKEMASK1(S_SMB_REF)
-#define V_SMB_REF(x) _SB_MAKEVALUE(x, S_SMB_REF)
-#define G_SMB_REF(x) _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
+#define S_SMB_REF 6
+#define M_SMB_REF _SB_MAKEMASK1(S_SMB_REF)
+#define V_SMB_REF(x) _SB_MAKEVALUE(x, S_SMB_REF)
+#define G_SMB_REF(x) _SB_GETVALUE(x, S_SMB_REF, M_SMB_REF)
-#define S_SMB_DATA_IN 7
-#define M_SMB_DATA_IN _SB_MAKEMASK1(S_SMB_DATA_IN)
-#define V_SMB_DATA_IN(x) _SB_MAKEVALUE(x, S_SMB_DATA_IN)
-#define G_SMB_DATA_IN(x) _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
+#define S_SMB_DATA_IN 7
+#define M_SMB_DATA_IN _SB_MAKEMASK1(S_SMB_DATA_IN)
+#define V_SMB_DATA_IN(x) _SB_MAKEVALUE(x, S_SMB_DATA_IN)
+#define G_SMB_DATA_IN(x) _SB_GETVALUE(x, S_SMB_DATA_IN, M_SMB_DATA_IN)
/*
* SMBus Start/Command registers (Table 14-9)
*/
-#define S_SMB_ADDR 0
-#define M_SMB_ADDR _SB_MAKEMASK(7, S_SMB_ADDR)
-#define V_SMB_ADDR(x) _SB_MAKEVALUE(x, S_SMB_ADDR)
-#define G_SMB_ADDR(x) _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
+#define S_SMB_ADDR 0
+#define M_SMB_ADDR _SB_MAKEMASK(7, S_SMB_ADDR)
+#define V_SMB_ADDR(x) _SB_MAKEVALUE(x, S_SMB_ADDR)
+#define G_SMB_ADDR(x) _SB_GETVALUE(x, S_SMB_ADDR, M_SMB_ADDR)
-#define M_SMB_QDATA _SB_MAKEMASK1(7)
+#define M_SMB_QDATA _SB_MAKEMASK1(7)
-#define S_SMB_TT 8
-#define M_SMB_TT _SB_MAKEMASK(3, S_SMB_TT)
-#define V_SMB_TT(x) _SB_MAKEVALUE(x, S_SMB_TT)
-#define G_SMB_TT(x) _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
+#define S_SMB_TT 8
+#define M_SMB_TT _SB_MAKEMASK(3, S_SMB_TT)
+#define V_SMB_TT(x) _SB_MAKEVALUE(x, S_SMB_TT)
+#define G_SMB_TT(x) _SB_GETVALUE(x, S_SMB_TT, M_SMB_TT)
-#define K_SMB_TT_WR1BYTE 0
-#define K_SMB_TT_WR2BYTE 1
-#define K_SMB_TT_WR3BYTE 2
-#define K_SMB_TT_CMD_RD1BYTE 3
-#define K_SMB_TT_CMD_RD2BYTE 4
-#define K_SMB_TT_RD1BYTE 5
-#define K_SMB_TT_QUICKCMD 6
-#define K_SMB_TT_EEPROMREAD 7
+#define K_SMB_TT_WR1BYTE 0
+#define K_SMB_TT_WR2BYTE 1
+#define K_SMB_TT_WR3BYTE 2
+#define K_SMB_TT_CMD_RD1BYTE 3
+#define K_SMB_TT_CMD_RD2BYTE 4
+#define K_SMB_TT_RD1BYTE 5
+#define K_SMB_TT_QUICKCMD 6
+#define K_SMB_TT_EEPROMREAD 7
#define V_SMB_TT_WR1BYTE V_SMB_TT(K_SMB_TT_WR1BYTE)
#define V_SMB_TT_WR2BYTE V_SMB_TT(K_SMB_TT_WR2BYTE)
@@ -127,51 +127,51 @@
#define V_SMB_TT_QUICKCMD V_SMB_TT(K_SMB_TT_QUICKCMD)
#define V_SMB_TT_EEPROMREAD V_SMB_TT(K_SMB_TT_EEPROMREAD)
-#define M_SMB_PEC _SB_MAKEMASK1(15)
+#define M_SMB_PEC _SB_MAKEMASK1(15)
/*
* SMBus Data Register (Table 14-6) and SMBus Extra Register (Table 14-7)
*/
-#define S_SMB_LB 0
-#define M_SMB_LB _SB_MAKEMASK(8, S_SMB_LB)
-#define V_SMB_LB(x) _SB_MAKEVALUE(x, S_SMB_LB)
+#define S_SMB_LB 0
+#define M_SMB_LB _SB_MAKEMASK(8, S_SMB_LB)
+#define V_SMB_LB(x) _SB_MAKEVALUE(x, S_SMB_LB)
-#define S_SMB_MB 8
-#define M_SMB_MB _SB_MAKEMASK(8, S_SMB_MB)
-#define V_SMB_MB(x) _SB_MAKEVALUE(x, S_SMB_MB)
+#define S_SMB_MB 8
+#define M_SMB_MB _SB_MAKEMASK(8, S_SMB_MB)
+#define V_SMB_MB(x) _SB_MAKEVALUE(x, S_SMB_MB)
/*
* SMBus Packet Error Check register (Table 14-8)
*/
-#define S_SPEC_PEC 0
-#define M_SPEC_PEC _SB_MAKEMASK(8, S_SPEC_PEC)
-#define V_SPEC_MB(x) _SB_MAKEVALUE(x, S_SPEC_PEC)
+#define S_SPEC_PEC 0
+#define M_SPEC_PEC _SB_MAKEMASK(8, S_SPEC_PEC)
+#define V_SPEC_MB(x) _SB_MAKEVALUE(x, S_SPEC_PEC)
#if SIBYTE_HDR_FEATURE(1250, PASS2) || SIBYTE_HDR_FEATURE(112x, PASS1) || SIBYTE_HDR_FEATURE_CHIP(1480)
-#define S_SMB_CMDH 8
-#define M_SMB_CMDH _SB_MAKEMASK(8, S_SMB_CMDH)
-#define V_SMB_CMDH(x) _SB_MAKEVALUE(x, S_SMB_CMDH)
+#define S_SMB_CMDH 8
+#define M_SMB_CMDH _SB_MAKEMASK(8, S_SMB_CMDH)
+#define V_SMB_CMDH(x) _SB_MAKEVALUE(x, S_SMB_CMDH)
#define M_SMB_EXTEND _SB_MAKEMASK1(14)
-#define S_SMB_DFMT 8
-#define M_SMB_DFMT _SB_MAKEMASK(3, S_SMB_DFMT)
-#define V_SMB_DFMT(x) _SB_MAKEVALUE(x, S_SMB_DFMT)
-#define G_SMB_DFMT(x) _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
+#define S_SMB_DFMT 8
+#define M_SMB_DFMT _SB_MAKEMASK(3, S_SMB_DFMT)
+#define V_SMB_DFMT(x) _SB_MAKEVALUE(x, S_SMB_DFMT)
+#define G_SMB_DFMT(x) _SB_GETVALUE(x, S_SMB_DFMT, M_SMB_DFMT)
-#define K_SMB_DFMT_1BYTE 0
-#define K_SMB_DFMT_2BYTE 1
-#define K_SMB_DFMT_3BYTE 2
-#define K_SMB_DFMT_4BYTE 3
-#define K_SMB_DFMT_NODATA 4
-#define K_SMB_DFMT_CMD4BYTE 5
-#define K_SMB_DFMT_CMD5BYTE 6
-#define K_SMB_DFMT_RESERVED 7
+#define K_SMB_DFMT_1BYTE 0
+#define K_SMB_DFMT_2BYTE 1
+#define K_SMB_DFMT_3BYTE 2
+#define K_SMB_DFMT_4BYTE 3
+#define K_SMB_DFMT_NODATA 4
+#define K_SMB_DFMT_CMD4BYTE 5
+#define K_SMB_DFMT_CMD5BYTE 6
+#define K_SMB_DFMT_RESERVED 7
#define V_SMB_DFMT_1BYTE V_SMB_DFMT(K_SMB_DFMT_1BYTE)
#define V_SMB_DFMT_2BYTE V_SMB_DFMT(K_SMB_DFMT_2BYTE)
@@ -182,13 +182,13 @@
#define V_SMB_DFMT_CMD5BYTE V_SMB_DFMT(K_SMB_DFMT_CMD5BYTE)
#define V_SMB_DFMT_RESERVED V_SMB_DFMT(K_SMB_DFMT_RESERVED)
-#define S_SMB_AFMT 11
-#define M_SMB_AFMT _SB_MAKEMASK(2, S_SMB_AFMT)
-#define V_SMB_AFMT(x) _SB_MAKEVALUE(x, S_SMB_AFMT)
-#define G_SMB_AFMT(x) _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
+#define S_SMB_AFMT 11
+#define M_SMB_AFMT _SB_MAKEMASK(2, S_SMB_AFMT)
+#define V_SMB_AFMT(x) _SB_MAKEVALUE(x, S_SMB_AFMT)
+#define G_SMB_AFMT(x) _SB_GETVALUE(x, S_SMB_AFMT, M_SMB_AFMT)
-#define K_SMB_AFMT_NONE 0
-#define K_SMB_AFMT_ADDR 1
+#define K_SMB_AFMT_NONE 0
+#define K_SMB_AFMT_ADDR 1
#define K_SMB_AFMT_ADDR_CMD1BYTE 2
#define K_SMB_AFMT_ADDR_CMD2BYTE 3
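To make the start/command layout above concrete, the sketch below packs a 7-bit device address and a transaction type the way a driver plausibly would. The usage is an assumption drawn only from the field offsets in this hunk (S_SMB_ADDR = 0, S_SMB_TT = 8), and _SB_MAKEVALUE is again a local stand-in for the sb1250_defs.h helper.

/*
 * Sketch: composing an SMBus start register value from the fields above.
 * Field offsets are taken from this hunk; _SB_MAKEVALUE is a local
 * stand-in for the sb1250_defs.h helper.
 */
#include <stdint.h>
#include <stdio.h>

#define _SB_MAKEVALUE(v, n)	((uint64_t)(v) << (n))

#define S_SMB_ADDR		0
#define V_SMB_ADDR(x)		_SB_MAKEVALUE(x, S_SMB_ADDR)
#define S_SMB_TT		8
#define V_SMB_TT(x)		_SB_MAKEVALUE(x, S_SMB_TT)
#define K_SMB_TT_WR1BYTE	0
#define K_SMB_TT_RD1BYTE	5

int main(void)
{
	uint8_t dev = 0x50;	/* example 7-bit target address, chosen arbitrarily */

	uint64_t start_wr = V_SMB_ADDR(dev) | V_SMB_TT(K_SMB_TT_WR1BYTE);	/* 1-byte write */
	uint64_t start_rd = V_SMB_ADDR(dev) | V_SMB_TT(K_SMB_TT_RD1BYTE);	/* 1-byte read  */

	printf("write start = %#05llx, read start = %#05llx\n",
	       (unsigned long long)start_wr, (unsigned long long)start_rd);
	return 0;
}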
diff --git a/arch/mips/include/asm/sibyte/sb1250_syncser.h b/arch/mips/include/asm/sibyte/sb1250_syncser.h
index 274e9179d32..b3acc75cf0f 100644
--- a/arch/mips/include/asm/sibyte/sb1250_syncser.h
+++ b/arch/mips/include/asm/sibyte/sb1250_syncser.h
@@ -1,7 +1,7 @@
/* *********************************************************************
* SB1250 Board Support Package
*
- * Synchronous Serial Constants File: sb1250_syncser.h
+ * Synchronous Serial Constants File: sb1250_syncser.h
*
* This module contains constants and macros useful for
* manipulating the SB1250's Synchronous Serial
@@ -39,108 +39,108 @@
* Serial Mode Configuration Register
*/
-#define M_SYNCSER_CRC_MODE _SB_MAKEMASK1(0)
-#define M_SYNCSER_MSB_FIRST _SB_MAKEMASK1(1)
+#define M_SYNCSER_CRC_MODE _SB_MAKEMASK1(0)
+#define M_SYNCSER_MSB_FIRST _SB_MAKEMASK1(1)
-#define S_SYNCSER_FLAG_NUM 2
-#define M_SYNCSER_FLAG_NUM _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
-#define V_SYNCSER_FLAG_NUM _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
+#define S_SYNCSER_FLAG_NUM 2
+#define M_SYNCSER_FLAG_NUM _SB_MAKEMASK(4, S_SYNCSER_FLAG_NUM)
+#define V_SYNCSER_FLAG_NUM _SB_MAKEVALUE(x, S_SYNCSER_FLAG_NUM)
-#define M_SYNCSER_FLAG_EN _SB_MAKEMASK1(6)
-#define M_SYNCSER_HDLC_EN _SB_MAKEMASK1(7)
-#define M_SYNCSER_LOOP_MODE _SB_MAKEMASK1(8)
-#define M_SYNCSER_LOOPBACK _SB_MAKEMASK1(9)
+#define M_SYNCSER_FLAG_EN _SB_MAKEMASK1(6)
+#define M_SYNCSER_HDLC_EN _SB_MAKEMASK1(7)
+#define M_SYNCSER_LOOP_MODE _SB_MAKEMASK1(8)
+#define M_SYNCSER_LOOPBACK _SB_MAKEMASK1(9)
/*
* Serial Clock Source and Line Interface Mode Register
*/
-#define M_SYNCSER_RXCLK_INV _SB_MAKEMASK1(0)
-#define M_SYNCSER_RXCLK_EXT _SB_MAKEMASK1(1)
+#define M_SYNCSER_RXCLK_INV _SB_MAKEMASK1(0)
+#define M_SYNCSER_RXCLK_EXT _SB_MAKEMASK1(1)
-#define S_SYNCSER_RXSYNC_DLY 2
-#define M_SYNCSER_RXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
-#define V_SYNCSER_RXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
+#define S_SYNCSER_RXSYNC_DLY 2
+#define M_SYNCSER_RXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_RXSYNC_DLY)
+#define V_SYNCSER_RXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_RXSYNC_DLY)
-#define M_SYNCSER_RXSYNC_LOW _SB_MAKEMASK1(4)
-#define M_SYNCSER_RXSTRB_LOW _SB_MAKEMASK1(5)
+#define M_SYNCSER_RXSYNC_LOW _SB_MAKEMASK1(4)
+#define M_SYNCSER_RXSTRB_LOW _SB_MAKEMASK1(5)
-#define M_SYNCSER_RXSYNC_EDGE _SB_MAKEMASK1(6)
-#define M_SYNCSER_RXSYNC_INT _SB_MAKEMASK1(7)
+#define M_SYNCSER_RXSYNC_EDGE _SB_MAKEMASK1(6)
+#define M_SYNCSER_RXSYNC_INT _SB_MAKEMASK1(7)
-#define M_SYNCSER_TXCLK_INV _SB_MAKEMASK1(8)
-#define M_SYNCSER_TXCLK_EXT _SB_MAKEMASK1(9)
+#define M_SYNCSER_TXCLK_INV _SB_MAKEMASK1(8)
+#define M_SYNCSER_TXCLK_EXT _SB_MAKEMASK1(9)
-#define S_SYNCSER_TXSYNC_DLY 10
-#define M_SYNCSER_TXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
-#define V_SYNCSER_TXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
+#define S_SYNCSER_TXSYNC_DLY 10
+#define M_SYNCSER_TXSYNC_DLY _SB_MAKEMASK(2, S_SYNCSER_TXSYNC_DLY)
+#define V_SYNCSER_TXSYNC_DLY(x) _SB_MAKEVALUE(x, S_SYNCSER_TXSYNC_DLY)
-#define M_SYNCSER_TXSYNC_LOW _SB_MAKEMASK1(12)
-#define M_SYNCSER_TXSTRB_LOW _SB_MAKEMASK1(13)
+#define M_SYNCSER_TXSYNC_LOW _SB_MAKEMASK1(12)
+#define M_SYNCSER_TXSTRB_LOW _SB_MAKEMASK1(13)
-#define M_SYNCSER_TXSYNC_EDGE _SB_MAKEMASK1(14)
-#define M_SYNCSER_TXSYNC_INT _SB_MAKEMASK1(15)
+#define M_SYNCSER_TXSYNC_EDGE _SB_MAKEMASK1(14)
+#define M_SYNCSER_TXSYNC_INT _SB_MAKEMASK1(15)
/*
* Serial Command Register
*/
-#define M_SYNCSER_CMD_RX_EN _SB_MAKEMASK1(0)
-#define M_SYNCSER_CMD_TX_EN _SB_MAKEMASK1(1)
-#define M_SYNCSER_CMD_RX_RESET _SB_MAKEMASK1(2)
-#define M_SYNCSER_CMD_TX_RESET _SB_MAKEMASK1(3)
-#define M_SYNCSER_CMD_TX_PAUSE _SB_MAKEMASK1(5)
+#define M_SYNCSER_CMD_RX_EN _SB_MAKEMASK1(0)
+#define M_SYNCSER_CMD_TX_EN _SB_MAKEMASK1(1)
+#define M_SYNCSER_CMD_RX_RESET _SB_MAKEMASK1(2)
+#define M_SYNCSER_CMD_TX_RESET _SB_MAKEMASK1(3)
+#define M_SYNCSER_CMD_TX_PAUSE _SB_MAKEMASK1(5)
/*
* Serial DMA Enable Register
*/
-#define M_SYNCSER_DMA_RX_EN _SB_MAKEMASK1(0)
-#define M_SYNCSER_DMA_TX_EN _SB_MAKEMASK1(4)
+#define M_SYNCSER_DMA_RX_EN _SB_MAKEMASK1(0)
+#define M_SYNCSER_DMA_TX_EN _SB_MAKEMASK1(4)
/*
* Serial Status Register
*/
-#define M_SYNCSER_RX_CRCERR _SB_MAKEMASK1(0)
-#define M_SYNCSER_RX_ABORT _SB_MAKEMASK1(1)
-#define M_SYNCSER_RX_OCTET _SB_MAKEMASK1(2)
-#define M_SYNCSER_RX_LONGFRM _SB_MAKEMASK1(3)
-#define M_SYNCSER_RX_SHORTFRM _SB_MAKEMASK1(4)
-#define M_SYNCSER_RX_OVERRUN _SB_MAKEMASK1(5)
-#define M_SYNCSER_RX_SYNC_ERR _SB_MAKEMASK1(6)
-#define M_SYNCSER_TX_CRCERR _SB_MAKEMASK1(8)
-#define M_SYNCSER_TX_UNDERRUN _SB_MAKEMASK1(9)
-#define M_SYNCSER_TX_SYNC_ERR _SB_MAKEMASK1(10)
-#define M_SYNCSER_TX_PAUSE_COMPLETE _SB_MAKEMASK1(11)
-#define M_SYNCSER_RX_EOP_COUNT _SB_MAKEMASK1(16)
-#define M_SYNCSER_RX_EOP_TIMER _SB_MAKEMASK1(17)
-#define M_SYNCSER_RX_EOP_SEEN _SB_MAKEMASK1(18)
-#define M_SYNCSER_RX_HWM _SB_MAKEMASK1(19)
-#define M_SYNCSER_RX_LWM _SB_MAKEMASK1(20)
-#define M_SYNCSER_RX_DSCR _SB_MAKEMASK1(21)
-#define M_SYNCSER_RX_DERR _SB_MAKEMASK1(22)
-#define M_SYNCSER_TX_EOP_COUNT _SB_MAKEMASK1(24)
-#define M_SYNCSER_TX_EOP_TIMER _SB_MAKEMASK1(25)
-#define M_SYNCSER_TX_EOP_SEEN _SB_MAKEMASK1(26)
-#define M_SYNCSER_TX_HWM _SB_MAKEMASK1(27)
-#define M_SYNCSER_TX_LWM _SB_MAKEMASK1(28)
-#define M_SYNCSER_TX_DSCR _SB_MAKEMASK1(29)
-#define M_SYNCSER_TX_DERR _SB_MAKEMASK1(30)
-#define M_SYNCSER_TX_DZERO _SB_MAKEMASK1(31)
+#define M_SYNCSER_RX_CRCERR _SB_MAKEMASK1(0)
+#define M_SYNCSER_RX_ABORT _SB_MAKEMASK1(1)
+#define M_SYNCSER_RX_OCTET _SB_MAKEMASK1(2)
+#define M_SYNCSER_RX_LONGFRM _SB_MAKEMASK1(3)
+#define M_SYNCSER_RX_SHORTFRM _SB_MAKEMASK1(4)
+#define M_SYNCSER_RX_OVERRUN _SB_MAKEMASK1(5)
+#define M_SYNCSER_RX_SYNC_ERR _SB_MAKEMASK1(6)
+#define M_SYNCSER_TX_CRCERR _SB_MAKEMASK1(8)
+#define M_SYNCSER_TX_UNDERRUN _SB_MAKEMASK1(9)
+#define M_SYNCSER_TX_SYNC_ERR _SB_MAKEMASK1(10)
+#define M_SYNCSER_TX_PAUSE_COMPLETE _SB_MAKEMASK1(11)
+#define M_SYNCSER_RX_EOP_COUNT _SB_MAKEMASK1(16)
+#define M_SYNCSER_RX_EOP_TIMER _SB_MAKEMASK1(17)
+#define M_SYNCSER_RX_EOP_SEEN _SB_MAKEMASK1(18)
+#define M_SYNCSER_RX_HWM _SB_MAKEMASK1(19)
+#define M_SYNCSER_RX_LWM _SB_MAKEMASK1(20)
+#define M_SYNCSER_RX_DSCR _SB_MAKEMASK1(21)
+#define M_SYNCSER_RX_DERR _SB_MAKEMASK1(22)
+#define M_SYNCSER_TX_EOP_COUNT _SB_MAKEMASK1(24)
+#define M_SYNCSER_TX_EOP_TIMER _SB_MAKEMASK1(25)
+#define M_SYNCSER_TX_EOP_SEEN _SB_MAKEMASK1(26)
+#define M_SYNCSER_TX_HWM _SB_MAKEMASK1(27)
+#define M_SYNCSER_TX_LWM _SB_MAKEMASK1(28)
+#define M_SYNCSER_TX_DSCR _SB_MAKEMASK1(29)
+#define M_SYNCSER_TX_DERR _SB_MAKEMASK1(30)
+#define M_SYNCSER_TX_DZERO _SB_MAKEMASK1(31)
/*
* Sequencer Table Entry format
*/
-#define M_SYNCSER_SEQ_LAST _SB_MAKEMASK1(0)
-#define M_SYNCSER_SEQ_BYTE _SB_MAKEMASK1(1)
+#define M_SYNCSER_SEQ_LAST _SB_MAKEMASK1(0)
+#define M_SYNCSER_SEQ_BYTE _SB_MAKEMASK1(1)
-#define S_SYNCSER_SEQ_COUNT 2
-#define M_SYNCSER_SEQ_COUNT _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
-#define V_SYNCSER_SEQ_COUNT(x) _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
+#define S_SYNCSER_SEQ_COUNT 2
+#define M_SYNCSER_SEQ_COUNT _SB_MAKEMASK(4, S_SYNCSER_SEQ_COUNT)
+#define V_SYNCSER_SEQ_COUNT(x) _SB_MAKEVALUE(x, S_SYNCSER_SEQ_COUNT)
-#define M_SYNCSER_SEQ_ENABLE _SB_MAKEMASK1(6)
-#define M_SYNCSER_SEQ_STROBE _SB_MAKEMASK1(7)
+#define M_SYNCSER_SEQ_ENABLE _SB_MAKEMASK1(6)
+#define M_SYNCSER_SEQ_STROBE _SB_MAKEMASK1(7)
#endif
diff --git a/arch/mips/include/asm/sibyte/sb1250_uart.h b/arch/mips/include/asm/sibyte/sb1250_uart.h
index bb99ecac581..a43dc197628 100644
--- a/arch/mips/include/asm/sibyte/sb1250_uart.h
+++ b/arch/mips/include/asm/sibyte/sb1250_uart.h
@@ -45,33 +45,33 @@
* Register: DUART_MODE_REG_1_B
*/
-#define S_DUART_BITS_PER_CHAR 0
-#define M_DUART_BITS_PER_CHAR _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
+#define S_DUART_BITS_PER_CHAR 0
+#define M_DUART_BITS_PER_CHAR _SB_MAKEMASK(2, S_DUART_BITS_PER_CHAR)
#define V_DUART_BITS_PER_CHAR(x) _SB_MAKEVALUE(x, S_DUART_BITS_PER_CHAR)
#define K_DUART_BITS_PER_CHAR_RSV0 0
#define K_DUART_BITS_PER_CHAR_RSV1 1
-#define K_DUART_BITS_PER_CHAR_7 2
-#define K_DUART_BITS_PER_CHAR_8 3
+#define K_DUART_BITS_PER_CHAR_7 2
+#define K_DUART_BITS_PER_CHAR_8 3
#define V_DUART_BITS_PER_CHAR_RSV0 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV0)
#define V_DUART_BITS_PER_CHAR_RSV1 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_RSV1)
-#define V_DUART_BITS_PER_CHAR_7 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
-#define V_DUART_BITS_PER_CHAR_8 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
+#define V_DUART_BITS_PER_CHAR_7 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_7)
+#define V_DUART_BITS_PER_CHAR_8 V_DUART_BITS_PER_CHAR(K_DUART_BITS_PER_CHAR_8)
#define M_DUART_PARITY_TYPE_EVEN 0x00
-#define M_DUART_PARITY_TYPE_ODD _SB_MAKEMASK1(2)
+#define M_DUART_PARITY_TYPE_ODD _SB_MAKEMASK1(2)
-#define S_DUART_PARITY_MODE 3
-#define M_DUART_PARITY_MODE _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
-#define V_DUART_PARITY_MODE(x) _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
+#define S_DUART_PARITY_MODE 3
+#define M_DUART_PARITY_MODE _SB_MAKEMASK(2, S_DUART_PARITY_MODE)
+#define V_DUART_PARITY_MODE(x) _SB_MAKEVALUE(x, S_DUART_PARITY_MODE)
-#define K_DUART_PARITY_MODE_ADD 0
+#define K_DUART_PARITY_MODE_ADD 0
#define K_DUART_PARITY_MODE_ADD_FIXED 1
#define K_DUART_PARITY_MODE_NONE 2
-#define V_DUART_PARITY_MODE_ADD V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
+#define V_DUART_PARITY_MODE_ADD V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD)
#define V_DUART_PARITY_MODE_ADD_FIXED V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_ADD_FIXED)
#define V_DUART_PARITY_MODE_NONE V_DUART_PARITY_MODE(K_DUART_PARITY_MODE_NONE)
@@ -81,7 +81,7 @@
#define M_DUART_RX_IRQ_SEL_RXRDY 0
#define M_DUART_RX_IRQ_SEL_RXFULL _SB_MAKEMASK1(6)
-#define M_DUART_RX_RTS_ENA _SB_MAKEMASK1(7)
+#define M_DUART_RX_RTS_ENA _SB_MAKEMASK1(7)
/*
* DUART Mode Register #2 (Table 10-4)
@@ -89,18 +89,18 @@
* Register: DUART_MODE_REG_2_B
*/
-#define M_DUART_MODE_RESERVED1 _SB_MAKEMASK(3, 0) /* ignored */
+#define M_DUART_MODE_RESERVED1 _SB_MAKEMASK(3, 0) /* ignored */
-#define M_DUART_STOP_BIT_LEN_2 _SB_MAKEMASK1(3)
-#define M_DUART_STOP_BIT_LEN_1 0
+#define M_DUART_STOP_BIT_LEN_2 _SB_MAKEMASK1(3)
+#define M_DUART_STOP_BIT_LEN_1 0
-#define M_DUART_TX_CTS_ENA _SB_MAKEMASK1(4)
+#define M_DUART_TX_CTS_ENA _SB_MAKEMASK1(4)
-#define M_DUART_MODE_RESERVED2 _SB_MAKEMASK1(5) /* must be zero */
+#define M_DUART_MODE_RESERVED2 _SB_MAKEMASK1(5) /* must be zero */
#define S_DUART_CHAN_MODE 6
-#define M_DUART_CHAN_MODE _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
+#define M_DUART_CHAN_MODE _SB_MAKEMASK(2, S_DUART_CHAN_MODE)
#define V_DUART_CHAN_MODE(x) _SB_MAKEVALUE(x, S_DUART_CHAN_MODE)
#define K_DUART_CHAN_MODE_NORMAL 0
@@ -117,34 +117,34 @@
* Register: DUART_CMD_B
*/
-#define M_DUART_RX_EN _SB_MAKEMASK1(0)
-#define M_DUART_RX_DIS _SB_MAKEMASK1(1)
-#define M_DUART_TX_EN _SB_MAKEMASK1(2)
-#define M_DUART_TX_DIS _SB_MAKEMASK1(3)
+#define M_DUART_RX_EN _SB_MAKEMASK1(0)
+#define M_DUART_RX_DIS _SB_MAKEMASK1(1)
+#define M_DUART_TX_EN _SB_MAKEMASK1(2)
+#define M_DUART_TX_DIS _SB_MAKEMASK1(3)
#define S_DUART_MISC_CMD 4
-#define M_DUART_MISC_CMD _SB_MAKEMASK(3, S_DUART_MISC_CMD)
-#define V_DUART_MISC_CMD(x) _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
-
-#define K_DUART_MISC_CMD_NOACTION0 0
-#define K_DUART_MISC_CMD_NOACTION1 1
-#define K_DUART_MISC_CMD_RESET_RX 2
-#define K_DUART_MISC_CMD_RESET_TX 3
-#define K_DUART_MISC_CMD_NOACTION4 4
+#define M_DUART_MISC_CMD _SB_MAKEMASK(3, S_DUART_MISC_CMD)
+#define V_DUART_MISC_CMD(x) _SB_MAKEVALUE(x, S_DUART_MISC_CMD)
+
+#define K_DUART_MISC_CMD_NOACTION0 0
+#define K_DUART_MISC_CMD_NOACTION1 1
+#define K_DUART_MISC_CMD_RESET_RX 2
+#define K_DUART_MISC_CMD_RESET_TX 3
+#define K_DUART_MISC_CMD_NOACTION4 4
#define K_DUART_MISC_CMD_RESET_BREAK_INT 5
-#define K_DUART_MISC_CMD_START_BREAK 6
-#define K_DUART_MISC_CMD_STOP_BREAK 7
-
-#define V_DUART_MISC_CMD_NOACTION0 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
-#define V_DUART_MISC_CMD_NOACTION1 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
-#define V_DUART_MISC_CMD_RESET_RX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
-#define V_DUART_MISC_CMD_RESET_TX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
-#define V_DUART_MISC_CMD_NOACTION4 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
+#define K_DUART_MISC_CMD_START_BREAK 6
+#define K_DUART_MISC_CMD_STOP_BREAK 7
+
+#define V_DUART_MISC_CMD_NOACTION0 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION0)
+#define V_DUART_MISC_CMD_NOACTION1 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION1)
+#define V_DUART_MISC_CMD_RESET_RX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_RX)
+#define V_DUART_MISC_CMD_RESET_TX V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_TX)
+#define V_DUART_MISC_CMD_NOACTION4 V_DUART_MISC_CMD(K_DUART_MISC_CMD_NOACTION4)
#define V_DUART_MISC_CMD_RESET_BREAK_INT V_DUART_MISC_CMD(K_DUART_MISC_CMD_RESET_BREAK_INT)
-#define V_DUART_MISC_CMD_START_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
-#define V_DUART_MISC_CMD_STOP_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
+#define V_DUART_MISC_CMD_START_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_START_BREAK)
+#define V_DUART_MISC_CMD_STOP_BREAK V_DUART_MISC_CMD(K_DUART_MISC_CMD_STOP_BREAK)
-#define M_DUART_CMD_RESERVED _SB_MAKEMASK1(7)
+#define M_DUART_CMD_RESERVED _SB_MAKEMASK1(7)
/*
* DUART Status Register (Table 10-6)
@@ -153,14 +153,14 @@
* READ-ONLY
*/
-#define M_DUART_RX_RDY _SB_MAKEMASK1(0)
-#define M_DUART_RX_FFUL _SB_MAKEMASK1(1)
-#define M_DUART_TX_RDY _SB_MAKEMASK1(2)
-#define M_DUART_TX_EMT _SB_MAKEMASK1(3)
-#define M_DUART_OVRUN_ERR _SB_MAKEMASK1(4)
-#define M_DUART_PARITY_ERR _SB_MAKEMASK1(5)
-#define M_DUART_FRM_ERR _SB_MAKEMASK1(6)
-#define M_DUART_RCVD_BRK _SB_MAKEMASK1(7)
+#define M_DUART_RX_RDY _SB_MAKEMASK1(0)
+#define M_DUART_RX_FFUL _SB_MAKEMASK1(1)
+#define M_DUART_TX_RDY _SB_MAKEMASK1(2)
+#define M_DUART_TX_EMT _SB_MAKEMASK1(3)
+#define M_DUART_OVRUN_ERR _SB_MAKEMASK1(4)
+#define M_DUART_PARITY_ERR _SB_MAKEMASK1(5)
+#define M_DUART_FRM_ERR _SB_MAKEMASK1(6)
+#define M_DUART_RCVD_BRK _SB_MAKEMASK1(7)
/*
* DUART Baud Rate Register (Table 10-7)
@@ -168,8 +168,8 @@
* Register: DUART_CLK_SEL_B
*/
-#define M_DUART_CLK_COUNTER _SB_MAKEMASK(12, 0)
-#define V_DUART_BAUD_RATE(x) (100000000/((x)*20)-1)
+#define M_DUART_CLK_COUNTER _SB_MAKEMASK(12, 0)
+#define V_DUART_BAUD_RATE(x) (100000000/((x)*20)-1)
/*
* DUART Data Registers (Table 10-8 and 10-9)
@@ -179,33 +179,33 @@
* Register: DUART_TX_HOLD_B
*/
-#define M_DUART_RX_DATA _SB_MAKEMASK(8, 0)
-#define M_DUART_TX_DATA _SB_MAKEMASK(8, 0)
+#define M_DUART_RX_DATA _SB_MAKEMASK(8, 0)
+#define M_DUART_TX_DATA _SB_MAKEMASK(8, 0)
/*
* DUART Input Port Register (Table 10-10)
* Register: DUART_IN_PORT
*/
-#define M_DUART_IN_PIN0_VAL _SB_MAKEMASK1(0)
-#define M_DUART_IN_PIN1_VAL _SB_MAKEMASK1(1)
-#define M_DUART_IN_PIN2_VAL _SB_MAKEMASK1(2)
-#define M_DUART_IN_PIN3_VAL _SB_MAKEMASK1(3)
-#define M_DUART_IN_PIN4_VAL _SB_MAKEMASK1(4)
-#define M_DUART_IN_PIN5_VAL _SB_MAKEMASK1(5)
-#define M_DUART_RIN0_PIN _SB_MAKEMASK1(6)
-#define M_DUART_RIN1_PIN _SB_MAKEMASK1(7)
+#define M_DUART_IN_PIN0_VAL _SB_MAKEMASK1(0)
+#define M_DUART_IN_PIN1_VAL _SB_MAKEMASK1(1)
+#define M_DUART_IN_PIN2_VAL _SB_MAKEMASK1(2)
+#define M_DUART_IN_PIN3_VAL _SB_MAKEMASK1(3)
+#define M_DUART_IN_PIN4_VAL _SB_MAKEMASK1(4)
+#define M_DUART_IN_PIN5_VAL _SB_MAKEMASK1(5)
+#define M_DUART_RIN0_PIN _SB_MAKEMASK1(6)
+#define M_DUART_RIN1_PIN _SB_MAKEMASK1(7)
/*
* DUART Input Port Change Status Register (Tables 10-11, 10-12, and 10-13)
* Register: DUART_INPORT_CHNG
*/
-#define S_DUART_IN_PIN_VAL 0
-#define M_DUART_IN_PIN_VAL _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
+#define S_DUART_IN_PIN_VAL 0
+#define M_DUART_IN_PIN_VAL _SB_MAKEMASK(4, S_DUART_IN_PIN_VAL)
-#define S_DUART_IN_PIN_CHNG 4
-#define M_DUART_IN_PIN_CHNG _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
+#define S_DUART_IN_PIN_CHNG 4
+#define M_DUART_IN_PIN_CHNG _SB_MAKEMASK(4, S_DUART_IN_PIN_CHNG)
/*
@@ -213,46 +213,46 @@
* Register: DUART_OPCR
*/
-#define M_DUART_OPCR_RESERVED0 _SB_MAKEMASK1(0) /* must be zero */
-#define M_DUART_OPC2_SEL _SB_MAKEMASK1(1)
-#define M_DUART_OPCR_RESERVED1 _SB_MAKEMASK1(2) /* must be zero */
-#define M_DUART_OPC3_SEL _SB_MAKEMASK1(3)
-#define M_DUART_OPCR_RESERVED2 _SB_MAKEMASK(4, 4) /* must be zero */
+#define M_DUART_OPCR_RESERVED0 _SB_MAKEMASK1(0) /* must be zero */
+#define M_DUART_OPC2_SEL _SB_MAKEMASK1(1)
+#define M_DUART_OPCR_RESERVED1 _SB_MAKEMASK1(2) /* must be zero */
+#define M_DUART_OPC3_SEL _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED2 _SB_MAKEMASK(4, 4) /* must be zero */
/*
* DUART Aux Control Register (Table 10-15)
* Register: DUART_AUX_CTRL
*/
-#define M_DUART_IP0_CHNG_ENA _SB_MAKEMASK1(0)
-#define M_DUART_IP1_CHNG_ENA _SB_MAKEMASK1(1)
-#define M_DUART_IP2_CHNG_ENA _SB_MAKEMASK1(2)
-#define M_DUART_IP3_CHNG_ENA _SB_MAKEMASK1(3)
-#define M_DUART_ACR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_IP0_CHNG_ENA _SB_MAKEMASK1(0)
+#define M_DUART_IP1_CHNG_ENA _SB_MAKEMASK1(1)
+#define M_DUART_IP2_CHNG_ENA _SB_MAKEMASK1(2)
+#define M_DUART_IP3_CHNG_ENA _SB_MAKEMASK1(3)
+#define M_DUART_ACR_RESERVED _SB_MAKEMASK(4, 4)
-#define M_DUART_CTS_CHNG_ENA _SB_MAKEMASK1(0)
-#define M_DUART_CIN_CHNG_ENA _SB_MAKEMASK1(2)
+#define M_DUART_CTS_CHNG_ENA _SB_MAKEMASK1(0)
+#define M_DUART_CIN_CHNG_ENA _SB_MAKEMASK1(2)
/*
* DUART Interrupt Status Register (Table 10-16)
* Register: DUART_ISR
*/
-#define M_DUART_ISR_TX_A _SB_MAKEMASK1(0)
+#define M_DUART_ISR_TX_A _SB_MAKEMASK1(0)
-#define S_DUART_ISR_RX_A 1
-#define M_DUART_ISR_RX_A _SB_MAKEMASK1(S_DUART_ISR_RX_A)
-#define V_DUART_ISR_RX_A(x) _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
-#define G_DUART_ISR_RX_A(x) _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
+#define S_DUART_ISR_RX_A 1
+#define M_DUART_ISR_RX_A _SB_MAKEMASK1(S_DUART_ISR_RX_A)
+#define V_DUART_ISR_RX_A(x) _SB_MAKEVALUE(x, S_DUART_ISR_RX_A)
+#define G_DUART_ISR_RX_A(x) _SB_GETVALUE(x, S_DUART_ISR_RX_A, M_DUART_ISR_RX_A)
-#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
-#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_ISR_BRK_A _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN_A _SB_MAKEMASK1(3)
#define M_DUART_ISR_ALL_A _SB_MAKEMASK(4, 0)
-#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
-#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
-#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
-#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_ISR_TX_B _SB_MAKEMASK1(4)
+#define M_DUART_ISR_RX_B _SB_MAKEMASK1(5)
+#define M_DUART_ISR_BRK_B _SB_MAKEMASK1(6)
+#define M_DUART_ISR_IN_B _SB_MAKEMASK1(7)
#define M_DUART_ISR_ALL_B _SB_MAKEMASK(4, 4)
/*
@@ -262,29 +262,29 @@
* Register: DUART_ISR_B
*/
-#define M_DUART_ISR_TX _SB_MAKEMASK1(0)
-#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
-#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
-#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
+#define M_DUART_ISR_TX _SB_MAKEMASK1(0)
+#define M_DUART_ISR_RX _SB_MAKEMASK1(1)
+#define M_DUART_ISR_BRK _SB_MAKEMASK1(2)
+#define M_DUART_ISR_IN _SB_MAKEMASK1(3)
#define M_DUART_ISR_ALL _SB_MAKEMASK(4, 0)
-#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_ISR_RESERVED _SB_MAKEMASK(4, 4)
/*
* DUART Interrupt Mask Register (Table 10-19)
* Register: DUART_IMR
*/
-#define M_DUART_IMR_TX_A _SB_MAKEMASK1(0)
-#define M_DUART_IMR_RX_A _SB_MAKEMASK1(1)
-#define M_DUART_IMR_BRK_A _SB_MAKEMASK1(2)
-#define M_DUART_IMR_IN_A _SB_MAKEMASK1(3)
+#define M_DUART_IMR_TX_A _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX_A _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK_A _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN_A _SB_MAKEMASK1(3)
#define M_DUART_IMR_ALL_A _SB_MAKEMASK(4, 0)
-#define M_DUART_IMR_TX_B _SB_MAKEMASK1(4)
-#define M_DUART_IMR_RX_B _SB_MAKEMASK1(5)
-#define M_DUART_IMR_BRK_B _SB_MAKEMASK1(6)
-#define M_DUART_IMR_IN_B _SB_MAKEMASK1(7)
-#define M_DUART_IMR_ALL_B _SB_MAKEMASK(4, 4)
+#define M_DUART_IMR_TX_B _SB_MAKEMASK1(4)
+#define M_DUART_IMR_RX_B _SB_MAKEMASK1(5)
+#define M_DUART_IMR_BRK_B _SB_MAKEMASK1(6)
+#define M_DUART_IMR_IN_B _SB_MAKEMASK1(7)
+#define M_DUART_IMR_ALL_B _SB_MAKEMASK(4, 4)
/*
* DUART Channel A Interrupt Mask Register (Table 10-20)
@@ -293,12 +293,12 @@
* Register: DUART_IMR_B
*/
-#define M_DUART_IMR_TX _SB_MAKEMASK1(0)
-#define M_DUART_IMR_RX _SB_MAKEMASK1(1)
-#define M_DUART_IMR_BRK _SB_MAKEMASK1(2)
-#define M_DUART_IMR_IN _SB_MAKEMASK1(3)
+#define M_DUART_IMR_TX _SB_MAKEMASK1(0)
+#define M_DUART_IMR_RX _SB_MAKEMASK1(1)
+#define M_DUART_IMR_BRK _SB_MAKEMASK1(2)
+#define M_DUART_IMR_IN _SB_MAKEMASK1(3)
#define M_DUART_IMR_ALL _SB_MAKEMASK(4, 0)
-#define M_DUART_IMR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_IMR_RESERVED _SB_MAKEMASK(4, 4)
/*
@@ -306,33 +306,33 @@
* Register: DUART_SET_OPR
*/
-#define M_DUART_SET_OPR0 _SB_MAKEMASK1(0)
-#define M_DUART_SET_OPR1 _SB_MAKEMASK1(1)
-#define M_DUART_SET_OPR2 _SB_MAKEMASK1(2)
-#define M_DUART_SET_OPR3 _SB_MAKEMASK1(3)
-#define M_DUART_OPSR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_SET_OPR0 _SB_MAKEMASK1(0)
+#define M_DUART_SET_OPR1 _SB_MAKEMASK1(1)
+#define M_DUART_SET_OPR2 _SB_MAKEMASK1(2)
+#define M_DUART_SET_OPR3 _SB_MAKEMASK1(3)
+#define M_DUART_OPSR_RESERVED _SB_MAKEMASK(4, 4)
/*
* DUART Output Port Clear Register (Table 10-23)
* Register: DUART_CLEAR_OPR
*/
-#define M_DUART_CLR_OPR0 _SB_MAKEMASK1(0)
-#define M_DUART_CLR_OPR1 _SB_MAKEMASK1(1)
-#define M_DUART_CLR_OPR2 _SB_MAKEMASK1(2)
-#define M_DUART_CLR_OPR3 _SB_MAKEMASK1(3)
-#define M_DUART_OPCR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_CLR_OPR0 _SB_MAKEMASK1(0)
+#define M_DUART_CLR_OPR1 _SB_MAKEMASK1(1)
+#define M_DUART_CLR_OPR2 _SB_MAKEMASK1(2)
+#define M_DUART_CLR_OPR3 _SB_MAKEMASK1(3)
+#define M_DUART_OPCR_RESERVED _SB_MAKEMASK(4, 4)
/*
* DUART Output Port RTS Register (Table 10-24)
* Register: DUART_OUT_PORT
*/
-#define M_DUART_OUT_PIN_SET0 _SB_MAKEMASK1(0)
-#define M_DUART_OUT_PIN_SET1 _SB_MAKEMASK1(1)
-#define M_DUART_OUT_PIN_CLR0 _SB_MAKEMASK1(2)
-#define M_DUART_OUT_PIN_CLR1 _SB_MAKEMASK1(3)
-#define M_DUART_OPRR_RESERVED _SB_MAKEMASK(4, 4)
+#define M_DUART_OUT_PIN_SET0 _SB_MAKEMASK1(0)
+#define M_DUART_OUT_PIN_SET1 _SB_MAKEMASK1(1)
+#define M_DUART_OUT_PIN_CLR0 _SB_MAKEMASK1(2)
+#define M_DUART_OUT_PIN_CLR1 _SB_MAKEMASK1(3)
+#define M_DUART_OPRR_RESERVED _SB_MAKEMASK(4, 4)
#define M_DUART_OUT_PIN_SET(chan) \
(chan == 0 ? M_DUART_OUT_PIN_SET0 : M_DUART_OUT_PIN_SET1)
@@ -344,15 +344,15 @@
* Full Interrupt Control Register
*/
-#define S_DUART_SIG_FULL _SB_MAKE64(0)
-#define M_DUART_SIG_FULL _SB_MAKEMASK(4, S_DUART_SIG_FULL)
-#define V_DUART_SIG_FULL(x) _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
-#define G_DUART_SIG_FULL(x) _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
+#define S_DUART_SIG_FULL _SB_MAKE64(0)
+#define M_DUART_SIG_FULL _SB_MAKEMASK(4, S_DUART_SIG_FULL)
+#define V_DUART_SIG_FULL(x) _SB_MAKEVALUE(x, S_DUART_SIG_FULL)
+#define G_DUART_SIG_FULL(x) _SB_GETVALUE(x, S_DUART_SIG_FULL, M_DUART_SIG_FULL)
-#define S_DUART_INT_TIME _SB_MAKE64(4)
-#define M_DUART_INT_TIME _SB_MAKEMASK(4, S_DUART_INT_TIME)
-#define V_DUART_INT_TIME(x) _SB_MAKEVALUE(x, S_DUART_INT_TIME)
-#define G_DUART_INT_TIME(x) _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
+#define S_DUART_INT_TIME _SB_MAKE64(4)
+#define M_DUART_INT_TIME _SB_MAKEMASK(4, S_DUART_INT_TIME)
+#define V_DUART_INT_TIME(x) _SB_MAKEVALUE(x, S_DUART_INT_TIME)
+#define G_DUART_INT_TIME(x) _SB_GETVALUE(x, S_DUART_INT_TIME, M_DUART_INT_TIME)
#endif /* 1250 PASS2 || 112x PASS1 || 1480 */
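V_DUART_BAUD_RATE() above is the divisor written to the DUART_CLK_SEL_* registers: 100000000/(baud*20) - 1, i.e. a 100 MHz reference with a fixed divide-by-20 prescale. A quick check of that arithmetic with two common baud rates (the baud values here are just worked examples, not from any board configuration):

/* Worked example of the baud-rate divisor formula from sb1250_uart.h. */
#include <stdio.h>

#define V_DUART_BAUD_RATE(x)	(100000000/((x)*20)-1)

int main(void)
{
	/* 115200 baud: 100000000 / 2304000 = 43 (integer), minus 1 -> 42 */
	printf("115200 baud -> divisor %d\n", V_DUART_BAUD_RATE(115200));

	/*   9600 baud: 100000000 /  192000 = 520, minus 1 -> 519 */
	printf("  9600 baud -> divisor %d\n", V_DUART_BAUD_RATE(9600));
	return 0;
}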
diff --git a/arch/mips/include/asm/sibyte/sentosa.h b/arch/mips/include/asm/sibyte/sentosa.h
index 64c47874f32..0351a46eebb 100644
--- a/arch/mips/include/asm/sibyte/sentosa.h
+++ b/arch/mips/include/asm/sibyte/sentosa.h
@@ -30,11 +30,11 @@
/* Generic bus chip selects */
#ifdef CONFIG_SIBYTE_RHONE
-#define LEDS_CS 6
-#define LEDS_PHYS 0x1d0a0000
+#define LEDS_CS 6
+#define LEDS_PHYS 0x1d0a0000
#endif
/* GPIOs */
-#define K_GPIO_DBG_LED 0
+#define K_GPIO_DBG_LED 0
#endif /* __ASM_SIBYTE_SENTOSA_H */
diff --git a/arch/mips/include/asm/sibyte/swarm.h b/arch/mips/include/asm/sibyte/swarm.h
index 114d9d29ca9..187cfb1f67c 100644
--- a/arch/mips/include/asm/sibyte/swarm.h
+++ b/arch/mips/include/asm/sibyte/swarm.h
@@ -24,41 +24,41 @@
#ifdef CONFIG_SIBYTE_SWARM
#define SIBYTE_BOARD_NAME "BCM91250A (SWARM)"
#define SIBYTE_HAVE_PCMCIA 1
-#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_HAVE_IDE 1
#endif
#ifdef CONFIG_SIBYTE_LITTLESUR
#define SIBYTE_BOARD_NAME "BCM91250C2 (LittleSur)"
#define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE 1
+#define SIBYTE_HAVE_IDE 1
#define SIBYTE_DEFAULT_CONSOLE "cfe0"
#endif
#ifdef CONFIG_SIBYTE_CRHONE
#define SIBYTE_BOARD_NAME "BCM91125C (CRhone)"
#define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE 0
+#define SIBYTE_HAVE_IDE 0
#endif
#ifdef CONFIG_SIBYTE_CRHINE
#define SIBYTE_BOARD_NAME "BCM91120C (CRhine)"
#define SIBYTE_HAVE_PCMCIA 0
-#define SIBYTE_HAVE_IDE 0
+#define SIBYTE_HAVE_IDE 0
#endif
/* Generic bus chip selects */
-#define LEDS_CS 3
-#define LEDS_PHYS 0x100a0000
+#define LEDS_CS 3
+#define LEDS_PHYS 0x100a0000
#ifdef SIBYTE_HAVE_IDE
-#define IDE_CS 4
-#define IDE_PHYS 0x100b0000
-#define K_GPIO_GB_IDE 4
-#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
+#define IDE_CS 4
+#define IDE_PHYS 0x100b0000
+#define K_GPIO_GB_IDE 4
+#define K_INT_GB_IDE (K_INT_GPIO_0 + K_GPIO_GB_IDE)
#endif
#ifdef SIBYTE_HAVE_PCMCIA
-#define PCMCIA_CS 6
-#define PCMCIA_PHYS 0x11000000
+#define PCMCIA_CS 6
+#define PCMCIA_PHYS 0x11000000
#define K_GPIO_PC_READY 9
-#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
+#define K_INT_PC_READY (K_INT_GPIO_0 + K_GPIO_PC_READY)
#endif
#endif /* __ASM_SIBYTE_SWARM_H */
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index f33b5fd6972..eb600875848 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -26,7 +26,7 @@ extern cpumask_t cpu_sibling_map[];
#define raw_smp_processor_id() (current_thread_info()->cpu)
/* Map from cpu id to sequential logical cpu number. This will only
- not be idempotent when cpus failed to come on-line. */
+ not be idempotent when cpus failed to come on-line. */
extern int __cpu_number_map[NR_CPUS];
#define cpu_number_map(cpu) __cpu_number_map[cpu]
@@ -36,7 +36,7 @@ extern int __cpu_logical_map[NR_CPUS];
#define NO_PROC_ID (-1)
-#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
+#define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
@@ -62,14 +62,14 @@ static inline void smp_send_reschedule(int cpu)
#ifdef CONFIG_HOTPLUG_CPU
static inline int __cpu_disable(void)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
return mp_ops->cpu_disable();
}
static inline void __cpu_die(unsigned int cpu)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->cpu_die(cpu);
}
@@ -81,14 +81,14 @@ extern asmlinkage void smp_call_function_interrupt(void);
static inline void arch_send_call_function_single_ipi(int cpu)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
- extern struct plat_smp_ops *mp_ops; /* private */
+ extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
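The comment in this hunk describes two inverse tables: __cpu_number_map[] takes a hardware cpu id to its logical number and __cpu_logical_map[] goes the other way. Below is a toy model of why the mapping stops being the identity when a cpu fails to come online; the online set is hypothetical and the arrays are local mock-ups, not the kernel's.

/* Toy model of the __cpu_number_map / __cpu_logical_map tables. */
#include <stdio.h>

#define NR_CPUS		4
#define NO_PROC_ID	(-1)

static int __cpu_number_map[NR_CPUS];	/* hw cpu id   -> logical number */
static int __cpu_logical_map[NR_CPUS];	/* logical num -> hw cpu id      */

int main(void)
{
	/* Pretend hw cpu 2 failed to come online: logical numbering skips it. */
	int online[] = { 0, 1, 3 };
	int logical = 0;

	for (int i = 0; i < NR_CPUS; i++)
		__cpu_number_map[i] = NO_PROC_ID;

	for (unsigned int i = 0; i < sizeof(online) / sizeof(online[0]); i++) {
		__cpu_logical_map[logical] = online[i];
		__cpu_number_map[online[i]] = logical++;
	}

	/* hw cpu 3 is now logical cpu 2, so the map is no longer the identity. */
	printf("hw 3 -> logical %d, hw 2 -> %d\n",
	       __cpu_number_map[3], __cpu_number_map[2]);
	return 0;
}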
diff --git a/arch/mips/include/asm/smtc.h b/arch/mips/include/asm/smtc.h
index 8935426a56a..e56b439b787 100644
--- a/arch/mips/include/asm/smtc.h
+++ b/arch/mips/include/asm/smtc.h
@@ -14,8 +14,8 @@
extern unsigned int smtc_status;
-#define SMTC_TLB_SHARED 0x00000001
-#define SMTC_MTC_ACTIVE 0x00000002
+#define SMTC_TLB_SHARED 0x00000001
+#define SMTC_MTC_ACTIVE 0x00000002
/*
* TLB/ASID Management information
diff --git a/arch/mips/include/asm/sn/addrs.h b/arch/mips/include/asm/sn/addrs.h
index 2367b56dcde..66814f8ba8e 100644
--- a/arch/mips/include/asm/sn/addrs.h
+++ b/arch/mips/include/asm/sn/addrs.h
@@ -88,8 +88,8 @@
#define SWIN_SIZE_BITS 24
#define SWIN_SIZE (UINT64_CAST 1 << 24)
-#define SWIN_SIZEMASK (SWIN_SIZE - 1)
-#define SWIN_WIDGET_MASK 0xF
+#define SWIN_SIZEMASK (SWIN_SIZE - 1)
+#define SWIN_WIDGET_MASK 0xF
/*
* Convert smallwindow address to xtalk address.
@@ -97,8 +97,8 @@
* 'addr' can be physical or virtual address, but will be converted
* to Xtalk address in the range 0 -> SWINZ_SIZEMASK
*/
-#define SWIN_WIDGETADDR(addr) ((addr) & SWIN_SIZEMASK)
-#define SWIN_WIDGETNUM(addr) (((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
+#define SWIN_WIDGETADDR(addr) ((addr) & SWIN_SIZEMASK)
+#define SWIN_WIDGETNUM(addr) (((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)
/*
* Verify if addr belongs to small window address on node with "nasid"
*
@@ -108,7 +108,7 @@
*
*
*/
-#define NODE_SWIN_ADDR(nasid, addr) \
+#define NODE_SWIN_ADDR(nasid, addr) \
(((addr) >= NODE_SWIN_BASE(nasid, 0)) && \
((addr) < (NODE_SWIN_BASE(nasid, HUB_NUM_WIDGET) + SWIN_SIZE)\
))
@@ -150,7 +150,7 @@
#endif
-#define HUB_REGISTER_WIDGET 1
+#define HUB_REGISTER_WIDGET 1
#define IALIAS_BASE NODE_SWIN_BASE(0, HUB_REGISTER_WIDGET)
#define IALIAS_SIZE 0x800000 /* 8 Megabytes */
#define IS_IALIAS(_a) (((_a) >= IALIAS_BASE) && \
@@ -174,16 +174,16 @@
* WARNING: They won't work in assembler.
*
* BDDIR_ENTRY_LO returns the address of the low double-word of the dir
- * entry corresponding to a physical (Cac or Uncac) address.
+ * entry corresponding to a physical (Cac or Uncac) address.
* BDDIR_ENTRY_HI returns the address of the high double-word of the entry.
* BDPRT_ENTRY returns the address of the double-word protection entry
- * corresponding to the page containing the physical address.
+ * corresponding to the page containing the physical address.
* BDPRT_ENTRY_S Stores the value into the protection entry.
* BDPRT_ENTRY_L Load the value from the protection entry.
* BDECC_ENTRY returns the address of the ECC byte corresponding to a
- * double-word at a specified physical address.
+ * double-word at a specified physical address.
* BDECC_ENTRY_H returns the address of the two ECC bytes corresponding to a
- * quad-word at a specified physical address.
+ * quad-word at a specified physical address.
*/
#define NODE_BDOOR_BASE(_n) (NODE_HSPEC_BASE(_n) + (NODE_ADDRSPACE_SIZE/2))
@@ -226,11 +226,11 @@
#define BDADDR_IS_DIR(_ba) ((UINT64_CAST (_ba) & 0x200) != 0)
#define BDADDR_IS_PRT(_ba) ((UINT64_CAST (_ba) & 0x200) == 0)
-#define BDDIR_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+#define BDDIR_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
(UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2 | \
(UINT64_CAST(_ba) & 0x1f << 4) << 3)
-#define BDPRT_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
+#define BDPRT_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
(UINT64_CAST(_ba) & BDDIR_UPPER_MASK)<<2)
#define BDECC_TO_MEM(_ba) (UINT64_CAST (_ba) & NASID_MASK | \
@@ -251,23 +251,23 @@
/*
* WARNING:
* When certain Hub chip workaround are defined, it's not sufficient
- * to dereference the *_HUB_ADDR() macros. You should instead use
+ * to dereference the *_HUB_ADDR() macros. You should instead use
* HUB_L() and HUB_S() if you must deal with pointers to hub registers.
* Otherwise, the recommended approach is to use *_HUB_L() and *_HUB_S().
* They're always safe.
*/
#define LOCAL_HUB_ADDR(_x) (HUBREG_CAST (IALIAS_BASE + (_x)))
-#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
+#define REMOTE_HUB_ADDR(_n, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
0x800000 + (_x)))
#ifdef CONFIG_SGI_IP27
-#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
+#define REMOTE_HUB_PI_ADDR(_n, _sn, _x) (HUBREG_CAST (NODE_SWIN_BASE(_n, 1) + \
0x800000 + (_x)))
#endif /* CONFIG_SGI_IP27 */
#ifndef __ASSEMBLY__
#define HUB_L(_a) *(_a)
-#define HUB_S(_a, _d) *(_a) = (_d)
+#define HUB_S(_a, _d) *(_a) = (_d)
#define LOCAL_HUB_L(_r) HUB_L(LOCAL_HUB_ADDR(_r))
#define LOCAL_HUB_S(_r, _d) HUB_S(LOCAL_HUB_ADDR(_r), (_d))
@@ -330,14 +330,14 @@
#define KLI_LAUNCH 0 /* Dir. entries */
#define KLI_KLCONFIG 1
-#define KLI_NMI 2
+#define KLI_NMI 2
#define KLI_GDA 3
#define KLI_FREEMEM 4
-#define KLI_SYMMON_STK 5
+#define KLI_SYMMON_STK 5
#define KLI_PI_ERROR 6
#define KLI_KERN_VARS 7
-#define KLI_KERN_XP 8
-#define KLI_KERN_PARTID 9
+#define KLI_KERN_XP 8
+#define KLI_KERN_PARTID 9
#ifndef __ASSEMBLY__
@@ -350,8 +350,8 @@
#define KLD_SYMMON_STK(nasid) (KLD_BASE(nasid) + KLI_SYMMON_STK)
#define KLD_FREEMEM(nasid) (KLD_BASE(nasid) + KLI_FREEMEM)
#define KLD_KERN_VARS(nasid) (KLD_BASE(nasid) + KLI_KERN_VARS)
-#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
-#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
+#define KLD_KERN_XP(nasid) (KLD_BASE(nasid) + KLI_KERN_XP)
+#define KLD_KERN_PARTID(nasid) (KLD_BASE(nasid) + KLI_KERN_PARTID)
#define LAUNCH_OFFSET(nasid, slice) \
(KLD_LAUNCH(nasid)->offset + \
@@ -365,7 +365,7 @@
KLD_NMI(nasid)->stride * (slice))
#define NMI_ADDR(nasid, slice) \
TO_NODE_UNCAC((nasid), SN_NMI_OFFSET(nasid, slice))
-#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
+#define NMI_SIZE(nasid) KLD_NMI(nasid)->size
#define KLCONFIG_OFFSET(nasid) KLD_KLCONFIG(nasid)->offset
#define KLCONFIG_ADDR(nasid) \
@@ -390,8 +390,8 @@
/* loading symmon 4k below UNIX. the arcs loader needs the topaddr for a
* relocatable program
*/
-#define UNIX_DEBUG_LOADADDR 0x300000
-#define SYMMON_LOADADDR(nasid) \
+#define UNIX_DEBUG_LOADADDR 0x300000
+#define SYMMON_LOADADDR(nasid) \
TO_NODE(nasid, PHYS_TO_K0(UNIX_DEBUG_LOADADDR - 0x1000))
#define FREEMEM_OFFSET(nasid) KLD_FREEMEM(nasid)->offset
@@ -420,8 +420,8 @@
#define KERN_VARS_ADDR(nasid) KLD_KERN_VARS(nasid)->pointer
#define KERN_VARS_SIZE(nasid) KLD_KERN_VARS(nasid)->size
-#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
-#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
+#define KERN_XP_ADDR(nasid) KLD_KERN_XP(nasid)->pointer
+#define KERN_XP_SIZE(nasid) KLD_KERN_XP(nasid)->size
#define GPDA_ADDR(nasid) TO_NODE_CAC(nasid, GPDA_OFFSET)
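The small-window macros near the top of this file split a node-local address into a widget number (bits 27:24) and a 16 MB offset inside that widget. A standalone version of that decomposition is sketched below, with UINT64_CAST approximated by a plain uint64_t cast (an assumption; its real definition is elsewhere in these headers) and an address chosen purely for illustration.

/*
 * Sketch: decomposing a small-window address per the macros above.
 * UINT64_CAST is approximated by a plain cast here (assumption).
 */
#include <stdint.h>
#include <stdio.h>

#define SWIN_SIZE_BITS		24
#define SWIN_SIZE		((uint64_t)1 << 24)
#define SWIN_SIZEMASK		(SWIN_SIZE - 1)
#define SWIN_WIDGET_MASK	0xF

#define SWIN_WIDGETADDR(addr)	((addr) & SWIN_SIZEMASK)
#define SWIN_WIDGETNUM(addr)	(((addr) >> SWIN_SIZE_BITS) & SWIN_WIDGET_MASK)

int main(void)
{
	/* Example only: widget 1, offset 0x800000 within the widget. */
	uint64_t addr = ((uint64_t)1 << SWIN_SIZE_BITS) | 0x800000;

	printf("widget %llu, offset %#llx\n",
	       (unsigned long long)SWIN_WIDGETNUM(addr),
	       (unsigned long long)SWIN_WIDGETADDR(addr));
	return 0;
}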
diff --git a/arch/mips/include/asm/sn/agent.h b/arch/mips/include/asm/sn/agent.h
index dc81114d474..e33d0929301 100644
--- a/arch/mips/include/asm/sn/agent.h
+++ b/arch/mips/include/asm/sn/agent.h
@@ -25,21 +25,21 @@
*/
#if defined(CONFIG_SGI_IP27)
-#define HUB_NIC_ADDR(_cpuid) \
- REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)), \
+#define HUB_NIC_ADDR(_cpuid) \
+ REMOTE_HUB_ADDR(COMPACT_TO_NASID_NODEID(cpu_to_node(_cpuid)), \
MD_MLAN_CTL)
#endif
-#define SET_HUB_NIC(_my_cpuid, _val) \
+#define SET_HUB_NIC(_my_cpuid, _val) \
(HUB_S(HUB_NIC_ADDR(_my_cpuid), (_val)))
-#define SET_MY_HUB_NIC(_v) \
+#define SET_MY_HUB_NIC(_v) \
SET_HUB_NIC(cpuid(), (_v))
-#define GET_HUB_NIC(_my_cpuid) \
+#define GET_HUB_NIC(_my_cpuid) \
(HUB_L(HUB_NIC_ADDR(_my_cpuid)))
-#define GET_MY_HUB_NIC() \
+#define GET_MY_HUB_NIC() \
GET_HUB_NIC(cpuid())
#endif /* _ASM_SGI_SN_AGENT_H */
diff --git a/arch/mips/include/asm/sn/arch.h b/arch/mips/include/asm/sn/arch.h
index bd75945e10f..471e6870d87 100644
--- a/arch/mips/include/asm/sn/arch.h
+++ b/arch/mips/include/asm/sn/arch.h
@@ -28,14 +28,14 @@ typedef u64 hubreg_t;
#define INVALID_CNODEID (cnodeid_t)-1
#define INVALID_PNODEID (pnodeid_t)-1
#define INVALID_MODULE (moduleid_t)-1
-#define INVALID_PARTID (partid_t)-1
+#define INVALID_PARTID (partid_t)-1
extern nasid_t get_nasid(void);
extern cnodeid_t get_cpu_cnode(cpuid_t);
extern int get_cpu_slice(cpuid_t);
/*
- * NO ONE should access these arrays directly. The only reason we refer to
+ * NO ONE should access these arrays directly. The only reason we refer to
* them here is to avoid the procedure call that would be required in the
* macros below. (Really want private data members here :-)
*/
@@ -44,12 +44,12 @@ extern nasid_t compact_to_nasid_node[MAX_COMPACT_NODES];
/*
* These macros are used by various parts of the kernel to convert
- * between the three different kinds of node numbering. At least some
+ * between the three different kinds of node numbering. At least some
* of them may change to procedure calls in the future, but the macros
* will continue to work. Don't use the arrays above directly.
*/
-#define NASID_TO_REGION(nnode) \
+#define NASID_TO_REGION(nnode) \
((nnode) >> \
(is_fine_dirmode() ? NASID_TO_FINEREG_SHFT : NASID_TO_COARSEREG_SHFT))
diff --git a/arch/mips/include/asm/sn/fru.h b/arch/mips/include/asm/sn/fru.h
index b3e3606723b..bbb83257c8e 100644
--- a/arch/mips/include/asm/sn/fru.h
+++ b/arch/mips/include/asm/sn/fru.h
@@ -21,24 +21,24 @@ typedef struct kf_mem_s {
* is this necessary ?
*/
confidence_t km_dimm[MAX_DIMMS];
- /* confidence level that dimm[i] is bad
+ /* confidence level that dimm[i] is bad
*I think this is the right number
*/
} kf_mem_t;
typedef struct kf_cpu_s {
- confidence_t kc_confidence; /* confidence level that cpu is bad */
- confidence_t kc_icache; /* confidence level that instr. cache is bad */
- confidence_t kc_dcache; /* confidence level that data cache is bad */
- confidence_t kc_scache; /* confidence level that sec. cache is bad */
+ confidence_t kc_confidence; /* confidence level that cpu is bad */
+ confidence_t kc_icache; /* confidence level that instr. cache is bad */
+ confidence_t kc_dcache; /* confidence level that data cache is bad */
+ confidence_t kc_scache; /* confidence level that sec. cache is bad */
confidence_t kc_sysbus; /* confidence level that sysad/cmd/state bus is bad */
} kf_cpu_t;
typedef struct kf_pci_bus_s {
confidence_t kpb_belief; /* confidence level that the pci bus is bad */
confidence_t kpb_pcidev_belief[MAX_PCIDEV];
- /* confidence level that the pci dev is bad */
+ /* confidence level that the pci dev is bad */
} kf_pci_bus_t;
#endif /* __ASM_SN_FRU_H */
diff --git a/arch/mips/include/asm/sn/gda.h b/arch/mips/include/asm/sn/gda.h
index 9cb6ff77091..85fa1b5f639 100644
--- a/arch/mips/include/asm/sn/gda.h
+++ b/arch/mips/include/asm/sn/gda.h
@@ -8,7 +8,7 @@
* Copyright (C) 1992 - 1997, 2000 Silicon Graphics, Inc.
*
* gda.h -- Contains the data structure for the global data area,
- * The GDA contains information communicated between the
+ * The GDA contains information communicated between the
* PROM, SYMMON, and the kernel.
*/
#ifndef _ASM_SN_GDA_H
@@ -23,8 +23,8 @@
*
* Version # | Change
* -------------+-------------------------------------------------------
- * 1 | Initial SN0 version
- * 2 | Prom sets g_partid field to the partition number. 0 IS
+ * 1 | Initial SN0 version
+ * 2 | Prom sets g_partid field to the partition number. 0 IS
* | a valid partition #.
*/
@@ -60,7 +60,7 @@ typedef struct gda {
/* Pointer to a mask of nodes with copies
* of the kernel. */
char g_padding[56]; /* pad out to 128 bytes */
- nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
+ nasid_t g_nasidtable[MAX_COMPACT_NODES]; /* NASID of each node,
* indexed by cnodeid.
*/
} gda_t;
@@ -74,7 +74,7 @@ typedef struct gda {
* revisions assume GDA is NOT set up, and read partition
* information from the board info.
*/
-#define PART_GDA_VERSION 2
+#define PART_GDA_VERSION 2
/*
* The following requests can be sent to the PROM during startup.
@@ -83,17 +83,17 @@ typedef struct gda {
#define PROMOP_MAGIC 0x0ead0000
#define PROMOP_MAGIC_MASK 0x0fff0000
-#define PROMOP_BIST_SHIFT 11
-#define PROMOP_BIST_MASK (0x3 << 11)
+#define PROMOP_BIST_SHIFT 11
+#define PROMOP_BIST_MASK (0x3 << 11)
#define PROMOP_REG PI_ERR_STACK_ADDR_A
#define PROMOP_INVALID (PROMOP_MAGIC | 0x00)
-#define PROMOP_HALT (PROMOP_MAGIC | 0x10)
-#define PROMOP_POWERDOWN (PROMOP_MAGIC | 0x20)
-#define PROMOP_RESTART (PROMOP_MAGIC | 0x30)
-#define PROMOP_REBOOT (PROMOP_MAGIC | 0x40)
-#define PROMOP_IMODE (PROMOP_MAGIC | 0x50)
+#define PROMOP_HALT (PROMOP_MAGIC | 0x10)
+#define PROMOP_POWERDOWN (PROMOP_MAGIC | 0x20)
+#define PROMOP_RESTART (PROMOP_MAGIC | 0x30)
+#define PROMOP_REBOOT (PROMOP_MAGIC | 0x40)
+#define PROMOP_IMODE (PROMOP_MAGIC | 0x50)
#define PROMOP_CMD_MASK 0x00f0
#define PROMOP_OPTIONS_MASK 0xfff0
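Each PROMOP_* request above is the magic word with a command code in bits 7:4, and the accompanying masks allow the word to be validated and decoded, presumably on the PROM side. A small self-contained decode sketch using only the constants from this hunk:

/* Sketch: validating and decoding a PROMOP_* request word. */
#include <stdint.h>
#include <stdio.h>

#define PROMOP_MAGIC		0x0ead0000
#define PROMOP_MAGIC_MASK	0x0fff0000
#define PROMOP_CMD_MASK		0x00f0

#define PROMOP_POWERDOWN	(PROMOP_MAGIC | 0x20)
#define PROMOP_REBOOT		(PROMOP_MAGIC | 0x40)

static const char *promop_name(uint32_t req)
{
	if ((req & PROMOP_MAGIC_MASK) != PROMOP_MAGIC)
		return "invalid";
	switch (req & PROMOP_CMD_MASK) {
	case 0x20: return "powerdown";
	case 0x40: return "reboot";
	default:   return "other";
	}
}

int main(void)
{
	printf("%s\n", promop_name(PROMOP_REBOOT));	/* prints "reboot"  */
	printf("%s\n", promop_name(0x12345678));	/* prints "invalid" */
	return 0;
}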
diff --git a/arch/mips/include/asm/sn/intr.h b/arch/mips/include/asm/sn/intr.h
index 6718b644b97..fc134819395 100644
--- a/arch/mips/include/asm/sn/intr.h
+++ b/arch/mips/include/asm/sn/intr.h
@@ -14,8 +14,8 @@
#define INT_PEND0_BASELVL 0
#define INT_PEND1_BASELVL 64
-#define N_INTPENDJUNK_BITS 8
-#define INTPENDJUNK_CLRBIT 0x80
+#define N_INTPENDJUNK_BITS 8
+#define INTPENDJUNK_CLRBIT 0x80
/*
* Macros to manipulate the interrupt register on the calling hub chip.
@@ -32,7 +32,7 @@
* We do an uncached load of the int_pend0 register to ensure this.
*/
-#define LOCAL_HUB_CLR_INTR(level) \
+#define LOCAL_HUB_CLR_INTR(level) \
do { \
LOCAL_HUB_S(PI_INT_PEND_MOD, (level)); \
LOCAL_HUB_L(PI_INT_PEND0); \
@@ -40,7 +40,7 @@ do { \
#define REMOTE_HUB_CLR_INTR(hub, level) \
do { \
- nasid_t __hub = (hub); \
+ nasid_t __hub = (hub); \
\
REMOTE_HUB_S(__hub, PI_INT_PEND_MOD, (level)); \
REMOTE_HUB_L(__hub, PI_INT_PEND0); \
@@ -102,8 +102,8 @@ do { \
#define LLP_PFAIL_INTR_A 41 /* see ml/SN/SN0/sysctlr.c */
#define LLP_PFAIL_INTR_B 42
-#define TLB_INTR_A 43 /* used for tlb flush random */
-#define TLB_INTR_B 44
+#define TLB_INTR_A 43 /* used for tlb flush random */
+#define TLB_INTR_B 44
#define IP27_INTR_0 45 /* Reserved for PROM use */
#define IP27_INTR_1 46 /* do not use in Kernel */
@@ -116,8 +116,8 @@ do { \
#define BRIDGE_ERROR_INTR 53 /* Setup by PROM to catch */
/* Bridge Errors */
-#define DEBUG_INTR_A 54
-#define DEBUG_INTR_B 55 /* Used by symmon to stop all cpus */
+#define DEBUG_INTR_A 54
+#define DEBUG_INTR_B 55 /* Used by symmon to stop all cpus */
#define IO_ERROR_INTR 57 /* Setup by PROM */
#define CLK_ERR_INTR 58
#define COR_ERR_INTR_A 59
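/*
 * Illustrative sketch, not part of the patch above: acknowledging one of
 * the interrupt levels defined here with the CLR_INTR macros.  The
 * uncached read-back built into the macros ensures the PI_INT_PEND_MOD
 * write has reached the hub before the handler returns; the handler
 * shape itself is an assumption.
 */
static void ack_tlb_flush_irq(nasid_t peer)
{
	LOCAL_HUB_CLR_INTR(TLB_INTR_A);			/* calling hub */
	REMOTE_HUB_CLR_INTR(peer, TLB_INTR_A);		/* hub on another node */
}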
diff --git a/arch/mips/include/asm/sn/io.h b/arch/mips/include/asm/sn/io.h
index 24c6775fbb0..d5174d04538 100644
--- a/arch/mips/include/asm/sn/io.h
+++ b/arch/mips/include/asm/sn/io.h
@@ -31,7 +31,7 @@
#define HUB_PIO_MAP_TO_MEM 0
#define HUB_PIO_MAP_TO_IO 1
-#define IIO_ITTE_INVALID_WIDGET 3 /* an invalid widget */
+#define IIO_ITTE_INVALID_WIDGET 3 /* an invalid widget */
#define IIO_ITTE_PUT(nasid, bigwin, io_or_mem, widget, addr) \
REMOTE_HUB_S((nasid), IIO_ITTE(bigwin), \
@@ -52,7 +52,7 @@
* value _x is expected to be a widget number in the range
* 0, 8 - 0xF
*/
-#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
+#define IIO_IOPRB(_x) (IIO_IOPRB_0 + ( ( (_x) < HUB_WIDGET_ID_MIN ? \
(_x) : \
(_x) - (HUB_WIDGET_ID_MIN-1)) << 3) )
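/*
 * Illustrative expansion, not part of the patch above: assuming
 * HUB_WIDGET_ID_MIN is 8, IIO_IOPRB() maps widget 0 and widgets 8-0xF
 * onto consecutive 8-byte register slots, squeezing out the unused
 * widget numbers 1-7:
 *
 *	IIO_IOPRB(0x0) == IIO_IOPRB_0 + (0x0 << 3)		(+0x00)
 *	IIO_IOPRB(0x8) == IIO_IOPRB_0 + ((0x8 - 7) << 3)	(+0x08)
 *	IIO_IOPRB(0xF) == IIO_IOPRB_0 + ((0xF - 7) << 3)	(+0x40)
 */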
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index 099677774d7..e33f0363235 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -62,8 +62,8 @@ struct ioc3_sioregs {
volatile u8 fill3[0x170 - 0x169 - 1];
- struct ioc3_uartregs uartb; /* 0x20170 */
- struct ioc3_uartregs uarta; /* 0x20178 */
+ struct ioc3_uartregs uartb; /* 0x20170 */
+ struct ioc3_uartregs uarta; /* 0x20178 */
};
/* Register layout of IOC3 in configuration space. */
@@ -106,7 +106,7 @@ struct ioc3 {
volatile u32 ppbr_l_b; /* 0x00094 */
volatile u32 ppcr_b; /* 0x00098 */
- /* Keyboard and Mouse Registers */
+ /* Keyboard and Mouse Registers */
volatile u32 km_csr; /* 0x0009c */
volatile u32 k_rd; /* 0x000a0 */
volatile u32 m_rd; /* 0x000a4 */
@@ -208,7 +208,7 @@ struct ioc3_erxbuf {
/*
* Ethernet TX Descriptor
*/
-#define ETXD_DATALEN 104
+#define ETXD_DATALEN 104
struct ioc3_etxd {
u32 cmd; /* command field */
u32 bufcnt; /* buffer counts field */
diff --git a/arch/mips/include/asm/sn/klconfig.h b/arch/mips/include/asm/sn/klconfig.h
index fe02900b930..467c313d576 100644
--- a/arch/mips/include/asm/sn/klconfig.h
+++ b/arch/mips/include/asm/sn/klconfig.h
@@ -8,8 +8,8 @@
* Copyright (C) 1992 - 1997, 1999, 2000 Silicon Graphics, Inc.
* Copyright (C) 1999, 2000 by Ralf Baechle
*/
-#ifndef _ASM_SN_KLCONFIG_H
-#define _ASM_SN_KLCONFIG_H
+#ifndef _ASM_SN_KLCONFIG_H
+#define _ASM_SN_KLCONFIG_H
/*
* The KLCONFIG structures store info about the various BOARDs found
@@ -20,11 +20,11 @@
/*
* WARNING:
* Certain assembly language routines (notably xxxxx.s) in the IP27PROM
- * will depend on the format of the data structures in this file. In
- * most cases, rearranging the fields can seriously break things.
- * Adding fields in the beginning or middle can also break things.
- * Add fields if necessary, to the end of a struct in such a way
- * that offsets of existing fields do not change.
+ * will depend on the format of the data structures in this file. In
+ * most cases, rearranging the fields can seriously break things.
+ * Adding fields in the beginning or middle can also break things.
+ * Add fields if necessary, to the end of a struct in such a way
+ * that offsets of existing fields do not change.
*/
#include <linux/types.h>
@@ -35,7 +35,7 @@
#include <asm/sn/sn0/addrs.h>
//#include <sys/SN/router.h>
// XXX Stolen from <sys/SN/router.h>:
-#define MAX_ROUTER_PORTS (6) /* Max. number of ports on a router */
+#define MAX_ROUTER_PORTS (6) /* Max. number of ports on a router */
#include <asm/sn/fru.h>
//#include <sys/graph.h>
//#include <sys/xtalk/xbow.h>
@@ -63,14 +63,14 @@
typedef u64 nic_t;
-#define KLCFGINFO_MAGIC 0xbeedbabe
+#define KLCFGINFO_MAGIC 0xbeedbabe
typedef s32 klconf_off_t;
/*
* Some IMPORTANT OFFSETS. These are the offsets on all NODES.
*/
-#define MAX_MODULE_ID 255
+#define MAX_MODULE_ID 255
#define SIZE_PAD 4096 /* 4k padding for structures */
/*
* 1 NODE brd, 2 Router brd (1 8p, 1 meta), 6 Widgets,
@@ -86,25 +86,25 @@ typedef s32 klconf_off_t;
/* All bits in this field are currently used. Try the pad fields if
you need more flag bits */
-#define ENABLE_BOARD 0x01
-#define FAILED_BOARD 0x02
-#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
+#define ENABLE_BOARD 0x01
+#define FAILED_BOARD 0x02
+#define DUPLICATE_BOARD 0x04 /* Boards like midplanes/routers which
are discovered twice. Use one of them */
#define VISITED_BOARD 0x08 /* Used for compact hub numbering. */
-#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
+#define LOCAL_MASTER_IO6 0x10 /* master io6 for that node */
#define GLOBAL_MASTER_IO6 0x20
-#define THIRD_NIC_PRESENT 0x40 /* for future use */
-#define SECOND_NIC_PRESENT 0x80 /* addons like MIO are present */
+#define THIRD_NIC_PRESENT 0x40 /* for future use */
+#define SECOND_NIC_PRESENT 0x80 /* addons like MIO are present */
/* klinfo->flags fields */
-#define KLINFO_ENABLE 0x01 /* This component is enabled */
-#define KLINFO_FAILED 0x02 /* This component failed */
-#define KLINFO_DEVICE 0x04 /* This component is a device */
-#define KLINFO_VISITED 0x08 /* This component has been visited */
-#define KLINFO_CONTROLLER 0x10 /* This component is a device controller */
-#define KLINFO_INSTALL 0x20 /* Install a driver */
-#define KLINFO_HEADLESS 0x40 /* Headless (or hubless) component */
+#define KLINFO_ENABLE 0x01 /* This component is enabled */
+#define KLINFO_FAILED 0x02 /* This component failed */
+#define KLINFO_DEVICE 0x04 /* This component is a device */
+#define KLINFO_VISITED 0x08 /* This component has been visited */
+#define KLINFO_CONTROLLER 0x10 /* This component is a device controller */
+#define KLINFO_INSTALL 0x20 /* Install a driver */
+#define KLINFO_HEADLESS 0x40 /* Headless (or hubless) component */
#define IS_CONSOLE_IOC3(i) ((((klinfo_t *)i)->flags) & KLINFO_INSTALL)
#define GB2 0x80000000
@@ -116,30 +116,30 @@ typedef s32 klconf_off_t;
is used in the code to allocate various areas.
*/
-#define BOARD_STRUCT 0
-#define COMPONENT_STRUCT 1
-#define ERRINFO_STRUCT 2
-#define KLMALLOC_TYPE_MAX (ERRINFO_STRUCT + 1)
-#define DEVICE_STRUCT 3
+#define BOARD_STRUCT 0
+#define COMPONENT_STRUCT 1
+#define ERRINFO_STRUCT 2
+#define KLMALLOC_TYPE_MAX (ERRINFO_STRUCT + 1)
+#define DEVICE_STRUCT 3
typedef struct console_s {
- unsigned long uart_base;
- unsigned long config_base;
- unsigned long memory_base;
+ unsigned long uart_base;
+ unsigned long config_base;
+ unsigned long memory_base;
short baud;
short flag;
int type;
nasid_t nasid;
char wid;
- char npci;
+ char npci;
nic_t baseio_nic;
} console_t;
typedef struct klc_malloc_hdr {
- klconf_off_t km_base;
- klconf_off_t km_limit;
- klconf_off_t km_current;
+ klconf_off_t km_base;
+ klconf_off_t km_limit;
+ klconf_off_t km_current;
} klc_malloc_hdr_t;
/* Functions/macros needed to use this structure */
@@ -148,7 +148,7 @@ typedef struct kl_config_hdr {
u64 ch_magic; /* set this to KLCFGINFO_MAGIC */
u32 ch_version; /* structure version number */
klconf_off_t ch_malloc_hdr_off; /* offset of ch_malloc_hdr */
- klconf_off_t ch_cons_off; /* offset of ch_cons */
+ klconf_off_t ch_cons_off; /* offset of ch_cons */
klconf_off_t ch_board_info; /* the link list of boards */
console_t ch_cons_info; /* address info of the console */
klc_malloc_hdr_t ch_malloc_hdr[KLMALLOC_TYPE_MAX];
@@ -157,27 +157,27 @@ typedef struct kl_config_hdr {
} kl_config_hdr_t;
-#define KL_CONFIG_HDR(_nasid) ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
+#define KL_CONFIG_HDR(_nasid) ((kl_config_hdr_t *)(KLCONFIG_ADDR(_nasid)))
#define KL_CONFIG_INFO_OFFSET(_nasid) \
- (KL_CONFIG_HDR(_nasid)->ch_board_info)
+ (KL_CONFIG_HDR(_nasid)->ch_board_info)
#define KL_CONFIG_INFO_SET_OFFSET(_nasid, _off) \
- (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
+ (KL_CONFIG_HDR(_nasid)->ch_board_info = (_off))
-#define KL_CONFIG_INFO(_nasid) \
- (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ? \
+#define KL_CONFIG_INFO(_nasid) \
+ (lboard_t *)((KL_CONFIG_HDR(_nasid)->ch_board_info) ? \
NODE_OFFSET_TO_K1((_nasid), KL_CONFIG_HDR(_nasid)->ch_board_info) : \
0)
#define KL_CONFIG_MAGIC(_nasid) (KL_CONFIG_HDR(_nasid)->ch_magic)
#define KL_CONFIG_CHECK_MAGIC(_nasid) \
- (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
+ (KL_CONFIG_HDR(_nasid)->ch_magic == KLCFGINFO_MAGIC)
#define KL_CONFIG_HDR_INIT_MAGIC(_nasid) \
- (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
+ (KL_CONFIG_HDR(_nasid)->ch_magic = KLCFGINFO_MAGIC)
/* --- New Macros for the changed kl_config_hdr_t structure --- */
-#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
+#define PTR_CH_MALLOC_HDR(_k) ((klc_malloc_hdr_t *)\
((unsigned long)_k + (_k->ch_malloc_hdr_off)))
#define KL_CONFIG_CH_MALLOC_HDR(_n) PTR_CH_MALLOC_HDR(KL_CONFIG_HDR(_n))
@@ -190,29 +190,29 @@ typedef struct kl_config_hdr {
/* ------------------------------------------------------------- */
#define KL_CONFIG_INFO_START(_nasid) \
- (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
+ (klconf_off_t)(KLCONFIG_OFFSET(_nasid) + sizeof(kl_config_hdr_t))
#define KL_CONFIG_BOARD_NASID(_brd) ((_brd)->brd_nasid)
#define KL_CONFIG_BOARD_SET_NEXT(_brd, _off) ((_brd)->brd_next = (_off))
-#define KL_CONFIG_DUPLICATE_BOARD(_brd) ((_brd)->brd_flags & DUPLICATE_BOARD)
+#define KL_CONFIG_DUPLICATE_BOARD(_brd) ((_brd)->brd_flags & DUPLICATE_BOARD)
-#define XBOW_PORT_TYPE_HUB(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
-#define XBOW_PORT_TYPE_IO(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
+#define XBOW_PORT_TYPE_HUB(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_HUB)
+#define XBOW_PORT_TYPE_IO(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_IO)
-#define XBOW_PORT_IS_ENABLED(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
-#define XBOW_PORT_NASID(_xbowp, _link) \
- ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
+#define XBOW_PORT_IS_ENABLED(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_flag & XBOW_PORT_ENABLE)
+#define XBOW_PORT_NASID(_xbowp, _link) \
+ ((_xbowp)->xbow_port_info[(_link) - BASE_XBOW_PORT].port_nasid)
-#define XBOW_PORT_IO 0x1
-#define XBOW_PORT_HUB 0x2
+#define XBOW_PORT_IO 0x1
+#define XBOW_PORT_HUB 0x2
#define XBOW_PORT_ENABLE 0x4
-#define SN0_PORT_FENCE_SHFT 0
-#define SN0_PORT_FENCE_MASK (1 << SN0_PORT_FENCE_SHFT)
+#define SN0_PORT_FENCE_SHFT 0
+#define SN0_PORT_FENCE_MASK (1 << SN0_PORT_FENCE_SHFT)
/*
* The KLCONFIG area is organized as a LINKED LIST of BOARDs. A BOARD
@@ -242,28 +242,28 @@ typedef struct kl_config_hdr {
*
KLCONFIG
- +------------+ +------------+ +------------+ +------------+
- | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
- +------------+ | +------------+ | +------------+ | +------------+
- | board info | | | board info | | |errinfo,bptr| | | board info |
- +------------+ | +------------+ | +------------+ | +------------+
- | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
- +------------+ +------------+ +------------+ +------------+
+ +------------+ +------------+ +------------+ +------------+
+ | lboard | +-->| lboard | +-->| rboard | +-->| lboard |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | board info | | | board info | | |errinfo,bptr| | | board info |
+ +------------+ | +------------+ | +------------+ | +------------+
+ | offset |--+ | offset |--+ | offset |--+ |offset=NULL |
+ +------------+ +------------+ +------------+ +------------+
+------------+
| board info |
- +------------+ +--------------------------------+
+ +------------+ +--------------------------------+
| compt 1 |------>| type, rev, diaginfo, size ... | (CPU)
- +------------+ +--------------------------------+
+ +------------+ +--------------------------------+
| compt 2 |--+
- +------------+ | +--------------------------------+
- | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
- +------------+ +--------------------------------+
+ +------------+ | +--------------------------------+
+ | ... | +--->| type, rev, diaginfo, size ... | (MEM_BANK)
+ +------------+ +--------------------------------+
| errinfo |--+
- +------------+ | +--------------------------------+
- +--->|r/l brd errinfo,compt err flags |
- +--------------------------------+
+ +------------+ | +--------------------------------+
+ +--->|r/l brd errinfo,compt err flags |
+ +--------------------------------+
*
* Each BOARD consists of COMPONENTs and the BOARD structure has
@@ -311,7 +311,7 @@ typedef struct kl_config_hdr {
*/
#define KL_CPU_R4000 0x1 /* Standard R4000 */
#define KL_CPU_TFP 0x2 /* TFP processor */
-#define KL_CPU_R10000 0x3 /* R10000 (T5) */
+#define KL_CPU_R10000 0x3 /* R10000 (T5) */
#define KL_CPU_NONE (-1) /* no cpu present in slot */
/*
@@ -320,13 +320,13 @@ typedef struct kl_config_hdr {
#define KLCLASS_MASK 0xf0
#define KLCLASS_NONE 0x00
-#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
+#define KLCLASS_NODE 0x10 /* CPU, Memory and HUB board */
#define KLCLASS_CPU KLCLASS_NODE
-#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
+#define KLCLASS_IO 0x20 /* BaseIO, 4 ch SCSI, ethernet, FDDI
and the non-graphics widget boards */
-#define KLCLASS_ROUTER 0x30 /* Router board */
-#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
- so that we can record error info */
+#define KLCLASS_ROUTER 0x30 /* Router board */
+#define KLCLASS_MIDPLANE 0x40 /* We need to treat this as a board
+ so that we can record error info */
#define KLCLASS_GFX 0x50 /* graphics boards */
#define KLCLASS_PSEUDO_GFX 0x60 /* HDTV type cards that use a gfx
@@ -336,7 +336,7 @@ typedef struct kl_config_hdr {
#define KLCLASS_MAX 7 /* Bump this if a new CLASS is added */
#define KLTYPE_MAX 10 /* Bump this if a new CLASS is added */
-#define KLCLASS_UNKNOWN 0xf0
+#define KLCLASS_UNKNOWN 0xf0
#define KLCLASS(_x) ((_x) & KLCLASS_MASK)
@@ -353,36 +353,36 @@ typedef struct kl_config_hdr {
#define KLTYPE_WEIRDIO (KLCLASS_IO | 0x0)
#define KLTYPE_BASEIO (KLCLASS_IO | 0x1) /* IOC3, SuperIO, Bridge, SCSI */
-#define KLTYPE_IO6 KLTYPE_BASEIO /* Additional name */
+#define KLTYPE_IO6 KLTYPE_BASEIO /* Additional name */
#define KLTYPE_4CHSCSI (KLCLASS_IO | 0x2)
-#define KLTYPE_MSCSI KLTYPE_4CHSCSI /* Additional name */
-#define KLTYPE_ETHERNET (KLCLASS_IO | 0x3)
-#define KLTYPE_MENET KLTYPE_ETHERNET /* Additional name */
-#define KLTYPE_FDDI (KLCLASS_IO | 0x4)
+#define KLTYPE_MSCSI KLTYPE_4CHSCSI /* Additional name */
+#define KLTYPE_ETHERNET (KLCLASS_IO | 0x3)
+#define KLTYPE_MENET KLTYPE_ETHERNET /* Additional name */
+#define KLTYPE_FDDI (KLCLASS_IO | 0x4)
#define KLTYPE_UNUSED (KLCLASS_IO | 0x5) /* XXX UNUSED */
-#define KLTYPE_HAROLD (KLCLASS_IO | 0x6) /* PCI SHOE BOX */
+#define KLTYPE_HAROLD (KLCLASS_IO | 0x6) /* PCI SHOE BOX */
#define KLTYPE_PCI KLTYPE_HAROLD
-#define KLTYPE_VME (KLCLASS_IO | 0x7) /* Any 3rd party VME card */
-#define KLTYPE_MIO (KLCLASS_IO | 0x8)
-#define KLTYPE_FC (KLCLASS_IO | 0x9)
-#define KLTYPE_LINC (KLCLASS_IO | 0xA)
-#define KLTYPE_TPU (KLCLASS_IO | 0xB) /* Tensor Processing Unit */
-#define KLTYPE_GSN_A (KLCLASS_IO | 0xC) /* Main GSN board */
-#define KLTYPE_GSN_B (KLCLASS_IO | 0xD) /* Auxiliary GSN board */
+#define KLTYPE_VME (KLCLASS_IO | 0x7) /* Any 3rd party VME card */
+#define KLTYPE_MIO (KLCLASS_IO | 0x8)
+#define KLTYPE_FC (KLCLASS_IO | 0x9)
+#define KLTYPE_LINC (KLCLASS_IO | 0xA)
+#define KLTYPE_TPU (KLCLASS_IO | 0xB) /* Tensor Processing Unit */
+#define KLTYPE_GSN_A (KLCLASS_IO | 0xC) /* Main GSN board */
+#define KLTYPE_GSN_B (KLCLASS_IO | 0xD) /* Auxiliary GSN board */
#define KLTYPE_GFX (KLCLASS_GFX | 0x0) /* unknown graphics type */
#define KLTYPE_GFX_KONA (KLCLASS_GFX | 0x1) /* KONA graphics on IP27 */
#define KLTYPE_GFX_MGRA (KLCLASS_GFX | 0x3) /* MGRAS graphics on IP27 */
#define KLTYPE_WEIRDROUTER (KLCLASS_ROUTER | 0x0)
-#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
-#define KLTYPE_ROUTER2 KLTYPE_ROUTER /* Obsolete! */
+#define KLTYPE_ROUTER (KLCLASS_ROUTER | 0x1)
+#define KLTYPE_ROUTER2 KLTYPE_ROUTER /* Obsolete! */
#define KLTYPE_NULL_ROUTER (KLCLASS_ROUTER | 0x2)
#define KLTYPE_META_ROUTER (KLCLASS_ROUTER | 0x3)
#define KLTYPE_WEIRDMIDPLANE (KLCLASS_MIDPLANE | 0x0)
#define KLTYPE_MIDPLANE8 (KLCLASS_MIDPLANE | 0x1) /* 8 slot backplane */
-#define KLTYPE_MIDPLANE KLTYPE_MIDPLANE8
+#define KLTYPE_MIDPLANE KLTYPE_MIDPLANE8
#define KLTYPE_PBRICK_XBOW (KLCLASS_MIDPLANE | 0x2)
#define KLTYPE_IOBRICK (KLCLASS_IOBRICK | 0x0)
@@ -398,11 +398,11 @@ typedef struct kl_config_hdr {
* When bringup started nic names had not standardized and so we
* had to hard code. (For people interested in history.)
*/
-#define KLTYPE_XTHD (KLCLASS_PSEUDO_GFX | 0x9)
+#define KLTYPE_XTHD (KLCLASS_PSEUDO_GFX | 0x9)
#define KLTYPE_UNKNOWN (KLCLASS_UNKNOWN | 0xf)
-#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
+#define KLTYPE(_x) ((_x) & KLTYPE_MASK)
#define IS_MIO_PRESENT(l) ((l->brd_type == KLTYPE_BASEIO) && \
(l->brd_flags & SECOND_NIC_PRESENT))
#define IS_MIO_IOC3(l, n) (IS_MIO_PRESENT(l) && (n > 2))
@@ -416,33 +416,33 @@ typedef struct kl_config_hdr {
#define LOCAL_BOARD 1
#define REMOTE_BOARD 2
-#define LBOARD_STRUCT_VERSION 2
+#define LBOARD_STRUCT_VERSION 2
typedef struct lboard_s {
- klconf_off_t brd_next; /* Next BOARD */
- unsigned char struct_type; /* type of structure, local or remote */
- unsigned char brd_type; /* type+class */
- unsigned char brd_sversion; /* version of this structure */
- unsigned char brd_brevision; /* board revision */
- unsigned char brd_promver; /* board prom version, if any */
- unsigned char brd_flags; /* Enabled, Disabled etc */
- unsigned char brd_slot; /* slot number */
- unsigned short brd_debugsw; /* Debug switches */
- moduleid_t brd_module; /* module to which it belongs */
- partid_t brd_partition; /* Partition number */
- unsigned short brd_diagval; /* diagnostic value */
- unsigned short brd_diagparm; /* diagnostic parameter */
- unsigned char brd_inventory; /* inventory history */
- unsigned char brd_numcompts; /* Number of components */
- nic_t brd_nic; /* Number in CAN */
- nasid_t brd_nasid; /* passed parameter */
- klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
- klconf_off_t brd_errinfo; /* Board's error information */
+ klconf_off_t brd_next; /* Next BOARD */
+ unsigned char struct_type; /* type of structure, local or remote */
+ unsigned char brd_type; /* type+class */
+ unsigned char brd_sversion; /* version of this structure */
+ unsigned char brd_brevision; /* board revision */
+ unsigned char brd_promver; /* board prom version, if any */
+ unsigned char brd_flags; /* Enabled, Disabled etc */
+ unsigned char brd_slot; /* slot number */
+ unsigned short brd_debugsw; /* Debug switches */
+ moduleid_t brd_module; /* module to which it belongs */
+ partid_t brd_partition; /* Partition number */
+ unsigned short brd_diagval; /* diagnostic value */
+ unsigned short brd_diagparm; /* diagnostic parameter */
+ unsigned char brd_inventory; /* inventory history */
+ unsigned char brd_numcompts; /* Number of components */
+ nic_t brd_nic; /* Number in CAN */
+ nasid_t brd_nasid; /* passed parameter */
+ klconf_off_t brd_compts[MAX_COMPTS_PER_BRD]; /* pointers to COMPONENTS */
+ klconf_off_t brd_errinfo; /* Board's error information */
struct lboard_s *brd_parent; /* Logical parent for this brd */
- vertex_hdl_t brd_graph_link; /* vertex hdl to connect extern compts */
+ vertex_hdl_t brd_graph_link; /* vertex hdl to connect extern compts */
confidence_t brd_confidence; /* confidence that the board is bad */
- nasid_t brd_owner; /* who owns this board */
- unsigned char brd_nic_flags; /* To handle 8 more NICs */
+ nasid_t brd_owner; /* who owns this board */
+ unsigned char brd_nic_flags; /* To handle 8 more NICs */
char brd_name[32];
} lboard_t;
@@ -456,23 +456,23 @@ typedef struct lboard_s {
#define KLCF_CLASS(_brd) KLCLASS((_brd)->brd_type)
#define KLCF_TYPE(_brd) KLTYPE((_brd)->brd_type)
-#define KLCF_REMOTE(_brd) (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
+#define KLCF_REMOTE(_brd) (((_brd)->struct_type & LOCAL_BOARD) ? 0 : 1)
#define KLCF_NUM_COMPS(_brd) ((_brd)->brd_numcompts)
#define KLCF_MODULE_ID(_brd) ((_brd)->brd_module)
-#define KLCF_NEXT(_brd) \
- ((_brd)->brd_next ? \
+#define KLCF_NEXT(_brd) \
+ ((_brd)->brd_next ? \
(lboard_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), (_brd)->brd_next)):\
NULL)
-#define KLCF_COMP(_brd, _ndx) \
- (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), \
+#define KLCF_COMP(_brd, _ndx) \
+ (klinfo_t *)(NODE_OFFSET_TO_K1(NASID_GET(_brd), \
(_brd)->brd_compts[(_ndx)]))
#define KLCF_COMP_ERROR(_brd, _comp) \
- (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
+ (NODE_OFFSET_TO_K1(NASID_GET(_brd), (_comp)->errinfo))
#define KLCF_COMP_TYPE(_comp) ((_comp)->struct_type)
-#define KLCF_BRIDGE_W_ID(_comp) ((_comp)->physid) /* Widget ID */
+#define KLCF_BRIDGE_W_ID(_comp) ((_comp)->physid) /* Widget ID */
@@ -481,73 +481,73 @@ typedef struct lboard_s {
* component.
*/
-typedef struct klinfo_s { /* Generic info */
- unsigned char struct_type; /* type of this structure */
- unsigned char struct_version; /* version of this structure */
- unsigned char flags; /* Enabled, disabled etc */
- unsigned char revision; /* component revision */
- unsigned short diagval; /* result of diagnostics */
- unsigned short diagparm; /* diagnostic parameter */
- unsigned char inventory; /* previous inventory status */
- nic_t nic; /* MUst be aligned properly */
- unsigned char physid; /* physical id of component */
- unsigned int virtid; /* virtual id as seen by system */
- unsigned char widid; /* Widget id - if applicable */
- nasid_t nasid; /* node number - from parent */
+typedef struct klinfo_s { /* Generic info */
+ unsigned char struct_type; /* type of this structure */
+ unsigned char struct_version; /* version of this structure */
+ unsigned char flags; /* Enabled, disabled etc */
+ unsigned char revision; /* component revision */
+ unsigned short diagval; /* result of diagnostics */
+ unsigned short diagparm; /* diagnostic parameter */
+ unsigned char inventory; /* previous inventory status */
+ nic_t nic; /* MUst be aligned properly */
+ unsigned char physid; /* physical id of component */
+ unsigned int virtid; /* virtual id as seen by system */
+ unsigned char widid; /* Widget id - if applicable */
+ nasid_t nasid; /* node number - from parent */
char pad1; /* pad out structure. */
char pad2; /* pad out structure. */
- COMPONENT *arcs_compt; /* ptr to the arcs struct for ease*/
- klconf_off_t errinfo; /* component specific errors */
- unsigned short pad3; /* pci fields have moved over to */
- unsigned short pad4; /* klbri_t */
+ COMPONENT *arcs_compt; /* ptr to the arcs struct for ease*/
+ klconf_off_t errinfo; /* component specific errors */
+ unsigned short pad3; /* pci fields have moved over to */
+ unsigned short pad4; /* klbri_t */
} klinfo_t ;
#define KLCONFIG_INFO_ENABLED(_i) ((_i)->flags & KLINFO_ENABLE)
/*
* Component structures.
* Following are the currently identified components:
- * CPU, HUB, MEM_BANK,
- * XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
- * BRIDGE, IOC3, SuperIO, SCSI, FDDI
- * ROUTER
- * GRAPHICS
+ * CPU, HUB, MEM_BANK,
+ * XBOW(consists of 16 WIDGETs, each of which can be HUB or GRAPHICS or BRIDGE)
+ * BRIDGE, IOC3, SuperIO, SCSI, FDDI
+ * ROUTER
+ * GRAPHICS
*/
#define KLSTRUCT_UNKNOWN 0
-#define KLSTRUCT_CPU 1
-#define KLSTRUCT_HUB 2
-#define KLSTRUCT_MEMBNK 3
-#define KLSTRUCT_XBOW 4
-#define KLSTRUCT_BRI 5
-#define KLSTRUCT_IOC3 6
-#define KLSTRUCT_PCI 7
-#define KLSTRUCT_VME 8
+#define KLSTRUCT_CPU 1
+#define KLSTRUCT_HUB 2
+#define KLSTRUCT_MEMBNK 3
+#define KLSTRUCT_XBOW 4
+#define KLSTRUCT_BRI 5
+#define KLSTRUCT_IOC3 6
+#define KLSTRUCT_PCI 7
+#define KLSTRUCT_VME 8
#define KLSTRUCT_ROU 9
-#define KLSTRUCT_GFX 10
-#define KLSTRUCT_SCSI 11
-#define KLSTRUCT_FDDI 12
-#define KLSTRUCT_MIO 13
-#define KLSTRUCT_DISK 14
-#define KLSTRUCT_TAPE 15
-#define KLSTRUCT_CDROM 16
-#define KLSTRUCT_HUB_UART 17
-#define KLSTRUCT_IOC3ENET 18
-#define KLSTRUCT_IOC3UART 19
+#define KLSTRUCT_GFX 10
+#define KLSTRUCT_SCSI 11
+#define KLSTRUCT_FDDI 12
+#define KLSTRUCT_MIO 13
+#define KLSTRUCT_DISK 14
+#define KLSTRUCT_TAPE 15
+#define KLSTRUCT_CDROM 16
+#define KLSTRUCT_HUB_UART 17
+#define KLSTRUCT_IOC3ENET 18
+#define KLSTRUCT_IOC3UART 19
#define KLSTRUCT_UNUSED 20 /* XXX UNUSED */
-#define KLSTRUCT_IOC3PCKM 21
-#define KLSTRUCT_RAD 22
-#define KLSTRUCT_HUB_TTY 23
-#define KLSTRUCT_IOC3_TTY 24
+#define KLSTRUCT_IOC3PCKM 21
+#define KLSTRUCT_RAD 22
+#define KLSTRUCT_HUB_TTY 23
+#define KLSTRUCT_IOC3_TTY 24
/* Early Access IO proms are compatible
only with KLSTRUCT values up to 24. */
-#define KLSTRUCT_FIBERCHANNEL 25
+#define KLSTRUCT_FIBERCHANNEL 25
#define KLSTRUCT_MOD_SERIAL_NUM 26
-#define KLSTRUCT_IOC3MS 27
-#define KLSTRUCT_TPU 28
-#define KLSTRUCT_GSN_A 29
-#define KLSTRUCT_GSN_B 30
-#define KLSTRUCT_XTHD 31
+#define KLSTRUCT_IOC3MS 27
+#define KLSTRUCT_TPU 28
+#define KLSTRUCT_GSN_A 29
+#define KLSTRUCT_GSN_B 30
+#define KLSTRUCT_XTHD 31
/*
* These are the indices of various components within a lboard structure.
@@ -583,7 +583,7 @@ typedef u64 *router_t;
* The port info in ip27_cfg area translates to a lboart_t in the
* KLCONFIG area. But since KLCONFIG does not use pointers, lboart_t
* is stored in terms of a nasid and a offset from start of KLCONFIG
- * area on that nasid.
+ * area on that nasid.
*/
typedef struct klport_s {
nasid_t port_nasid;
@@ -591,20 +591,20 @@ typedef struct klport_s {
klconf_off_t port_offset;
} klport_t;
-typedef struct klcpu_s { /* CPU */
- klinfo_t cpu_info;
- unsigned short cpu_prid; /* Processor PRID value */
- unsigned short cpu_fpirr; /* FPU IRR value */
- unsigned short cpu_speed; /* Speed in MHZ */
- unsigned short cpu_scachesz; /* secondary cache size in MB */
- unsigned short cpu_scachespeed;/* secondary cache speed in MHz */
+typedef struct klcpu_s { /* CPU */
+ klinfo_t cpu_info;
+ unsigned short cpu_prid; /* Processor PRID value */
+ unsigned short cpu_fpirr; /* FPU IRR value */
+ unsigned short cpu_speed; /* Speed in MHZ */
+ unsigned short cpu_scachesz; /* secondary cache size in MB */
+ unsigned short cpu_scachespeed;/* secondary cache speed in MHz */
} klcpu_t ;
#define CPU_STRUCT_VERSION 2
typedef struct klhub_s { /* HUB */
- klinfo_t hub_info;
- unsigned int hub_flags; /* PCFG_HUB_xxx flags */
+ klinfo_t hub_info;
+ unsigned int hub_flags; /* PCFG_HUB_xxx flags */
klport_t hub_port; /* hub is connected to this */
nic_t hub_box_nic; /* nic of containing box */
klconf_off_t hub_mfg_nic; /* MFG NIC string */
@@ -612,36 +612,36 @@ typedef struct klhub_s { /* HUB */
} klhub_t ;
typedef struct klhub_uart_s { /* HUB */
- klinfo_t hubuart_info;
- unsigned int hubuart_flags; /* PCFG_HUB_xxx flags */
+ klinfo_t hubuart_info;
+ unsigned int hubuart_flags; /* PCFG_HUB_xxx flags */
nic_t hubuart_box_nic; /* nic of containing box */
} klhub_uart_t ;
-#define MEMORY_STRUCT_VERSION 2
+#define MEMORY_STRUCT_VERSION 2
typedef struct klmembnk_s { /* MEMORY BANK */
- klinfo_t membnk_info;
- short membnk_memsz; /* Total memory in megabytes */
+ klinfo_t membnk_info;
+ short membnk_memsz; /* Total memory in megabytes */
short membnk_dimm_select; /* bank to physical addr mapping*/
short membnk_bnksz[MD_MEM_BANKS]; /* Memory bank sizes */
short membnk_attr;
} klmembnk_t ;
#define KLCONFIG_MEMBNK_SIZE(_info, _bank) \
- ((_info)->membnk_bnksz[(_bank)])
+ ((_info)->membnk_bnksz[(_bank)])
#define MEMBNK_PREMIUM 1
#define KLCONFIG_MEMBNK_PREMIUM(_info, _bank) \
- ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
+ ((_info)->membnk_attr & (MEMBNK_PREMIUM << (_bank)))
#define MAX_SERIAL_NUM_SIZE 10
typedef struct klmod_serial_num_s {
- klinfo_t snum_info;
+ klinfo_t snum_info;
union {
- char snum_str[MAX_SERIAL_NUM_SIZE];
- unsigned long long snum_int;
+ char snum_str[MAX_SERIAL_NUM_SIZE];
+ unsigned long long snum_int;
} snum;
} klmod_serial_num_t;
@@ -650,43 +650,43 @@ typedef struct klmod_serial_num_s {
serial number struct as a component without losing compatibility
between prom versions. */
-#define GET_SNUM_COMP(_l) ((klmod_serial_num_t *)\
+#define GET_SNUM_COMP(_l) ((klmod_serial_num_t *)\
KLCF_COMP(_l, _l->brd_numcompts))
#define MAX_XBOW_LINKS 16
-typedef struct klxbow_s { /* XBOW */
- klinfo_t xbow_info ;
+typedef struct klxbow_s { /* XBOW */
+ klinfo_t xbow_info ;
klport_t xbow_port_info[MAX_XBOW_LINKS] ; /* Module number */
- int xbow_master_hub_link;
- /* type of brd connected+component struct ptr+flags */
+ int xbow_master_hub_link;
+ /* type of brd connected+component struct ptr+flags */
} klxbow_t ;
#define MAX_PCI_SLOTS 8
typedef struct klpci_device_s {
s32 pci_device_id; /* 32 bits of vendor/device ID. */
- s32 pci_device_pad; /* 32 bits of padding. */
+ s32 pci_device_pad; /* 32 bits of padding. */
} klpci_device_t;
#define BRIDGE_STRUCT_VERSION 2
-typedef struct klbri_s { /* BRIDGE */
- klinfo_t bri_info ;
- unsigned char bri_eprominfo ; /* IO6prom connected to bridge */
- unsigned char bri_bustype ; /* PCI/VME BUS bridge/GIO */
- pci_t pci_specific ; /* PCI Board config info */
+typedef struct klbri_s { /* BRIDGE */
+ klinfo_t bri_info ;
+ unsigned char bri_eprominfo ; /* IO6prom connected to bridge */
+ unsigned char bri_bustype ; /* PCI/VME BUS bridge/GIO */
+ pci_t pci_specific ; /* PCI Board config info */
klpci_device_t bri_devices[MAX_PCI_DEVS] ; /* PCI IDs */
klconf_off_t bri_mfg_nic ;
} klbri_t ;
#define MAX_IOC3_TTY 2
-typedef struct klioc3_s { /* IOC3 */
- klinfo_t ioc3_info ;
- unsigned char ioc3_ssram ; /* Info about ssram */
- unsigned char ioc3_nvram ; /* Info about nvram */
- klinfo_t ioc3_superio ; /* Info about superio */
+typedef struct klioc3_s { /* IOC3 */
+ klinfo_t ioc3_info ;
+ unsigned char ioc3_ssram ; /* Info about ssram */
+ unsigned char ioc3_nvram ; /* Info about nvram */
+ klinfo_t ioc3_superio ; /* Info about superio */
klconf_off_t ioc3_tty_off ;
klinfo_t ioc3_enet ;
klconf_off_t ioc3_enet_off ;
@@ -695,27 +695,27 @@ typedef struct klioc3_s { /* IOC3 */
#define MAX_VME_SLOTS 8
-typedef struct klvmeb_s { /* VME BRIDGE - PCI CTLR */
- klinfo_t vmeb_info ;
+typedef struct klvmeb_s { /* VME BRIDGE - PCI CTLR */
+ klinfo_t vmeb_info ;
vmeb_t vmeb_specific ;
- klconf_off_t vmeb_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
+ klconf_off_t vmeb_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
} klvmeb_t ;
-typedef struct klvmed_s { /* VME DEVICE - VME BOARD */
+typedef struct klvmed_s { /* VME DEVICE - VME BOARD */
klinfo_t vmed_info ;
vmed_t vmed_specific ;
- klconf_off_t vmed_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
+ klconf_off_t vmed_brdinfo[MAX_VME_SLOTS] ; /* VME Board config info */
} klvmed_t ;
#define ROUTER_VECTOR_VERS 2
/* XXX - Don't we need the number of ports here?!? */
-typedef struct klrou_s { /* ROUTER */
- klinfo_t rou_info ;
- unsigned int rou_flags ; /* PCFG_ROUTER_xxx flags */
- nic_t rou_box_nic ; /* nic of the containing module */
- klport_t rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
- klconf_off_t rou_mfg_nic ; /* MFG NIC string */
+typedef struct klrou_s { /* ROUTER */
+ klinfo_t rou_info ;
+ unsigned int rou_flags ; /* PCFG_ROUTER_xxx flags */
+ nic_t rou_box_nic ; /* nic of the containing module */
+ klport_t rou_port[MAX_ROUTER_PORTS + 1] ; /* array index 1 to 6 */
+ klconf_off_t rou_mfg_nic ; /* MFG NIC string */
u64 rou_vector; /* vector from master node */
} klrou_t ;
@@ -732,30 +732,30 @@ typedef struct klrou_s { /* ROUTER */
#define KLGFX_COOKIE 0x0c0de000
typedef struct klgfx_s { /* GRAPHICS Device */
- klinfo_t gfx_info;
- klconf_off_t old_gndevs; /* for compatibility with older proms */
- klconf_off_t old_gdoff0; /* for compatibility with older proms */
+ klinfo_t gfx_info;
+ klconf_off_t old_gndevs; /* for compatibility with older proms */
+ klconf_off_t old_gdoff0; /* for compatibility with older proms */
unsigned int cookie; /* for compatibility with older proms */
unsigned int moduleslot;
struct klgfx_s *gfx_next_pipe;
graphics_t gfx_specific;
- klconf_off_t pad0; /* for compatibility with older proms */
- klconf_off_t gfx_mfg_nic;
+ klconf_off_t pad0; /* for compatibility with older proms */
+ klconf_off_t gfx_mfg_nic;
} klgfx_t;
typedef struct klxthd_s {
- klinfo_t xthd_info ;
- klconf_off_t xthd_mfg_nic ; /* MFG NIC string */
+ klinfo_t xthd_info ;
+ klconf_off_t xthd_mfg_nic ; /* MFG NIC string */
} klxthd_t ;
-typedef struct kltpu_s { /* TPU board */
- klinfo_t tpu_info ;
- klconf_off_t tpu_mfg_nic ; /* MFG NIC string */
+typedef struct kltpu_s { /* TPU board */
+ klinfo_t tpu_info ;
+ klconf_off_t tpu_mfg_nic ; /* MFG NIC string */
} kltpu_t ;
-typedef struct klgsn_s { /* GSN board */
- klinfo_t gsn_info ;
- klconf_off_t gsn_mfg_nic ; /* MFG NIC string */
+typedef struct klgsn_s { /* GSN board */
+ klinfo_t gsn_info ;
+ klconf_off_t gsn_mfg_nic ; /* MFG NIC string */
} klgsn_t ;
#define MAX_SCSI_DEVS 16
@@ -767,57 +767,57 @@ typedef struct klgsn_s { /* GSN board */
* that as the size to be klmalloced.
*/
-typedef struct klscsi_s { /* SCSI Controller */
- klinfo_t scsi_info ;
- scsi_t scsi_specific ;
- unsigned char scsi_numdevs ;
+typedef struct klscsi_s { /* SCSI Controller */
+ klinfo_t scsi_info ;
+ scsi_t scsi_specific ;
+ unsigned char scsi_numdevs ;
klconf_off_t scsi_devinfo[MAX_SCSI_DEVS] ;
} klscsi_t ;
-typedef struct klscdev_s { /* SCSI device */
- klinfo_t scdev_info ;
+typedef struct klscdev_s { /* SCSI device */
+ klinfo_t scdev_info ;
struct scsidisk_data *scdev_cfg ; /* driver fills up this */
} klscdev_t ;
-typedef struct klttydev_s { /* TTY device */
- klinfo_t ttydev_info ;
+typedef struct klttydev_s { /* TTY device */
+ klinfo_t ttydev_info ;
struct terminal_data *ttydev_cfg ; /* driver fills up this */
} klttydev_t ;
-typedef struct klenetdev_s { /* ENET device */
- klinfo_t enetdev_info ;
+typedef struct klenetdev_s { /* ENET device */
+ klinfo_t enetdev_info ;
struct net_data *enetdev_cfg ; /* driver fills up this */
} klenetdev_t ;
-typedef struct klkbddev_s { /* KBD device */
- klinfo_t kbddev_info ;
+typedef struct klkbddev_s { /* KBD device */
+ klinfo_t kbddev_info ;
struct keyboard_data *kbddev_cfg ; /* driver fills up this */
} klkbddev_t ;
-typedef struct klmsdev_s { /* mouse device */
- klinfo_t msdev_info ;
- void *msdev_cfg ;
+typedef struct klmsdev_s { /* mouse device */
+ klinfo_t msdev_info ;
+ void *msdev_cfg ;
} klmsdev_t ;
#define MAX_FDDI_DEVS 10 /* XXX Is this true */
-typedef struct klfddi_s { /* FDDI */
- klinfo_t fddi_info ;
- fddi_t fddi_specific ;
+typedef struct klfddi_s { /* FDDI */
+ klinfo_t fddi_info ;
+ fddi_t fddi_specific ;
klconf_off_t fddi_devinfo[MAX_FDDI_DEVS] ;
} klfddi_t ;
-typedef struct klmio_s { /* MIO */
- klinfo_t mio_info ;
- mio_t mio_specific ;
+typedef struct klmio_s { /* MIO */
+ klinfo_t mio_info ;
+ mio_t mio_specific ;
} klmio_t ;
typedef union klcomp_s {
klcpu_t kc_cpu;
klhub_t kc_hub;
- klmembnk_t kc_mem;
- klxbow_t kc_xbow;
+ klmembnk_t kc_mem;
+ klxbow_t kc_xbow;
klbri_t kc_bri;
klioc3_t kc_ioc3;
klvmeb_t kc_vmeb;
@@ -831,11 +831,11 @@ typedef union klcomp_s {
klmod_serial_num_t kc_snum ;
} klcomp_t;
-typedef union kldev_s { /* for device structure allocation */
+typedef union kldev_s { /* for device structure allocation */
klscdev_t kc_scsi_dev ;
klttydev_t kc_tty_dev ;
klenetdev_t kc_enet_dev ;
- klkbddev_t kc_kbd_dev ;
+ klkbddev_t kc_kbd_dev ;
} kldev_t ;
/* Data structure interface routines. TBD */
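/*
 * Illustrative sketch, not part of the patch above: walking one node's
 * KLCONFIG board list with the accessors defined in this header,
 * skipping boards discovered twice and visiting enabled CPU components.
 * handle_cpu() is a placeholder for whatever the caller does per CPU.
 */
extern void handle_cpu(klcpu_t *cpu, lboard_t *brd);

static void scan_klconfig(nasid_t nasid)
{
	lboard_t *brd;
	klinfo_t *comp;
	int i;

	if (!KL_CONFIG_CHECK_MAGIC(nasid))
		return;					/* KLCONFIG not set up on this node */

	for (brd = KL_CONFIG_INFO(nasid); brd; brd = KLCF_NEXT(brd)) {
		if (KL_CONFIG_DUPLICATE_BOARD(brd))
			continue;			/* e.g. a router seen from two nodes */
		for (i = 0; i < KLCF_NUM_COMPS(brd); i++) {
			comp = KLCF_COMP(brd, i);
			if (KLCF_COMP_TYPE(comp) == KLSTRUCT_CPU &&
			    KLCONFIG_INFO_ENABLED(comp))
				handle_cpu((klcpu_t *)comp, brd);
		}
	}
}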
diff --git a/arch/mips/include/asm/sn/kldir.h b/arch/mips/include/asm/sn/kldir.h
index 1327e12e964..bfb3aec9453 100644
--- a/arch/mips/include/asm/sn/kldir.h
+++ b/arch/mips/include/asm/sn/kldir.h
@@ -16,8 +16,8 @@
* The kldir memory area resides at a fixed place in each node's memory and
* provides pointers to most other IP27 memory areas. This allows us to
* resize and/or relocate memory areas at a later time without breaking all
- * firmware and kernels that use them. Indices in the array are
- * permanently dedicated to areas listed below. Some memory areas (marked
+ * firmware and kernels that use them. Indices in the array are
+ * permanently dedicated to areas listed below. Some memory areas (marked
* below) reside at a permanently fixed location, but are included in the
* directory for completeness.
*/
@@ -28,98 +28,98 @@
* The upper portion of the memory map applies during boot
* only and is overwritten by IRIX/SYMMON.
*
- * MEMORY MAP PER NODE
+ * MEMORY MAP PER NODE
*
- * 0x2000000 (32M) +-----------------------------------------+
- * | IO6 BUFFERS FOR FLASH ENET IOC3 |
- * 0x1F80000 (31.5M) +-----------------------------------------+
- * | IO6 TEXT/DATA/BSS/stack |
- * 0x1C00000 (30M) +-----------------------------------------+
- * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
- * 0x0800000 (28M) +-----------------------------------------+
- * | IP27 PROM TEXT/DATA/BSS/stack |
- * 0x1B00000 (27M) +-----------------------------------------+
- * | IP27 CFG |
- * 0x1A00000 (26M) +-----------------------------------------+
- * | Graphics PROM |
- * 0x1800000 (24M) +-----------------------------------------+
- * | 3rd Party PROM drivers |
- * 0x1600000 (22M) +-----------------------------------------+
- * | |
- * | Free |
- * | |
- * +-----------------------------------------+
- * | UNIX DEBUG Version |
- * 0x190000 (2M--) +-----------------------------------------+
- * | SYMMON |
- * | (For UNIX Debug only) |
- * 0x34000 (208K) +-----------------------------------------+
- * | SYMMON STACK [NUM_CPU_PER_NODE] |
- * | (For UNIX Debug only) |
- * 0x25000 (148K) +-----------------------------------------+
- * | KLCONFIG - II (temp) |
- * | |
- * | ---------------------------- |
- * | |
- * | UNIX NON-DEBUG Version |
- * 0x19000 (100K) +-----------------------------------------+
+ * 0x2000000 (32M) +-----------------------------------------+
+ * | IO6 BUFFERS FOR FLASH ENET IOC3 |
+ * 0x1F80000 (31.5M) +-----------------------------------------+
+ * | IO6 TEXT/DATA/BSS/stack |
+ * 0x1C00000 (30M) +-----------------------------------------+
+ * | IO6 PROM DEBUG TEXT/DATA/BSS/stack |
+ * 0x0800000 (28M) +-----------------------------------------+
+ * | IP27 PROM TEXT/DATA/BSS/stack |
+ * 0x1B00000 (27M) +-----------------------------------------+
+ * | IP27 CFG |
+ * 0x1A00000 (26M) +-----------------------------------------+
+ * | Graphics PROM |
+ * 0x1800000 (24M) +-----------------------------------------+
+ * | 3rd Party PROM drivers |
+ * 0x1600000 (22M) +-----------------------------------------+
+ * | |
+ * | Free |
+ * | |
+ * +-----------------------------------------+
+ * | UNIX DEBUG Version |
+ * 0x190000 (2M--) +-----------------------------------------+
+ * | SYMMON |
+ * | (For UNIX Debug only) |
+ * 0x34000 (208K) +-----------------------------------------+
+ * | SYMMON STACK [NUM_CPU_PER_NODE] |
+ * | (For UNIX Debug only) |
+ * 0x25000 (148K) +-----------------------------------------+
+ * | KLCONFIG - II (temp) |
+ * | |
+ * | ---------------------------- |
+ * | |
+ * | UNIX NON-DEBUG Version |
+ * 0x19000 (100K) +-----------------------------------------+
*
*
* The lower portion of the memory map contains information that is
* permanent and is used by the IP27PROM, IO6PROM and IRIX.
*
- * 0x19000 (100K) +-----------------------------------------+
- * | |
- * | PI Error Spools (32K) |
- * | |
- * 0x12000 (72K) +-----------------------------------------+
- * | Unused |
- * 0x11c00 (71K) +-----------------------------------------+
- * | CPU 1 NMI Eframe area |
- * 0x11a00 (70.5K) +-----------------------------------------+
- * | CPU 0 NMI Eframe area |
- * 0x11800 (70K) +-----------------------------------------+
- * | CPU 1 NMI Register save area |
- * 0x11600 (69.5K) +-----------------------------------------+
- * | CPU 0 NMI Register save area |
- * 0x11400 (69K) +-----------------------------------------+
- * | GDA (1k) |
- * 0x11000 (68K) +-----------------------------------------+
- * | Early cache Exception stack |
- * | and/or |
- * | kernel/io6prom nmi registers |
+ * 0x19000 (100K) +-----------------------------------------+
+ * | |
+ * | PI Error Spools (32K) |
+ * | |
+ * 0x12000 (72K) +-----------------------------------------+
+ * | Unused |
+ * 0x11c00 (71K) +-----------------------------------------+
+ * | CPU 1 NMI Eframe area |
+ * 0x11a00 (70.5K) +-----------------------------------------+
+ * | CPU 0 NMI Eframe area |
+ * 0x11800 (70K) +-----------------------------------------+
+ * | CPU 1 NMI Register save area |
+ * 0x11600 (69.5K) +-----------------------------------------+
+ * | CPU 0 NMI Register save area |
+ * 0x11400 (69K) +-----------------------------------------+
+ * | GDA (1k) |
+ * 0x11000 (68K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
* 0x10800 (66k) +-----------------------------------------+
- * | cache error eframe |
- * 0x10400 (65K) +-----------------------------------------+
- * | Exception Handlers (UALIAS copy) |
- * 0x10000 (64K) +-----------------------------------------+
- * | |
- * | |
- * | KLCONFIG - I (permanent) (48K) |
- * | |
- * | |
- * | |
- * 0x4000 (16K) +-----------------------------------------+
- * | NMI Handler (Protected Page) |
- * 0x3000 (12K) +-----------------------------------------+
- * | ARCS PVECTORS (master node only) |
- * 0x2c00 (11K) +-----------------------------------------+
- * | ARCS TVECTORS (master node only) |
- * 0x2800 (10K) +-----------------------------------------+
- * | LAUNCH [NUM_CPU] |
- * 0x2400 (9K) +-----------------------------------------+
- * | Low memory directory (KLDIR) |
- * 0x2000 (8K) +-----------------------------------------+
- * | ARCS SPB (1K) |
- * 0x1000 (4K) +-----------------------------------------+
- * | Early cache Exception stack |
- * | and/or |
- * | kernel/io6prom nmi registers |
- * 0x800 (2k) +-----------------------------------------+
- * | cache error eframe |
- * 0x400 (1K) +-----------------------------------------+
- * | Exception Handlers |
- * 0x0 (0K) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x10400 (65K) +-----------------------------------------+
+ * | Exception Handlers (UALIAS copy) |
+ * 0x10000 (64K) +-----------------------------------------+
+ * | |
+ * | |
+ * | KLCONFIG - I (permanent) (48K) |
+ * | |
+ * | |
+ * | |
+ * 0x4000 (16K) +-----------------------------------------+
+ * | NMI Handler (Protected Page) |
+ * 0x3000 (12K) +-----------------------------------------+
+ * | ARCS PVECTORS (master node only) |
+ * 0x2c00 (11K) +-----------------------------------------+
+ * | ARCS TVECTORS (master node only) |
+ * 0x2800 (10K) +-----------------------------------------+
+ * | LAUNCH [NUM_CPU] |
+ * 0x2400 (9K) +-----------------------------------------+
+ * | Low memory directory (KLDIR) |
+ * 0x2000 (8K) +-----------------------------------------+
+ * | ARCS SPB (1K) |
+ * 0x1000 (4K) +-----------------------------------------+
+ * | Early cache Exception stack |
+ * | and/or |
+ * | kernel/io6prom nmi registers |
+ * 0x800 (2k) +-----------------------------------------+
+ * | cache error eframe |
+ * 0x400 (1K) +-----------------------------------------+
+ * | Exception Handlers |
+ * 0x0 (0K) +-----------------------------------------+
*/
#ifdef __ASSEMBLY__
@@ -202,13 +202,13 @@
#ifndef __ASSEMBLY__
typedef struct kldir_ent_s {
- u64 magic; /* Indicates validity of entry */
+ u64 magic; /* Indicates validity of entry */
off_t offset; /* Offset from start of node space */
unsigned long pointer; /* Pointer to area in some cases */
- size_t size; /* Size in bytes */
+ size_t size; /* Size in bytes */
u64 count; /* Repeat count if array, 1 if not */
- size_t stride; /* Stride if array, 0 if not */
- char rsvd[16]; /* Pad entry to 0x40 bytes */
+ size_t stride; /* Stride if array, 0 if not */
+ char rsvd[16]; /* Pad entry to 0x40 bytes */
/* NOTE: These 16 bytes are used in the Partition KLDIR
entry to store partition info. Refer to klpart.h for this. */
} kldir_ent_t;
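/*
 * Illustrative sketch, not part of the patch above: resolving the i-th
 * element of an array-style KLDIR area (count > 1) on a given node.
 * The caller is assumed to have located the kldir_ent_t and verified
 * its magic already; NODE_OFFSET_TO_K1() is the same conversion used by
 * the KLCONFIG accessors.
 */
static unsigned long kldir_area_elem(nasid_t nasid, kldir_ent_t *ent,
				     unsigned int i)
{
	if (i >= ent->count)
		return 0;
	/* offset is relative to the start of the node's memory space */
	return NODE_OFFSET_TO_K1(nasid, ent->offset + i * ent->stride);
}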
diff --git a/arch/mips/include/asm/sn/launch.h b/arch/mips/include/asm/sn/launch.h
index b7c2226312c..04226d8d30c 100644
--- a/arch/mips/include/asm/sn/launch.h
+++ b/arch/mips/include/asm/sn/launch.h
@@ -19,7 +19,7 @@
*
* The master stores launch parameters in the launch structure
* corresponding to a target processor that is in a slave loop, then sends
- * an interrupt to the slave processor. The slave calls the desired
+ * an interrupt to the slave processor. The slave calls the desired
* function, then returns to the slave loop. The master may poll or wait
* for the slaves to finish.
*
@@ -33,7 +33,7 @@
#define LAUNCH_PADSZ 0xa0
#endif
-#define LAUNCH_OFF_MAGIC 0x00 /* Struct offsets for assembly */
+#define LAUNCH_OFF_MAGIC 0x00 /* Struct offsets for assembly */
#define LAUNCH_OFF_BUSY 0x08
#define LAUNCH_OFF_CALL 0x10
#define LAUNCH_OFF_CALLC 0x18
@@ -44,7 +44,7 @@
#define LAUNCH_OFF_BEVNORMAL 0x40
#define LAUNCH_OFF_BEVECC 0x48
-#define LAUNCH_STATE_DONE 0 /* Return value of LAUNCH_POLL */
+#define LAUNCH_STATE_DONE 0 /* Return value of LAUNCH_POLL */
#define LAUNCH_STATE_SENT 1
#define LAUNCH_STATE_RECD 2
@@ -65,16 +65,16 @@ typedef int launch_state_t;
typedef void (*launch_proc_t)(u64 call_parm);
typedef struct launch_s {
- volatile u64 magic; /* Magic number */
- volatile u64 busy; /* Slave currently active */
+ volatile u64 magic; /* Magic number */
+ volatile u64 busy; /* Slave currently active */
volatile launch_proc_t call_addr; /* Func. for slave to call */
volatile u64 call_addr_c; /* 1's complement of call_addr*/
volatile u64 call_parm; /* Single parm passed to call*/
volatile void *stack_addr; /* Stack pointer for slave function */
volatile void *gp_addr; /* Global pointer for slave func. */
- volatile char *bevutlb;/* Address of bev utlb ex handler */
- volatile char *bevnormal;/*Address of bev normal ex handler */
- volatile char *bevecc;/* Address of bev cache err handler */
+ volatile char *bevutlb;/* Address of bev utlb ex handler */
+ volatile char *bevnormal;/*Address of bev normal ex handler */
+ volatile char *bevecc;/* Address of bev cache err handler */
volatile char pad[160]; /* Pad to LAUNCH_SIZEOF */
} launch_t;
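/*
 * Illustrative sketch, not part of the patch above: the master side of
 * the launch handshake described in the comment at the top of this
 * file.  Only the launch_t fields and the call_addr/call_addr_c
 * complement convention come from the header; who owns 'busy' and how
 * the slave is interrupted are assumptions.
 */
static void launch_slave(launch_t *l, launch_proc_t fn, u64 parm,
			 void *stack, void *gp)
{
	l->call_parm   = parm;
	l->stack_addr  = stack;
	l->gp_addr     = gp;
	l->call_addr   = fn;
	l->call_addr_c = ~(u64)(unsigned long)fn;	/* slave checks the complement */
	l->busy        = 1;				/* assumed: marks request pending */
	/* ...then interrupt the target slice so its slave loop picks this up... */
}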
diff --git a/arch/mips/include/asm/sn/mapped_kernel.h b/arch/mips/include/asm/sn/mapped_kernel.h
index 721496a0bb9..401f3b0eee1 100644
--- a/arch/mips/include/asm/sn/mapped_kernel.h
+++ b/arch/mips/include/asm/sn/mapped_kernel.h
@@ -48,7 +48,7 @@
#endif /* CONFIG_MAPPED_KERNEL */
-#define MAPPED_KERN_RO_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
-#define MAPPED_KERN_RW_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
+#define MAPPED_KERN_RO_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RO_TO_PHYS(x))
+#define MAPPED_KERN_RW_TO_K0(x) PHYS_TO_K0(MAPPED_KERN_RW_TO_PHYS(x))
#endif /* __ASM_SN_MAPPED_KERNEL_H */
diff --git a/arch/mips/include/asm/sn/nmi.h b/arch/mips/include/asm/sn/nmi.h
index 1af49897d4e..12ac210f12a 100644
--- a/arch/mips/include/asm/sn/nmi.h
+++ b/arch/mips/include/asm/sn/nmi.h
@@ -19,7 +19,7 @@
*
* The master stores launch parameters in the launch structure
* corresponding to a target processor that is in a slave loop, then sends
- * an interrupt to the slave processor. The slave calls the desired
+ * an interrupt to the slave processor. The slave calls the desired
* function, followed by an optional rendezvous function, then returns to
* the slave loop. The master does not wait for the slaves before
* returning.
@@ -31,7 +31,7 @@
#define NMI_MAGIC 0x48414d4d455201
#define NMI_SIZEOF 0x40
-#define NMI_OFF_MAGIC 0x00 /* Struct offsets for assembly */
+#define NMI_OFF_MAGIC 0x00 /* Struct offsets for assembly */
#define NMI_OFF_FLAGS 0x08
#define NMI_OFF_CALL 0x10
#define NMI_OFF_CALLC 0x18
@@ -53,8 +53,8 @@
typedef struct nmi_s {
volatile unsigned long magic; /* Magic number */
volatile unsigned long flags; /* Combination of flags above */
- volatile void *call_addr; /* Routine for slave to call */
- volatile void *call_addr_c; /* 1's complement of address */
+ volatile void *call_addr; /* Routine for slave to call */
+ volatile void *call_addr_c; /* 1's complement of address */
volatile void *call_parm; /* Single parm passed to call */
volatile unsigned long gmaster; /* Flag true only on global master*/
} nmi_t;
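/*
 * Illustrative sketch, not part of the patch above: the consumer side of
 * the call_addr / call_addr_c pair, rejecting a corrupted or unarmed
 * entry before dispatching.  How the fields are actually consumed by
 * the NMI path is an assumption.
 */
static void nmi_maybe_dispatch(nmi_t *n)
{
	unsigned long addr = (unsigned long)n->call_addr;

	if (!addr || addr != ~(unsigned long)n->call_addr_c)
		return;				/* not armed, or entry corrupted */
	((void (*)(void *))addr)((void *)n->call_parm);
}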
diff --git a/arch/mips/include/asm/sn/sn0/addrs.h b/arch/mips/include/asm/sn/sn0/addrs.h
index b06190093bb..6b53070f400 100644
--- a/arch/mips/include/asm/sn/sn0/addrs.h
+++ b/arch/mips/include/asm/sn/sn0/addrs.h
@@ -29,7 +29,7 @@
* chapter of the Hub specification.
*
* NOTE: This header file is included both by C and by assembler source
- * files. Please bracket any language-dependent definitions
+ * files. Please bracket any language-dependent definitions
* appropriately.
*/
@@ -102,14 +102,14 @@
#define BWIN_INDEX_BITS 3
#define BWIN_SIZE (UINT64_CAST 1 << BWIN_SIZE_BITS)
-#define BWIN_SIZEMASK (BWIN_SIZE - 1)
-#define BWIN_WIDGET_MASK 0x7
+#define BWIN_SIZEMASK (BWIN_SIZE - 1)
+#define BWIN_WIDGET_MASK 0x7
#define NODE_BWIN_BASE0(nasid) (NODE_IO_BASE(nasid) + BWIN_SIZE)
-#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
+#define NODE_BWIN_BASE(nasid, bigwin) (NODE_BWIN_BASE0(nasid) + \
(UINT64_CAST(bigwin) << BWIN_SIZE_BITS))
-#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
-#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
+#define BWIN_WIDGETADDR(addr) ((addr) & BWIN_SIZEMASK)
+#define BWIN_WINDOWNUM(addr) (((addr) >> BWIN_SIZE_BITS) & BWIN_WIDGET_MASK)
/*
* Verify if addr belongs to large window address of node with "nasid"
*
@@ -120,7 +120,7 @@
*
*/
-#define NODE_BWIN_ADDR(nasid, addr) \
+#define NODE_BWIN_ADDR(nasid, addr) \
(((addr) >= NODE_BWIN_BASE0(nasid)) && \
((addr) < (NODE_BWIN_BASE(nasid, HUB_NUM_BIG_WINDOW) + \
BWIN_SIZE)))
@@ -129,7 +129,7 @@
* The following define the major position-independent aliases used
* in SN0.
* CALIAS -- Varies in size, points to the first n bytes of memory
- * on the reader's node.
+ * on the reader's node.
*/
#define CALIAS_BASE CAC_BASE
@@ -146,7 +146,7 @@
#ifndef __ASSEMBLY__
#define KERN_NMI_ADDR(nasid, slice) \
- TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET + \
+ TO_NODE_UNCAC((nasid), IP27_NMI_KREGS_OFFSET + \
(IP27_NMI_KREGS_CPU_SIZE * (slice)))
#endif /* !__ASSEMBLY__ */
@@ -203,7 +203,7 @@
#define IO6PROM_BASE PHYS_TO_K0(0x01c00000)
#define IO6PROM_SIZE 0x400000
-#define IO6PROM_BASE_MAPPED (UNCAC_BASE | 0x11c00000)
+#define IO6PROM_BASE_MAPPED (UNCAC_BASE | 0x11c00000)
#define IO6DPROM_BASE PHYS_TO_K0(0x01c00000)
#define IO6DPROM_SIZE 0x200000
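/*
 * Illustrative sketch, not part of the patch above: splitting a big
 * window address into its window number and in-widget offset once
 * NODE_BWIN_ADDR() confirms it lies in the node's big-window range.
 * The return convention is an assumption.
 */
static int decode_bwin(nasid_t nasid, unsigned long addr,
		       int *window, unsigned long *offset)
{
	if (!NODE_BWIN_ADDR(nasid, addr))
		return -1;
	*window = BWIN_WINDOWNUM(addr);		/* which big window */
	*offset = BWIN_WIDGETADDR(addr);	/* offset within that window */
	return 0;
}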
diff --git a/arch/mips/include/asm/sn/sn0/arch.h b/arch/mips/include/asm/sn/sn0/arch.h
index f734f2007f2..425a67e6a94 100644
--- a/arch/mips/include/asm/sn/sn0/arch.h
+++ b/arch/mips/include/asm/sn/sn0/arch.h
@@ -12,23 +12,23 @@
#define _ASM_SN_SN0_ARCH_H
-#ifndef SN0XXL /* 128 cpu SMP max */
+#ifndef SN0XXL /* 128 cpu SMP max */
/*
* This is the maximum number of nodes that can be part of a kernel.
* Effectively, it's the maximum number of compact node ids (cnodeid_t).
*/
-#define MAX_COMPACT_NODES 64
+#define MAX_COMPACT_NODES 64
/*
* MAXCPUS refers to the maximum number of CPUs in a single kernel.
* This is not necessarily the same as MAXNODES * CPUS_PER_NODE
*/
-#define MAXCPUS 128
+#define MAXCPUS 128
#else /* SN0XXL system */
-#define MAX_COMPACT_NODES 128
-#define MAXCPUS 256
+#define MAX_COMPACT_NODES 128
+#define MAXCPUS 256
#endif /* SN0XXL */
@@ -41,9 +41,9 @@
/*
* MAX_REGIONS refers to the maximum number of hardware partitioned regions.
*/
-#define MAX_REGIONS 64
-#define MAX_NONPREMIUM_REGIONS 16
-#define MAX_PREMIUM_REGIONS MAX_REGIONS
+#define MAX_REGIONS 64
+#define MAX_NONPREMIUM_REGIONS 16
+#define MAX_PREMIUM_REGIONS MAX_REGIONS
/*
* MAX_PARITIONS refers to the maximum number of logically defined
@@ -57,12 +57,12 @@
* Slot constants for SN0
*/
#ifdef CONFIG_SGI_SN_N_MODE
-#define MAX_MEM_SLOTS 16 /* max slots per node */
+#define MAX_MEM_SLOTS 16 /* max slots per node */
#else /* !CONFIG_SGI_SN_N_MODE, assume CONFIG_SGI_SN_M_MODE */
-#define MAX_MEM_SLOTS 32 /* max slots per node */
+#define MAX_MEM_SLOTS 32 /* max slots per node */
#endif /* CONFIG_SGI_SN_M_MODE */
-#define SLOT_SHIFT (27)
+#define SLOT_SHIFT (27)
#define SLOT_MIN_MEM_SIZE (32*1024*1024)
#define CPUS_PER_NODE 2 /* CPUs on a single hub */
diff --git a/arch/mips/include/asm/sn/sn0/hub.h b/arch/mips/include/asm/sn/sn0/hub.h
index 3e228f8e796..d78dd76d5dc 100644
--- a/arch/mips/include/asm/sn/sn0/hub.h
+++ b/arch/mips/include/asm/sn/sn0/hub.h
@@ -19,8 +19,8 @@
#define HUB_REV_2_0 2
#define HUB_REV_2_1 3
#define HUB_REV_2_2 4
-#define HUB_REV_2_3 5
-#define HUB_REV_2_4 6
+#define HUB_REV_2_3 5
+#define HUB_REV_2_4 6
#define MAX_HUB_PATH 80
@@ -32,9 +32,9 @@
//#include <asm/sn/sn0/hubcore.h>
/* Translation of uncached attributes */
-#define UATTR_HSPEC 0
-#define UATTR_IO 1
-#define UATTR_MSPEC 2
-#define UATTR_UNCAC 3
+#define UATTR_HSPEC 0
+#define UATTR_IO 1
+#define UATTR_MSPEC 2
+#define UATTR_UNCAC 3
#endif /* _ASM_SN_SN0_HUB_H */
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 46286d8302a..5998b13e976 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -8,8 +8,8 @@
* Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
* Copyright (C) 1999 by Ralf Baechle
*/
-#ifndef _ASM_SGI_SN_SN0_HUBIO_H
-#define _ASM_SGI_SN_SN0_HUBIO_H
+#ifndef _ASM_SGI_SN_SN0_HUBIO_H
+#define _ASM_SGI_SN_SN0_HUBIO_H
/*
* Hub I/O interface registers
@@ -22,7 +22,7 @@
* Slightly friendlier names for some common registers.
* The hardware definitions follow.
*/
-#define IIO_WIDGET IIO_WID /* Widget identification */
+#define IIO_WIDGET IIO_WID /* Widget identification */
#define IIO_WIDGET_STAT IIO_WSTAT /* Widget status register */
#define IIO_WIDGET_CTRL IIO_WCR /* Widget control register */
#define IIO_WIDGET_TOUT IIO_WRTO /* Widget request timeout */
@@ -37,21 +37,21 @@
#define IIO_XTALKCC_TOUT IIO_IXCC /* Xtalk credit count timeout*/
#define IIO_XTALKTT_TOUT IIO_IXTT /* Xtalk tail timeout */
#define IIO_IO_ERR_CLR IIO_IECLR /* IO error clear */
-#define IIO_BTE_CRB_CNT IIO_IBCN /* IO BTE CRB count */
+#define IIO_BTE_CRB_CNT IIO_IBCN /* IO BTE CRB count */
#define IIO_LLP_CSR_IS_UP 0x00002000
-#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
-#define IIO_LLP_CSR_LLP_STAT_SHFT 12
+#define IIO_LLP_CSR_LLP_STAT_MASK 0x00003000
+#define IIO_LLP_CSR_LLP_STAT_SHFT 12
/* key to IIO_PROTECT_OVRRD */
#define IIO_PROTECT_OVRRD_KEY 0x53474972756c6573ull /* "SGIrules" */
/* BTE register names */
#define IIO_BTE_STAT_0 IIO_IBLS_0 /* Also BTE length/status 0 */
-#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
+#define IIO_BTE_SRC_0 IIO_IBSA_0 /* Also BTE source address 0 */
#define IIO_BTE_DEST_0 IIO_IBDA_0 /* Also BTE dest. address 0 */
#define IIO_BTE_CTRL_0 IIO_IBCT_0 /* Also BTE control/terminate 0 */
-#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
+#define IIO_BTE_NOTIFY_0 IIO_IBNA_0 /* Also BTE notification 0 */
#define IIO_BTE_INT_0 IIO_IBIA_0 /* Also BTE interrupt 0 */
#define IIO_BTE_OFF_0 0 /* Base offset from BTE 0 regs. */
#define IIO_BTE_OFF_1 IIO_IBLS_1 - IIO_IBLS_0 /* Offset from base to BTE 1 */
@@ -83,11 +83,11 @@
#define IIO_WSTAT 0x400008 /* Widget status */
#define IIO_WCR 0x400020 /* Widget control */
-#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
-#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
-#define IIO_WSTAT_TXRETRY_MASK (0x7F)
-#define IIO_WSTAT_TXRETRY_SHFT (16)
-#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
+#define IIO_WSTAT_ECRAZY (1ULL << 32) /* Hub gone crazy */
+#define IIO_WSTAT_TXRETRY (1ULL << 9) /* Hub Tx Retry timeout */
+#define IIO_WSTAT_TXRETRY_MASK (0x7F)
+#define IIO_WSTAT_TXRETRY_SHFT (16)
+#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
IIO_WSTAT_TXRETRY_MASK)
#define IIO_ILAPR 0x400100 /* Local Access Protection */
@@ -130,12 +130,12 @@
#define IIO_IGFX_INIT(widget, node, cpu, valid) (\
(((widget) & IIO_IGFX_W_NUM_MASK) << IIO_IGFX_W_NUM_SHIFT) | \
(((node) & IIO_IGFX_N_NUM_MASK) << IIO_IGFX_N_NUM_SHIFT) | \
- (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) | \
- (((valid) & IIO_IGFX_VLD_MASK) << IIO_IGFX_VLD_SHIFT) )
+ (((cpu) & IIO_IGFX_P_NUM_MASK) << IIO_IGFX_P_NUM_SHIFT) | \
+ (((valid) & IIO_IGFX_VLD_MASK) << IIO_IGFX_VLD_SHIFT) )
/* Scratch registers (not all bits available) */
#define IIO_SCRATCH_REG0 0x400150
-#define IIO_SCRATCH_REG1 0x400158
+#define IIO_SCRATCH_REG1 0x400158
#define IIO_SCRATCH_MASK 0x0000000f00f11fff
#define IIO_SCRATCH_BIT0_0 0x0000000800000000
@@ -174,43 +174,43 @@
typedef union hubii_wid_u {
u64 wid_reg_value;
struct {
- u64 wid_rsvd: 32, /* unused */
+ u64 wid_rsvd: 32, /* unused */
wid_rev_num: 4, /* revision number */
wid_part_num: 16, /* the widget type: hub=c101 */
wid_mfg_num: 11, /* Manufacturer id (IBM) */
wid_rsvd1: 1; /* Reserved */
- } wid_fields_s;
+ } wid_fields_s;
} hubii_wid_t;
typedef union hubii_wcr_u {
u64 wcr_reg_value;
struct {
- u64 wcr_rsvd: 41, /* unused */
+ u64 wcr_rsvd: 41, /* unused */
wcr_e_thresh: 5, /* elasticity threshold */
wcr_dir_con: 1, /* widget direct connect */
wcr_f_bad_pkt: 1, /* Force bad llp pkt enable */
wcr_xbar_crd: 3, /* LLP crossbar credit */
wcr_rsvd1: 8, /* Reserved */
- wcr_tag_mode: 1, /* Tag mode */
+ wcr_tag_mode: 1, /* Tag mode */
wcr_widget_id: 4; /* LLP crossbar credit */
- } wcr_fields_s;
+ } wcr_fields_s;
} hubii_wcr_t;
-#define iwcr_dir_con wcr_fields_s.wcr_dir_con
+#define iwcr_dir_con wcr_fields_s.wcr_dir_con
typedef union hubii_wstat_u {
- u64 reg_value;
+ u64 reg_value;
struct {
u64 rsvd1: 31,
crazy: 1, /* Crazy bit */
rsvd2: 8,
- llp_tx_cnt: 8, /* LLP Xmit retry counter */
+ llp_tx_cnt: 8, /* LLP Xmit retry counter */
rsvd3: 6,
tx_max_rtry: 1, /* LLP Retry Timeout Signal */
rsvd4: 2,
xt_tail_to: 1, /* Xtalk Tail Timeout */
- xt_crd_to: 1, /* Xtalk Credit Timeout */
+ xt_crd_to: 1, /* Xtalk Credit Timeout */
pending: 4; /* Pending Requests */
} wstat_fields_s;
} hubii_wstat_t;
@@ -219,50 +219,50 @@ typedef union hubii_wstat_u {
typedef union hubii_ilcsr_u {
u64 icsr_reg_value;
struct {
- u64 icsr_rsvd: 22, /* unused */
- icsr_max_burst: 10, /* max burst */
- icsr_rsvd4: 6, /* reserved */
- icsr_max_retry: 10, /* max retry */
- icsr_rsvd3: 2, /* reserved */
- icsr_lnk_stat: 2, /* link status */
- icsr_bm8: 1, /* Bit mode 8 */
- icsr_llp_en: 1, /* LLP enable bit */
- icsr_rsvd2: 1, /* reserver */
- icsr_wrm_reset: 1, /* Warm reset bit */
+ u64 icsr_rsvd: 22, /* unused */
+ icsr_max_burst: 10, /* max burst */
+ icsr_rsvd4: 6, /* reserved */
+ icsr_max_retry: 10, /* max retry */
+ icsr_rsvd3: 2, /* reserved */
+ icsr_lnk_stat: 2, /* link status */
+ icsr_bm8: 1, /* Bit mode 8 */
+ icsr_llp_en: 1, /* LLP enable bit */
+ icsr_rsvd2: 1, /* reserved */
+ icsr_wrm_reset: 1, /* Warm reset bit */
icsr_rsvd1: 2, /* Data ready offset */
- icsr_null_to: 6; /* Null timeout */
+ icsr_null_to: 6; /* Null timeout */
- } icsr_fields_s;
+ } icsr_fields_s;
} hubii_ilcsr_t;
typedef union hubii_iowa_u {
u64 iowa_reg_value;
struct {
- u64 iowa_rsvd: 48, /* unused */
+ u64 iowa_rsvd: 48, /* unused */
iowa_wxoac: 8, /* xtalk widget access bits */
iowa_rsvd1: 7, /* xtalk widget access bits */
iowa_w0oac: 1; /* xtalk widget access bits */
- } iowa_fields_s;
+ } iowa_fields_s;
} hubii_iowa_t;
typedef union hubii_iiwa_u {
u64 iiwa_reg_value;
struct {
- u64 iiwa_rsvd: 48, /* unused */
+ u64 iiwa_rsvd: 48, /* unused */
iiwa_wxiac: 8, /* hub wid access bits */
iiwa_rsvd1: 7, /* reserved */
iiwa_w0iac: 1; /* hub wid0 access */
- } iiwa_fields_s;
+ } iiwa_fields_s;
} hubii_iiwa_t;
typedef union hubii_illr_u {
u64 illr_reg_value;
struct {
- u64 illr_rsvd: 32, /* unused */
+ u64 illr_rsvd: 32, /* unused */
illr_cb_cnt: 16, /* checkbit error count */
illr_sn_cnt: 16; /* sequence number count */
- } illr_fields_s;
+ } illr_fields_s;
} hubii_illr_t;
/* The structures below are defined to extract and modify the ii
@@ -273,7 +273,7 @@ performance registers */
typedef union io_perf_sel {
u64 perf_sel_reg;
struct {
- u64 perf_rsvd : 48,
+ u64 perf_rsvd : 48,
perf_icct : 8,
perf_ippr1 : 4,
perf_ippr0 : 4;
@@ -301,7 +301,7 @@ typedef union io_perf_cnt {
#define IIO_LLP_SN_MAX 0xffff
/* IO PRB Entries */
-#define IIO_NUM_IPRBS (9)
+#define IIO_NUM_IPRBS (9)
#define IIO_IOPRB_0 0x400198 /* PRB entry 0 */
#define IIO_IOPRB_8 0x4001a0 /* PRB entry 8 */
#define IIO_IOPRB_9 0x4001a8 /* PRB entry 9 */
@@ -318,21 +318,21 @@ typedef union io_perf_cnt {
#define IIO_IMEM 0x4001e8 /* Miscellaneous Enable Mask */
#define IIO_IXTT 0x4001f0 /* Crosstalk tail timeout */
#define IIO_IECLR 0x4001f8 /* IO error clear */
-#define IIO_IBCN 0x400200 /* IO BTE CRB count */
+#define IIO_IBCN 0x400200 /* IO BTE CRB count */
/*
* IIO_IMEM Register fields.
*/
-#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
-#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
-#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
+#define IIO_IMEM_W0ESD 0x1 /* Widget 0 shut down due to error */
+#define IIO_IMEM_B0ESD (1 << 4) /* BTE 0 shut down due to error */
+#define IIO_IMEM_B1ESD (1 << 8) /* BTE 1 Shut down due to error */
/* PIO Read address Table Entries */
#define IIO_IPCA 0x400300 /* PRB Counter adjust */
#define IIO_NUM_PRTES 8 /* Total number of PRB table entries */
#define IIO_PRTE_0 0x400308 /* PIO Read address table entry 0 */
#define IIO_PRTE(_x) (IIO_PRTE_0 + (8 * (_x)))
-#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
+#define IIO_WIDPRTE(x) IIO_PRTE(((x) - 8)) /* widget ID to its PRTE num */
#define IIO_IPDR 0x400388 /* PIO table entry deallocation */
#define IIO_ICDR 0x400390 /* CRB Entry Deallocation */
#define IIO_IFDR 0x400398 /* IOQ FIFO Depth */
@@ -369,35 +369,35 @@ typedef union io_perf_cnt {
/*
* IIO PIO Deallocation register field masks : (IIO_IPDR)
*/
-#define IIO_IPDR_PND (1 << 4)
+#define IIO_IPDR_PND (1 << 4)
/*
* IIO CRB deallocation register field masks: (IIO_ICDR)
*/
-#define IIO_ICDR_PND (1 << 4)
+#define IIO_ICDR_PND (1 << 4)
/*
* IIO CRB control register Fields: IIO_ICCR
*/
-#define IIO_ICCR_PENDING (0x10000)
-#define IIO_ICCR_CMD_MASK (0xFF)
-#define IIO_ICCR_CMD_SHFT (7)
-#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
-#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
-#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
-#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
+#define IIO_ICCR_PENDING (0x10000)
+#define IIO_ICCR_CMD_MASK (0xFF)
+#define IIO_ICCR_CMD_SHFT (7)
+#define IIO_ICCR_CMD_NOP (0x0) /* No Op */
+#define IIO_ICCR_CMD_WAKE (0x100) /* Reactivate CRB entry and process */
+#define IIO_ICCR_CMD_TIMEOUT (0x200) /* Make CRB timeout & mark invalid */
+#define IIO_ICCR_CMD_EJECT (0x400) /* Contents of entry written to memory
* via a WB
*/
-#define IIO_ICCR_CMD_FLUSH (0x800)
+#define IIO_ICCR_CMD_FLUSH (0x800)
/*
* CRB manipulation macros
* The CRB macros are slightly complicated, since there are up to
- * four registers associated with each CRB entry.
+ * four registers associated with each CRB entry.
*/
#define IIO_NUM_CRBS 15 /* Number of CRBs */
-#define IIO_NUM_NORMAL_CRBS 12 /* Number of regular CRB entries */
-#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
+#define IIO_NUM_NORMAL_CRBS 12 /* Number of regular CRB entries */
+#define IIO_NUM_PC_CRBS 4 /* Number of partial cache CRBs */
#define IIO_ICRB_OFFSET 8
#define IIO_ICRB_0 0x400400
/* XXX - This is now tuneable:
@@ -405,9 +405,9 @@ typedef union io_perf_cnt {
*/
#define IIO_ICRB_A(_x) (IIO_ICRB_0 + (4 * IIO_ICRB_OFFSET * (_x)))
-#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
+#define IIO_ICRB_B(_x) (IIO_ICRB_A(_x) + 1*IIO_ICRB_OFFSET)
#define IIO_ICRB_C(_x) (IIO_ICRB_A(_x) + 2*IIO_ICRB_OFFSET)
-#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
+#define IIO_ICRB_D(_x) (IIO_ICRB_A(_x) + 3*IIO_ICRB_OFFSET)
/* XXX - IBUE register coming for Hub 2 */
@@ -444,16 +444,16 @@ typedef union io_perf_cnt {
typedef union icrba_u {
u64 reg_value;
struct {
- u64 resvd: 6,
+ u64 resvd: 6,
stall_bte0: 1, /* Stall BTE 0 */
stall_bte1: 1, /* Stall BTE 1 */
error: 1, /* CRB has an error */
- ecode: 3, /* Error Code */
+ ecode: 3, /* Error Code */
lnetuce: 1, /* SN0net Uncorrectable error */
- mark: 1, /* CRB Has been marked */
+ mark: 1, /* CRB Has been marked */
xerr: 1, /* Error bit set in xtalk header */
sidn: 4, /* SIDN field from xtalk */
- tnum: 5, /* TNUM field in xtalk */
+ tnum: 5, /* TNUM field in xtalk */
addr: 38, /* Address of request */
valid: 1, /* Valid status */
iow: 1; /* IO Write operation */
@@ -467,15 +467,15 @@ typedef union h1_icrba_u {
u64 reg_value;
struct {
- u64 resvd: 6,
- unused: 1, /* Unused but RW!! */
+ u64 resvd: 6,
+ unused: 1, /* Unused but RW!! */
error: 1, /* CRB has an error */
- ecode: 4, /* Error Code */
+ ecode: 4, /* Error Code */
lnetuce: 1, /* SN0net Uncorrectable error */
- mark: 1, /* CRB Has been marked */
+ mark: 1, /* CRB Has been marked */
xerr: 1, /* Error bit set in xtalk header */
sidn: 4, /* SIDN field from xtalk */
- tnum: 5, /* TNUM field in xtalk */
+ tnum: 5, /* TNUM field in xtalk */
addr: 38, /* Address of request */
valid: 1, /* Valid status */
iow: 1; /* IO Write operation */
@@ -488,21 +488,21 @@ typedef union h1_icrba_u {
#endif /* !__ASSEMBLY__ */
-#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
+#define IIO_ICRB_ADDR_SHFT 2 /* Shift to get proper address */
/*
* values for "ecode" field
*/
-#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
-#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
-#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
+#define IIO_ICRB_ECODE_DERR 0 /* Directory error due to IIO access */
+#define IIO_ICRB_ECODE_PERR 1 /* Poison error on IO access */
+#define IIO_ICRB_ECODE_WERR 2 /* Write error by IIO access
* e.g. WINV to a Read only line.
*/
-#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
-#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
-#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
-#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
-#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
+#define IIO_ICRB_ECODE_AERR 3 /* Access error caused by IIO access */
+#define IIO_ICRB_ECODE_PWERR 4 /* Error on partial write */
+#define IIO_ICRB_ECODE_PRERR 5 /* Error on partial read */
+#define IIO_ICRB_ECODE_TOUT 6 /* CRB timeout before deallocating */
+#define IIO_ICRB_ECODE_XTERR 7 /* Incoming xtalk pkt had error bit */
@@ -513,10 +513,10 @@ typedef union h1_icrba_u {
typedef union icrbb_u {
u64 reg_value;
struct {
- u64 rsvd1: 5,
- btenum: 1, /* BTE to which entry belongs to */
- cohtrans: 1, /* Coherent transaction */
- xtsize: 2, /* Xtalk operation size
+ u64 rsvd1: 5,
+ btenum: 1, /* BTE to which entry belongs to */
+ cohtrans: 1, /* Coherent transaction */
+ xtsize: 2, /* Xtalk operation size
* 0: Double Word
* 1: 32 Bytes.
* 2: 128 Bytes,
@@ -526,11 +526,11 @@ typedef union icrbb_u {
srcinit: 2, /* Source Initiator:
* See below for field values.
*/
- useold: 1, /* Use OLD command for processing */
+ useold: 1, /* Use OLD command for processing */
imsgtype: 2, /* Incoming message type
* see below for field values
*/
- imsg: 8, /* Incoming message */
+ imsg: 8, /* Incoming message */
initator: 3, /* Initiator of original request
* See below for field values.
*/
@@ -538,12 +538,12 @@ typedef union icrbb_u {
* See below for field values.
*/
rsvd2: 7,
- ackcnt: 11, /* Invalidate ack count */
+ ackcnt: 11, /* Invalidate ack count */
resp: 1, /* data response given to processor */
- ack: 1, /* indicates data ack received */
+ ack: 1, /* indicates data ack received */
hold: 1, /* entry is gathering inval acks */
wb_pend:1, /* waiting for writeback to complete */
- intvn: 1, /* Intervention */
+ intvn: 1, /* Intervention */
stall_ib: 1, /* Stall Ibuf (from crosstalk) */
stall_intr: 1; /* Stall internal interrupts */
} icrbb_field_s;
@@ -556,9 +556,9 @@ typedef union h1_icrbb_u {
u64 reg_value;
struct {
u64 rsvd1: 5,
- btenum: 1, /* BTE to which entry belongs to */
- cohtrans: 1, /* Coherent transaction */
- xtsize: 2, /* Xtalk operation size
+ btenum: 1, /* BTE to which entry belongs to */
+ cohtrans: 1, /* Coherent transaction */
+ xtsize: 2, /* Xtalk operation size
* 0: Double Word
* 1: 32 Bytes.
* 2: 128 Bytes,
@@ -568,99 +568,99 @@ typedef union h1_icrbb_u {
srcinit: 2, /* Source Initiator:
* See below for field values.
*/
- useold: 1, /* Use OLD command for processing */
+ useold: 1, /* Use OLD command for processing */
imsgtype: 2, /* Incoming message type
* see below for field values
*/
- imsg: 8, /* Incoming message */
+ imsg: 8, /* Incoming message */
initator: 3, /* Initiator of original request
* See below for field values.
*/
- rsvd2: 1,
+ rsvd2: 1,
pcache: 1, /* entry belongs to partial cache */
reqtype: 5, /* Identifies type of request
* See below for field values.
*/
- stl_ib: 1, /* stall Ibus coming from xtalk */
+ stl_ib: 1, /* stall Ibus coming from xtalk */
stl_intr: 1, /* Stall internal interrupts */
- stl_bte0: 1, /* Stall BTE 0 */
+ stl_bte0: 1, /* Stall BTE 0 */
stl_bte1: 1, /* Stall BTE 1 */
- intrvn: 1, /* Req was target of intervention */
- ackcnt: 11, /* Invalidate ack count */
+ intrvn: 1, /* Req was target of intervention */
+ ackcnt: 11, /* Invalidate ack count */
resp: 1, /* data response given to processor */
- ack: 1, /* indicates data ack received */
+ ack: 1, /* indicates data ack received */
hold: 1, /* entry is gathering inval acks */
wb_pend:1, /* waiting for writeback to complete */
- sleep: 1, /* xtalk req sleeping till IO-sync */
+ sleep: 1, /* xtalk req sleeping till IO-sync */
pnd_reply: 1, /* replies not issed due to IOQ full */
pnd_req: 1; /* reqs not issued due to IOQ full */
} h1_icrbb_field_s;
} h1_icrbb_t;
-#define b_imsgtype icrbb_field_s.imsgtype
-#define b_btenum icrbb_field_s.btenum
-#define b_cohtrans icrbb_field_s.cohtrans
-#define b_xtsize icrbb_field_s.xtsize
-#define b_srcnode icrbb_field_s.srcnode
-#define b_srcinit icrbb_field_s.srcinit
-#define b_imsgtype icrbb_field_s.imsgtype
-#define b_imsg icrbb_field_s.imsg
-#define b_initiator icrbb_field_s.initiator
+#define b_imsgtype icrbb_field_s.imsgtype
+#define b_btenum icrbb_field_s.btenum
+#define b_cohtrans icrbb_field_s.cohtrans
+#define b_xtsize icrbb_field_s.xtsize
+#define b_srcnode icrbb_field_s.srcnode
+#define b_srcinit icrbb_field_s.srcinit
+#define b_imsgtype icrbb_field_s.imsgtype
+#define b_imsg icrbb_field_s.imsg
+#define b_initiator icrbb_field_s.initiator
#endif /* !__ASSEMBLY__ */
/*
* values for field xtsize
*/
-#define IIO_ICRB_XTSIZE_DW 0 /* Xtalk operation size is 8 bytes */
-#define IIO_ICRB_XTSIZE_32 1 /* Xtalk operation size is 32 bytes */
-#define IIO_ICRB_XTSIZE_128 2 /* Xtalk operation size is 128 bytes */
+#define IIO_ICRB_XTSIZE_DW 0 /* Xtalk operation size is 8 bytes */
+#define IIO_ICRB_XTSIZE_32 1 /* Xtalk operation size is 32 bytes */
+#define IIO_ICRB_XTSIZE_128 2 /* Xtalk operation size is 128 bytes */
/*
* values for field srcinit
*/
-#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */
-#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */
-#define IIO_ICRB_GB_REQ 2 /* Source is Guaranteed BW request */
-#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */
+#define IIO_ICRB_PROC0 0 /* Source of request is Proc 0 */
+#define IIO_ICRB_PROC1 1 /* Source of request is Proc 1 */
+#define IIO_ICRB_GB_REQ 2 /* Source is Guaranteed BW request */
+#define IIO_ICRB_IO_REQ 3 /* Source is Normal IO request */
/*
* Values for field imsgtype
*/
-#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */
-#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
-#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
-#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
+#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
+#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
+#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
/*
* values for field initiator.
*/
-#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
-#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
-#define IIO_ICRB_INIT_SN0NET 0x2 /* Message originated in SN0net */
-#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
-#define IIO_ICRB_INIT_BTE1 0x5 /* MEssage originated in BTE 1 */
+#define IIO_ICRB_INIT_XTALK 0 /* Message originated in xtalk */
+#define IIO_ICRB_INIT_BTE0 0x1 /* Message originated in BTE 0 */
+#define IIO_ICRB_INIT_SN0NET 0x2 /* Message originated in SN0net */
+#define IIO_ICRB_INIT_CRB 0x3 /* Message originated in CRB ? */
+#define IIO_ICRB_INIT_BTE1 0x5 /* Message originated in BTE 1 */
/*
* Values for field reqtype.
*/
/* XXX - Need to fix this for Hub 2 */
-#define IIO_ICRB_REQ_DWRD 0 /* Request type double word */
-#define IIO_ICRB_REQ_QCLRD 1 /* Request is Qrtr Caceh line Rd */
-#define IIO_ICRB_REQ_BLKRD 2 /* Request is block read */
-#define IIO_ICRB_REQ_RSHU 6 /* Request is BTE block read */
-#define IIO_ICRB_REQ_REXU 7 /* request is BTE Excl Read */
-#define IIO_ICRB_REQ_RDEX 8 /* Request is Read Exclusive */
-#define IIO_ICRB_REQ_WINC 9 /* Request is Write Invalidate */
-#define IIO_ICRB_REQ_BWINV 10 /* Request is BTE Winv */
-#define IIO_ICRB_REQ_PIORD 11 /* Request is PIO read */
-#define IIO_ICRB_REQ_PIOWR 12 /* Request is PIO Write */
-#define IIO_ICRB_REQ_PRDM 13 /* Request is Fetch&Op */
-#define IIO_ICRB_REQ_PWRM 14 /* Request is Store &Op */
-#define IIO_ICRB_REQ_PTPWR 15 /* Request is Peer to peer */
-#define IIO_ICRB_REQ_WB 16 /* Request is Write back */
-#define IIO_ICRB_REQ_DEX 17 /* Retained DEX Cache line */
+#define IIO_ICRB_REQ_DWRD 0 /* Request type double word */
+#define IIO_ICRB_REQ_QCLRD 1 /* Request is Qrtr Cache line Rd */
+#define IIO_ICRB_REQ_BLKRD 2 /* Request is block read */
+#define IIO_ICRB_REQ_RSHU 6 /* Request is BTE block read */
+#define IIO_ICRB_REQ_REXU 7 /* request is BTE Excl Read */
+#define IIO_ICRB_REQ_RDEX 8 /* Request is Read Exclusive */
+#define IIO_ICRB_REQ_WINC 9 /* Request is Write Invalidate */
+#define IIO_ICRB_REQ_BWINV 10 /* Request is BTE Winv */
+#define IIO_ICRB_REQ_PIORD 11 /* Request is PIO read */
+#define IIO_ICRB_REQ_PIOWR 12 /* Request is PIO Write */
+#define IIO_ICRB_REQ_PRDM 13 /* Request is Fetch&Op */
+#define IIO_ICRB_REQ_PWRM 14 /* Request is Store &Op */
+#define IIO_ICRB_REQ_PTPWR 15 /* Request is Peer to peer */
+#define IIO_ICRB_REQ_WB 16 /* Request is Write back */
+#define IIO_ICRB_REQ_DEX 17 /* Retained DEX Cache line */
/*
* Fields in CRB Register C
@@ -674,8 +674,8 @@ typedef union icrbc_s {
u64 rsvd: 6,
sleep: 1,
pricnt: 4, /* Priority count sent with Read req */
- pripsc: 4, /* Priority Pre scalar */
- bteop: 1, /* BTE Operation */
+ pripsc: 4, /* Priority Pre scalar */
+ bteop: 1, /* BTE Operation */
push_be: 34, /* Push address Byte enable
* Holds push addr, if CRB is for BTE
* If CRB belongs to Partial cache,
@@ -684,20 +684,20 @@ typedef union icrbc_s {
*/
suppl: 11, /* Supplemental field */
barrop: 1, /* Barrier Op bit set in xtalk req */
- doresp: 1, /* Xtalk req needs a response */
- gbr: 1; /* GBR bit set in xtalk packet */
+ doresp: 1, /* Xtalk req needs a response */
+ gbr: 1; /* GBR bit set in xtalk packet */
} icrbc_field_s;
} icrbc_t;
-#define c_pricnt icrbc_field_s.pricnt
-#define c_pripsc icrbc_field_s.pripsc
-#define c_bteop icrbc_field_s.bteop
-#define c_bteaddr icrbc_field_s.push_be /* push_be field has 2 names */
-#define c_benable icrbc_field_s.push_be /* push_be field has 2 names */
-#define c_suppl icrbc_field_s.suppl
-#define c_barrop icrbc_field_s.barrop
-#define c_doresp icrbc_field_s.doresp
-#define c_gbr icrbc_field_s.gbr
+#define c_pricnt icrbc_field_s.pricnt
+#define c_pripsc icrbc_field_s.pripsc
+#define c_bteop icrbc_field_s.bteop
+#define c_bteaddr icrbc_field_s.push_be /* push_be field has 2 names */
+#define c_benable icrbc_field_s.push_be /* push_be field has 2 names */
+#define c_suppl icrbc_field_s.suppl
+#define c_barrop icrbc_field_s.barrop
+#define c_doresp icrbc_field_s.doresp
+#define c_gbr icrbc_field_s.gbr
#endif /* !__ASSEMBLY__ */
/*
@@ -708,31 +708,31 @@ typedef union icrbc_s {
typedef union icrbd_s {
u64 reg_value;
struct {
- u64 rsvd: 38,
+ u64 rsvd: 38,
toutvld: 1, /* Timeout in progress for this CRB */
- ctxtvld: 1, /* Context field below is valid */
+ ctxtvld: 1, /* Context field below is valid */
rsvd2: 1,
- context: 15, /* Bit vector:
+ context: 15, /* Bit vector:
* Has a bit set for each CRB entry
* which needs to be deallocated
* before this CRB entry is processed.
* Set only for barrier operations.
*/
- timeout: 8; /* Timeout Upper 8 bits */
+ timeout: 8; /* Timeout Upper 8 bits */
} icrbd_field_s;
} icrbd_t;
-#define icrbd_toutvld icrbd_field_s.toutvld
-#define icrbd_ctxtvld icrbd_field_s.ctxtvld
-#define icrbd_context icrbd_field_s.context
+#define icrbd_toutvld icrbd_field_s.toutvld
+#define icrbd_ctxtvld icrbd_field_s.ctxtvld
+#define icrbd_context icrbd_field_s.context
typedef union hubii_ifdr_u {
u64 hi_ifdr_value;
struct {
u64 ifdr_rsvd: 49,
- ifdr_maxrp: 7,
- ifdr_rsvd1: 1,
+ ifdr_maxrp: 7,
+ ifdr_rsvd1: 1,
ifdr_maxrq: 7;
} hi_ifdr_fields;
} hubii_ifdr_t;
@@ -789,26 +789,26 @@ typedef union hubii_ifdr_u {
typedef union iprte_a {
u64 entry;
struct {
- u64 rsvd1 : 7, /* Reserved field */
- valid : 1, /* Maps to a timeout entry */
- rsvd2 : 1,
- srcnode : 9, /* Node which did this PIO */
- initiator : 2, /* If T5A or T5B or IO */
- rsvd3 : 3,
- addr : 38, /* Physical address of PIO */
- rsvd4 : 3;
+ u64 rsvd1 : 7, /* Reserved field */
+ valid : 1, /* Maps to a timeout entry */
+ rsvd2 : 1,
+ srcnode : 9, /* Node which did this PIO */
+ initiator : 2, /* If T5A or T5B or IO */
+ rsvd3 : 3,
+ addr : 38, /* Physical address of PIO */
+ rsvd4 : 3;
} iprte_fields;
} iprte_a_t;
-#define iprte_valid iprte_fields.valid
-#define iprte_timeout iprte_fields.timeout
-#define iprte_srcnode iprte_fields.srcnode
-#define iprte_init iprte_fields.initiator
-#define iprte_addr iprte_fields.addr
+#define iprte_valid iprte_fields.valid
+#define iprte_timeout iprte_fields.timeout
+#define iprte_srcnode iprte_fields.srcnode
+#define iprte_init iprte_fields.initiator
+#define iprte_addr iprte_fields.addr
#endif /* !__ASSEMBLY__ */
-#define IPRTE_ADDRSHFT 3
+#define IPRTE_ADDRSHFT 3
/*
* Hub IIO PRB Register format.
@@ -823,14 +823,14 @@ typedef union iprte_a {
typedef union iprb_u {
u64 reg_value;
struct {
- u64 rsvd1: 15,
+ u64 rsvd1: 15,
error: 1, /* Widget rcvd wr resp pkt w/ error */
- ovflow: 5, /* Overflow count. perf measurement */
+ ovflow: 5, /* Overflow count. perf measurement */
fire_and_forget: 1, /* Launch Write without response */
mode: 2, /* Widget operation Mode */
rsvd2: 2,
bnakctr: 14,
- rsvd3: 2,
+ rsvd3: 2,
anakctr: 14,
xtalkctr: 8;
} iprb_fields_s;
@@ -838,13 +838,13 @@ typedef union iprb_u {
#define iprb_regval reg_value
-#define iprb_error iprb_fields_s.error
-#define iprb_ovflow iprb_fields_s.ovflow
-#define iprb_ff iprb_fields_s.fire_and_forget
-#define iprb_mode iprb_fields_s.mode
-#define iprb_bnakctr iprb_fields_s.bnakctr
-#define iprb_anakctr iprb_fields_s.anakctr
-#define iprb_xtalkctr iprb_fields_s.xtalkctr
+#define iprb_error iprb_fields_s.error
+#define iprb_ovflow iprb_fields_s.ovflow
+#define iprb_ff iprb_fields_s.fire_and_forget
+#define iprb_mode iprb_fields_s.mode
+#define iprb_bnakctr iprb_fields_s.bnakctr
+#define iprb_anakctr iprb_fields_s.anakctr
+#define iprb_xtalkctr iprb_fields_s.xtalkctr
#endif /* !__ASSEMBLY__ */
@@ -853,10 +853,10 @@ typedef union iprb_u {
* For details of the meanings of NAK and Accept, refer the PIO flow
* document
*/
-#define IPRB_MODE_NORMAL (0)
-#define IPRB_MODE_COLLECT_A (1) /* PRB in collect A mode */
-#define IPRB_MODE_SERVICE_A (2) /* NAK B and Accept A */
-#define IPRB_MODE_SERVICE_B (3) /* NAK A and Accept B */
+#define IPRB_MODE_NORMAL (0)
+#define IPRB_MODE_COLLECT_A (1) /* PRB in collect A mode */
+#define IPRB_MODE_SERVICE_A (2) /* NAK B and Accept A */
+#define IPRB_MODE_SERVICE_B (3) /* NAK A and Accept B */
/*
* IO CRB entry C_A to E_A : Partial (cache) CRBS
@@ -865,31 +865,31 @@ typedef union iprb_u {
typedef union icrbp_a {
u64 ip_reg; /* the entire register value */
struct {
- u64 error: 1, /* 63, error occurred */
- ln_uce: 1, /* 62: uncorrectable memory */
- ln_ae: 1, /* 61: protection violation */
- ln_werr:1, /* 60: write access error */
- ln_aerr:1, /* 59: sn0net: Address error */
- ln_perr:1, /* 58: sn0net: poison error */
- timeout:1, /* 57: CRB timed out */
- l_bdpkt:1, /* 56: truncated pkt on sn0net */
- c_bdpkt:1, /* 55: truncated pkt on xtalk */
- c_err: 1, /* 54: incoming xtalk req, err set*/
+ u64 error: 1, /* 63, error occurred */
+ ln_uce: 1, /* 62: uncorrectable memory */
+ ln_ae: 1, /* 61: protection violation */
+ ln_werr:1, /* 60: write access error */
+ ln_aerr:1, /* 59: sn0net: Address error */
+ ln_perr:1, /* 58: sn0net: poison error */
+ timeout:1, /* 57: CRB timed out */
+ l_bdpkt:1, /* 56: truncated pkt on sn0net */
+ c_bdpkt:1, /* 55: truncated pkt on xtalk */
+ c_err: 1, /* 54: incoming xtalk req, err set*/
rsvd1: 12, /* 53-42: reserved */
- valid: 1, /* 41: Valid status */
+ valid: 1, /* 41: Valid status */
sidn: 4, /* 40-37: SIDN field of xtalk rqst */
tnum: 5, /* 36-32: TNUM of xtalk request */
- bo: 1, /* 31: barrier op set in xtalk rqst*/
- resprqd:1, /* 30: xtalk rqst requires response*/
- gbr: 1, /* 29: gbr bit set in xtalk rqst */
+ bo: 1, /* 31: barrier op set in xtalk rqst*/
+ resprqd:1, /* 30: xtalk rqst requires response*/
+ gbr: 1, /* 29: gbr bit set in xtalk rqst */
size: 2, /* 28-27: size of xtalk request */
excl: 4, /* 26-23: exclusive bit(s) */
stall: 3, /* 22-20: stall (xtalk, bte 0/1) */
- intvn: 1, /* 19: rqst target of intervention*/
- resp: 1, /* 18: Data response given to t5 */
- ack: 1, /* 17: Data ack received. */
- hold: 1, /* 16: crb gathering invalidate acks*/
- wb: 1, /* 15: writeback pending. */
+ intvn: 1, /* 19: rqst target of intervention*/
+ resp: 1, /* 18: Data response given to t5 */
+ ack: 1, /* 17: Data ack received. */
+ hold: 1, /* 16: crb gathering invalidate acks*/
+ wb: 1, /* 15: writeback pending. */
ack_cnt:11, /* 14-04: counter of invalidate acks*/
tscaler:4; /* 03-00: Timeout prescaler */
} ip_fmt;
@@ -908,13 +908,13 @@ typedef union hubii_idsr {
u64 iin_reg;
struct {
u64 rsvd1 : 35,
- isent : 1,
- rsvd2 : 3,
- ienable: 1,
- rsvd : 7,
- node : 9,
- rsvd4 : 1,
- level : 7;
+ isent : 1,
+ rsvd2 : 3,
+ ienable: 1,
+ rsvd : 7,
+ node : 9,
+ rsvd4 : 1,
+ level : 7;
} iin_fmt;
} hubii_idsr_t;
#endif /* !__ASSEMBLY__ */
@@ -966,7 +966,7 @@ typedef union hubii_idsr {
* Value of 3 is required by Xbow 1.1
* We may be able to increase this to 4 with Xbow 1.2.
*/
-#define HUBII_XBOW_CREDIT 3
+#define HUBII_XBOW_CREDIT 3
#define HUBII_XBOW_REV2_CREDIT 4
#endif /* _ASM_SGI_SN_SN0_HUBIO_H */
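
Usage note: the widget-status bits defined above are normally tested and extracted with the mask/shift helpers (e.g. IIO_WSTAT_TXRETRY_CNT()) rather than through the bitfield unions. A minimal standalone C sketch, with the macro values copied from hubio.h and a made-up register value standing in for a real IIO_WSTAT read:

#include <stdio.h>
#include <stdint.h>

/* Values copied from hubio.h above, for illustration only. */
#define IIO_WSTAT_ECRAZY	 (1ULL << 32)	/* Hub gone crazy */
#define IIO_WSTAT_TXRETRY	 (1ULL << 9)	/* Hub Tx Retry timeout */
#define IIO_WSTAT_TXRETRY_MASK	 (0x7F)
#define IIO_WSTAT_TXRETRY_SHFT	 (16)
#define IIO_WSTAT_TXRETRY_CNT(w) (((w) >> IIO_WSTAT_TXRETRY_SHFT) & \
				  IIO_WSTAT_TXRETRY_MASK)

int main(void)
{
	/* Hypothetical raw IIO_WSTAT value; a driver would read this
	   from the hub's widget status register instead. */
	uint64_t wstat = IIO_WSTAT_TXRETRY | (0x2aULL << IIO_WSTAT_TXRETRY_SHFT);

	if (wstat & IIO_WSTAT_ECRAZY)
		printf("hub gone crazy\n");
	if (wstat & IIO_WSTAT_TXRETRY)
		printf("Tx retry timeout, retry count = %llu\n",
		       (unsigned long long)IIO_WSTAT_TXRETRY_CNT(wstat));
	return 0;
}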
diff --git a/arch/mips/include/asm/sn/sn0/hubmd.h b/arch/mips/include/asm/sn/sn0/hubmd.h
index 14c225d8066..305d002be18 100644
--- a/arch/mips/include/asm/sn/sn0/hubmd.h
+++ b/arch/mips/include/asm/sn/sn0/hubmd.h
@@ -8,16 +8,16 @@
* Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
* Copyright (C) 1999 by Ralf Baechle
*/
-#ifndef _ASM_SN_SN0_HUBMD_H
-#define _ASM_SN_SN0_HUBMD_H
+#ifndef _ASM_SN_SN0_HUBMD_H
+#define _ASM_SN_SN0_HUBMD_H
/*
* Hub Memory/Directory interface registers
*/
-#define CACHE_SLINE_SIZE 128 /* Secondary cache line size on SN0 */
+#define CACHE_SLINE_SIZE 128 /* Secondary cache line size on SN0 */
-#define MAX_REGIONS 64
+#define MAX_REGIONS 64
/* Hardware page size and shift */
@@ -34,62 +34,62 @@
#define MD_IO_PROT_OVRRD 0x200008 /* Clear my bit in MD_IO_PROTECT */
#define MD_HSPEC_PROTECT 0x200010 /* BDDIR, LBOOT, RBOOT protection */
#define MD_MEMORY_CONFIG 0x200018 /* Memory/Directory DIMM control */
-#define MD_REFRESH_CONTROL 0x200020 /* Memory/Directory refresh ctrl */
-#define MD_FANDOP_CAC_STAT 0x200028 /* Fetch-and-op cache status */
-#define MD_MIG_DIFF_THRESH 0x200030 /* Page migr. count diff thresh. */
-#define MD_MIG_VALUE_THRESH 0x200038 /* Page migr. count abs. thresh. */
-#define MD_MIG_CANDIDATE 0x200040 /* Latest page migration candidate */
-#define MD_MIG_CANDIDATE_CLR 0x200048 /* Clear page migration candidate */
-#define MD_DIR_ERROR 0x200050 /* Directory DIMM error */
-#define MD_DIR_ERROR_CLR 0x200058 /* Directory DIMM error clear */
-#define MD_PROTOCOL_ERROR 0x200060 /* Directory protocol error */
+#define MD_REFRESH_CONTROL 0x200020 /* Memory/Directory refresh ctrl */
+#define MD_FANDOP_CAC_STAT 0x200028 /* Fetch-and-op cache status */
+#define MD_MIG_DIFF_THRESH 0x200030 /* Page migr. count diff thresh. */
+#define MD_MIG_VALUE_THRESH 0x200038 /* Page migr. count abs. thresh. */
+#define MD_MIG_CANDIDATE 0x200040 /* Latest page migration candidate */
+#define MD_MIG_CANDIDATE_CLR 0x200048 /* Clear page migration candidate */
+#define MD_DIR_ERROR 0x200050 /* Directory DIMM error */
+#define MD_DIR_ERROR_CLR 0x200058 /* Directory DIMM error clear */
+#define MD_PROTOCOL_ERROR 0x200060 /* Directory protocol error */
#define MD_PROTOCOL_ERROR_CLR 0x200068 /* Directory protocol error clear */
-#define MD_MEM_ERROR 0x200070 /* Memory DIMM error */
-#define MD_MEM_ERROR_CLR 0x200078 /* Memory DIMM error clear */
-#define MD_MISC_ERROR 0x200080 /* Miscellaneous MD error */
+#define MD_MEM_ERROR 0x200070 /* Memory DIMM error */
+#define MD_MEM_ERROR_CLR 0x200078 /* Memory DIMM error clear */
+#define MD_MISC_ERROR 0x200080 /* Miscellaneous MD error */
#define MD_MISC_ERROR_CLR 0x200088 /* Miscellaneous MD error clear */
#define MD_MEM_DIMM_INIT 0x200090 /* Memory DIMM mode initization. */
-#define MD_DIR_DIMM_INIT 0x200098 /* Directory DIMM mode init. */
-#define MD_MOQ_SIZE 0x2000a0 /* MD outgoing queue size */
+#define MD_DIR_DIMM_INIT 0x200098 /* Directory DIMM mode init. */
+#define MD_MOQ_SIZE 0x2000a0 /* MD outgoing queue size */
#define MD_MLAN_CTL 0x2000a8 /* NIC (Microlan) control register */
-#define MD_PERF_SEL 0x210000 /* Select perf monitor events */
-#define MD_PERF_CNT0 0x210010 /* Performance counter 0 */
-#define MD_PERF_CNT1 0x210018 /* Performance counter 1 */
-#define MD_PERF_CNT2 0x210020 /* Performance counter 2 */
-#define MD_PERF_CNT3 0x210028 /* Performance counter 3 */
-#define MD_PERF_CNT4 0x210030 /* Performance counter 4 */
-#define MD_PERF_CNT5 0x210038 /* Performance counter 5 */
-
-#define MD_UREG0_0 0x220000 /* uController/UART 0 register */
-#define MD_UREG0_1 0x220008 /* uController/UART 0 register */
-#define MD_UREG0_2 0x220010 /* uController/UART 0 register */
-#define MD_UREG0_3 0x220018 /* uController/UART 0 register */
-#define MD_UREG0_4 0x220020 /* uController/UART 0 register */
-#define MD_UREG0_5 0x220028 /* uController/UART 0 register */
-#define MD_UREG0_6 0x220030 /* uController/UART 0 register */
-#define MD_UREG0_7 0x220038 /* uController/UART 0 register */
+#define MD_PERF_SEL 0x210000 /* Select perf monitor events */
+#define MD_PERF_CNT0 0x210010 /* Performance counter 0 */
+#define MD_PERF_CNT1 0x210018 /* Performance counter 1 */
+#define MD_PERF_CNT2 0x210020 /* Performance counter 2 */
+#define MD_PERF_CNT3 0x210028 /* Performance counter 3 */
+#define MD_PERF_CNT4 0x210030 /* Performance counter 4 */
+#define MD_PERF_CNT5 0x210038 /* Performance counter 5 */
+
+#define MD_UREG0_0 0x220000 /* uController/UART 0 register */
+#define MD_UREG0_1 0x220008 /* uController/UART 0 register */
+#define MD_UREG0_2 0x220010 /* uController/UART 0 register */
+#define MD_UREG0_3 0x220018 /* uController/UART 0 register */
+#define MD_UREG0_4 0x220020 /* uController/UART 0 register */
+#define MD_UREG0_5 0x220028 /* uController/UART 0 register */
+#define MD_UREG0_6 0x220030 /* uController/UART 0 register */
+#define MD_UREG0_7 0x220038 /* uController/UART 0 register */
#define MD_SLOTID_USTAT 0x220048 /* Hub slot ID & UART/uCtlr status */
-#define MD_LED0 0x220050 /* Eight-bit LED for CPU A */
-#define MD_LED1 0x220058 /* Eight-bit LED for CPU B */
-
-#define MD_UREG1_0 0x220080 /* uController/UART 1 register */
-#define MD_UREG1_1 0x220088 /* uController/UART 1 register */
-#define MD_UREG1_2 0x220090 /* uController/UART 1 register */
-#define MD_UREG1_3 0x220098 /* uController/UART 1 register */
-#define MD_UREG1_4 0x2200a0 /* uController/UART 1 register */
-#define MD_UREG1_5 0x2200a8 /* uController/UART 1 register */
-#define MD_UREG1_6 0x2200b0 /* uController/UART 1 register */
-#define MD_UREG1_7 0x2200b8 /* uController/UART 1 register */
-#define MD_UREG1_8 0x2200c0 /* uController/UART 1 register */
-#define MD_UREG1_9 0x2200c8 /* uController/UART 1 register */
-#define MD_UREG1_10 0x2200d0 /* uController/UART 1 register */
-#define MD_UREG1_11 0x2200d8 /* uController/UART 1 register */
-#define MD_UREG1_12 0x2200e0 /* uController/UART 1 register */
-#define MD_UREG1_13 0x2200e8 /* uController/UART 1 register */
-#define MD_UREG1_14 0x2200f0 /* uController/UART 1 register */
-#define MD_UREG1_15 0x2200f8 /* uController/UART 1 register */
+#define MD_LED0 0x220050 /* Eight-bit LED for CPU A */
+#define MD_LED1 0x220058 /* Eight-bit LED for CPU B */
+
+#define MD_UREG1_0 0x220080 /* uController/UART 1 register */
+#define MD_UREG1_1 0x220088 /* uController/UART 1 register */
+#define MD_UREG1_2 0x220090 /* uController/UART 1 register */
+#define MD_UREG1_3 0x220098 /* uController/UART 1 register */
+#define MD_UREG1_4 0x2200a0 /* uController/UART 1 register */
+#define MD_UREG1_5 0x2200a8 /* uController/UART 1 register */
+#define MD_UREG1_6 0x2200b0 /* uController/UART 1 register */
+#define MD_UREG1_7 0x2200b8 /* uController/UART 1 register */
+#define MD_UREG1_8 0x2200c0 /* uController/UART 1 register */
+#define MD_UREG1_9 0x2200c8 /* uController/UART 1 register */
+#define MD_UREG1_10 0x2200d0 /* uController/UART 1 register */
+#define MD_UREG1_11 0x2200d8 /* uController/UART 1 register */
+#define MD_UREG1_12 0x2200e0 /* uController/UART 1 register */
+#define MD_UREG1_13 0x2200e8 /* uController/UART 1 register */
+#define MD_UREG1_14 0x2200f0 /* uController/UART 1 register */
+#define MD_UREG1_15 0x2200f8 /* uController/UART 1 register */
#ifdef CONFIG_SGI_SN_N_MODE
#define MD_MEM_BANKS 4 /* 4 banks of memory max in N mode */
@@ -106,14 +106,14 @@
* Bits not used by the MD are used by software.
*/
-#define MD_SIZE_EMPTY 0 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_EMPTY 0 /* Valid in MEMORY_CONFIG */
#define MD_SIZE_8MB 1
#define MD_SIZE_16MB 2
#define MD_SIZE_32MB 3 /* Broken in Hub 1 */
-#define MD_SIZE_64MB 4 /* Valid in MEMORY_CONFIG */
-#define MD_SIZE_128MB 5 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_64MB 4 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_128MB 5 /* Valid in MEMORY_CONFIG */
#define MD_SIZE_256MB 6
-#define MD_SIZE_512MB 7 /* Valid in MEMORY_CONFIG */
+#define MD_SIZE_512MB 7 /* Valid in MEMORY_CONFIG */
#define MD_SIZE_1GB 8
#define MD_SIZE_2GB 9
#define MD_SIZE_4GB 10
@@ -207,16 +207,16 @@
/* MD_SLOTID_USTAT bit definitions */
-#define MSU_CORECLK_TST_SHFT 7 /* You don't wanna know */
+#define MSU_CORECLK_TST_SHFT 7 /* You don't wanna know */
#define MSU_CORECLK_TST_MASK (UINT64_CAST 1 << 7)
#define MSU_CORECLK_TST (UINT64_CAST 1 << 7)
-#define MSU_CORECLK_SHFT 6 /* You don't wanna know */
+#define MSU_CORECLK_SHFT 6 /* You don't wanna know */
#define MSU_CORECLK_MASK (UINT64_CAST 1 << 6)
#define MSU_CORECLK (UINT64_CAST 1 << 6)
-#define MSU_NETSYNC_SHFT 5 /* You don't wanna know */
+#define MSU_NETSYNC_SHFT 5 /* You don't wanna know */
#define MSU_NETSYNC_MASK (UINT64_CAST 1 << 5)
#define MSU_NETSYNC (UINT64_CAST 1 << 5)
-#define MSU_FPROMRDY_SHFT 4 /* Flash PROM ready bit */
+#define MSU_FPROMRDY_SHFT 4 /* Flash PROM ready bit */
#define MSU_FPROMRDY_MASK (UINT64_CAST 1 << 4)
#define MSU_FPROMRDY (UINT64_CAST 1 << 4)
#define MSU_I2CINTR_SHFT 3 /* I2C interrupt bit */
@@ -228,8 +228,8 @@
#define MSU_SN00_SLOTID_SHFT 7
#define MSU_SN00_SLOTID_MASK (UINT64_CAST 0x80)
-#define MSU_PIMM_PSC_SHFT 4
-#define MSU_PIMM_PSC_MASK (0xf << MSU_PIMM_PSC_SHFT)
+#define MSU_PIMM_PSC_SHFT 4
+#define MSU_PIMM_PSC_MASK (0xf << MSU_PIMM_PSC_SHFT)
/* MD_MIG_DIFF_THRESH bit definitions */
@@ -260,7 +260,7 @@
/* Other MD definitions */
-#define MD_BANK_SHFT 29 /* log2(512 MB) */
+#define MD_BANK_SHFT 29 /* log2(512 MB) */
#define MD_BANK_MASK (UINT64_CAST 7 << 29)
#define MD_BANK_SIZE (UINT64_CAST 1 << MD_BANK_SHFT) /* 512 MB */
#define MD_BANK_OFFSET(_b) (UINT64_CAST (_b) << MD_BANK_SHFT)
@@ -300,32 +300,32 @@
* Format C: STATE != shared (FINE must be 0)
*/
-#define MD_PDIR_MASK 0xffffffffffff /* Whole entry */
+#define MD_PDIR_MASK 0xffffffffffff /* Whole entry */
#define MD_PDIR_ECC_SHFT 0 /* ABC low or high */
#define MD_PDIR_ECC_MASK 0x7f
-#define MD_PDIR_PRIO_SHFT 8 /* ABC low */
+#define MD_PDIR_PRIO_SHFT 8 /* ABC low */
#define MD_PDIR_PRIO_MASK (0xf << 8)
-#define MD_PDIR_AX_SHFT 7 /* ABC low */
+#define MD_PDIR_AX_SHFT 7 /* ABC low */
#define MD_PDIR_AX_MASK (1 << 7)
#define MD_PDIR_AX (1 << 7)
-#define MD_PDIR_FINE_SHFT 12 /* ABC low */
+#define MD_PDIR_FINE_SHFT 12 /* ABC low */
#define MD_PDIR_FINE_MASK (1 << 12)
#define MD_PDIR_FINE (1 << 12)
-#define MD_PDIR_OCT_SHFT 13 /* A low */
+#define MD_PDIR_OCT_SHFT 13 /* A low */
#define MD_PDIR_OCT_MASK (7 << 13)
-#define MD_PDIR_STATE_SHFT 13 /* BC low */
+#define MD_PDIR_STATE_SHFT 13 /* BC low */
#define MD_PDIR_STATE_MASK (7 << 13)
-#define MD_PDIR_ONECNT_SHFT 16 /* BC low */
+#define MD_PDIR_ONECNT_SHFT 16 /* BC low */
#define MD_PDIR_ONECNT_MASK (0x3f << 16)
-#define MD_PDIR_PTR_SHFT 22 /* C low */
+#define MD_PDIR_PTR_SHFT 22 /* C low */
#define MD_PDIR_PTR_MASK (UINT64_CAST 0x7ff << 22)
-#define MD_PDIR_VECMSB_SHFT 22 /* AB low */
+#define MD_PDIR_VECMSB_SHFT 22 /* AB low */
#define MD_PDIR_VECMSB_BITMASK 0x3ffffff
#define MD_PDIR_VECMSB_BITSHFT 27
#define MD_PDIR_VECMSB_MASK (UINT64_CAST MD_PDIR_VECMSB_BITMASK << 22)
-#define MD_PDIR_CWOFF_SHFT 7 /* C high */
+#define MD_PDIR_CWOFF_SHFT 7 /* C high */
#define MD_PDIR_CWOFF_MASK (7 << 7)
-#define MD_PDIR_VECLSB_SHFT 10 /* AB high */
+#define MD_PDIR_VECLSB_SHFT 10 /* AB high */
#define MD_PDIR_VECLSB_BITMASK (UINT64_CAST 0x3fffffffff)
#define MD_PDIR_VECLSB_BITSHFT 0
#define MD_PDIR_VECLSB_MASK (MD_PDIR_VECLSB_BITMASK << 10)
@@ -349,25 +349,25 @@
* Format C: STATE != shared
*/
-#define MD_SDIR_MASK 0xffff /* Whole entry */
+#define MD_SDIR_MASK 0xffff /* Whole entry */
#define MD_SDIR_ECC_SHFT 0 /* AC low or high */
#define MD_SDIR_ECC_MASK 0x1f
-#define MD_SDIR_PRIO_SHFT 6 /* AC low */
+#define MD_SDIR_PRIO_SHFT 6 /* AC low */
#define MD_SDIR_PRIO_MASK (1 << 6)
-#define MD_SDIR_AX_SHFT 5 /* AC low */
+#define MD_SDIR_AX_SHFT 5 /* AC low */
#define MD_SDIR_AX_MASK (1 << 5)
#define MD_SDIR_AX (1 << 5)
-#define MD_SDIR_STATE_SHFT 7 /* AC low */
+#define MD_SDIR_STATE_SHFT 7 /* AC low */
#define MD_SDIR_STATE_MASK (7 << 7)
-#define MD_SDIR_PTR_SHFT 10 /* C low */
+#define MD_SDIR_PTR_SHFT 10 /* C low */
#define MD_SDIR_PTR_MASK (0x3f << 10)
-#define MD_SDIR_CWOFF_SHFT 5 /* C high */
+#define MD_SDIR_CWOFF_SHFT 5 /* C high */
#define MD_SDIR_CWOFF_MASK (7 << 5)
-#define MD_SDIR_VECMSB_SHFT 11 /* A low */
+#define MD_SDIR_VECMSB_SHFT 11 /* A low */
#define MD_SDIR_VECMSB_BITMASK 0x1f
#define MD_SDIR_VECMSB_BITSHFT 7
#define MD_SDIR_VECMSB_MASK (MD_SDIR_VECMSB_BITMASK << 11)
-#define MD_SDIR_VECLSB_SHFT 5 /* A high */
+#define MD_SDIR_VECLSB_SHFT 5 /* A high */
#define MD_SDIR_VECLSB_BITMASK 0x7ff
#define MD_SDIR_VECLSB_BITSHFT 0
#define MD_SDIR_VECLSB_MASK (MD_SDIR_VECLSB_BITMASK << 5)
@@ -390,7 +390,7 @@
/* Premium SIMM protection entry shifts and masks. */
-#define MD_PPROT_SHFT 0 /* Prot. field */
+#define MD_PPROT_SHFT 0 /* Prot. field */
#define MD_PPROT_MASK 7
#define MD_PPROT_MIGMD_SHFT 3 /* Migration mode */
#define MD_PPROT_MIGMD_MASK (3 << 3)
@@ -403,7 +403,7 @@
/* Standard SIMM protection entry shifts and masks. */
-#define MD_SPROT_SHFT 0 /* Prot. field */
+#define MD_SPROT_SHFT 0 /* Prot. field */
#define MD_SPROT_MASK 7
#define MD_SPROT_MIGMD_SHFT 3 /* Migration mode */
#define MD_SPROT_MIGMD_MASK (3 << 3)
@@ -431,13 +431,13 @@
#define CPU_LED_ADDR(_nasid, _slice) \
(private.p_sn00 ? \
- REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) : \
+ REMOTE_HUB_ADDR((_nasid), MD_UREG1_0 + ((_slice) << 5)) : \
REMOTE_HUB_ADDR((_nasid), MD_LED0 + ((_slice) << 3)))
#define SET_CPU_LEDS(_nasid, _slice, _val) \
(HUB_S(CPU_LED_ADDR(_nasid, _slice), (_val)))
-#define SET_MY_LEDS(_v) \
+#define SET_MY_LEDS(_v) \
SET_CPU_LEDS(get_nasid(), get_slice(), (_v))
/*
@@ -541,7 +541,7 @@
*/
struct dir_error_reg {
- u64 uce_vld: 1, /* 63: valid directory uce */
+ u64 uce_vld: 1, /* 63: valid directory uce */
ae_vld: 1, /* 62: valid dir prot ecc error */
ce_vld: 1, /* 61: valid correctable ECC err*/
rsvd1: 19, /* 60-42: reserved */
@@ -555,13 +555,13 @@ struct dir_error_reg {
};
typedef union md_dir_error {
- u64 derr_reg; /* the entire register */
+ u64 derr_reg; /* the entire register */
struct dir_error_reg derr_fmt; /* the register format */
} md_dir_error_t;
struct mem_error_reg {
- u64 uce_vld: 1, /* 63: valid memory uce */
+ u64 uce_vld: 1, /* 63: valid memory uce */
ce_vld: 1, /* 62: valid correctable ECC err*/
rsvd1: 22, /* 61-40: reserved */
bad_syn: 8, /* 39-32: bad mem ecc syndrome */
@@ -573,8 +573,8 @@ struct mem_error_reg {
typedef union md_mem_error {
- u64 merr_reg; /* the entire register */
- struct mem_error_reg merr_fmt; /* format of the mem_error reg */
+ u64 merr_reg; /* the entire register */
+ struct mem_error_reg merr_fmt; /* format of the mem_error reg */
} md_mem_error_t;
@@ -594,7 +594,7 @@ struct proto_error_reg {
};
typedef union md_proto_error {
- u64 perr_reg; /* the entire register */
+ u64 perr_reg; /* the entire register */
struct proto_error_reg perr_fmt; /* format of the register */
} md_proto_error_t;
@@ -695,33 +695,33 @@ typedef union md_pdir_loent {
* represent directory memory information.
*/
-typedef union md_dir_high {
- md_sdir_high_t md_sdir_high;
- md_pdir_high_t md_pdir_high;
+typedef union md_dir_high {
+ md_sdir_high_t md_sdir_high;
+ md_pdir_high_t md_pdir_high;
} md_dir_high_t;
-typedef union md_dir_low {
- md_sdir_low_t md_sdir_low;
- md_pdir_low_t md_pdir_low;
+typedef union md_dir_low {
+ md_sdir_low_t md_sdir_low;
+ md_pdir_low_t md_pdir_low;
} md_dir_low_t;
-typedef struct bddir_entry {
- md_dir_low_t md_dir_low;
- md_dir_high_t md_dir_high;
+typedef struct bddir_entry {
+ md_dir_low_t md_dir_low;
+ md_dir_high_t md_dir_high;
} bddir_entry_t;
typedef struct dir_mem_entry {
- u64 prcpf[MAX_REGIONS];
- bddir_entry_t directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
+ u64 prcpf[MAX_REGIONS];
+ bddir_entry_t directory_words[MD_PAGE_SIZE/CACHE_SLINE_SIZE];
} dir_mem_entry_t;
typedef union md_perf_sel {
- u64 perf_sel_reg;
+ u64 perf_sel_reg;
struct {
u64 perf_rsvd : 60,
- perf_en : 1,
+ perf_en : 1,
perf_sel : 3;
} perf_sel_bits;
} md_perf_sel_t;
@@ -730,7 +730,7 @@ typedef union md_perf_cnt {
u64 perf_cnt;
struct {
u64 perf_rsvd : 44,
- perf_cnt : 20;
+ perf_cnt : 20;
} perf_cnt_bits;
} md_perf_cnt_t;
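
Usage note: the MD bank macros above encode the fixed 512 MB bank granularity, so converting between a bank number and its offset within a node is a shift by MD_BANK_SHFT. A standalone sketch (macros paraphrased from hubmd.h, with UINT64_CAST replaced by a plain 64-bit cast):

#include <stdio.h>
#include <stdint.h>

/* Paraphrased from hubmd.h above: each memory bank spans 512 MB. */
#define MD_BANK_SHFT		29			/* log2(512 MB) */
#define MD_BANK_SIZE		(1ULL << MD_BANK_SHFT)	/* 512 MB */
#define MD_BANK_OFFSET(_b)	((uint64_t)(_b) << MD_BANK_SHFT)

int main(void)
{
	for (int bank = 0; bank < 4; bank++)
		printf("bank %d starts at node offset 0x%llx (%llu MB)\n",
		       bank,
		       (unsigned long long)MD_BANK_OFFSET(bank),
		       (unsigned long long)(MD_BANK_OFFSET(bank) >> 20));
	return 0;
}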
diff --git a/arch/mips/include/asm/sn/sn0/hubni.h b/arch/mips/include/asm/sn/sn0/hubni.h
index b40d3ef97a1..b73c4bee65f 100644
--- a/arch/mips/include/asm/sn/sn0/hubni.h
+++ b/arch/mips/include/asm/sn/sn0/hubni.h
@@ -25,38 +25,38 @@
#define NI_BASE_TABLES 0x630000
#define NI_STATUS_REV_ID 0x600000 /* Hub network status, rev, and ID */
-#define NI_PORT_RESET 0x600008 /* Reset the network interface */
+#define NI_PORT_RESET 0x600008 /* Reset the network interface */
#define NI_PROTECTION 0x600010 /* NI register access permissions */
-#define NI_GLOBAL_PARMS 0x600018 /* LLP parameters */
+#define NI_GLOBAL_PARMS 0x600018 /* LLP parameters */
#define NI_SCRATCH_REG0 0x600100 /* Scratch register 0 (64 bits) */
#define NI_SCRATCH_REG1 0x600108 /* Scratch register 1 (64 bits) */
#define NI_DIAG_PARMS 0x600110 /* Parameters for diags */
#define NI_VECTOR_PARMS 0x600200 /* Vector PIO routing parameters */
-#define NI_VECTOR 0x600208 /* Vector PIO route */
-#define NI_VECTOR_DATA 0x600210 /* Vector PIO data */
-#define NI_VECTOR_STATUS 0x600300 /* Vector PIO return status */
-#define NI_RETURN_VECTOR 0x600308 /* Vector PIO return vector */
-#define NI_VECTOR_READ_DATA 0x600310 /* Vector PIO read data */
+#define NI_VECTOR 0x600208 /* Vector PIO route */
+#define NI_VECTOR_DATA 0x600210 /* Vector PIO data */
+#define NI_VECTOR_STATUS 0x600300 /* Vector PIO return status */
+#define NI_RETURN_VECTOR 0x600308 /* Vector PIO return vector */
+#define NI_VECTOR_READ_DATA 0x600310 /* Vector PIO read data */
#define NI_VECTOR_CLEAR 0x600380 /* Vector PIO read & clear status */
-#define NI_IO_PROTECT 0x600400 /* PIO protection bits */
-#define NI_IO_PROT_OVRRD 0x600408 /* PIO protection bit override */
-
-#define NI_AGE_CPU0_MEMORY 0x600500 /* CPU 0 memory age control */
-#define NI_AGE_CPU0_PIO 0x600508 /* CPU 0 PIO age control */
-#define NI_AGE_CPU1_MEMORY 0x600510 /* CPU 1 memory age control */
-#define NI_AGE_CPU1_PIO 0x600518 /* CPU 1 PIO age control */
-#define NI_AGE_GBR_MEMORY 0x600520 /* GBR memory age control */
-#define NI_AGE_GBR_PIO 0x600528 /* GBR PIO age control */
-#define NI_AGE_IO_MEMORY 0x600530 /* IO memory age control */
-#define NI_AGE_IO_PIO 0x600538 /* IO PIO age control */
+#define NI_IO_PROTECT 0x600400 /* PIO protection bits */
+#define NI_IO_PROT_OVRRD 0x600408 /* PIO protection bit override */
+
+#define NI_AGE_CPU0_MEMORY 0x600500 /* CPU 0 memory age control */
+#define NI_AGE_CPU0_PIO 0x600508 /* CPU 0 PIO age control */
+#define NI_AGE_CPU1_MEMORY 0x600510 /* CPU 1 memory age control */
+#define NI_AGE_CPU1_PIO 0x600518 /* CPU 1 PIO age control */
+#define NI_AGE_GBR_MEMORY 0x600520 /* GBR memory age control */
+#define NI_AGE_GBR_PIO 0x600528 /* GBR PIO age control */
+#define NI_AGE_IO_MEMORY 0x600530 /* IO memory age control */
+#define NI_AGE_IO_PIO 0x600538 /* IO PIO age control */
#define NI_AGE_REG_MIN NI_AGE_CPU0_MEMORY
#define NI_AGE_REG_MAX NI_AGE_IO_PIO
-#define NI_PORT_PARMS 0x608000 /* LLP Parameters */
-#define NI_PORT_ERROR 0x608008 /* LLP Errors */
-#define NI_PORT_ERROR_CLEAR 0x608088 /* Clear the error bits */
+#define NI_PORT_PARMS 0x608000 /* LLP Parameters */
+#define NI_PORT_ERROR 0x608008 /* LLP Errors */
+#define NI_PORT_ERROR_CLEAR 0x608088 /* Clear the error bits */
#define NI_META_TABLE0 0x638000 /* First meta routing table entry */
#define NI_META_TABLE(_x) (NI_META_TABLE0 + (8 * (_x)))
@@ -76,13 +76,13 @@
#define NSRI_LINKUP_SHFT 29
#define NSRI_LINKUP_MASK (UINT64_CAST 0x1 << 29)
#define NSRI_DOWNREASON_SHFT 28 /* 0=failed, 1=never came */
-#define NSRI_DOWNREASON_MASK (UINT64_CAST 0x1 << 28) /* out of reset. */
+#define NSRI_DOWNREASON_MASK (UINT64_CAST 0x1 << 28) /* out of reset. */
#define NSRI_MORENODES_SHFT 18
#define NSRI_MORENODES_MASK (UINT64_CAST 1 << 18) /* Max. # of nodes */
#define MORE_MEMORY 0
#define MORE_NODES 1
#define NSRI_REGIONSIZE_SHFT 17
-#define NSRI_REGIONSIZE_MASK (UINT64_CAST 1 << 17) /* Granularity */
+#define NSRI_REGIONSIZE_MASK (UINT64_CAST 1 << 17) /* Granularity */
#define REGIONSIZE_FINE 1
#define REGIONSIZE_COARSE 0
#define NSRI_NODEID_SHFT 8
@@ -90,14 +90,14 @@
#define NSRI_REV_SHFT 4
#define NSRI_REV_MASK (UINT64_CAST 0xf << 4) /* Chip Revision */
#define NSRI_CHIPID_SHFT 0
-#define NSRI_CHIPID_MASK (UINT64_CAST 0xf) /* Chip type ID */
+#define NSRI_CHIPID_MASK (UINT64_CAST 0xf) /* Chip type ID */
/*
- * In fine mode, each node is a region. In coarse mode, there are
+ * In fine mode, each node is a region. In coarse mode, there are
* eight nodes per region.
*/
#define NASID_TO_FINEREG_SHFT 0
-#define NASID_TO_COARSEREG_SHFT 3
+#define NASID_TO_COARSEREG_SHFT 3
/* NI_PORT_RESET mask definitions */
@@ -111,21 +111,21 @@
/* NI_GLOBAL_PARMS mask and shift definitions */
-#define NGP_MAXRETRY_SHFT 48 /* Maximum retries */
+#define NGP_MAXRETRY_SHFT 48 /* Maximum retries */
#define NGP_MAXRETRY_MASK (UINT64_CAST 0x3ff << 48)
-#define NGP_TAILTOWRAP_SHFT 32 /* Tail timeout wrap */
+#define NGP_TAILTOWRAP_SHFT 32 /* Tail timeout wrap */
#define NGP_TAILTOWRAP_MASK (UINT64_CAST 0xffff << 32)
-#define NGP_CREDITTOVAL_SHFT 16 /* Tail timeout wrap */
+#define NGP_CREDITTOVAL_SHFT 16 /* Tail timeout wrap */
#define NGP_CREDITTOVAL_MASK (UINT64_CAST 0xf << 16)
-#define NGP_TAILTOVAL_SHFT 4 /* Tail timeout value */
+#define NGP_TAILTOVAL_SHFT 4 /* Tail timeout value */
#define NGP_TAILTOVAL_MASK (UINT64_CAST 0xf << 4)
/* NI_DIAG_PARMS mask and shift definitions */
#define NDP_PORTTORESET (UINT64_CAST 1 << 18) /* Port tmout reset */
#define NDP_LLP8BITMODE (UINT64_CAST 1 << 12) /* LLP 8-bit mode */
-#define NDP_PORTDISABLE (UINT64_CAST 1 << 6) /* Port disable */
+#define NDP_PORTDISABLE (UINT64_CAST 1 << 6) /* Port disable */
#define NDP_SENDERROR (UINT64_CAST 1) /* Send data error */
/*
@@ -137,7 +137,7 @@
#define NVP_PIOID_MASK (UINT64_CAST 0x3ff << 40)
#define NVP_WRITEID_SHFT 32
#define NVP_WRITEID_MASK (UINT64_CAST 0xff << 32)
-#define NVP_ADDRESS_MASK (UINT64_CAST 0xffff8) /* Bits 19:3 */
+#define NVP_ADDRESS_MASK (UINT64_CAST 0xffff8) /* Bits 19:3 */
#define NVP_TYPE_SHFT 0
#define NVP_TYPE_MASK (UINT64_CAST 0x3)
@@ -151,7 +151,7 @@
#define NVS_PIOID_MASK (UINT64_CAST 0x3ff << 40)
#define NVS_WRITEID_SHFT 32
#define NVS_WRITEID_MASK (UINT64_CAST 0xff << 32)
-#define NVS_ADDRESS_MASK (UINT64_CAST 0xfffffff8) /* Bits 31:3 */
+#define NVS_ADDRESS_MASK (UINT64_CAST 0xfffffff8) /* Bits 31:3 */
#define NVS_TYPE_SHFT 0
#define NVS_TYPE_MASK (UINT64_CAST 0x7)
#define NVS_ERROR_MASK (UINT64_CAST 0x4) /* bit set means error */
@@ -161,10 +161,10 @@
#define PIOTYPE_WRITE 1 /* VECTOR_PARMS and VECTOR_STATUS */
#define PIOTYPE_UNDEFINED 2 /* VECTOR_PARMS and VECTOR_STATUS */
#define PIOTYPE_EXCHANGE 3 /* VECTOR_PARMS and VECTOR_STATUS */
-#define PIOTYPE_ADDR_ERR 4 /* VECTOR_STATUS only */
-#define PIOTYPE_CMD_ERR 5 /* VECTOR_STATUS only */
-#define PIOTYPE_PROT_ERR 6 /* VECTOR_STATUS only */
-#define PIOTYPE_UNKNOWN 7 /* VECTOR_STATUS only */
+#define PIOTYPE_ADDR_ERR 4 /* VECTOR_STATUS only */
+#define PIOTYPE_CMD_ERR 5 /* VECTOR_STATUS only */
+#define PIOTYPE_PROT_ERR 6 /* VECTOR_STATUS only */
+#define PIOTYPE_UNKNOWN 7 /* VECTOR_STATUS only */
/* NI_AGE_XXX mask and shift definitions */
@@ -215,7 +215,7 @@
#define NPE_FATAL_ERRORS (NPE_LINKRESET | NPE_INTERNALERROR | \
NPE_BADMESSAGE | NPE_BADDEST | \
- NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
+ NPE_FIFOOVERFLOW | NPE_CREDITTO_MASK | \
NPE_TAILTO_MASK)
/* NI_META_TABLE mask and shift definitions */
@@ -231,7 +231,7 @@
typedef union hubni_port_error_u {
u64 nipe_reg_value;
struct {
- u64 nipe_rsvd: 26, /* unused */
+ u64 nipe_rsvd: 26, /* unused */
nipe_lnk_reset: 1, /* link reset */
nipe_intl_err: 1, /* internal error */
nipe_bad_msg: 1, /* bad message */
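
Usage note: fields of NI_STATUS_REV_ID are extracted with the NSRI_* shift/mask pairs defined above. A standalone sketch (masks paraphrased from hubni.h with UINT64_CAST replaced by 64-bit constants; the status value is made up):

#include <stdio.h>
#include <stdint.h>

/* Paraphrased from hubni.h above. */
#define NSRI_LINKUP_SHFT	29
#define NSRI_LINKUP_MASK	(0x1ULL << NSRI_LINKUP_SHFT)
#define NSRI_REV_SHFT		4
#define NSRI_REV_MASK		(0xfULL << NSRI_REV_SHFT)	/* Chip Revision */
#define NSRI_CHIPID_SHFT	0
#define NSRI_CHIPID_MASK	(0xfULL << NSRI_CHIPID_SHFT)	/* Chip type ID */

static void decode_nsri(uint64_t nsri)
{
	printf("link %s, chip id %llu, rev %llu\n",
	       (nsri & NSRI_LINKUP_MASK) ? "up" : "down",
	       (unsigned long long)((nsri & NSRI_CHIPID_MASK) >> NSRI_CHIPID_SHFT),
	       (unsigned long long)((nsri & NSRI_REV_MASK) >> NSRI_REV_SHFT));
}

int main(void)
{
	/* Made-up NI_STATUS_REV_ID value: link up, chip id 4, rev 2. */
	decode_nsri(NSRI_LINKUP_MASK | (2ULL << NSRI_REV_SHFT) | 4ULL);
	return 0;
}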
diff --git a/arch/mips/include/asm/sn/sn0/hubpi.h b/arch/mips/include/asm/sn/sn0/hubpi.h
index e39f5f9da04..7b83655913c 100644
--- a/arch/mips/include/asm/sn/sn0/hubpi.h
+++ b/arch/mips/include/asm/sn/sn0/hubpi.h
@@ -8,8 +8,8 @@
* Copyright (C) 1992 - 1997, 1999 Silicon Graphics, Inc.
* Copyright (C) 1999 by Ralf Baechle
*/
-#ifndef _ASM_SN_SN0_HUBPI_H
-#define _ASM_SN_SN0_HUBPI_H
+#ifndef _ASM_SN_SN0_HUBPI_H
+#define _ASM_SN_SN0_HUBPI_H
#include <linux/types.h>
@@ -25,13 +25,13 @@
/* General protection and control registers */
-#define PI_CPU_PROTECT 0x000000 /* CPU Protection */
-#define PI_PROT_OVERRD 0x000008 /* Clear CPU Protection bit */
-#define PI_IO_PROTECT 0x000010 /* Interrupt Pending Protection */
+#define PI_CPU_PROTECT 0x000000 /* CPU Protection */
+#define PI_PROT_OVERRD 0x000008 /* Clear CPU Protection bit */
+#define PI_IO_PROTECT 0x000010 /* Interrupt Pending Protection */
#define PI_REGION_PRESENT 0x000018 /* Indicates whether region exists */
-#define PI_CPU_NUM 0x000020 /* CPU Number ID */
-#define PI_CALIAS_SIZE 0x000028 /* Cached Alias Size */
-#define PI_MAX_CRB_TIMEOUT 0x000030 /* Maximum Timeout for CRB */
+#define PI_CPU_NUM 0x000020 /* CPU Number ID */
+#define PI_CALIAS_SIZE 0x000028 /* Cached Alias Size */
+#define PI_MAX_CRB_TIMEOUT 0x000030 /* Maximum Timeout for CRB */
#define PI_CRB_SFACTOR 0x000038 /* Scale factor for CRB timeout */
/* CALIAS values */
@@ -54,28 +54,28 @@
/* Processor control and status checking */
-#define PI_CPU_PRESENT_A 0x000040 /* CPU Present A */
-#define PI_CPU_PRESENT_B 0x000048 /* CPU Present B */
-#define PI_CPU_ENABLE_A 0x000050 /* CPU Enable A */
-#define PI_CPU_ENABLE_B 0x000058 /* CPU Enable B */
-#define PI_REPLY_LEVEL 0x000060 /* Reply Level */
+#define PI_CPU_PRESENT_A 0x000040 /* CPU Present A */
+#define PI_CPU_PRESENT_B 0x000048 /* CPU Present B */
+#define PI_CPU_ENABLE_A 0x000050 /* CPU Enable A */
+#define PI_CPU_ENABLE_B 0x000058 /* CPU Enable B */
+#define PI_REPLY_LEVEL 0x000060 /* Reply Level */
#define PI_HARDRESET_BIT 0x020068 /* Bit cleared by s/w on SR */
-#define PI_NMI_A 0x000070 /* NMI to CPU A */
-#define PI_NMI_B 0x000078 /* NMI to CPU B */
+#define PI_NMI_A 0x000070 /* NMI to CPU A */
+#define PI_NMI_B 0x000078 /* NMI to CPU B */
#define PI_NMI_OFFSET (PI_NMI_B - PI_NMI_A)
-#define PI_SOFTRESET 0x000080 /* Softreset (to both CPUs) */
+#define PI_SOFTRESET 0x000080 /* Softreset (to both CPUs) */
-/* Regular Interrupt register checking. */
+/* Regular Interrupt register checking. */
#define PI_INT_PEND_MOD 0x000090 /* Write to set pending ints */
-#define PI_INT_PEND0 0x000098 /* Read to get pending ints */
-#define PI_INT_PEND1 0x0000a0 /* Read to get pending ints */
-#define PI_INT_MASK0_A 0x0000a8 /* Interrupt Mask 0 for CPU A */
-#define PI_INT_MASK1_A 0x0000b0 /* Interrupt Mask 1 for CPU A */
-#define PI_INT_MASK0_B 0x0000b8 /* Interrupt Mask 0 for CPU B */
-#define PI_INT_MASK1_B 0x0000c0 /* Interrupt Mask 1 for CPU B */
+#define PI_INT_PEND0 0x000098 /* Read to get pending ints */
+#define PI_INT_PEND1 0x0000a0 /* Read to get pending ints */
+#define PI_INT_MASK0_A 0x0000a8 /* Interrupt Mask 0 for CPU A */
+#define PI_INT_MASK1_A 0x0000b0 /* Interrupt Mask 1 for CPU A */
+#define PI_INT_MASK0_B 0x0000b8 /* Interrupt Mask 0 for CPU B */
+#define PI_INT_MASK1_B 0x0000c0 /* Interrupt Mask 1 for CPU B */
-#define PI_INT_MASK_OFFSET 0x10 /* Offset from A to B */
+#define PI_INT_MASK_OFFSET 0x10 /* Offset from A to B */
/* Crosscall interrupts */
@@ -83,49 +83,49 @@
#define PI_CC_PEND_SET_B 0x0000d0 /* CC Interrupt Pending Set, CPU B */
#define PI_CC_PEND_CLR_A 0x0000d8 /* CC Interrupt Pending Clr, CPU A */
#define PI_CC_PEND_CLR_B 0x0000e0 /* CC Interrupt Pending Clr, CPU B */
-#define PI_CC_MASK 0x0000e8 /* CC Interrupt mask */
+#define PI_CC_MASK 0x0000e8 /* CC Interrupt mask */
-#define PI_INT_SET_OFFSET 0x08 /* Offset from A to B */
+#define PI_INT_SET_OFFSET 0x08 /* Offset from A to B */
/* Realtime Counter and Profiler control registers */
-#define PI_RT_COUNT 0x030100 /* Real Time Counter */
-#define PI_RT_COMPARE_A 0x000108 /* Real Time Compare A */
-#define PI_RT_COMPARE_B 0x000110 /* Real Time Compare B */
+#define PI_RT_COUNT 0x030100 /* Real Time Counter */
+#define PI_RT_COMPARE_A 0x000108 /* Real Time Compare A */
+#define PI_RT_COMPARE_B 0x000110 /* Real Time Compare B */
#define PI_PROFILE_COMPARE 0x000118 /* L5 int to both cpus when == RTC */
-#define PI_RT_PEND_A 0x000120 /* Set if RT int for A pending */
-#define PI_RT_PEND_B 0x000128 /* Set if RT int for B pending */
+#define PI_RT_PEND_A 0x000120 /* Set if RT int for A pending */
+#define PI_RT_PEND_B 0x000128 /* Set if RT int for B pending */
#define PI_PROF_PEND_A 0x000130 /* Set if Prof int for A pending */
#define PI_PROF_PEND_B 0x000138 /* Set if Prof int for B pending */
-#define PI_RT_EN_A 0x000140 /* RT int for CPU A enable */
-#define PI_RT_EN_B 0x000148 /* RT int for CPU B enable */
-#define PI_PROF_EN_A 0x000150 /* PROF int for CPU A enable */
-#define PI_PROF_EN_B 0x000158 /* PROF int for CPU B enable */
-#define PI_RT_LOCAL_CTRL 0x000160 /* RT control register */
+#define PI_RT_EN_A 0x000140 /* RT int for CPU A enable */
+#define PI_RT_EN_B 0x000148 /* RT int for CPU B enable */
+#define PI_PROF_EN_A 0x000150 /* PROF int for CPU A enable */
+#define PI_PROF_EN_B 0x000158 /* PROF int for CPU B enable */
+#define PI_RT_LOCAL_CTRL 0x000160 /* RT control register */
#define PI_RT_FILTER_CTRL 0x000168 /* GCLK Filter control register */
#define PI_COUNT_OFFSET 0x08 /* A to B offset for all counts */
/* Built-In Self Test support */
-#define PI_BIST_WRITE_DATA 0x000200 /* BIST write data */
-#define PI_BIST_READ_DATA 0x000208 /* BIST read data */
-#define PI_BIST_COUNT_TARG 0x000210 /* BIST Count and Target */
-#define PI_BIST_READY 0x000218 /* BIST Ready indicator */
-#define PI_BIST_SHIFT_LOAD 0x000220 /* BIST control */
-#define PI_BIST_SHIFT_UNLOAD 0x000228 /* BIST control */
-#define PI_BIST_ENTER_RUN 0x000230 /* BIST control */
+#define PI_BIST_WRITE_DATA 0x000200 /* BIST write data */
+#define PI_BIST_READ_DATA 0x000208 /* BIST read data */
+#define PI_BIST_COUNT_TARG 0x000210 /* BIST Count and Target */
+#define PI_BIST_READY 0x000218 /* BIST Ready indicator */
+#define PI_BIST_SHIFT_LOAD 0x000220 /* BIST control */
+#define PI_BIST_SHIFT_UNLOAD 0x000228 /* BIST control */
+#define PI_BIST_ENTER_RUN 0x000230 /* BIST control */
/* Graphics control registers */
-#define PI_GFX_PAGE_A 0x000300 /* Graphics page A */
-#define PI_GFX_CREDIT_CNTR_A 0x000308 /* Graphics credit counter A */
-#define PI_GFX_BIAS_A 0x000310 /* Graphics bias A */
+#define PI_GFX_PAGE_A 0x000300 /* Graphics page A */
+#define PI_GFX_CREDIT_CNTR_A 0x000308 /* Graphics credit counter A */
+#define PI_GFX_BIAS_A 0x000310 /* Graphics bias A */
#define PI_GFX_INT_CNTR_A 0x000318 /* Graphics interrupt counter A */
#define PI_GFX_INT_CMP_A 0x000320 /* Graphics interrupt comparator A */
-#define PI_GFX_PAGE_B 0x000328 /* Graphics page B */
-#define PI_GFX_CREDIT_CNTR_B 0x000330 /* Graphics credit counter B */
-#define PI_GFX_BIAS_B 0x000338 /* Graphics bias B */
+#define PI_GFX_PAGE_B 0x000328 /* Graphics page B */
+#define PI_GFX_CREDIT_CNTR_B 0x000330 /* Graphics credit counter B */
+#define PI_GFX_BIAS_B 0x000338 /* Graphics bias B */
#define PI_GFX_INT_CNTR_B 0x000340 /* Graphics interrupt counter B */
#define PI_GFX_INT_CMP_B 0x000348 /* Graphics interrupt comparator B */
@@ -138,24 +138,24 @@
#define PI_ERR_INT_MASK_B 0x000410 /* Error Interrupt mask for CPU B */
#define PI_ERR_STACK_ADDR_A 0x000418 /* Error stack address for CPU A */
#define PI_ERR_STACK_ADDR_B 0x000420 /* Error stack address for CPU B */
-#define PI_ERR_STACK_SIZE 0x000428 /* Error Stack Size */
-#define PI_ERR_STATUS0_A 0x000430 /* Error Status 0A */
+#define PI_ERR_STACK_SIZE 0x000428 /* Error Stack Size */
+#define PI_ERR_STATUS0_A 0x000430 /* Error Status 0A */
#define PI_ERR_STATUS0_A_RCLR 0x000438 /* Error Status 0A clear on read */
-#define PI_ERR_STATUS1_A 0x000440 /* Error Status 1A */
+#define PI_ERR_STATUS1_A 0x000440 /* Error Status 1A */
#define PI_ERR_STATUS1_A_RCLR 0x000448 /* Error Status 1A clear on read */
-#define PI_ERR_STATUS0_B 0x000450 /* Error Status 0B */
+#define PI_ERR_STATUS0_B 0x000450 /* Error Status 0B */
#define PI_ERR_STATUS0_B_RCLR 0x000458 /* Error Status 0B clear on read */
-#define PI_ERR_STATUS1_B 0x000460 /* Error Status 1B */
+#define PI_ERR_STATUS1_B 0x000460 /* Error Status 1B */
#define PI_ERR_STATUS1_B_RCLR 0x000468 /* Error Status 1B clear on read */
-#define PI_SPOOL_CMP_A 0x000470 /* Spool compare for CPU A */
-#define PI_SPOOL_CMP_B 0x000478 /* Spool compare for CPU B */
-#define PI_CRB_TIMEOUT_A 0x000480 /* Timed out CRB entries for A */
-#define PI_CRB_TIMEOUT_B 0x000488 /* Timed out CRB entries for B */
+#define PI_SPOOL_CMP_A 0x000470 /* Spool compare for CPU A */
+#define PI_SPOOL_CMP_B 0x000478 /* Spool compare for CPU B */
+#define PI_CRB_TIMEOUT_A 0x000480 /* Timed out CRB entries for A */
+#define PI_CRB_TIMEOUT_B 0x000488 /* Timed out CRB entries for B */
#define PI_SYSAD_ERRCHK_EN 0x000490 /* Enables SYSAD error checking */
-#define PI_BAD_CHECK_BIT_A 0x000498 /* Force SYSAD check bit error */
-#define PI_BAD_CHECK_BIT_B 0x0004a0 /* Force SYSAD check bit error */
-#define PI_NACK_CNT_A 0x0004a8 /* Consecutive NACK counter */
-#define PI_NACK_CNT_B 0x0004b0 /* " " for CPU B */
+#define PI_BAD_CHECK_BIT_A 0x000498 /* Force SYSAD check bit error */
+#define PI_BAD_CHECK_BIT_B 0x0004a0 /* Force SYSAD check bit error */
+#define PI_NACK_CNT_A 0x0004a8 /* Consecutive NACK counter */
+#define PI_NACK_CNT_B 0x0004b0 /* " " for CPU B */
#define PI_NACK_CMP 0x0004b8 /* NACK count compare */
#define PI_STACKADDR_OFFSET (PI_ERR_STACK_ADDR_B - PI_ERR_STACK_ADDR_A)
#define PI_ERRSTAT_OFFSET (PI_ERR_STATUS0_B - PI_ERR_STATUS0_A)
@@ -168,7 +168,7 @@
#define PI_ERR_SPUR_MSG_A 0x00000008
#define PI_ERR_WRB_TERR_B 0x00000010 /* WRB TERR */
#define PI_ERR_WRB_TERR_A 0x00000020
-#define PI_ERR_WRB_WERR_B 0x00000040 /* WRB WERR */
+#define PI_ERR_WRB_WERR_B 0x00000040 /* WRB WERR */
#define PI_ERR_WRB_WERR_A 0x00000080
#define PI_ERR_SYSSTATE_B 0x00000100 /* SysState parity error */
#define PI_ERR_SYSSTATE_A 0x00000200
@@ -196,32 +196,32 @@
* The following three macros define all possible error int pends.
*/
-#define PI_FATAL_ERR_CPU_A (PI_ERR_SYSSTATE_TAG_A | \
- PI_ERR_BAD_SPOOL_A | \
- PI_ERR_SYSCMD_ADDR_A | \
- PI_ERR_SYSCMD_DATA_A | \
- PI_ERR_SYSAD_ADDR_A | \
+#define PI_FATAL_ERR_CPU_A (PI_ERR_SYSSTATE_TAG_A | \
+ PI_ERR_BAD_SPOOL_A | \
+ PI_ERR_SYSCMD_ADDR_A | \
+ PI_ERR_SYSCMD_DATA_A | \
+ PI_ERR_SYSAD_ADDR_A | \
PI_ERR_SYSAD_DATA_A | \
PI_ERR_SYSSTATE_A)
-#define PI_MISC_ERR_CPU_A (PI_ERR_UNCAC_UNCORR_A | \
- PI_ERR_WRB_WERR_A | \
- PI_ERR_WRB_TERR_A | \
- PI_ERR_SPUR_MSG_A | \
+#define PI_MISC_ERR_CPU_A (PI_ERR_UNCAC_UNCORR_A | \
+ PI_ERR_WRB_WERR_A | \
+ PI_ERR_WRB_TERR_A | \
+ PI_ERR_SPUR_MSG_A | \
PI_ERR_SPOOL_CMP_A)
-#define PI_FATAL_ERR_CPU_B (PI_ERR_SYSSTATE_TAG_B | \
- PI_ERR_BAD_SPOOL_B | \
- PI_ERR_SYSCMD_ADDR_B | \
- PI_ERR_SYSCMD_DATA_B | \
- PI_ERR_SYSAD_ADDR_B | \
+#define PI_FATAL_ERR_CPU_B (PI_ERR_SYSSTATE_TAG_B | \
+ PI_ERR_BAD_SPOOL_B | \
+ PI_ERR_SYSCMD_ADDR_B | \
+ PI_ERR_SYSCMD_DATA_B | \
+ PI_ERR_SYSAD_ADDR_B | \
PI_ERR_SYSAD_DATA_B | \
PI_ERR_SYSSTATE_B)
-#define PI_MISC_ERR_CPU_B (PI_ERR_UNCAC_UNCORR_B | \
- PI_ERR_WRB_WERR_B | \
- PI_ERR_WRB_TERR_B | \
- PI_ERR_SPUR_MSG_B | \
+#define PI_MISC_ERR_CPU_B (PI_ERR_UNCAC_UNCORR_B | \
+ PI_ERR_WRB_WERR_B | \
+ PI_ERR_WRB_TERR_B | \
+ PI_ERR_SPUR_MSG_B | \
PI_ERR_SPOOL_CMP_B)
#define PI_ERR_GENERIC (PI_ERR_MD_UNCORR)
@@ -242,24 +242,24 @@
#define PI_ERR_ST0_CMD_SHFT 17
#define PI_ERR_ST0_ADDR_MASK 0x3ffffffffe000000
#define PI_ERR_ST0_ADDR_SHFT 25
-#define PI_ERR_ST0_OVERRUN_MASK 0x4000000000000000
-#define PI_ERR_ST0_OVERRUN_SHFT 62
+#define PI_ERR_ST0_OVERRUN_MASK 0x4000000000000000
+#define PI_ERR_ST0_OVERRUN_SHFT 62
#define PI_ERR_ST0_VALID_MASK 0x8000000000000000
#define PI_ERR_ST0_VALID_SHFT 63
/* Fields in PI_ERR_STATUS1_[AB] */
#define PI_ERR_ST1_SPOOL_MASK 0x00000000001fffff
#define PI_ERR_ST1_SPOOL_SHFT 0
-#define PI_ERR_ST1_TOUTCNT_MASK 0x000000001fe00000
-#define PI_ERR_ST1_TOUTCNT_SHFT 21
+#define PI_ERR_ST1_TOUTCNT_MASK 0x000000001fe00000
+#define PI_ERR_ST1_TOUTCNT_SHFT 21
#define PI_ERR_ST1_INVCNT_MASK 0x0000007fe0000000
#define PI_ERR_ST1_INVCNT_SHFT 29
#define PI_ERR_ST1_CRBNUM_MASK 0x0000038000000000
#define PI_ERR_ST1_CRBNUM_SHFT 39
#define PI_ERR_ST1_WRBRRB_MASK 0x0000040000000000
#define PI_ERR_ST1_WRBRRB_SHFT 42
-#define PI_ERR_ST1_CRBSTAT_MASK 0x001ff80000000000
-#define PI_ERR_ST1_CRBSTAT_SHFT 43
+#define PI_ERR_ST1_CRBSTAT_MASK 0x001ff80000000000
+#define PI_ERR_ST1_CRBSTAT_SHFT 43
#define PI_ERR_ST1_MSGSRC_MASK 0xffe0000000000000
#define PI_ERR_ST1_MSGSRC_SHFT 53
@@ -274,8 +274,8 @@
#define PI_ERR_STK_CRBNUM_SHFT 9
#define PI_ERR_STK_WRBRRB_MASK 0x0000000000001000
#define PI_ERR_STK_WRBRRB_SHFT 12
-#define PI_ERR_STK_CRBSTAT_MASK 0x00000000007fe000
-#define PI_ERR_STK_CRBSTAT_SHFT 13
+#define PI_ERR_STK_CRBSTAT_MASK 0x00000000007fe000
+#define PI_ERR_STK_CRBSTAT_SHFT 13
#define PI_ERR_STK_CMD_MASK 0x000000007f800000
#define PI_ERR_STK_CMD_SHFT 23
#define PI_ERR_STK_ADDR_MASK 0xffffffff80000000
@@ -364,11 +364,11 @@ typedef u64 rtc_time_t;
/* Bits in PI_SYSAD_ERRCHK_EN */
#define PI_SYSAD_ERRCHK_ECCGEN 0x01 /* Enable ECC generation */
-#define PI_SYSAD_ERRCHK_QUALGEN 0x02 /* Enable data quality signal gen. */
-#define PI_SYSAD_ERRCHK_SADP 0x04 /* Enable SysAD parity checking */
+#define PI_SYSAD_ERRCHK_QUALGEN 0x02 /* Enable data quality signal gen. */
+#define PI_SYSAD_ERRCHK_SADP 0x04 /* Enable SysAD parity checking */
#define PI_SYSAD_ERRCHK_CMDP 0x08 /* Enable SysCmd parity checking */
#define PI_SYSAD_ERRCHK_STATE 0x10 /* Enable SysState parity checking */
-#define PI_SYSAD_ERRCHK_QUAL 0x20 /* Enable data quality checking */
+#define PI_SYSAD_ERRCHK_QUAL 0x20 /* Enable data quality checking */
#define PI_SYSAD_CHECK_ALL 0x3f /* Generate and check all signals. */
/* Interrupt pending bits on R10000 */
diff --git a/arch/mips/include/asm/sn/sn0/ip27.h b/arch/mips/include/asm/sn/sn0/ip27.h
index 3c97e0855c8..3b5efeefcc3 100644
--- a/arch/mips/include/asm/sn/sn0/ip27.h
+++ b/arch/mips/include/asm/sn/sn0/ip27.h
@@ -21,14 +21,14 @@
#ifndef __ASSEMBLY__
-#define CAUSE_BERRINTR IE_IRQ5
+#define CAUSE_BERRINTR IE_IRQ5
-#define ECCF_CACHE_ERR 0
-#define ECCF_TAGLO 1
-#define ECCF_ECC 2
-#define ECCF_ERROREPC 3
-#define ECCF_PADDR 4
-#define ECCF_SIZE (5 * sizeof(long))
+#define ECCF_CACHE_ERR 0
+#define ECCF_TAGLO 1
+#define ECCF_ECC 2
+#define ECCF_ERROREPC 3
+#define ECCF_PADDR 4
+#define ECCF_SIZE (5 * sizeof(long))
#endif /* !__ASSEMBLY__ */
@@ -39,8 +39,8 @@
* the processor number of the calling processor. The proc parameters
* must be a register.
*/
-#define KL_GET_CPUNUM(proc) \
- dli proc, LOCAL_HUB(0); \
+#define KL_GET_CPUNUM(proc) \
+ dli proc, LOCAL_HUB(0); \
ld proc, PI_CPU_NUM(proc)
#endif /* __ASSEMBLY__ */
@@ -71,15 +71,15 @@
#define NUM_CAUSE_INTRS 8
-#define SCACHE_LINESIZE 128
-#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
+#define SCACHE_LINESIZE 128
+#define SCACHE_LINEMASK (SCACHE_LINESIZE - 1)
#include <asm/sn/addrs.h>
-#define LED_CYCLE_MASK 0x0f
-#define LED_CYCLE_SHFT 4
+#define LED_CYCLE_MASK 0x0f
+#define LED_CYCLE_SHFT 4
#define SEND_NMI(_nasid, _slice) \
- REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
+ REMOTE_HUB_S((_nasid), (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
#endif /* _ASM_SN_SN0_IP27_H */
diff --git a/arch/mips/include/asm/sn/types.h b/arch/mips/include/asm/sn/types.h
index 74d0bb260b8..c4813d67aec 100644
--- a/arch/mips/include/asm/sn/types.h
+++ b/arch/mips/include/asm/sn/types.h
@@ -11,7 +11,7 @@
#include <linux/types.h>
-typedef unsigned long cpuid_t;
+typedef unsigned long cpuid_t;
typedef unsigned long cnodemask_t;
typedef signed short nasid_t; /* node id in numa-as-id space */
typedef signed short cnodeid_t; /* node id in compact-id space */
@@ -19,7 +19,7 @@ typedef signed char partid_t; /* partition ID type */
typedef signed short moduleid_t; /* user-visible module number type */
typedef signed short cmoduleid_t; /* kernel compact module id type */
typedef unsigned char clusterid_t; /* Clusterid of the cell */
-typedef unsigned long pfn_t;
+typedef unsigned long pfn_t;
typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */
diff --git a/arch/mips/include/asm/sni.h b/arch/mips/include/asm/sni.h
index 8c1eb02c6d1..a107201a2e1 100644
--- a/arch/mips/include/asm/sni.h
+++ b/arch/mips/include/asm/sni.h
@@ -13,27 +13,27 @@
extern unsigned int sni_brd_type;
-#define SNI_BRD_10 2
-#define SNI_BRD_10NEW 3
-#define SNI_BRD_TOWER_OASIC 4
-#define SNI_BRD_MINITOWER 5
-#define SNI_BRD_PCI_TOWER 6
-#define SNI_BRD_RM200 7
-#define SNI_BRD_PCI_MTOWER 8
-#define SNI_BRD_PCI_DESKTOP 9
-#define SNI_BRD_PCI_TOWER_CPLUS 10
+#define SNI_BRD_10 2
+#define SNI_BRD_10NEW 3
+#define SNI_BRD_TOWER_OASIC 4
+#define SNI_BRD_MINITOWER 5
+#define SNI_BRD_PCI_TOWER 6
+#define SNI_BRD_RM200 7
+#define SNI_BRD_PCI_MTOWER 8
+#define SNI_BRD_PCI_DESKTOP 9
+#define SNI_BRD_PCI_TOWER_CPLUS 10
#define SNI_BRD_PCI_MTOWER_CPLUS 11
/* RM400 cpu types */
-#define SNI_CPU_M8021 0x01
-#define SNI_CPU_M8030 0x04
-#define SNI_CPU_M8031 0x06
-#define SNI_CPU_M8034 0x0f
-#define SNI_CPU_M8037 0x07
-#define SNI_CPU_M8040 0x05
-#define SNI_CPU_M8043 0x09
-#define SNI_CPU_M8050 0x0b
-#define SNI_CPU_M8053 0x0d
+#define SNI_CPU_M8021 0x01
+#define SNI_CPU_M8030 0x04
+#define SNI_CPU_M8031 0x06
+#define SNI_CPU_M8034 0x0f
+#define SNI_CPU_M8037 0x07
+#define SNI_CPU_M8040 0x05
+#define SNI_CPU_M8043 0x09
+#define SNI_CPU_M8050 0x0b
+#define SNI_CPU_M8053 0x0d
#define SNI_PORT_BASE CKSEG1ADDR(0xb4000000)
@@ -52,14 +52,14 @@ extern unsigned int sni_brd_type;
#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0044)
#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff004c)
#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0054)
-#define IT_INT2 0x01
-#define IT_INTD 0x02
-#define IT_INTC 0x04
-#define IT_INTB 0x08
-#define IT_INTA 0x10
-#define IT_EISA 0x20
-#define IT_SCSI 0x40
-#define IT_ETH 0x80
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff005c)
#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0064)
#define PCIMT_ECCREG CKSEG1ADDR(0xbfff006c)
@@ -86,14 +86,14 @@ extern unsigned int sni_brd_type;
#define PCIMT_ERRADDR CKSEG1ADDR(0xbfff0040)
#define PCIMT_SYNDROME CKSEG1ADDR(0xbfff0048)
#define PCIMT_ITPEND CKSEG1ADDR(0xbfff0050)
-#define IT_INT2 0x01
-#define IT_INTD 0x02
-#define IT_INTC 0x04
-#define IT_INTB 0x08
-#define IT_INTA 0x10
-#define IT_EISA 0x20
-#define IT_SCSI 0x40
-#define IT_ETH 0x80
+#define IT_INT2 0x01
+#define IT_INTD 0x02
+#define IT_INTC 0x04
+#define IT_INTB 0x08
+#define IT_INTA 0x10
+#define IT_EISA 0x20
+#define IT_SCSI 0x40
+#define IT_ETH 0x80
#define PCIMT_IRQSEL CKSEG1ADDR(0xbfff0058)
#define PCIMT_TESTMEM CKSEG1ADDR(0xbfff0060)
#define PCIMT_ECCREG CKSEG1ADDR(0xbfff0068)
@@ -137,29 +137,29 @@ extern unsigned int sni_brd_type;
/*
* A20R based boards
*/
-#define A20R_PT_CLOCK_BASE CKSEG1ADDR(0xbc040000)
-#define A20R_PT_TIM0_ACK CKSEG1ADDR(0xbc050000)
-#define A20R_PT_TIM1_ACK CKSEG1ADDR(0xbc060000)
+#define A20R_PT_CLOCK_BASE CKSEG1ADDR(0xbc040000)
+#define A20R_PT_TIM0_ACK CKSEG1ADDR(0xbc050000)
+#define A20R_PT_TIM1_ACK CKSEG1ADDR(0xbc060000)
-#define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE
-#define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5)
+#define SNI_A20R_IRQ_BASE MIPS_CPU_IRQ_BASE
+#define SNI_A20R_IRQ_TIMER (SNI_A20R_IRQ_BASE+5)
-#define SNI_PCIT_INT_REG CKSEG1ADDR(0xbfff000c)
+#define SNI_PCIT_INT_REG CKSEG1ADDR(0xbfff000c)
-#define SNI_PCIT_INT_START 24
-#define SNI_PCIT_INT_END 30
+#define SNI_PCIT_INT_START 24
+#define SNI_PCIT_INT_END 30
-#define PCIT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE + 5)
-#define PCIT_IRQ_INTA (SNI_PCIT_INT_START + 0)
-#define PCIT_IRQ_INTB (SNI_PCIT_INT_START + 1)
-#define PCIT_IRQ_INTC (SNI_PCIT_INT_START + 2)
-#define PCIT_IRQ_INTD (SNI_PCIT_INT_START + 3)
-#define PCIT_IRQ_SCSI0 (SNI_PCIT_INT_START + 4)
-#define PCIT_IRQ_SCSI1 (SNI_PCIT_INT_START + 5)
+#define PCIT_IRQ_ETHERNET (MIPS_CPU_IRQ_BASE + 5)
+#define PCIT_IRQ_INTA (SNI_PCIT_INT_START + 0)
+#define PCIT_IRQ_INTB (SNI_PCIT_INT_START + 1)
+#define PCIT_IRQ_INTC (SNI_PCIT_INT_START + 2)
+#define PCIT_IRQ_INTD (SNI_PCIT_INT_START + 3)
+#define PCIT_IRQ_SCSI0 (SNI_PCIT_INT_START + 4)
+#define PCIT_IRQ_SCSI1 (SNI_PCIT_INT_START + 5)
/*
- * Interrupt 0-16 are EISA interrupts. Interrupts from 16 on are assigned
+ * Interrupt 0-16 are EISA interrupts. Interrupts from 16 on are assigned
* to the other interrupts generated by ASIC PCI.
*
* INT2 is a wired-or of the push button interrupt, high temperature interrupt
@@ -204,12 +204,12 @@ extern unsigned int sni_brd_type;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define __SNI_END 3
#endif
-#define SNI_IDPROM_BASE CKSEG1ADDR(0x1ff00000)
+#define SNI_IDPROM_BASE CKSEG1ADDR(0x1ff00000)
#define SNI_IDPROM_MEMSIZE (SNI_IDPROM_BASE + (0x28 ^ __SNI_END))
#define SNI_IDPROM_BRDTYPE (SNI_IDPROM_BASE + (0x29 ^ __SNI_END))
#define SNI_IDPROM_CPUTYPE (SNI_IDPROM_BASE + (0x30 ^ __SNI_END))
-#define SNI_IDPROM_SIZE 0x1000
+#define SNI_IDPROM_SIZE 0x1000
/* board specific init functions */
extern void sni_a20r_init(void);
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 65900dab3ad..d2da53c2c2f 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -11,7 +11,7 @@
#else
# define SECTION_SIZE_BITS 28
#endif
-#define MAX_PHYSMEM_BITS 35
+#define MAX_PHYSMEM_BITS 35
#endif /* CONFIG_SPARSEMEM */
#endif /* _MIPS_SPARSEMEM_H */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index ca61e846ab0..5130c88d642 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -17,7 +17,7 @@
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
- * Simple spin lock operations. There are two variants, one clears IRQ's
+ * Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* These are fair FIFO ticket locks
@@ -222,7 +222,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
-#define arch_write_can_lock(rw) (!(rw)->lock)
+#define arch_write_can_lock(rw) (!(rw)->lock)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h
index c52f36013a9..9b2528e612c 100644
--- a/arch/mips/include/asm/spinlock_types.h
+++ b/arch/mips/include/asm/spinlock_types.h
@@ -11,7 +11,7 @@
typedef union {
/*
- * bits 0..15 : serving_now
+ * bits 0..15 : serving_now
* bits 16..31 : ticket
*/
u32 lock;
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index cb41af5f340..c9938401816 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -218,17 +218,17 @@
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
- .set mips64
- pref 0, 0($28) /* Prefetch the current pointer */
- pref 0, PT_R31(sp) /* Prefetch the $31(ra) */
+ .set mips64
+ pref 0, 0($28) /* Prefetch the current pointer */
+ pref 0, PT_R31(sp) /* Prefetch the $31(ra) */
/* The Octeon multiplier state is affected by general multiply
instructions. It must be saved before and kernel code might
corrupt it */
- jal octeon_mult_save
- LONG_L v1, 0($28) /* Load the current pointer */
+ jal octeon_mult_save
+ LONG_L v1, 0($28) /* Load the current pointer */
/* Restore $31(ra) that was changed by the jal */
- LONG_L ra, PT_R31(sp)
- pref 0, 0(v1) /* Prefetch the current thread */
+ LONG_L ra, PT_R31(sp)
+ pref 0, 0(v1) /* Prefetch the current thread */
#endif
.set pop
.endm
diff --git a/arch/mips/include/asm/string.h b/arch/mips/include/asm/string.h
index 436e3ad352d..29030cb398e 100644
--- a/arch/mips/include/asm/string.h
+++ b/arch/mips/include/asm/string.h
@@ -35,7 +35,7 @@ static __inline__ char *strcpy(char *__dest, __const__ char *__src)
".set\tat\n\t"
".set\treorder"
: "=r" (__dest), "=r" (__src)
- : "0" (__dest), "1" (__src)
+ : "0" (__dest), "1" (__src)
: "memory");
return __xdest;
@@ -62,9 +62,9 @@ static __inline__ char *strncpy(char *__dest, __const__ char *__src, size_t __n)
"2:\n\t"
".set\tat\n\t"
".set\treorder"
- : "=r" (__dest), "=r" (__src), "=r" (__n)
- : "0" (__dest), "1" (__src), "2" (__n)
- : "memory");
+ : "=r" (__dest), "=r" (__src), "=r" (__n)
+ : "0" (__dest), "1" (__src), "2" (__n)
+ : "memory");
return __xdest;
}
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 4f8ddba8c36..fd16bcb6c31 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -30,7 +30,7 @@ extern struct task_struct *ll_task;
#ifdef CONFIG_MIPS_MT_FPAFF
/*
- * Handle the scheduler resume end of FPU affinity management. We do this
+ * Handle the scheduler resume end of FPU affinity management. We do this
* inline to try to keep the overhead down. If we have been forced to run on
* a "CPU" with an FPU because of a previous high level of FP computation,
* but did not actually use the FPU during the most recent time-slice (CU1
@@ -72,7 +72,7 @@ do { \
__save_dsp(prev); \
__clear_software_ll_bit(); \
__usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
- (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
+ (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
} while (0)
#define finish_arch_switch(prev) \
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index b2050b9e64b..178f7924149 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -44,7 +44,7 @@ struct thread_info {
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
- .exec_domain = &default_exec_domain, \
+ .exec_domain = &default_exec_domain, \
.flags = _TIF_FIXADE, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 761f2e92119..debc8009bd5 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -6,8 +6,8 @@
* include/asm-mips/time.h
* header file for the new style time.c file and time services.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -75,7 +75,7 @@ extern int init_r4k_clocksource(void);
static inline int init_mips_clocksource(void)
{
-#ifdef CONFIG_CSRC_R4K
+#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
return init_r4k_clocksource();
#else
return 0;
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h
index 80d9dfcf1e8..c67842bc8ef 100644
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,7 +5,7 @@
* MIPS doesn't need any special per-pte or per-vma handling, except
* we need to flush cache for area to be unmapped.
*/
-#define tlb_start_vma(tlb, vma) \
+#define tlb_start_vma(tlb, vma) \
do { \
if (!tlb->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 259145e07e9..12609a17dc8 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -11,7 +11,7 @@
#include <topology.h>
#ifdef CONFIG_SMP
-#define smt_capable() (smp_num_siblings > 1)
+#define smt_capable() (smp_num_siblings > 1)
#endif
#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index 420ca06b2f4..f41cf3ee82a 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -14,7 +14,7 @@
/*
* Possible status responses for a board_be_handler backend.
*/
-#define MIPS_BE_DISCARD 0 /* return with no action */
+#define MIPS_BE_DISCARD 0 /* return with no action */
#define MIPS_BE_FIXUP 1 /* return to the fixup code */
#define MIPS_BE_FATAL 2 /* treat as an unrecoverable error */
diff --git a/arch/mips/include/asm/txx9/jmr3927.h b/arch/mips/include/asm/txx9/jmr3927.h
index 8808d7f82da..aab959dc30b 100644
--- a/arch/mips/include/asm/txx9/jmr3927.h
+++ b/arch/mips/include/asm/txx9/jmr3927.h
@@ -40,7 +40,7 @@
#define JMR3927_PCIIO_BASE (KSEG1 + JMR3927_PCIIO)
#define JMR3927_IOC_REV_ADDR (JMR3927_IOC_BASE + 0x00000000)
-#define JMR3927_IOC_NVRAMB_ADDR (JMR3927_IOC_BASE + 0x00010000)
+#define JMR3927_IOC_NVRAMB_ADDR (JMR3927_IOC_BASE + 0x00010000)
#define JMR3927_IOC_LED_ADDR (JMR3927_IOC_BASE + 0x00020000)
#define JMR3927_IOC_DIPSW_ADDR (JMR3927_IOC_BASE + 0x00030000)
#define JMR3927_IOC_BREV_ADDR (JMR3927_IOC_BASE + 0x00040000)
@@ -115,9 +115,9 @@
#define JMR3927_NR_IRQ_IRC 16 /* On-Chip IRC */
#define JMR3927_NR_IRQ_IOC 8 /* PCI/MODEM/INT[6:7] */
-#define JMR3927_IRQ_IRC TXX9_IRQ_BASE
-#define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
-#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
+#define JMR3927_IRQ_IRC TXX9_IRQ_BASE
+#define JMR3927_IRQ_IOC (JMR3927_IRQ_IRC + JMR3927_NR_IRQ_IRC)
+#define JMR3927_IRQ_END (JMR3927_IRQ_IOC + JMR3927_NR_IRQ_IOC)
#define JMR3927_IRQ_IRC_INT0 (JMR3927_IRQ_IRC + TX3927_IR_INT0)
#define JMR3927_IRQ_IRC_INT1 (JMR3927_IRQ_IRC + TX3927_IR_INT1)
@@ -127,11 +127,11 @@
#define JMR3927_IRQ_IRC_INT5 (JMR3927_IRQ_IRC + TX3927_IR_INT5)
#define JMR3927_IRQ_IRC_SIO0 (JMR3927_IRQ_IRC + TX3927_IR_SIO0)
#define JMR3927_IRQ_IRC_SIO1 (JMR3927_IRQ_IRC + TX3927_IR_SIO1)
-#define JMR3927_IRQ_IRC_SIO(ch) (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
+#define JMR3927_IRQ_IRC_SIO(ch) (JMR3927_IRQ_IRC + TX3927_IR_SIO(ch))
#define JMR3927_IRQ_IRC_DMA (JMR3927_IRQ_IRC + TX3927_IR_DMA)
#define JMR3927_IRQ_IRC_PIO (JMR3927_IRQ_IRC + TX3927_IR_PIO)
#define JMR3927_IRQ_IRC_PCI (JMR3927_IRQ_IRC + TX3927_IR_PCI)
-#define JMR3927_IRQ_IRC_TMR(ch) (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
+#define JMR3927_IRQ_IRC_TMR(ch) (JMR3927_IRQ_IRC + TX3927_IR_TMR(ch))
#define JMR3927_IRQ_IOC_PCIA (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIA)
#define JMR3927_IRQ_IOC_PCIB (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIB)
#define JMR3927_IRQ_IOC_PCIC (JMR3927_IRQ_IOC + JMR3927_IOC_INTB_PCIC)
@@ -147,7 +147,7 @@
#define JMR3927_IRQ_ETHER0 JMR3927_IRQ_IRC_INT3
/* Clocks */
-#define JMR3927_CORECLK 132710400 /* 132.7MHz */
+#define JMR3927_CORECLK 132710400 /* 132.7MHz */
/*
* TX3927 Pin Configuration:
diff --git a/arch/mips/include/asm/txx9/rbtx4927.h b/arch/mips/include/asm/txx9/rbtx4927.h
index b2adab3d1ac..4060ad26ca9 100644
--- a/arch/mips/include/asm/txx9/rbtx4927.h
+++ b/arch/mips/include/asm/txx9/rbtx4927.h
@@ -1,6 +1,6 @@
/*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
@@ -38,7 +38,7 @@
#define RBTX4927_IMASK_ADDR (IO_BASE + TXX9_CE(2) + 0x00002000)
#define RBTX4927_IMSTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x00002006)
#define RBTX4927_SOFTINT_ADDR (IO_BASE + TXX9_CE(2) + 0x00003000)
-#define RBTX4927_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f000)
+#define RBTX4927_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f000)
#define RBTX4927_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f002)
#define RBTX4927_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x0000f006)
#define RBTX4927_BRAMRTC_BASE (IO_BASE + TXX9_CE(2) + 0x00010000)
@@ -50,7 +50,7 @@
#define rbtx4927_imask_addr ((__u8 __iomem *)RBTX4927_IMASK_ADDR)
#define rbtx4927_imstat_addr ((__u8 __iomem *)RBTX4927_IMSTAT_ADDR)
#define rbtx4927_softint_addr ((__u8 __iomem *)RBTX4927_SOFTINT_ADDR)
-#define rbtx4927_softreset_addr ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
+#define rbtx4927_softreset_addr ((__u8 __iomem *)RBTX4927_SOFTRESET_ADDR)
#define rbtx4927_softresetlock_addr \
((__u8 __iomem *)RBTX4927_SOFTRESETLOCK_ADDR)
#define rbtx4927_pcireset_addr ((__u8 __iomem *)RBTX4927_PCIRESET_ADDR)
diff --git a/arch/mips/include/asm/txx9/rbtx4938.h b/arch/mips/include/asm/txx9/rbtx4938.h
index 9f0441a2812..9c969dd3c6e 100644
--- a/arch/mips/include/asm/txx9/rbtx4938.h
+++ b/arch/mips/include/asm/txx9/rbtx4938.h
@@ -36,7 +36,7 @@
#define RBTX4938_SPICS_ADDR (IO_BASE + TXX9_CE(2) + 0x00005002)
#define RBTX4938_SFPWR_ADDR (IO_BASE + TXX9_CE(2) + 0x00005008)
#define RBTX4938_SFVOL_ADDR (IO_BASE + TXX9_CE(2) + 0x0000500a)
-#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
+#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
#define RBTX4938_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x00007002)
#define RBTX4938_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007004)
#define RBTX4938_ETHER_BASE (IO_BASE + TXX9_CE(2) + 0x00020000)
@@ -78,7 +78,7 @@
#define rbtx4938_spics_addr ((__u8 __iomem *)RBTX4938_SPICS_ADDR)
#define rbtx4938_sfpwr_addr ((__u8 __iomem *)RBTX4938_SFPWR_ADDR)
#define rbtx4938_sfvol_addr ((__u8 __iomem *)RBTX4938_SFVOL_ADDR)
-#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
+#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
#define rbtx4938_softresetlock_addr \
((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR)
#define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR)
@@ -94,7 +94,7 @@
/* These are the virtual IRQ numbers, we divide all IRQ's into
* 'spaces', the 'space' determines where and how to enable/disable
- * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
+ * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
* IRQ hardware is supported.
*/
#define RBTX4938_NR_IRQ_IOC 8
@@ -103,18 +103,18 @@
#define RBTX4938_IRQ_IOC (TXX9_IRQ_BASE + TX4938_NUM_IR)
#define RBTX4938_IRQ_END (RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC)
-#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
-#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
-#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
-#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
+#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
+#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
+#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
+#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
#define RBTX4938_IRQ_IRC_DMA(ch, n) (RBTX4938_IRQ_IRC + TX4938_IR_DMA(ch, n))
#define RBTX4938_IRQ_IRC_PIO (RBTX4938_IRQ_IRC + TX4938_IR_PIO)
#define RBTX4938_IRQ_IRC_PDMAC (RBTX4938_IRQ_IRC + TX4938_IR_PDMAC)
#define RBTX4938_IRQ_IRC_PCIC (RBTX4938_IRQ_IRC + TX4938_IR_PCIC)
-#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
+#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
#define RBTX4938_IRQ_IRC_NDFMC (RBTX4938_IRQ_IRC + TX4938_IR_NDFMC)
-#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
-#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
+#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
+#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
#define RBTX4938_IRQ_IRC_ACLC (RBTX4938_IRQ_IRC + TX4938_IR_ACLC)
#define RBTX4938_IRQ_IRC_ACLCPME (RBTX4938_IRQ_IRC + TX4938_IR_ACLCPME)
#define RBTX4938_IRQ_IRC_PCIC1 (RBTX4938_IRQ_IRC + TX4938_IR_PCIC1)
diff --git a/arch/mips/include/asm/txx9/rbtx4939.h b/arch/mips/include/asm/txx9/rbtx4939.h
index e517899794a..6157bfd9084 100644
--- a/arch/mips/include/asm/txx9/rbtx4939.h
+++ b/arch/mips/include/asm/txx9/rbtx4939.h
@@ -17,7 +17,7 @@
/* Address map */
#define RBTX4939_IOC_REG_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
-#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
+#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
#define RBTX4939_IOC_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000002)
#define RBTX4939_CONFIG1_ADDR (IO_BASE + TXX9_CE(1) + 0x00000004)
#define RBTX4939_CONFIG2_ADDR (IO_BASE + TXX9_CE(1) + 0x00000006)
@@ -46,9 +46,9 @@
#define RBTX4939_VPSIN_ADDR (IO_BASE + TXX9_CE(1) + 0x0000500c)
#define RBTX4939_7SEG_ADDR(s, ch) \
(IO_BASE + TXX9_CE(1) + 0x00006000 + (s) * 16 + ((ch) & 3) * 2)
-#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
+#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
#define RBTX4939_RESETEN_ADDR (IO_BASE + TXX9_CE(1) + 0x00007002)
-#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
+#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
#define RBTX4939_ETHER_BASE (IO_BASE + TXX9_CE(1) + 0x00020000)
/* Ethernet port address */
@@ -77,11 +77,11 @@
#define RBTX4939_PE2_CIR 0x08
#define RBTX4939_PE2_SPI 0x10
#define RBTX4939_PE2_GPIO 0x20
-#define RBTX4939_PE3_VP 0x01
+#define RBTX4939_PE3_VP 0x01
#define RBTX4939_PE3_VP_P 0x02
#define RBTX4939_PE3_VP_S 0x04
-#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
+#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
#define rbtx4939_ioc_rev_addr ((u8 __iomem *)RBTX4939_IOC_REV_ADDR)
#define rbtx4939_config1_addr ((u8 __iomem *)RBTX4939_CONFIG1_ADDR)
#define rbtx4939_config2_addr ((u8 __iomem *)RBTX4939_CONFIG2_ADDR)
@@ -110,9 +110,9 @@
#define rbtx4939_vpsin_addr ((u8 __iomem *)RBTX4939_VPSIN_ADDR)
#define rbtx4939_7seg_addr(s, ch) \
((u8 __iomem *)RBTX4939_7SEG_ADDR(s, ch))
-#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
+#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
#define rbtx4939_reseten_addr ((u8 __iomem *)RBTX4939_RESETEN_ADDR)
-#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
+#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
/*
* IRQ mappings
diff --git a/arch/mips/include/asm/txx9/smsc_fdc37m81x.h b/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
index d1d6332b4ca..926d08f1846 100644
--- a/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
+++ b/arch/mips/include/asm/txx9/smsc_fdc37m81x.h
@@ -18,43 +18,43 @@
/* Common Registers */
#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
#define SMSC_FDC37M81X_CONFIG_DATA 0x01
-#define SMSC_FDC37M81X_CONF 0x02
-#define SMSC_FDC37M81X_INDEX 0x03
-#define SMSC_FDC37M81X_DNUM 0x07
-#define SMSC_FDC37M81X_DID 0x20
-#define SMSC_FDC37M81X_DREV 0x21
-#define SMSC_FDC37M81X_PCNT 0x22
-#define SMSC_FDC37M81X_PMGT 0x23
-#define SMSC_FDC37M81X_OSC 0x24
-#define SMSC_FDC37M81X_CONFPA0 0x26
-#define SMSC_FDC37M81X_CONFPA1 0x27
-#define SMSC_FDC37M81X_TEST4 0x2B
-#define SMSC_FDC37M81X_TEST5 0x2C
-#define SMSC_FDC37M81X_TEST1 0x2D
-#define SMSC_FDC37M81X_TEST2 0x2E
-#define SMSC_FDC37M81X_TEST3 0x2F
+#define SMSC_FDC37M81X_CONF 0x02
+#define SMSC_FDC37M81X_INDEX 0x03
+#define SMSC_FDC37M81X_DNUM 0x07
+#define SMSC_FDC37M81X_DID 0x20
+#define SMSC_FDC37M81X_DREV 0x21
+#define SMSC_FDC37M81X_PCNT 0x22
+#define SMSC_FDC37M81X_PMGT 0x23
+#define SMSC_FDC37M81X_OSC 0x24
+#define SMSC_FDC37M81X_CONFPA0 0x26
+#define SMSC_FDC37M81X_CONFPA1 0x27
+#define SMSC_FDC37M81X_TEST4 0x2B
+#define SMSC_FDC37M81X_TEST5 0x2C
+#define SMSC_FDC37M81X_TEST1 0x2D
+#define SMSC_FDC37M81X_TEST2 0x2E
+#define SMSC_FDC37M81X_TEST3 0x2F
/* Logical device numbers */
-#define SMSC_FDC37M81X_FDD 0x00
-#define SMSC_FDC37M81X_PARALLEL 0x03
-#define SMSC_FDC37M81X_SERIAL1 0x04
-#define SMSC_FDC37M81X_SERIAL2 0x05
-#define SMSC_FDC37M81X_KBD 0x07
-#define SMSC_FDC37M81X_AUXIO 0x08
-#define SMSC_FDC37M81X_NONE 0xff
+#define SMSC_FDC37M81X_FDD 0x00
+#define SMSC_FDC37M81X_PARALLEL 0x03
+#define SMSC_FDC37M81X_SERIAL1 0x04
+#define SMSC_FDC37M81X_SERIAL2 0x05
+#define SMSC_FDC37M81X_KBD 0x07
+#define SMSC_FDC37M81X_AUXIO 0x08
+#define SMSC_FDC37M81X_NONE 0xff
/* Logical device Config Registers */
-#define SMSC_FDC37M81X_ACTIVE 0x30
+#define SMSC_FDC37M81X_ACTIVE 0x30
#define SMSC_FDC37M81X_BASEADDR0 0x60
#define SMSC_FDC37M81X_BASEADDR1 0x61
-#define SMSC_FDC37M81X_INT 0x70
-#define SMSC_FDC37M81X_INT2 0x72
-#define SMSC_FDC37M81X_LDCR_F0 0xF0
+#define SMSC_FDC37M81X_INT 0x70
+#define SMSC_FDC37M81X_INT2 0x72
+#define SMSC_FDC37M81X_LDCR_F0 0xF0
/* Chip Config Values */
#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
-#define SMSC_FDC37M81X_CHIP_ID 0x4d
+#define SMSC_FDC37M81X_CHIP_ID 0x4d
unsigned long smsc_fdc37m81x_init(unsigned long port);
diff --git a/arch/mips/include/asm/txx9/tx3927.h b/arch/mips/include/asm/txx9/tx3927.h
index dc30c8d4206..149fab4f832 100644
--- a/arch/mips/include/asm/txx9/tx3927.h
+++ b/arch/mips/include/asm/txx9/tx3927.h
@@ -8,8 +8,8 @@
#ifndef __ASM_TXX9_TX3927_H
#define __ASM_TXX9_TX3927_H
-#define TX3927_REG_BASE 0xfffe0000UL
-#define TX3927_REG_SIZE 0x00010000
+#define TX3927_REG_BASE 0xfffe0000UL
+#define TX3927_REG_SIZE 0x00010000
#define TX3927_SDRAMC_REG (TX3927_REG_BASE + 0x8000)
#define TX3927_ROMC_REG (TX3927_REG_BASE + 0x9000)
#define TX3927_DMA_REG (TX3927_REG_BASE + 0xb000)
@@ -191,8 +191,8 @@ struct tx3927_ccfg_reg {
#define TX3927_DMA_CCR_XFSZ_1W TX3927_DMA_CCR_XFSZ(2)
#define TX3927_DMA_CCR_XFSZ_4W TX3927_DMA_CCR_XFSZ(4)
#define TX3927_DMA_CCR_XFSZ_8W TX3927_DMA_CCR_XFSZ(5)
-#define TX3927_DMA_CCR_XFSZ_16W TX3927_DMA_CCR_XFSZ(6)
-#define TX3927_DMA_CCR_XFSZ_32W TX3927_DMA_CCR_XFSZ(7)
+#define TX3927_DMA_CCR_XFSZ_16W TX3927_DMA_CCR_XFSZ(6)
+#define TX3927_DMA_CCR_XFSZ_32W TX3927_DMA_CCR_XFSZ(7)
#define TX3927_DMA_CCR_MEMIO 0x00000002
#define TX3927_DMA_CCR_ONEAD 0x00000001
@@ -250,7 +250,7 @@ struct tx3927_ccfg_reg {
/* see PCI_BASE_ADDRESS_XXX in linux/pci.h */
/* bits for PBAPMC */
-#define TX3927_PCIC_PBAPMC_RPBA 0x00000004
+#define TX3927_PCIC_PBAPMC_RPBA 0x00000004
#define TX3927_PCIC_PBAPMC_PBAEN 0x00000002
#define TX3927_PCIC_PBAPMC_BMCEN 0x00000001
@@ -282,7 +282,7 @@ struct tx3927_ccfg_reg {
#define TX3927_CCFG_TLBOFF 0x00020000
#define TX3927_CCFG_BEOW 0x00010000
#define TX3927_CCFG_WR 0x00008000
-#define TX3927_CCFG_TOE 0x00004000
+#define TX3927_CCFG_TOE 0x00004000
#define TX3927_CCFG_PCIXARB 0x00002000
#define TX3927_CCFG_PCI3 0x00001000
#define TX3927_CCFG_PSNP 0x00000800
@@ -301,8 +301,8 @@ struct tx3927_ccfg_reg {
#define TX3927_PCFG_SELALL 0x0003ffff
#define TX3927_PCFG_SELCS 0x00020000
#define TX3927_PCFG_SELDSF 0x00010000
-#define TX3927_PCFG_SELSIOC_ALL 0x0000c000
-#define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
+#define TX3927_PCFG_SELSIOC_ALL 0x0000c000
+#define TX3927_PCFG_SELSIOC(ch) (0x00004000<<(ch))
#define TX3927_PCFG_SELSIO_ALL 0x00003000
#define TX3927_PCFG_SELSIO(ch) (0x00001000<<(ch))
#define TX3927_PCFG_SELTMR_ALL 0x00000e00
diff --git a/arch/mips/include/asm/txx9/tx4927.h b/arch/mips/include/asm/txx9/tx4927.h
index 18c98c52afd..284eea752d5 100644
--- a/arch/mips/include/asm/txx9/tx4927.h
+++ b/arch/mips/include/asm/txx9/tx4927.h
@@ -1,6 +1,6 @@
/*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2006 MontaVista Software Inc.
*
@@ -33,11 +33,11 @@
#include <asm/txx9/tx4927pcic.h>
#ifdef CONFIG_64BIT
-#define TX4927_REG_BASE 0xffffffffff1f0000UL
+#define TX4927_REG_BASE 0xffffffffff1f0000UL
#else
-#define TX4927_REG_BASE 0xff1f0000UL
+#define TX4927_REG_BASE 0xff1f0000UL
#endif
-#define TX4927_REG_SIZE 0x00010000
+#define TX4927_REG_SIZE 0x00010000
#define TX4927_SDRAMC_REG (TX4927_REG_BASE + 0x8000)
#define TX4927_EBUSC_REG (TX4927_REG_BASE + 0x9000)
@@ -118,10 +118,10 @@ struct tx4927_ccfg_reg {
#define TX4927_CCFG_DIVMODE_2 (0x4 << 17)
#define TX4927_CCFG_DIVMODE_3 (0x5 << 17)
#define TX4927_CCFG_DIVMODE_4 (0x6 << 17)
-#define TX4927_CCFG_DIVMODE_2_5 (0x7 << 17)
+#define TX4927_CCFG_DIVMODE_2_5 (0x7 << 17)
#define TX4927_CCFG_BEOW 0x00010000
#define TX4927_CCFG_WR 0x00008000
-#define TX4927_CCFG_TOE 0x00004000
+#define TX4927_CCFG_TOE 0x00004000
#define TX4927_CCFG_PCIARB 0x00002000
#define TX4927_CCFG_PCIDIVMODE_MASK 0x00001800
#define TX4927_CCFG_PCIDIVMODE_2_5 0x00000000
@@ -136,10 +136,10 @@ struct tx4927_ccfg_reg {
/* PCFG : Pin Configuration */
#define TX4927_PCFG_SDCLKDLY_MASK 0x30000000
-#define TX4927_PCFG_SDCLKDLY(d) ((d)<<28)
+#define TX4927_PCFG_SDCLKDLY(d) ((d)<<28)
#define TX4927_PCFG_SYSCLKEN 0x08000000
-#define TX4927_PCFG_SDCLKEN_ALL 0x07800000
-#define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
+#define TX4927_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4927_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
#define TX4927_PCFG_PCICLKEN_ALL 0x003f0000
#define TX4927_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
#define TX4927_PCFG_SEL2 0x00000200
diff --git a/arch/mips/include/asm/txx9/tx4927pcic.h b/arch/mips/include/asm/txx9/tx4927pcic.h
index c470b8a5fe5..9eab2698cae 100644
--- a/arch/mips/include/asm/txx9/tx4927pcic.h
+++ b/arch/mips/include/asm/txx9/tx4927pcic.h
@@ -93,7 +93,7 @@ struct tx4927_pcic_reg {
/* bits for PBACFG */
#define TX4927_PCIC_PBACFG_FIXPA 0x00000008
-#define TX4927_PCIC_PBACFG_RPBA 0x00000004
+#define TX4927_PCIC_PBACFG_RPBA 0x00000004
#define TX4927_PCIC_PBACFG_PBAEN 0x00000002
#define TX4927_PCIC_PBACFG_BMCEN 0x00000001
@@ -165,7 +165,7 @@ struct tx4927_pcic_reg {
#define TX4927_PCIC_PDMCFG_CHNEN 0x00000080
#define TX4927_PCIC_PDMCFG_XFRACT 0x00000040
#define TX4927_PCIC_PDMCFG_BSWAP 0x00000020
-#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK 0x0000000c
+#define TX4927_PCIC_PDMCFG_XFRSIZE_MASK 0x0000000c
#define TX4927_PCIC_PDMCFG_XFRSIZE_1DW 0x00000000
#define TX4927_PCIC_PDMCFG_XFRSIZE_1QW 0x00000004
#define TX4927_PCIC_PDMCFG_XFRSIZE_4QW 0x00000008
@@ -174,7 +174,7 @@ struct tx4927_pcic_reg {
/* bits for PDMSTS */
#define TX4927_PCIC_PDMSTS_REQCNT_MASK 0x3f000000
-#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK 0x00f00000
+#define TX4927_PCIC_PDMSTS_FIFOCNT_MASK 0x00f00000
#define TX4927_PCIC_PDMSTS_FIFOWP_MASK 0x000c0000
#define TX4927_PCIC_PDMSTS_FIFORP_MASK 0x00030000
#define TX4927_PCIC_PDMSTS_ERRINT 0x00000800
diff --git a/arch/mips/include/asm/txx9/tx4938.h b/arch/mips/include/asm/txx9/tx4938.h
index 8a178f186f7..6ca767ee646 100644
--- a/arch/mips/include/asm/txx9/tx4938.h
+++ b/arch/mips/include/asm/txx9/tx4938.h
@@ -16,11 +16,11 @@
#include <asm/txx9/tx4927.h>
#ifdef CONFIG_64BIT
-#define TX4938_REG_BASE 0xffffffffff1f0000UL /* == TX4937_REG_BASE */
+#define TX4938_REG_BASE 0xffffffffff1f0000UL /* == TX4937_REG_BASE */
#else
-#define TX4938_REG_BASE 0xff1f0000UL /* == TX4937_REG_BASE */
+#define TX4938_REG_BASE 0xff1f0000UL /* == TX4937_REG_BASE */
#endif
-#define TX4938_REG_SIZE 0x00010000 /* == TX4937_REG_SIZE */
+#define TX4938_REG_SIZE 0x00010000 /* == TX4937_REG_SIZE */
/* NDFMC, SRAMC, PCIC1, SPIC: TX4938 only */
#define TX4938_NDFMC_REG (TX4938_REG_BASE + 0x5000)
@@ -72,16 +72,16 @@ struct tx4938_ccfg_reg {
#define TX4938_NUM_IR_DMA 4
#define TX4938_IR_DMA(ch, n) ((ch ? 27 : 10) + (n)) /* 10-13, 27-30 */
#define TX4938_IR_PIO 14
-#define TX4938_IR_PDMAC 15
+#define TX4938_IR_PDMAC 15
#define TX4938_IR_PCIC 16
#define TX4938_NUM_IR_TMR 3
#define TX4938_IR_TMR(n) (17 + (n))
-#define TX4938_IR_NDFMC 21
+#define TX4938_IR_NDFMC 21
#define TX4938_IR_PCIERR 22
#define TX4938_IR_PCIPME 23
#define TX4938_IR_ACLC 24
#define TX4938_IR_ACLCPME 25
-#define TX4938_IR_PCIC1 26
+#define TX4938_IR_PCIC1 26
#define TX4938_IR_SPI 31
#define TX4938_NUM_IR 32
/* multiplex */
@@ -105,10 +105,10 @@ struct tx4938_ccfg_reg {
#define TX4938_CCFG_PCI1_66 0x00200000
#define TX4938_CCFG_DIVMODE_MASK 0x001e0000
#define TX4938_CCFG_DIVMODE_2 (0x4 << 17)
-#define TX4938_CCFG_DIVMODE_2_5 (0xf << 17)
+#define TX4938_CCFG_DIVMODE_2_5 (0xf << 17)
#define TX4938_CCFG_DIVMODE_3 (0x5 << 17)
#define TX4938_CCFG_DIVMODE_4 (0x6 << 17)
-#define TX4938_CCFG_DIVMODE_4_5 (0xd << 17)
+#define TX4938_CCFG_DIVMODE_4_5 (0xd << 17)
#define TX4938_CCFG_DIVMODE_8 (0x0 << 17)
#define TX4938_CCFG_DIVMODE_10 (0xb << 17)
#define TX4938_CCFG_DIVMODE_12 (0x1 << 17)
@@ -116,7 +116,7 @@ struct tx4938_ccfg_reg {
#define TX4938_CCFG_DIVMODE_18 (0x9 << 17)
#define TX4938_CCFG_BEOW 0x00010000
#define TX4938_CCFG_WR 0x00008000
-#define TX4938_CCFG_TOE 0x00004000
+#define TX4938_CCFG_TOE 0x00004000
#define TX4938_CCFG_PCIARB 0x00002000
#define TX4938_CCFG_PCIDIVMODE_MASK 0x00001c00
#define TX4938_CCFG_PCIDIVMODE_4 (0x1 << 10)
@@ -141,10 +141,10 @@ struct tx4938_ccfg_reg {
#define TX4938_PCFG_SPI_SEL 0x0800000000000000ULL
#define TX4938_PCFG_NDF_SEL 0x0400000000000000ULL
#define TX4938_PCFG_SDCLKDLY_MASK 0x30000000
-#define TX4938_PCFG_SDCLKDLY(d) ((d)<<28)
+#define TX4938_PCFG_SDCLKDLY(d) ((d)<<28)
#define TX4938_PCFG_SYSCLKEN 0x08000000
-#define TX4938_PCFG_SDCLKEN_ALL 0x07800000
-#define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
+#define TX4938_PCFG_SDCLKEN_ALL 0x07800000
+#define TX4938_PCFG_SDCLKEN(ch) (0x00800000<<(ch))
#define TX4938_PCFG_PCICLKEN_ALL 0x003f0000
#define TX4938_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
#define TX4938_PCFG_SEL2 0x00000200
@@ -230,8 +230,8 @@ struct tx4938_ccfg_reg {
#define TX4938_DMA_CCR_XFSZ_2W TX4938_DMA_CCR_XFSZ(3)
#define TX4938_DMA_CCR_XFSZ_4W TX4938_DMA_CCR_XFSZ(4)
#define TX4938_DMA_CCR_XFSZ_8W TX4938_DMA_CCR_XFSZ(5)
-#define TX4938_DMA_CCR_XFSZ_16W TX4938_DMA_CCR_XFSZ(6)
-#define TX4938_DMA_CCR_XFSZ_32W TX4938_DMA_CCR_XFSZ(7)
+#define TX4938_DMA_CCR_XFSZ_16W TX4938_DMA_CCR_XFSZ(6)
+#define TX4938_DMA_CCR_XFSZ_32W TX4938_DMA_CCR_XFSZ(7)
#define TX4938_DMA_CCR_MEMIO 0x00000002
#define TX4938_DMA_CCR_SNGAD 0x00000001
@@ -263,9 +263,9 @@ struct tx4938_ccfg_reg {
#define TX4938_REV_PCODE() \
((__u32)__raw_readq(&tx4938_ccfgptr->crir) >> 16)
-#define tx4938_ccfg_clear(bits) tx4927_ccfg_clear(bits)
+#define tx4938_ccfg_clear(bits) tx4927_ccfg_clear(bits)
#define tx4938_ccfg_set(bits) tx4927_ccfg_set(bits)
-#define tx4938_ccfg_change(change, new) tx4927_ccfg_change(change, new)
+#define tx4938_ccfg_change(change, new) tx4927_ccfg_change(change, new)
#define TX4938_SDRAMC_CR(ch) TX4927_SDRAMC_CR(ch)
#define TX4938_SDRAMC_BA(ch) TX4927_SDRAMC_BA(ch)
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
index d4f342cd593..6d667087f2a 100644
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ b/arch/mips/include/asm/txx9/tx4939.h
@@ -14,11 +14,11 @@
#include <asm/txx9/tx4938.h>
#ifdef CONFIG_64BIT
-#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
+#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
#else
-#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
+#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
#endif
-#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
+#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
#define TX4939_ATA_REG(ch) (TX4939_REG_BASE + 0x3000 + (ch) * 0x1000)
#define TX4939_NDFMC_REG (TX4939_REG_BASE + 0x5000)
@@ -189,14 +189,14 @@ struct tx4939_vpc_desc {
#define TX4939_IR_INT(n) (3 + (n))
#define TX4939_NUM_IR_ETH 2
#define TX4939_IR_ETH(n) ((n) ? 43 : 6)
-#define TX4939_IR_VIDEO 7
+#define TX4939_IR_VIDEO 7
#define TX4939_IR_CIR 8
#define TX4939_NUM_IR_SIO 4
#define TX4939_IR_SIO(n) ((n) ? 43 + (n) : 9) /* 9,44-46 */
#define TX4939_NUM_IR_DMA 4
#define TX4939_IR_DMA(ch, n) (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */
#define TX4939_IR_IRC 14
-#define TX4939_IR_PDMAC 15
+#define TX4939_IR_PDMAC 15
#define TX4939_NUM_IR_TMR 6
#define TX4939_IR_TMR(n) (((n) >= 3 ? 45 : 16) + (n)) /* 16-18,48-50 */
#define TX4939_NUM_IR_ATA 2
@@ -210,10 +210,10 @@ struct tx4939_vpc_desc {
#define TX4939_IR_I2C 33
#define TX4939_IR_SPI 34
#define TX4939_IR_PCIC 35
-#define TX4939_IR_PCIC1 36
+#define TX4939_IR_PCIC1 36
#define TX4939_IR_PCIERR 37
#define TX4939_IR_PCIPME 38
-#define TX4939_IR_NDFMC 39
+#define TX4939_IR_NDFMC 39
#define TX4939_IR_ACLCPME 40
#define TX4939_IR_RTC 41
#define TX4939_IR_RND 42
@@ -239,7 +239,7 @@ struct tx4939_vpc_desc {
#define TX4939_CCFG_PCI66 0x00800000
#define TX4939_CCFG_PCIMODE 0x00400000
#define TX4939_CCFG_SSCG 0x00100000
-#define TX4939_CCFG_MULCLK_MASK 0x000e0000
+#define TX4939_CCFG_MULCLK_MASK 0x000e0000
#define TX4939_CCFG_MULCLK_8 (0x7 << 17)
#define TX4939_CCFG_MULCLK_9 (0x0 << 17)
#define TX4939_CCFG_MULCLK_10 (0x1 << 17)
@@ -250,7 +250,7 @@ struct tx4939_vpc_desc {
#define TX4939_CCFG_MULCLK_15 (0x6 << 17)
#define TX4939_CCFG_BEOW 0x00010000
#define TX4939_CCFG_WR 0x00008000
-#define TX4939_CCFG_TOE 0x00004000
+#define TX4939_CCFG_TOE 0x00004000
#define TX4939_CCFG_PCIARB 0x00002000
#define TX4939_CCFG_YDIVMODE_MASK 0x00001c00
#define TX4939_CCFG_YDIVMODE_2 (0x0 << 10)
@@ -275,7 +275,7 @@ struct tx4939_vpc_desc {
#define TX4939_PCFG_I2CMODE 0x1000000000000000ULL
#define TX4939_PCFG_I2SMODE_MASK 0x0c00000000000000ULL
#define TX4939_PCFG_I2SMODE_GPIO 0x0c00000000000000ULL
-#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
+#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
#define TX4939_PCFG_I2SMODE_I2S_ALT 0x0400000000000000ULL
#define TX4939_PCFG_I2SMODE_ACLC 0x0000000000000000ULL
#define TX4939_PCFG_SIO3MODE 0x0200000000000000ULL
@@ -392,15 +392,15 @@ struct tx4939_vpc_desc {
/*
* CRYPTO
*/
-#define TX4939_CRYPTO_CSR_SAESO 0x08000000
-#define TX4939_CRYPTO_CSR_SAESI 0x04000000
-#define TX4939_CRYPTO_CSR_SDESO 0x02000000
-#define TX4939_CRYPTO_CSR_SDESI 0x01000000
+#define TX4939_CRYPTO_CSR_SAESO 0x08000000
+#define TX4939_CRYPTO_CSR_SAESI 0x04000000
+#define TX4939_CRYPTO_CSR_SDESO 0x02000000
+#define TX4939_CRYPTO_CSR_SDESI 0x01000000
#define TX4939_CRYPTO_CSR_INDXBST_MASK 0x00700000
#define TX4939_CRYPTO_CSR_INDXBST(n) ((n) << 20)
-#define TX4939_CRYPTO_CSR_TOINT 0x00080000
-#define TX4939_CRYPTO_CSR_DCINT 0x00040000
-#define TX4939_CRYPTO_CSR_GBINT 0x00010000
+#define TX4939_CRYPTO_CSR_TOINT 0x00080000
+#define TX4939_CRYPTO_CSR_DCINT 0x00040000
+#define TX4939_CRYPTO_CSR_GBINT 0x00010000
#define TX4939_CRYPTO_CSR_INDXAST_MASK 0x0000e000
#define TX4939_CRYPTO_CSR_INDXAST(n) ((n) << 13)
#define TX4939_CRYPTO_CSR_CSWAP_MASK 0x00001800
@@ -418,7 +418,7 @@ struct tx4939_vpc_desc {
#define TX4939_CRYPTO_CSR_PDINT_END 0x00000040
#define TX4939_CRYPTO_CSR_PDINT_NEXT 0x00000080
#define TX4939_CRYPTO_CSR_PDINT_NONE 0x000000c0
-#define TX4939_CRYPTO_CSR_GINTE 0x00000008
+#define TX4939_CRYPTO_CSR_GINTE 0x00000008
#define TX4939_CRYPTO_CSR_RSTD 0x00000004
#define TX4939_CRYPTO_CSR_RSTC 0x00000002
#define TX4939_CRYPTO_CSR_ENCR 0x00000001
@@ -442,7 +442,7 @@ struct tx4939_vpc_desc {
#define TX4939_CRYPTO_DESC_START 0x00000200
#define TX4939_CRYPTO_DESC_END 0x00000100
#define TX4939_CRYPTO_DESC_XOR 0x00000010
-#define TX4939_CRYPTO_DESC_LAST 0x00000008
+#define TX4939_CRYPTO_DESC_LAST 0x00000008
#define TX4939_CRYPTO_DESC_ERR_MASK 0x00000006
#define TX4939_CRYPTO_DESC_ERR_NONE 0x00000000
#define TX4939_CRYPTO_DESC_ERR_TOUT 0x00000002
@@ -457,7 +457,7 @@ struct tx4939_vpc_desc {
#define TX4939_CRYPTO_NR_SET 6
-#define TX4939_CRYPTO_RCSR_INTE 0x00000008
+#define TX4939_CRYPTO_RCSR_INTE 0x00000008
#define TX4939_CRYPTO_RCSR_RST 0x00000004
#define TX4939_CRYPTO_RCSR_FIN 0x00000002
#define TX4939_CRYPTO_RCSR_ST 0x00000001
@@ -480,8 +480,8 @@ struct tx4939_vpc_desc {
#define TX4939_VPC_CTRLA_PDINT_ALL 0x00000000
#define TX4939_VPC_CTRLA_PDINT_NEXT 0x00000010
#define TX4939_VPC_CTRLA_PDINT_NONE 0x00000030
-#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
-#define TX4939_VPC_CTRLA_VDMODE 0x00000004
+#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
+#define TX4939_VPC_CTRLA_VDMODE 0x00000004
#define TX4939_VPC_CTRLA_VDFOR 0x00000002
#define TX4939_VPC_CTRLA_ENVPC 0x00000001
@@ -512,9 +512,9 @@ struct tx4939_vpc_desc {
((__u32)((__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_BCFG_MASK) \
>> 32))
-#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
+#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
#define tx4939_ccfg_set(bits) tx4938_ccfg_set(bits)
-#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
+#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
#define TX4939_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
#define TX4939_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
@@ -522,7 +522,7 @@ struct tx4939_vpc_desc {
#define TX4939_EBUSC_WIDTH(ch) \
(16 >> ((__u32)(TX4939_EBUSC_CR(ch) >> 20) & 0x1))
-/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2 (14.745MHz for MST 20MHz) */
+/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2 (14.745MHz for MST 20MHz) */
#define TX4939_SCLK0(mst) \
((((mst) + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2)
diff --git a/arch/mips/include/asm/txx9tmr.h b/arch/mips/include/asm/txx9tmr.h
index 67f70a8f09b..466a3def386 100644
--- a/arch/mips/include/asm/txx9tmr.h
+++ b/arch/mips/include/asm/txx9tmr.h
@@ -59,9 +59,9 @@ void txx9_clockevent_init(unsigned long baseaddr, int irq,
void txx9_tmr_init(unsigned long baseaddr);
#ifdef CONFIG_CPU_TX39XX
-#define TXX9_TIMER_BITS 24
+#define TXX9_TIMER_BITS 24
#else
-#define TXX9_TIMER_BITS 32
+#define TXX9_TIMER_BITS 32
#endif
#endif /* __ASM_TXX9TMR_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 3b92efef56d..bd87e36bf26 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -87,12 +87,12 @@ extern u64 __ua_limit;
/*
* access_ok: - Checks if a user space pointer is valid
* @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- * to write to a block, it is always safe to read from it.
+ * %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ * to write to a block, it is always safe to read from it.
* @addr: User space pointer to start of block to check
* @size: Size of block to check
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Checks if a pointer to a block of memory in user space is valid.
*
@@ -124,10 +124,10 @@ extern u64 __ua_limit;
/*
* put_user: - Write a simple value into user space.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -138,15 +138,15 @@ extern u64 __ua_limit;
*
* Returns zero on success, or -EFAULT on error.
*/
-#define put_user(x,ptr) \
+#define put_user(x,ptr) \
__put_user_check((x), (ptr), sizeof(*(ptr)))
/*
* get_user: - Get a simple variable from user space.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -163,10 +163,10 @@ extern u64 __ua_limit;
/*
* __put_user: - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -185,10 +185,10 @@ extern u64 __ua_limit;
/*
* __get_user: - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -390,10 +390,10 @@ extern void __put_user_unknown(void);
/*
* put_user_unaligned: - Write a simple value into user space.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -409,10 +409,10 @@ extern void __put_user_unknown(void);
/*
* get_user_unaligned: - Get a simple variable from user space.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -429,10 +429,10 @@ extern void __put_user_unknown(void);
/*
* __put_user_unaligned: - Write a simple value into user space, with less checking.
- * @x: Value to copy to user space.
+ * @x: Value to copy to user space.
* @ptr: Destination address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple value from kernel space to user
* space. It supports simple types like char and int, but not larger
@@ -451,10 +451,10 @@ extern void __put_user_unknown(void);
/*
* __get_user_unaligned: - Get a simple variable from user space, with less checking.
- * @x: Variable to store result.
+ * @x: Variable to store result.
* @ptr: Source address, in user space.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* This macro copies a single simple variable from user space to kernel
* space. It supports simple types like char and int, but not larger
@@ -543,7 +543,7 @@ do { \
*/
#define __get_user_unaligned_asm_ll32(val, addr) \
{ \
- unsigned long long __gu_tmp; \
+ unsigned long long __gu_tmp; \
\
__asm__ __volatile__( \
"1: ulw %1, (%3) \n" \
@@ -631,7 +631,7 @@ do { \
#define __put_user_unaligned_asm_ll32(ptr) \
{ \
__asm__ __volatile__( \
- "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
+ "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \
"2: sw %D2, 4(%3) \n" \
"3: \n" \
" .section .fixup,\"ax\" \n" \
@@ -658,7 +658,7 @@ extern void __put_user_unaligned_unknown(void);
#ifdef MODULE
#define __MODULE_JAL(destination) \
".set\tnoat\n\t" \
- __UA_LA "\t$1, " #destination "\n\t" \
+ __UA_LA "\t$1, " #destination "\n\t" \
"jalr\t$1\n\t" \
".set\tat\n\t"
#else
@@ -694,11 +694,11 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
/*
* __copy_to_user: - Copy a block of data into user space, with less checking.
- * @to: Destination address, in user space.
+ * @to: Destination address, in user space.
* @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -716,7 +716,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@@ -731,7 +731,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@@ -744,18 +744,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
+ __cu_len); \
__cu_len; \
})
/*
* copy_to_user: - Copy a block of data into user space.
- * @to: Destination address, in user space.
+ * @to: Destination address, in user space.
* @from: Source address, in kernel space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from kernel space to user space.
*
@@ -774,7 +774,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
} \
__cu_len; \
})
@@ -827,11 +827,11 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
/*
* __copy_from_user: - Copy a block of data from user space, with less checking.
- * @to: Destination address, in kernel space.
+ * @to: Destination address, in kernel space.
* @from: Source address, in user space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -853,17 +853,17 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
__cu_len; \
})
/*
* copy_from_user: - Copy a block of data from user space.
- * @to: Destination address, in kernel space.
+ * @to: Destination address, in kernel space.
* @from: Source address, in user space.
- * @n: Number of bytes to copy.
+ * @n: Number of bytes to copy.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Copy data from user space to kernel space.
*
@@ -885,7 +885,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
} \
__cu_len; \
})
@@ -901,7 +901,7 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
__cu_len; \
})
@@ -915,18 +915,18 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
__cu_from = (from); \
__cu_len = (n); \
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
- access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+ access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
} \
__cu_len; \
})
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
- * @to: Destination address, in user space.
- * @n: Number of bytes to zero.
+ * @to: Destination address, in user space.
+ * @n: Number of bytes to zero.
*
* Zero a block of memory in user space. Caller must check
* the specified block with access_ok() before calling this function.
@@ -966,7 +966,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
/*
* __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
* @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
+ * least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
@@ -1005,7 +1005,7 @@ __strncpy_from_user(char *__to, const char __user *__from, long __len)
/*
* strncpy_from_user: - Copy a NUL terminated string from userspace.
* @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
+ * least @count bytes long.
* @src: Source address, in user space.
* @count: Maximum number of bytes to copy, including the trailing NUL.
*
@@ -1060,7 +1060,7 @@ static inline long __strlen_user(const char __user *s)
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Get the size of a NUL-terminated string in user space.
*
@@ -1108,7 +1108,7 @@ static inline long __strnlen_user(const char __user *s, long n)
* strlen_user: - Get the size of a string in user space.
* @str: The string to measure.
*
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep.
*
* Get the size of a NUL-terminated string in user space.
*
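[Editor's note] For reference, a minimal sketch of how driver code typically uses the copy_from_user()/copy_to_user() interface documented in the comments above. The ioctl handler and its argument struct are hypothetical; only the uaccess calls and their "returns the number of bytes NOT copied, 0 on success" convention come from the documented API.

#include <linux/uaccess.h>
#include <linux/errno.h>

struct demo_args {		/* hypothetical ioctl payload */
	int channel;
	int value;
};

/* copy_from_user()/copy_to_user() perform the access_ok() check
 * internally, unlike the __copy_* variants documented above, and
 * return the number of bytes that could not be copied. */
static long demo_ioctl_copy(void __user *uptr)
{
	struct demo_args args;

	if (copy_from_user(&args, uptr, sizeof(args)))
		return -EFAULT;

	args.value *= 2;	/* act on the kernel-space copy */

	if (copy_to_user(uptr, &args, sizeof(args)))
		return -EFAULT;

	return 0;
}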
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 7e0bf17c932..058e941626a 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2012 MIPS Technologies, Inc.
diff --git a/arch/mips/include/asm/user.h b/arch/mips/include/asm/user.h
index afa83a4c188..6bad61b0a53 100644
--- a/arch/mips/include/asm/user.h
+++ b/arch/mips/include/asm/user.h
@@ -20,7 +20,7 @@
* upage: 1 page consisting of a user struct that tells gdb
* what is present in the file. Directly after this is a
* copy of the task_struct, which is currently not used by gdb,
- * but it may come in handy at some point. All of the registers
+ * but it may come in handy at some point. All of the registers
* are stored as part of the upage. The upage should always be
* only one page long.
* data: The data segment follows next. We use current->end_text to
diff --git a/arch/mips/include/asm/vr41xx/pci.h b/arch/mips/include/asm/vr41xx/pci.h
index c231a3d6cfd..a866918cfea 100644
--- a/arch/mips/include/asm/vr41xx/pci.h
+++ b/arch/mips/include/asm/vr41xx/pci.h
@@ -20,7 +20,7 @@
#ifndef __NEC_VR41XX_PCI_H
#define __NEC_VR41XX_PCI_H
-#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
+#define PCI_MASTER_ADDRESS_MASK 0x7fffffffU
struct pci_master_address_conversion {
uint32_t bus_base_address;
diff --git a/arch/mips/include/asm/vr41xx/tb0287.h b/arch/mips/include/asm/vr41xx/tb0287.h
index 61bead68abf..d58b5678f24 100644
--- a/arch/mips/include/asm/vr41xx/tb0287.h
+++ b/arch/mips/include/asm/vr41xx/tb0287.h
@@ -1,7 +1,7 @@
/*
* tb0287.h, Include file for TANBAC TB0287 mini-ITX board.
*
- * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
+ * Copyright (C) 2005 Media Lab Inc. <ito@mlb.co.jp>
*
* This code is largely based on tb0219.h.
*
diff --git a/arch/mips/include/asm/war.h b/arch/mips/include/asm/war.h
index 65e344532de..9344e247a6c 100644
--- a/arch/mips/include/asm/war.h
+++ b/arch/mips/include/asm/war.h
@@ -83,30 +83,30 @@
#endif
/*
- * Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata:
+ * Pleasures of the R4600 V1.x. Cite from the IDT R4600 V1.7 errata:
*
* 18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D,
- * Hit_Invalidate_D and Create_Dirty_Excl_D should only be
- * executed if there is no other dcache activity. If the dcache is
- * accessed for another instruction immeidately preceding when these
- * cache instructions are executing, it is possible that the dcache
- * tag match outputs used by these cache instructions will be
- * incorrect. These cache instructions should be preceded by at least
- * four instructions that are not any kind of load or store
- * instruction.
- *
- * This is not allowed: lw
- * nop
- * nop
- * nop
- * cache Hit_Writeback_Invalidate_D
- *
- * This is allowed: lw
- * nop
- * nop
- * nop
- * nop
- * cache Hit_Writeback_Invalidate_D
+ * Hit_Invalidate_D and Create_Dirty_Excl_D should only be
+ * executed if there is no other dcache activity. If the dcache is
+ * accessed for another instruction immediately preceding when these
+ * cache instructions are executing, it is possible that the dcache
+ * tag match outputs used by these cache instructions will be
+ * incorrect. These cache instructions should be preceded by at least
+ * four instructions that are not any kind of load or store
+ * instruction.
+ *
+ * This is not allowed: lw
+ * nop
+ * nop
+ * nop
+ * cache Hit_Writeback_Invalidate_D
+ *
+ * This is allowed: lw
+ * nop
+ * nop
+ * nop
+ * nop
+ * cache Hit_Writeback_Invalidate_D
*/
#ifndef R4600_V1_HIT_CACHEOP_WAR
#error Check setting of R4600_V1_HIT_CACHEOP_WAR for your platform
@@ -118,7 +118,7 @@
*
* R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
* Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
- * operate correctly if the internal data cache refill buffer is empty. These
+ * operate correctly if the internal data cache refill buffer is empty. These
* CACHE instructions should be separated from any potential data cache miss
* by a load instruction to an uncached address to empty the response buffer."
* (Revision 2.0 device errata from IDT available on http://www.idt.com/
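[Editor's note] As a hedged illustration of the R4600 V1.x erratum described above (a hypothetical helper, not the kernel's actual R4600_V1_HIT_CACHEOP_WAR code): the erratum asks for at least four non-load/store instructions immediately before the CACHE op, hence the four nops; 0x15 is the usual Hit_Writeback_Inv_D cache opcode.

/* Sketch only: pad a Hit_Writeback_Inv_D with four nops per the erratum. */
static inline void r4600v1_hit_wb_inv_d(unsigned long addr)
{
	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	noreorder			\n"
	"	nop					\n"
	"	nop					\n"
	"	nop					\n"
	"	nop					\n"
	"	cache	0x15, 0(%0)	# Hit_Writeback_Inv_D	\n"
	"	.set	pop				\n"
	: /* no outputs */
	: "r" (addr)
	: "memory");
}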
diff --git a/arch/mips/include/asm/xtalk/xtalk.h b/arch/mips/include/asm/xtalk/xtalk.h
index 79bac882a73..680e7efebba 100644
--- a/arch/mips/include/asm/xtalk/xtalk.h
+++ b/arch/mips/include/asm/xtalk/xtalk.h
@@ -16,15 +16,15 @@
/*
* User-level device driver visible types
*/
-typedef char xwidgetnum_t; /* xtalk widget number (0..15) */
+typedef char xwidgetnum_t; /* xtalk widget number (0..15) */
#define XWIDGET_NONE -1
-typedef int xwidget_part_num_t; /* xtalk widget part number */
+typedef int xwidget_part_num_t; /* xtalk widget part number */
#define XWIDGET_PART_NUM_NONE -1
-typedef int xwidget_rev_num_t; /* xtalk widget revision number */
+typedef int xwidget_rev_num_t; /* xtalk widget revision number */
#define XWIDGET_REV_NUM_NONE -1
@@ -37,15 +37,15 @@ typedef struct xtalk_piomap_s *xtalk_piomap_t;
/* It is often convenient to fold the XIO target port
* number into the XIO address.
*/
-#define XIO_NOWHERE (0xFFFFFFFFFFFFFFFFull)
-#define XIO_ADDR_BITS (0x0000FFFFFFFFFFFFull)
-#define XIO_PORT_BITS (0xF000000000000000ull)
-#define XIO_PORT_SHIFT (60)
-
-#define XIO_PACKED(x) (((x)&XIO_PORT_BITS) != 0)
-#define XIO_ADDR(x) ((x)&XIO_ADDR_BITS)
-#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
-#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
+#define XIO_NOWHERE (0xFFFFFFFFFFFFFFFFull)
+#define XIO_ADDR_BITS (0x0000FFFFFFFFFFFFull)
+#define XIO_PORT_BITS (0xF000000000000000ull)
+#define XIO_PORT_SHIFT (60)
+
+#define XIO_PACKED(x) (((x)&XIO_PORT_BITS) != 0)
+#define XIO_ADDR(x) ((x)&XIO_ADDR_BITS)
+#define XIO_PORT(x) ((xwidgetnum_t)(((x)&XIO_PORT_BITS) >> XIO_PORT_SHIFT))
+#define XIO_PACK(p, o) ((((uint64_t)(p))<<XIO_PORT_SHIFT) | ((o)&XIO_ADDR_BITS))
#endif /* !__ASSEMBLY__ */
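[Editor's note] To make the packing convention above concrete, a small hedged example (hypothetical test program; assumes the xtalk.h macros and xwidgetnum_t typedef are in scope on a MIPS build) folding a widget number into the top nibble of an XIO address and unpacking it again:

#include <assert.h>
#include <stdint.h>
/* assumes asm/xtalk/xtalk.h (or equivalent definitions) is available */

int main(void)
{
	uint64_t xio = XIO_PACK(5, 0x0000123456789abcull);

	assert(XIO_PACKED(xio));				/* port bits are set */
	assert(XIO_PORT(xio) == 5);				/* widget number recovered */
	assert(XIO_ADDR(xio) == 0x0000123456789abcull);		/* 48-bit offset intact */
	return 0;
}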
diff --git a/arch/mips/include/asm/xtalk/xwidget.h b/arch/mips/include/asm/xtalk/xwidget.h
index b4a13d7405e..32e4e884f9b 100644
--- a/arch/mips/include/asm/xtalk/xwidget.h
+++ b/arch/mips/include/asm/xtalk/xwidget.h
@@ -45,12 +45,12 @@
#define WIDGET_PENDING 0x0000001f
/* WIDGET_ERR_UPPER_ADDR */
-#define WIDGET_ERR_UPPER_ADDR_ONLY 0x0000ffff
+#define WIDGET_ERR_UPPER_ADDR_ONLY 0x0000ffff
/* WIDGET_CONTROL */
#define WIDGET_F_BAD_PKT 0x00010000
#define WIDGET_LLP_XBAR_CRD 0x0000f000
-#define WIDGET_LLP_XBAR_CRD_SHFT 12
+#define WIDGET_LLP_XBAR_CRD_SHFT 12
#define WIDGET_CLR_RLLP_CNT 0x00000800
#define WIDGET_CLR_TLLP_CNT 0x00000400
#define WIDGET_SYS_END 0x00000200
@@ -86,8 +86,8 @@
/*
* according to the crosstalk spec, only 32-bits access to the widget
- * configuration registers is allowed. some widgets may allow 64-bits
- * access but software should not depend on it. registers beyond the
+ * configuration registers is allowed. some widgets may allow 64-bits
+ * access but software should not depend on it. registers beyond the
* widget target flush register are widget dependent thus will not be
* defined here
*/
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index 77d4fb33f75..350ccccadcb 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -8,6 +8,7 @@ header-y += byteorder.h
header-y += cachectl.h
header-y += errno.h
header-y += fcntl.h
+header-y += inst.h
header-y += ioctl.h
header-y += ioctls.h
header-y += ipcbuf.h
diff --git a/arch/mips/include/uapi/asm/break.h b/arch/mips/include/uapi/asm/break.h
index 9161e684cb4..002c39ea20c 100644
--- a/arch/mips/include/uapi/asm/break.h
+++ b/arch/mips/include/uapi/asm/break.h
@@ -6,8 +6,8 @@
* Copyright (C) 1995, 2003 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
-#ifndef __ASM_BREAK_H
-#define __ASM_BREAK_H
+#ifndef __UAPI_ASM_BREAK_H
+#define __UAPI_ASM_BREAK_H
/*
* The following break codes are or were in use for specific purposes in
@@ -16,22 +16,14 @@
* non-Linux/MIPS object files or make use of them in the future.
*/
#define BRK_USERBP 0 /* User bp (used by debuggers) */
-#define BRK_KERNELBP 1 /* Break in the kernel */
-#define BRK_ABORT 2 /* Sometimes used by abort(3) to SIGIOT */
-#define BRK_BD_TAKEN 3 /* For bd slot emulation - not implemented */
-#define BRK_BD_NOTTAKEN 4 /* For bd slot emulation - not implemented */
#define BRK_SSTEPBP 5 /* User bp (used by debuggers) */
#define BRK_OVERFLOW 6 /* Overflow check */
#define BRK_DIVZERO 7 /* Divide by zero check */
#define BRK_RANGE 8 /* Range error check */
-#define BRK_STACKOVERFLOW 9 /* For Ada stackchecking */
-#define BRK_NORLD 10 /* No rld found - not used by Linux/MIPS */
-#define _BRK_THREADBP 11 /* For threads, user bp (used by debuggers) */
-#define BRK_BUG 512 /* Used by BUG() */
-#define BRK_KDB 513 /* Used in KDB_ENTER() */
+#define BRK_BUG 12 /* Used by BUG() */
#define BRK_MEMU 514 /* Used by FPU emulator */
#define BRK_KPROBE_BP 515 /* Kprobe break */
#define BRK_KPROBE_SSTEPBP 516 /* Kprobe single step software implementation */
#define BRK_MULOVF 1023 /* Multiply overflow */
-#endif /* __ASM_BREAK_H */
+#endif /* __UAPI_ASM_BREAK_H */
diff --git a/arch/mips/include/uapi/asm/cachectl.h b/arch/mips/include/uapi/asm/cachectl.h
index f3ce721861d..23039090877 100644
--- a/arch/mips/include/uapi/asm/cachectl.h
+++ b/arch/mips/include/uapi/asm/cachectl.h
@@ -5,15 +5,15 @@
*
* Copyright (C) 1994, 1995, 1996 by Ralf Baechle
*/
-#ifndef _ASM_CACHECTL
-#define _ASM_CACHECTL
+#ifndef _ASM_CACHECTL
+#define _ASM_CACHECTL
/*
* Options for cacheflush system call
*/
-#define ICACHE (1<<0) /* flush instruction cache */
-#define DCACHE (1<<1) /* writeback and flush data cache */
-#define BCACHE (ICACHE|DCACHE) /* flush both caches */
+#define ICACHE (1<<0) /* flush instruction cache */
+#define DCACHE (1<<1) /* writeback and flush data cache */
+#define BCACHE (ICACHE|DCACHE) /* flush both caches */
/*
* Caching modes for the cachectl(2) call
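[Editor's note] The ICACHE/DCACHE/BCACHE flags above are consumed by the MIPS cacheflush(2) call. A hedged user-space sketch (assuming the usual MIPS libc wrapper from <sys/cachectl.h>) that flushes both caches over a freshly written code buffer:

#include <sys/cachectl.h>	/* cacheflush(), ICACHE/DCACHE/BCACHE */
#include <stdio.h>

int flush_generated_code(void *buf, int len)
{
	/* write back the D-cache and invalidate the I-cache before
	 * jumping into code that was just stored through the D-cache */
	if (cacheflush(buf, len, BCACHE) < 0) {
		perror("cacheflush");
		return -1;
	}
	return 0;
}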
diff --git a/arch/mips/include/uapi/asm/errno.h b/arch/mips/include/uapi/asm/errno.h
index bd67b15042e..31575e2fd1b 100644
--- a/arch/mips/include/uapi/asm/errno.h
+++ b/arch/mips/include/uapi/asm/errno.h
@@ -14,95 +14,95 @@
#include <asm-generic/errno-base.h>
-#define ENOMSG 35 /* No message of desired type */
-#define EIDRM 36 /* Identifier removed */
-#define ECHRNG 37 /* Channel number out of range */
-#define EL2NSYNC 38 /* Level 2 not synchronized */
-#define EL3HLT 39 /* Level 3 halted */
-#define EL3RST 40 /* Level 3 reset */
-#define ELNRNG 41 /* Link number out of range */
-#define EUNATCH 42 /* Protocol driver not attached */
-#define ENOCSI 43 /* No CSI structure available */
-#define EL2HLT 44 /* Level 2 halted */
-#define EDEADLK 45 /* Resource deadlock would occur */
-#define ENOLCK 46 /* No record locks available */
-#define EBADE 50 /* Invalid exchange */
-#define EBADR 51 /* Invalid request descriptor */
-#define EXFULL 52 /* Exchange full */
-#define ENOANO 53 /* No anode */
-#define EBADRQC 54 /* Invalid request code */
-#define EBADSLT 55 /* Invalid slot */
-#define EDEADLOCK 56 /* File locking deadlock error */
-#define EBFONT 59 /* Bad font file format */
-#define ENOSTR 60 /* Device not a stream */
-#define ENODATA 61 /* No data available */
-#define ETIME 62 /* Timer expired */
-#define ENOSR 63 /* Out of streams resources */
-#define ENONET 64 /* Machine is not on the network */
-#define ENOPKG 65 /* Package not installed */
-#define EREMOTE 66 /* Object is remote */
-#define ENOLINK 67 /* Link has been severed */
-#define EADV 68 /* Advertise error */
-#define ESRMNT 69 /* Srmount error */
-#define ECOMM 70 /* Communication error on send */
-#define EPROTO 71 /* Protocol error */
-#define EDOTDOT 73 /* RFS specific error */
-#define EMULTIHOP 74 /* Multihop attempted */
-#define EBADMSG 77 /* Not a data message */
-#define ENAMETOOLONG 78 /* File name too long */
-#define EOVERFLOW 79 /* Value too large for defined data type */
-#define ENOTUNIQ 80 /* Name not unique on network */
-#define EBADFD 81 /* File descriptor in bad state */
-#define EREMCHG 82 /* Remote address changed */
-#define ELIBACC 83 /* Can not access a needed shared library */
-#define ELIBBAD 84 /* Accessing a corrupted shared library */
-#define ELIBSCN 85 /* .lib section in a.out corrupted */
-#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
-#define ELIBEXEC 87 /* Cannot exec a shared library directly */
-#define EILSEQ 88 /* Illegal byte sequence */
-#define ENOSYS 89 /* Function not implemented */
-#define ELOOP 90 /* Too many symbolic links encountered */
-#define ERESTART 91 /* Interrupted system call should be restarted */
-#define ESTRPIPE 92 /* Streams pipe error */
-#define ENOTEMPTY 93 /* Directory not empty */
-#define EUSERS 94 /* Too many users */
-#define ENOTSOCK 95 /* Socket operation on non-socket */
-#define EDESTADDRREQ 96 /* Destination address required */
-#define EMSGSIZE 97 /* Message too long */
-#define EPROTOTYPE 98 /* Protocol wrong type for socket */
-#define ENOPROTOOPT 99 /* Protocol not available */
-#define EPROTONOSUPPORT 120 /* Protocol not supported */
-#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
-#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
-#define EPFNOSUPPORT 123 /* Protocol family not supported */
-#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
-#define EADDRINUSE 125 /* Address already in use */
-#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
-#define ENETDOWN 127 /* Network is down */
-#define ENETUNREACH 128 /* Network is unreachable */
-#define ENETRESET 129 /* Network dropped connection because of reset */
-#define ECONNABORTED 130 /* Software caused connection abort */
-#define ECONNRESET 131 /* Connection reset by peer */
-#define ENOBUFS 132 /* No buffer space available */
-#define EISCONN 133 /* Transport endpoint is already connected */
-#define ENOTCONN 134 /* Transport endpoint is not connected */
-#define EUCLEAN 135 /* Structure needs cleaning */
-#define ENOTNAM 137 /* Not a XENIX named type file */
-#define ENAVAIL 138 /* No XENIX semaphores available */
-#define EISNAM 139 /* Is a named type file */
-#define EREMOTEIO 140 /* Remote I/O error */
+#define ENOMSG 35 /* No message of desired type */
+#define EIDRM 36 /* Identifier removed */
+#define ECHRNG 37 /* Channel number out of range */
+#define EL2NSYNC 38 /* Level 2 not synchronized */
+#define EL3HLT 39 /* Level 3 halted */
+#define EL3RST 40 /* Level 3 reset */
+#define ELNRNG 41 /* Link number out of range */
+#define EUNATCH 42 /* Protocol driver not attached */
+#define ENOCSI 43 /* No CSI structure available */
+#define EL2HLT 44 /* Level 2 halted */
+#define EDEADLK 45 /* Resource deadlock would occur */
+#define ENOLCK 46 /* No record locks available */
+#define EBADE 50 /* Invalid exchange */
+#define EBADR 51 /* Invalid request descriptor */
+#define EXFULL 52 /* Exchange full */
+#define ENOANO 53 /* No anode */
+#define EBADRQC 54 /* Invalid request code */
+#define EBADSLT 55 /* Invalid slot */
+#define EDEADLOCK 56 /* File locking deadlock error */
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EDOTDOT 73 /* RFS specific error */
+#define EMULTIHOP 74 /* Multihop attempted */
+#define EBADMSG 77 /* Not a data message */
+#define ENAMETOOLONG 78 /* File name too long */
+#define EOVERFLOW 79 /* Value too large for defined data type */
+#define ENOTUNIQ 80 /* Name not unique on network */
+#define EBADFD 81 /* File descriptor in bad state */
+#define EREMCHG 82 /* Remote address changed */
+#define ELIBACC 83 /* Can not access a needed shared library */
+#define ELIBBAD 84 /* Accessing a corrupted shared library */
+#define ELIBSCN 85 /* .lib section in a.out corrupted */
+#define ELIBMAX 86 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 87 /* Cannot exec a shared library directly */
+#define EILSEQ 88 /* Illegal byte sequence */
+#define ENOSYS 89 /* Function not implemented */
+#define ELOOP 90 /* Too many symbolic links encountered */
+#define ERESTART 91 /* Interrupted system call should be restarted */
+#define ESTRPIPE 92 /* Streams pipe error */
+#define ENOTEMPTY 93 /* Directory not empty */
+#define EUSERS 94 /* Too many users */
+#define ENOTSOCK 95 /* Socket operation on non-socket */
+#define EDESTADDRREQ 96 /* Destination address required */
+#define EMSGSIZE 97 /* Message too long */
+#define EPROTOTYPE 98 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 99 /* Protocol not available */
+#define EPROTONOSUPPORT 120 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 121 /* Socket type not supported */
+#define EOPNOTSUPP 122 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 123 /* Protocol family not supported */
+#define EAFNOSUPPORT 124 /* Address family not supported by protocol */
+#define EADDRINUSE 125 /* Address already in use */
+#define EADDRNOTAVAIL 126 /* Cannot assign requested address */
+#define ENETDOWN 127 /* Network is down */
+#define ENETUNREACH 128 /* Network is unreachable */
+#define ENETRESET 129 /* Network dropped connection because of reset */
+#define ECONNABORTED 130 /* Software caused connection abort */
+#define ECONNRESET 131 /* Connection reset by peer */
+#define ENOBUFS 132 /* No buffer space available */
+#define EISCONN 133 /* Transport endpoint is already connected */
+#define ENOTCONN 134 /* Transport endpoint is not connected */
+#define EUCLEAN 135 /* Structure needs cleaning */
+#define ENOTNAM 137 /* Not a XENIX named type file */
+#define ENAVAIL 138 /* No XENIX semaphores available */
+#define EISNAM 139 /* Is a named type file */
+#define EREMOTEIO 140 /* Remote I/O error */
#define EINIT 141 /* Reserved */
#define EREMDEV 142 /* Error 142 */
-#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
-#define ETOOMANYREFS 144 /* Too many references: cannot splice */
-#define ETIMEDOUT 145 /* Connection timed out */
-#define ECONNREFUSED 146 /* Connection refused */
-#define EHOSTDOWN 147 /* Host is down */
-#define EHOSTUNREACH 148 /* No route to host */
-#define EWOULDBLOCK EAGAIN /* Operation would block */
-#define EALREADY 149 /* Operation already in progress */
-#define EINPROGRESS 150 /* Operation now in progress */
-#define ESTALE 151 /* Stale NFS file handle */
+#define ESHUTDOWN 143 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 144 /* Too many references: cannot splice */
+#define ETIMEDOUT 145 /* Connection timed out */
+#define ECONNREFUSED 146 /* Connection refused */
+#define EHOSTDOWN 147 /* Host is down */
+#define EHOSTUNREACH 148 /* No route to host */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EALREADY 149 /* Operation already in progress */
+#define EINPROGRESS 150 /* Operation now in progress */
+#define ESTALE 151 /* Stale NFS file handle */
#define ECANCELED 158 /* AIO operation canceled */
/*
@@ -110,16 +110,16 @@
*/
#define ENOMEDIUM 159 /* No medium found */
#define EMEDIUMTYPE 160 /* Wrong medium type */
-#define ENOKEY 161 /* Required key not available */
-#define EKEYEXPIRED 162 /* Key has expired */
-#define EKEYREVOKED 163 /* Key has been revoked */
-#define EKEYREJECTED 164 /* Key was rejected by service */
+#define ENOKEY 161 /* Required key not available */
+#define EKEYEXPIRED 162 /* Key has expired */
+#define EKEYREVOKED 163 /* Key has been revoked */
+#define EKEYREJECTED 164 /* Key was rejected by service */
/* for robust mutexes */
-#define EOWNERDEAD 165 /* Owner died */
-#define ENOTRECOVERABLE 166 /* State not recoverable */
+#define EOWNERDEAD 165 /* Owner died */
+#define ENOTRECOVERABLE 166 /* State not recoverable */
-#define ERFKILL 167 /* Operation not possible due to RF-kill */
+#define ERFKILL 167 /* Operation not possible due to RF-kill */
#define EHWPOISON 168 /* Memory page has hardware error */
diff --git a/arch/mips/include/uapi/asm/fcntl.h b/arch/mips/include/uapi/asm/fcntl.h
index 75eddedcfc3..0bda78f70e1 100644
--- a/arch/mips/include/uapi/asm/fcntl.h
+++ b/arch/mips/include/uapi/asm/fcntl.h
@@ -12,7 +12,7 @@
#define O_APPEND 0x0008
#define O_DSYNC 0x0010 /* used to be O_SYNC, see below */
#define O_NONBLOCK 0x0080
-#define O_CREAT 0x0100 /* not fcntl */
+#define O_CREAT 0x0100 /* not fcntl */
#define O_TRUNC 0x0200 /* not fcntl */
#define O_EXCL 0x0400 /* not fcntl */
#define O_NOCTTY 0x0800 /* not fcntl */
@@ -50,7 +50,7 @@
/*
* The flavours of struct flock. "struct flock" is the ABI compliant
- * variant. Finally struct flock64 is the LFS variant of struct flock. As
+ * variant. Finally struct flock64 is the LFS variant of struct flock. As
* a historic accident and inconsistence with the ABI definition it doesn't
* contain all the same fields as struct flock.
*/
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
new file mode 100644
index 00000000000..4d078815eaa
--- /dev/null
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -0,0 +1,331 @@
+/*
+ * Format of an instruction in memory.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 2000 by Ralf Baechle
+ * Copyright (C) 2006 by Thiemo Seufer
+ */
+#ifndef _UAPI_ASM_INST_H
+#define _UAPI_ASM_INST_H
+
+/*
+ * Major opcodes; before MIPS IV cop1x was called cop3.
+ */
+enum major_op {
+ spec_op, bcond_op, j_op, jal_op,
+ beq_op, bne_op, blez_op, bgtz_op,
+ addi_op, addiu_op, slti_op, sltiu_op,
+ andi_op, ori_op, xori_op, lui_op,
+ cop0_op, cop1_op, cop2_op, cop1x_op,
+ beql_op, bnel_op, blezl_op, bgtzl_op,
+ daddi_op, daddiu_op, ldl_op, ldr_op,
+ spec2_op, jalx_op, mdmx_op, spec3_op,
+ lb_op, lh_op, lwl_op, lw_op,
+ lbu_op, lhu_op, lwr_op, lwu_op,
+ sb_op, sh_op, swl_op, sw_op,
+ sdl_op, sdr_op, swr_op, cache_op,
+ ll_op, lwc1_op, lwc2_op, pref_op,
+ lld_op, ldc1_op, ldc2_op, ld_op,
+ sc_op, swc1_op, swc2_op, major_3b_op,
+ scd_op, sdc1_op, sdc2_op, sd_op
+};
+
+/*
+ * func field of spec opcode.
+ */
+enum spec_op {
+ sll_op, movc_op, srl_op, sra_op,
+ sllv_op, pmon_op, srlv_op, srav_op,
+ jr_op, jalr_op, movz_op, movn_op,
+ syscall_op, break_op, spim_op, sync_op,
+ mfhi_op, mthi_op, mflo_op, mtlo_op,
+ dsllv_op, spec2_unused_op, dsrlv_op, dsrav_op,
+ mult_op, multu_op, div_op, divu_op,
+ dmult_op, dmultu_op, ddiv_op, ddivu_op,
+ add_op, addu_op, sub_op, subu_op,
+ and_op, or_op, xor_op, nor_op,
+ spec3_unused_op, spec4_unused_op, slt_op, sltu_op,
+ dadd_op, daddu_op, dsub_op, dsubu_op,
+ tge_op, tgeu_op, tlt_op, tltu_op,
+ teq_op, spec5_unused_op, tne_op, spec6_unused_op,
+ dsll_op, spec7_unused_op, dsrl_op, dsra_op,
+ dsll32_op, spec8_unused_op, dsrl32_op, dsra32_op
+};
+
+/*
+ * func field of spec2 opcode.
+ */
+enum spec2_op {
+ madd_op, maddu_op, mul_op, spec2_3_unused_op,
+ msub_op, msubu_op, /* more unused ops */
+ clz_op = 0x20, clo_op,
+ dclz_op = 0x24, dclo_op,
+ sdbpp_op = 0x3f
+};
+
+/*
+ * func field of spec3 opcode.
+ */
+enum spec3_op {
+ ext_op, dextm_op, dextu_op, dext_op,
+ ins_op, dinsm_op, dinsu_op, dins_op,
+ lx_op = 0x0a,
+ bshfl_op = 0x20,
+ dbshfl_op = 0x24,
+ rdhwr_op = 0x3b
+};
+
+/*
+ * rt field of bcond opcodes.
+ */
+enum rt_op {
+ bltz_op, bgez_op, bltzl_op, bgezl_op,
+ spimi_op, unused_rt_op_0x05, unused_rt_op_0x06, unused_rt_op_0x07,
+ tgei_op, tgeiu_op, tlti_op, tltiu_op,
+ teqi_op, unused_0x0d_rt_op, tnei_op, unused_0x0f_rt_op,
+ bltzal_op, bgezal_op, bltzall_op, bgezall_op,
+ rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
+ rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+};
+
+/*
+ * rs field of cop opcodes.
+ */
+enum cop_op {
+ mfc_op = 0x00, dmfc_op = 0x01,
+ cfc_op = 0x02, mtc_op = 0x04,
+ dmtc_op = 0x05, ctc_op = 0x06,
+ bc_op = 0x08, cop_op = 0x10,
+ copm_op = 0x18
+};
+
+/*
+ * rt field of cop.bc_op opcodes
+ */
+enum bcop_op {
+ bcf_op, bct_op, bcfl_op, bctl_op
+};
+
+/*
+ * func field of cop0 coi opcodes.
+ */
+enum cop0_coi_func {
+ tlbr_op = 0x01, tlbwi_op = 0x02,
+ tlbwr_op = 0x06, tlbp_op = 0x08,
+ rfe_op = 0x10, eret_op = 0x18
+};
+
+/*
+ * func field of cop0 com opcodes.
+ */
+enum cop0_com_func {
+ tlbr1_op = 0x01, tlbw_op = 0x02,
+ tlbp1_op = 0x08, dctr_op = 0x09,
+ dctw_op = 0x0a
+};
+
+/*
+ * fmt field of cop1 opcodes.
+ */
+enum cop1_fmt {
+ s_fmt, d_fmt, e_fmt, q_fmt,
+ w_fmt, l_fmt
+};
+
+/*
+ * func field of cop1 instructions using d, s or w format.
+ */
+enum cop1_sdw_func {
+ fadd_op = 0x00, fsub_op = 0x01,
+ fmul_op = 0x02, fdiv_op = 0x03,
+ fsqrt_op = 0x04, fabs_op = 0x05,
+ fmov_op = 0x06, fneg_op = 0x07,
+ froundl_op = 0x08, ftruncl_op = 0x09,
+ fceill_op = 0x0a, ffloorl_op = 0x0b,
+ fround_op = 0x0c, ftrunc_op = 0x0d,
+ fceil_op = 0x0e, ffloor_op = 0x0f,
+ fmovc_op = 0x11, fmovz_op = 0x12,
+ fmovn_op = 0x13, frecip_op = 0x15,
+ frsqrt_op = 0x16, fcvts_op = 0x20,
+ fcvtd_op = 0x21, fcvte_op = 0x22,
+ fcvtw_op = 0x24, fcvtl_op = 0x25,
+ fcmp_op = 0x30
+};
+
+/*
+ * func field of cop1x opcodes (MIPS IV).
+ */
+enum cop1x_func {
+ lwxc1_op = 0x00, ldxc1_op = 0x01,
+ pfetch_op = 0x07, swxc1_op = 0x08,
+ sdxc1_op = 0x09, madd_s_op = 0x20,
+ madd_d_op = 0x21, madd_e_op = 0x22,
+ msub_s_op = 0x28, msub_d_op = 0x29,
+ msub_e_op = 0x2a, nmadd_s_op = 0x30,
+ nmadd_d_op = 0x31, nmadd_e_op = 0x32,
+ nmsub_s_op = 0x38, nmsub_d_op = 0x39,
+ nmsub_e_op = 0x3a
+};
+
+/*
+ * func field for mad opcodes (MIPS IV).
+ */
+enum mad_func {
+ madd_fp_op = 0x08, msub_fp_op = 0x0a,
+ nmadd_fp_op = 0x0c, nmsub_fp_op = 0x0e
+};
+
+/*
+ * func field for special3 lx opcodes (Cavium Octeon).
+ */
+enum lx_func {
+ lwx_op = 0x00,
+ lhx_op = 0x04,
+ lbux_op = 0x06,
+ ldx_op = 0x08,
+ lwux_op = 0x10,
+ lhux_op = 0x14,
+ lbx_op = 0x16,
+};
+
+/*
+ * Damn ... bitfields depend on byteorder :-(
+ */
+#ifdef __MIPSEB__
+#define BITFIELD_FIELD(field, more) \
+ field; \
+ more
+
+#elif defined(__MIPSEL__)
+
+#define BITFIELD_FIELD(field, more) \
+ more \
+ field;
+
+#else /* !defined (__MIPSEB__) && !defined (__MIPSEL__) */
+#error "MIPS but neither __MIPSEL__ nor __MIPSEB__?"
+#endif
+
+struct j_format {
+ BITFIELD_FIELD(unsigned int opcode : 6, /* Jump format */
+ BITFIELD_FIELD(unsigned int target : 26,
+ ;))
+};
+
+struct i_format { /* signed immediate format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(signed int simmediate : 16,
+ ;))))
+};
+
+struct u_format { /* unsigned immediate format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int uimmediate : 16,
+ ;))))
+};
+
+struct c_format { /* Cache (>= R6000) format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int c_op : 3,
+ BITFIELD_FIELD(unsigned int cache : 2,
+ BITFIELD_FIELD(unsigned int simmediate : 16,
+ ;)))))
+};
+
+struct r_format { /* Register format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int re : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct p_format { /* Performance counter format (R10000) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int re : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct f_format { /* FPU register format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int : 1,
+ BITFIELD_FIELD(unsigned int fmt : 4,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(unsigned int rd : 5,
+ BITFIELD_FIELD(unsigned int re : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+struct ma_format { /* FPU multiply and add format (MIPS IV) */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int fr : 5,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 4,
+ BITFIELD_FIELD(unsigned int fmt : 2,
+ ;)))))))
+};
+
+struct b_format { /* BREAK and SYSCALL */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int code : 20,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))
+};
+
+struct ps_format { /* MIPS-3D / paired single format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int ft : 5,
+ BITFIELD_FIELD(unsigned int fs : 5,
+ BITFIELD_FIELD(unsigned int fd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;))))))
+};
+
+struct v_format { /* MDMX vector format */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int sel : 4,
+ BITFIELD_FIELD(unsigned int fmt : 1,
+ BITFIELD_FIELD(unsigned int vt : 5,
+ BITFIELD_FIELD(unsigned int vs : 5,
+ BITFIELD_FIELD(unsigned int vd : 5,
+ BITFIELD_FIELD(unsigned int func : 6,
+ ;)))))))
+};
+
+union mips_instruction {
+ unsigned int word;
+ unsigned short halfword[2];
+ unsigned char byte[4];
+ struct j_format j_format;
+ struct i_format i_format;
+ struct u_format u_format;
+ struct c_format c_format;
+ struct r_format r_format;
+ struct p_format p_format;
+ struct f_format f_format;
+ struct ma_format ma_format;
+ struct b_format b_format;
+ struct ps_format ps_format;
+ struct v_format v_format;
+};
+
+#endif /* _UAPI_ASM_INST_H */
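[Editor's note] As a hedged illustration of how the formats in the new inst.h are meant to be used (hypothetical user-space program; assumes a MIPS toolchain with <asm/inst.h> on the include path), decoding the jump target of a jal word via union mips_instruction and j_format:

#include <asm/inst.h>
#include <stdio.h>

/* A jal target is a 26-bit word index within the 256MB segment of the
 * instruction that follows the jump (the delay-slot PC). */
static unsigned long jal_target(union mips_instruction insn, unsigned long pc)
{
	if (insn.j_format.opcode != jal_op)
		return 0;
	return ((pc + 4) & ~0x0fffffffUL) |
	       ((unsigned long)insn.j_format.target << 2);
}

int main(void)
{
	union mips_instruction insn;

	insn.word = 0x0c000010;	/* jal <segment base + 0x40> */
	printf("target = %#lx\n", jal_target(insn, 0x80001000UL));	/* 0x80000040 */
	return 0;
}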
diff --git a/arch/mips/include/uapi/asm/ioctls.h b/arch/mips/include/uapi/asm/ioctls.h
index addd56b6069..b1e637757fe 100644
--- a/arch/mips/include/uapi/asm/ioctls.h
+++ b/arch/mips/include/uapi/asm/ioctls.h
@@ -41,7 +41,7 @@
#define TIOCPKT_START 0x08 /* start output */
#define TIOCPKT_NOSTOP 0x10 /* no more ^S, ^Q */
#define TIOCPKT_DOSTOP 0x20 /* now do ^S ^Q */
-#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
+#define TIOCPKT_IOCTL 0x40 /* state change of pty driver */
#define TIOCSWINSZ _IOW('t', 103, struct winsize) /* set window size */
#define TIOCGWINSZ _IOR('t', 104, struct winsize) /* get window size */
#define TIOCNOTTY 0x5471 /* void tty association */
@@ -63,9 +63,9 @@
#define FIONREAD 0x467f
#define TIOCINQ FIONREAD
-#define TIOCGETP 0x7408
-#define TIOCSETP 0x7409
-#define TIOCSETN 0x740a /* TIOCSETP wo flush */
+#define TIOCGETP 0x7408
+#define TIOCSETP 0x7409
+#define TIOCSETN 0x740a /* TIOCSETP wo flush */
/* #define TIOCSETA _IOW('t', 20, struct termios) set termios struct */
/* #define TIOCSETAW _IOW('t', 21, struct termios) drain output, set */
@@ -74,9 +74,9 @@
/* #define TIOCSETD _IOW('t', 27, int) set line discipline */
/* 127-124 compat */
-#define TIOCSBRK 0x5427 /* BSD compatibility */
-#define TIOCCBRK 0x5428 /* BSD compatibility */
-#define TIOCGSID 0x7416 /* Return the session ID of FD */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x7416 /* Return the session ID of FD */
#define TCGETS2 _IOR('T', 0x2A, struct termios2)
#define TCSETS2 _IOW('T', 0x2B, struct termios2)
#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
@@ -104,10 +104,10 @@
#define TIOCGLCKTRMIOS 0x548b
#define TIOCSLCKTRMIOS 0x548c
#define TIOCSERGSTRUCT 0x548d /* For debugging only */
-#define TIOCSERGETLSR 0x548e /* Get line status register */
-#define TIOCSERGETMULTI 0x548f /* Get multiport config */
+#define TIOCSERGETLSR 0x548e /* Get line status register */
+#define TIOCSERGETMULTI 0x548f /* Get multiport config */
#define TIOCSERSETMULTI 0x5490 /* Set multiport config */
-#define TIOCMIWAIT 0x5491 /* wait for a change on serial input line(s) */
-#define TIOCGICOUNT 0x5492 /* read serial port inline interrupt counts */
+#define TIOCMIWAIT 0x5491 /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x5492 /* read serial port inline interrupt counts */
#endif /* __ASM_IOCTLS_H */
diff --git a/arch/mips/include/uapi/asm/mman.h b/arch/mips/include/uapi/asm/mman.h
index 9a936ac9a94..cfcb876cae6 100644
--- a/arch/mips/include/uapi/asm/mman.h
+++ b/arch/mips/include/uapi/asm/mman.h
@@ -64,7 +64,7 @@
#define MADV_NORMAL 0 /* no further special treatment */
#define MADV_RANDOM 1 /* expect random page references */
-#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
#define MADV_WILLNEED 3 /* will need these pages */
#define MADV_DONTNEED 4 /* don't need these pages */
@@ -73,14 +73,14 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
-#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
-#define MADV_HWPOISON 100 /* poison a page for testing */
+#define MADV_HWPOISON 100 /* poison a page for testing */
#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
-#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
-#define MADV_DONTDUMP 16 /* Explicity exclude from the core dump,
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
overrides the coredump filter bits */
#define MADV_DODUMP 17 /* Clear the MADV_NODUMP flag */
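[Editor's note] These MADV_* values feed madvise(2). A short hedged sketch (standard Linux call; the mapping being advised is hypothetical) of hinting sequential access and later dropping the pages:

#include <sys/mman.h>
#include <stddef.h>
#include <stdio.h>

int hint_and_drop(void *buf, size_t len)
{
	/* tell the kernel we will stream through the mapping once ... */
	if (madvise(buf, len, MADV_SEQUENTIAL) < 0) {
		perror("madvise(MADV_SEQUENTIAL)");
		return -1;
	}
	/* ... and later that its contents are no longer needed */
	if (madvise(buf, len, MADV_DONTNEED) < 0) {
		perror("madvise(MADV_DONTNEED)");
		return -1;
	}
	return 0;
}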
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index 1bc1f52f40d..4d58d846870 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -49,8 +49,8 @@ struct pt_regs {
unsigned long cp0_tcstatus;
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
- unsigned long long mpl[3]; /* MTM{0,1,2} */
- unsigned long long mtp[3]; /* MTP{0,1,2} */
+ unsigned long long mpl[3]; /* MTM{0,1,2} */
+ unsigned long long mtp[3]; /* MTP{0,1,2} */
#endif
} __attribute__ ((aligned (8)));
@@ -67,14 +67,14 @@ struct pt_regs {
#define PTRACE_GET_THREAD_AREA 25
#define PTRACE_SET_THREAD_AREA 26
-/* Calls to trace a 64bit program from a 32bit program. */
+/* Calls to trace a 64bit program from a 32bit program. */
#define PTRACE_PEEKTEXT_3264 0xc0
#define PTRACE_PEEKDATA_3264 0xc1
#define PTRACE_POKETEXT_3264 0xc2
#define PTRACE_POKEDATA_3264 0xc3
#define PTRACE_GET_THREAD_AREA_3264 0xc4
-/* Read and write watchpoint registers. */
+/* Read and write watchpoint registers. */
enum pt_watch_style {
pt_watch_style_mips32,
pt_watch_style_mips64
diff --git a/arch/mips/include/uapi/asm/sembuf.h b/arch/mips/include/uapi/asm/sembuf.h
index 7281a4decaa..e1085ac880f 100644
--- a/arch/mips/include/uapi/asm/sembuf.h
+++ b/arch/mips/include/uapi/asm/sembuf.h
@@ -12,8 +12,8 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ __kernel_time_t sem_otime; /* last semop time */
+ __kernel_time_t sem_ctime; /* last change time */
unsigned long sem_nsems; /* no. of semaphores in array */
unsigned long __unused1;
unsigned long __unused2;
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index 73446508d84..6a8714193fb 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -11,7 +11,7 @@
#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(long) + 2*sizeof(int))
-#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
+#undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... */
#define HAVE_ARCH_SIGINFO_T
@@ -55,7 +55,7 @@ typedef struct siginfo {
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
- int _sys_private; /* not to be passed to user */
+ int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
@@ -91,9 +91,9 @@ typedef struct siginfo {
short _addr_lsb;
} _sigfault;
- /* SIGPOLL, SIGXFSZ (To do ...) */
+ /* SIGPOLL, SIGXFSZ (To do ...) */
struct {
- __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
diff --git a/arch/mips/include/uapi/asm/signal.h b/arch/mips/include/uapi/asm/signal.h
index 6783c887a67..d6b18b4d0f3 100644
--- a/arch/mips/include/uapi/asm/signal.h
+++ b/arch/mips/include/uapi/asm/signal.h
@@ -24,28 +24,28 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SIGHUP 1 /* Hangup (POSIX). */
#define SIGINT 2 /* Interrupt (ANSI). */
#define SIGQUIT 3 /* Quit (POSIX). */
-#define SIGILL 4 /* Illegal instruction (ANSI). */
-#define SIGTRAP 5 /* Trace trap (POSIX). */
-#define SIGIOT 6 /* IOT trap (4.2 BSD). */
-#define SIGABRT SIGIOT /* Abort (ANSI). */
+#define SIGILL 4 /* Illegal instruction (ANSI). */
+#define SIGTRAP 5 /* Trace trap (POSIX). */
+#define SIGIOT 6 /* IOT trap (4.2 BSD). */
+#define SIGABRT SIGIOT /* Abort (ANSI). */
#define SIGEMT 7
#define SIGFPE 8 /* Floating-point exception (ANSI). */
#define SIGKILL 9 /* Kill, unblockable (POSIX). */
-#define SIGBUS 10 /* BUS error (4.2 BSD). */
+#define SIGBUS 10 /* BUS error (4.2 BSD). */
#define SIGSEGV 11 /* Segmentation violation (ANSI). */
#define SIGSYS 12
-#define SIGPIPE 13 /* Broken pipe (POSIX). */
-#define SIGALRM 14 /* Alarm clock (POSIX). */
-#define SIGTERM 15 /* Termination (ANSI). */
+#define SIGPIPE 13 /* Broken pipe (POSIX). */
+#define SIGALRM 14 /* Alarm clock (POSIX). */
+#define SIGTERM 15 /* Termination (ANSI). */
#define SIGUSR1 16 /* User-defined signal 1 (POSIX). */
#define SIGUSR2 17 /* User-defined signal 2 (POSIX). */
#define SIGCHLD 18 /* Child status has changed (POSIX). */
-#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */
+#define SIGCLD SIGCHLD /* Same as SIGCHLD (System V). */
#define SIGPWR 19 /* Power failure restart (System V). */
#define SIGWINCH 20 /* Window size change (4.3 BSD, Sun). */
#define SIGURG 21 /* Urgent condition on socket (4.2 BSD). */
-#define SIGIO 22 /* I/O now possible (4.2 BSD). */
-#define SIGPOLL SIGIO /* Pollable event occurred (System V). */
+#define SIGIO 22 /* I/O now possible (4.2 BSD). */
+#define SIGPOLL SIGIO /* Pollable event occurred (System V). */
#define SIGSTOP 23 /* Stop, unblockable (POSIX). */
#define SIGTSTP 24 /* Keyboard stop (POSIX). */
#define SIGCONT 25 /* Continue (POSIX). */
@@ -54,7 +54,7 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SIGVTALRM 28 /* Virtual alarm clock (4.2 BSD). */
#define SIGPROF 29 /* Profiling alarm clock (4.2 BSD). */
#define SIGXCPU 30 /* CPU limit exceeded (4.2 BSD). */
-#define SIGXFSZ 31 /* File size limit exceeded (4.2 BSD). */
+#define SIGXFSZ 31 /* File size limit exceeded (4.2 BSD). */
/* These should not be considered constants from userland. */
#define SIGRTMIN 32
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 3e68bfbda6b..47132f44c95 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -24,21 +24,21 @@
SIGPIPE when they die. */
#define SO_DONTROUTE 0x0010 /* Don't do local routing. */
#define SO_BROADCAST 0x0020 /* Allow transmission of
- broadcast messages. */
+ broadcast messages. */
#define SO_LINGER 0x0080 /* Block on close of a reliable
socket to transmit pending data. */
#define SO_OOBINLINE 0x0100 /* Receive out-of-band data in-band. */
#define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
#define SO_TYPE 0x1008 /* Compatible name for SO_STYLE. */
-#define SO_STYLE SO_TYPE /* Synonym */
+#define SO_STYLE SO_TYPE /* Synonym */
#define SO_ERROR 0x1007 /* get error status and clear */
#define SO_SNDBUF 0x1001 /* Send buffer size. */
#define SO_RCVBUF 0x1002 /* Receive buffer. */
#define SO_SNDLOWAT 0x1003 /* send low-water mark */
#define SO_RCVLOWAT 0x1004 /* receive low-water mark */
#define SO_SNDTIMEO 0x1005 /* send timeout */
-#define SO_RCVTIMEO 0x1006 /* receive timeout */
+#define SO_RCVTIMEO 0x1006 /* receive timeout */
#define SO_ACCEPTCONN 0x1009
#define SO_PROTOCOL 0x1028 /* protocol type */
#define SO_DOMAIN 0x1029 /* domain/socket family */
@@ -59,11 +59,11 @@
#define SO_BINDTODEVICE 25
/* Socket filtering */
-#define SO_ATTACH_FILTER 26
-#define SO_DETACH_FILTER 27
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
#define SO_GET_FILTER SO_ATTACH_FILTER
-#define SO_PEERNAME 28
+#define SO_PEERNAME 28
#define SO_TIMESTAMP 29
#define SCM_TIMESTAMP SO_TIMESTAMP
@@ -79,7 +79,7 @@
#define SO_TIMESTAMPING 37
#define SCM_TIMESTAMPING SO_TIMESTAMPING
-#define SO_RXQ_OVFL 40
+#define SO_RXQ_OVFL 40
#define SO_WIFI_STATUS 41
#define SCM_WIFI_STATUS SO_WIFI_STATUS
diff --git a/arch/mips/include/uapi/asm/sockios.h b/arch/mips/include/uapi/asm/sockios.h
index ed1a5f78d22..419fbe661da 100644
--- a/arch/mips/include/uapi/asm/sockios.h
+++ b/arch/mips/include/uapi/asm/sockios.h
@@ -14,7 +14,7 @@
/* Socket-level I/O control calls. */
#define FIOGETOWN _IOR('f', 123, int)
-#define FIOSETOWN _IOW('f', 124, int)
+#define FIOSETOWN _IOW('f', 124, int)
#define SIOCATMARK _IOR('s', 7, int)
#define SIOCSPGRP _IOW('s', 8, pid_t)
diff --git a/arch/mips/include/uapi/asm/stat.h b/arch/mips/include/uapi/asm/stat.h
index fe9a4c3ec5a..b47bc541bbc 100644
--- a/arch/mips/include/uapi/asm/stat.h
+++ b/arch/mips/include/uapi/asm/stat.h
@@ -23,7 +23,7 @@ struct stat {
__u32 st_nlink;
uid_t st_uid;
gid_t st_gid;
- unsigned st_rdev;
+ unsigned st_rdev;
long st_pad2[2];
off_t st_size;
long st_pad3;
diff --git a/arch/mips/include/uapi/asm/statfs.h b/arch/mips/include/uapi/asm/statfs.h
index 0f805c7a42a..3305c834fc1 100644
--- a/arch/mips/include/uapi/asm/statfs.h
+++ b/arch/mips/include/uapi/asm/statfs.h
@@ -15,7 +15,7 @@
#include <linux/types.h>
-typedef __kernel_fsid_t fsid_t;
+typedef __kernel_fsid_t fsid_t;
#endif
@@ -31,7 +31,7 @@ struct statfs {
long f_bavail;
/* Linux specials */
- __kernel_fsid_t f_fsid;
+ __kernel_fsid_t f_fsid;
long f_namelen;
long f_flags;
long f_spare[5];
@@ -73,7 +73,7 @@ struct statfs64 { /* Same as struct statfs */
long f_bavail;
/* Linux specials */
- __kernel_fsid_t f_fsid;
+ __kernel_fsid_t f_fsid;
long f_namelen;
long f_flags;
long f_spare[5];
diff --git a/arch/mips/include/uapi/asm/sysmips.h b/arch/mips/include/uapi/asm/sysmips.h
index 4f47b7d6a5f..ae637e90785 100644
--- a/arch/mips/include/uapi/asm/sysmips.h
+++ b/arch/mips/include/uapi/asm/sysmips.h
@@ -16,10 +16,10 @@
* sysmips(2) is deprecated - though some existing software uses it.
* We only support the following commands.
*/
-#define SETNAME 1 /* set hostname */
+#define SETNAME 1 /* set hostname */
#define FLUSH_CACHE 3 /* writeback and invalidate caches */
-#define MIPS_FIXADE 7 /* control address error fixing */
-#define MIPS_RDNVRAM 10 /* read NVRAM */
-#define MIPS_ATOMIC_SET 2001 /* atomically set variable */
+#define MIPS_FIXADE 7 /* control address error fixing */
+#define MIPS_RDNVRAM 10 /* read NVRAM */
+#define MIPS_ATOMIC_SET 2001 /* atomically set variable */
#endif /* _ASM_SYSMIPS_H */
diff --git a/arch/mips/include/uapi/asm/termbits.h b/arch/mips/include/uapi/asm/termbits.h
index 76630b396fa..2750203e1e7 100644
--- a/arch/mips/include/uapi/asm/termbits.h
+++ b/arch/mips/include/uapi/asm/termbits.h
@@ -53,7 +53,7 @@ struct ktermios {
};
/* c_cc characters */
-#define VINTR 0 /* Interrupt character [ISIG]. */
+#define VINTR 0 /* Interrupt character [ISIG]. */
#define VQUIT 1 /* Quit character [ISIG]. */
#define VERASE 2 /* Erase character [ICANON]. */
#define VKILL 3 /* Kill-line character [ICANON]. */
@@ -72,7 +72,7 @@ struct ktermios {
#define VDSUSP 11 /* Delayed suspend character [ISIG]. */
#endif
#define VREPRINT 12 /* Reprint-line character [ICANON]. */
-#define VDISCARD 13 /* Discard character [IEXTEN]. */
+#define VDISCARD 13 /* Discard character [IEXTEN]. */
#define VWERASE 14 /* Word-erase character [ICANON]. */
#define VLNEXT 15 /* Literal-next character [IEXTEN]. */
#define VEOF 16 /* End-of-file character [ICANON]. */
@@ -92,7 +92,7 @@ struct ktermios {
#define IXON 0002000 /* Enable start/stop output control. */
#define IXANY 0004000 /* Any character will restart after stop. */
#define IXOFF 0010000 /* Enable start/stop input control. */
-#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
+#define IMAXBEL 0020000 /* Ring bell when input queue is full. */
#define IUTF8 0040000 /* Input is UTF-8 */
/* c_oflag bits */
@@ -105,123 +105,123 @@ struct ktermios {
#define OFILL 0000100
#define OFDEL 0000200
#define NLDLY 0000400
-#define NL0 0000000
-#define NL1 0000400
+#define NL0 0000000
+#define NL1 0000400
#define CRDLY 0003000
-#define CR0 0000000
-#define CR1 0001000
-#define CR2 0002000
-#define CR3 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
#define TABDLY 0014000
-#define TAB0 0000000
-#define TAB1 0004000
-#define TAB2 0010000
-#define TAB3 0014000
-#define XTABS 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
#define BSDLY 0020000
-#define BS0 0000000
-#define BS1 0020000
+#define BS0 0000000
+#define BS1 0020000
#define VTDLY 0040000
-#define VT0 0000000
-#define VT1 0040000
+#define VT0 0000000
+#define VT1 0040000
#define FFDLY 0100000
-#define FF0 0000000
-#define FF1 0100000
+#define FF0 0000000
+#define FF1 0100000
/*
#define PAGEOUT ???
-#define WRAP ???
+#define WRAP ???
*/
/* c_cflag bit meaning */
#define CBAUD 0010017
-#define B0 0000000 /* hang up */
-#define B50 0000001
-#define B75 0000002
-#define B110 0000003
-#define B134 0000004
-#define B150 0000005
-#define B200 0000006
-#define B300 0000007
-#define B600 0000010
-#define B1200 0000011
-#define B1800 0000012
-#define B2400 0000013
-#define B4800 0000014
-#define B9600 0000015
-#define B19200 0000016
-#define B38400 0000017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
#define EXTA B19200
#define EXTB B38400
#define CSIZE 0000060 /* Number of bits per byte (mask). */
-#define CS5 0000000 /* 5 bits per byte. */
-#define CS6 0000020 /* 6 bits per byte. */
-#define CS7 0000040 /* 7 bits per byte. */
-#define CS8 0000060 /* 8 bits per byte. */
+#define CS5 0000000 /* 5 bits per byte. */
+#define CS6 0000020 /* 6 bits per byte. */
+#define CS7 0000040 /* 7 bits per byte. */
+#define CS8 0000060 /* 8 bits per byte. */
#define CSTOPB 0000100 /* Two stop bits instead of one. */
#define CREAD 0000200 /* Enable receiver. */
#define PARENB 0000400 /* Parity enable. */
-#define PARODD 0001000 /* Odd parity instead of even. */
+#define PARODD 0001000 /* Odd parity instead of even. */
#define HUPCL 0002000 /* Hang up on last close. */
#define CLOCAL 0004000 /* Ignore modem status lines. */
#define CBAUDEX 0010000
-#define BOTHER 0010000
-#define B57600 0010001
-#define B115200 0010002
-#define B230400 0010003
-#define B460800 0010004
-#define B500000 0010005
-#define B576000 0010006
-#define B921600 0010007
-#define B1000000 0010010
-#define B1152000 0010011
-#define B1500000 0010012
-#define B2000000 0010013
-#define B2500000 0010014
-#define B3000000 0010015
-#define B3500000 0010016
-#define B4000000 0010017
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
#define CIBAUD 002003600000 /* input baud rate */
-#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
-#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
/* c_lflag bits */
#define ISIG 0000001 /* Enable signals. */
#define ICANON 0000002 /* Do erase and kill processing. */
#define XCASE 0000004
-#define ECHO 0000010 /* Enable echo. */
+#define ECHO 0000010 /* Enable echo. */
#define ECHOE 0000020 /* Visual erase for ERASE. */
-#define ECHOK 0000040 /* Echo NL after KILL. */
-#define ECHONL 0000100 /* Echo NL even if ECHO is off. */
+#define ECHOK 0000040 /* Echo NL after KILL. */
+#define ECHONL 0000100 /* Echo NL even if ECHO is off. */
#define NOFLSH 0000200 /* Disable flush after interrupt. */
#define IEXTEN 0000400 /* Enable DISCARD and LNEXT. */
-#define ECHOCTL 0001000 /* Echo control characters as ^X. */
-#define ECHOPRT 0002000 /* Hardcopy visual erase. */
+#define ECHOCTL 0001000 /* Echo control characters as ^X. */
+#define ECHOPRT 0002000 /* Hardcopy visual erase. */
#define ECHOKE 0004000 /* Visual erase for KILL. */
#define FLUSHO 0020000
#define PENDIN 0040000 /* Retype pending input (state). */
-#define TOSTOP 0100000 /* Send SIGTTOU for background output. */
-#define ITOSTOP TOSTOP
-#define EXTPROC 0200000 /* External processing on pty */
+#define TOSTOP 0100000 /* Send SIGTTOU for background output. */
+#define ITOSTOP TOSTOP
+#define EXTPROC 0200000 /* External processing on pty */
/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
-#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
/* tcflow() and TCXONC use these */
-#define TCOOFF 0 /* Suspend output. */
-#define TCOON 1 /* Restart suspended output. */
-#define TCIOFF 2 /* Send a STOP character. */
-#define TCION 3 /* Send a START character. */
+#define TCOOFF 0 /* Suspend output. */
+#define TCOON 1 /* Restart suspended output. */
+#define TCIOFF 2 /* Send a STOP character. */
+#define TCION 3 /* Send a START character. */
/* tcflush() and TCFLSH use these */
-#define TCIFLUSH 0 /* Discard data received but not yet read. */
-#define TCOFLUSH 1 /* Discard data written but not yet sent. */
-#define TCIOFLUSH 2 /* Discard all pending data. */
+#define TCIFLUSH 0 /* Discard data received but not yet read. */
+#define TCOFLUSH 1 /* Discard data written but not yet sent. */
+#define TCIOFLUSH 2 /* Discard all pending data. */
/* tcsetattr uses these */
-#define TCSANOW TCSETS /* Change immediately. */
-#define TCSADRAIN TCSETSW /* Change when pending output is written. */
-#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */
+#define TCSANOW TCSETS /* Change immediately. */
+#define TCSADRAIN TCSETSW /* Change when pending output is written. */
+#define TCSAFLUSH TCSETSF /* Flush pending input before changing. */
#endif /* _ASM_TERMBITS_H */
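Editor's note on the baud-rate bits above: BOTHER and IBSHIFT are normally consumed from user space through the Linux-specific struct termios2 and the TCGETS2/TCSETS2 ioctls. A minimal sketch follows; it assumes glibc exposes those ioctls via <sys/ioctl.h>, and it is an illustration, not part of this patch.

/* Sketch: set a non-standard baud rate using BOTHER (Linux termios2 only;
 * error handling trimmed, caller supplies an already-open tty fd). */
#include <asm/termbits.h>	/* struct termios2, BOTHER, CBAUD */
#include <sys/ioctl.h>		/* TCGETS2, TCSETS2 */

int set_custom_baud(int fd, unsigned int baud)
{
	struct termios2 tio;

	if (ioctl(fd, TCGETS2, &tio) < 0)
		return -1;

	tio.c_cflag &= ~CBAUD;		/* drop the legacy Bxxx encoding */
	tio.c_cflag |= BOTHER;		/* take the rate from c_ospeed */
	tio.c_ospeed = baud;
	tio.c_ispeed = baud;

	return ioctl(fd, TCSETS2, &tio);
}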
diff --git a/arch/mips/include/uapi/asm/termios.h b/arch/mips/include/uapi/asm/termios.h
index 574fbdfb720..baeb2fa8745 100644
--- a/arch/mips/include/uapi/asm/termios.h
+++ b/arch/mips/include/uapi/asm/termios.h
@@ -31,12 +31,12 @@ struct tchars {
};
struct ltchars {
- char t_suspc; /* stop process signal */
- char t_dsuspc; /* delayed stop process signal */
- char t_rprntc; /* reprint line */
- char t_flushc; /* flush output (toggles) */
- char t_werasc; /* word erase */
- char t_lnextc; /* literal next character */
+ char t_suspc; /* stop process signal */
+ char t_dsuspc; /* delayed stop process signal */
+ char t_rprntc; /* reprint line */
+ char t_flushc; /* flush output (toggles) */
+ char t_werasc; /* word erase */
+ char t_lnextc; /* literal next character */
};
/* TIOCGSIZE, TIOCSSIZE not defined yet. Only needed for SunOS source
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 0eebf3c3e03..16338b84fa7 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -20,16 +20,16 @@
* Linux o32 style syscalls are in the range from 4000 to 4999.
*/
#define __NR_Linux 4000
-#define __NR_syscall (__NR_Linux + 0)
-#define __NR_exit (__NR_Linux + 1)
-#define __NR_fork (__NR_Linux + 2)
-#define __NR_read (__NR_Linux + 3)
-#define __NR_write (__NR_Linux + 4)
-#define __NR_open (__NR_Linux + 5)
-#define __NR_close (__NR_Linux + 6)
-#define __NR_waitpid (__NR_Linux + 7)
-#define __NR_creat (__NR_Linux + 8)
-#define __NR_link (__NR_Linux + 9)
+#define __NR_syscall (__NR_Linux + 0)
+#define __NR_exit (__NR_Linux + 1)
+#define __NR_fork (__NR_Linux + 2)
+#define __NR_read (__NR_Linux + 3)
+#define __NR_write (__NR_Linux + 4)
+#define __NR_open (__NR_Linux + 5)
+#define __NR_close (__NR_Linux + 6)
+#define __NR_waitpid (__NR_Linux + 7)
+#define __NR_creat (__NR_Linux + 8)
+#define __NR_link (__NR_Linux + 9)
#define __NR_unlink (__NR_Linux + 10)
#define __NR_execve (__NR_Linux + 11)
#define __NR_chdir (__NR_Linux + 12)
@@ -386,16 +386,16 @@
* Linux 64-bit syscalls are in the range from 5000 to 5999.
*/
#define __NR_Linux 5000
-#define __NR_read (__NR_Linux + 0)
-#define __NR_write (__NR_Linux + 1)
-#define __NR_open (__NR_Linux + 2)
-#define __NR_close (__NR_Linux + 3)
-#define __NR_stat (__NR_Linux + 4)
-#define __NR_fstat (__NR_Linux + 5)
-#define __NR_lstat (__NR_Linux + 6)
-#define __NR_poll (__NR_Linux + 7)
-#define __NR_lseek (__NR_Linux + 8)
-#define __NR_mmap (__NR_Linux + 9)
+#define __NR_read (__NR_Linux + 0)
+#define __NR_write (__NR_Linux + 1)
+#define __NR_open (__NR_Linux + 2)
+#define __NR_close (__NR_Linux + 3)
+#define __NR_stat (__NR_Linux + 4)
+#define __NR_fstat (__NR_Linux + 5)
+#define __NR_lstat (__NR_Linux + 6)
+#define __NR_poll (__NR_Linux + 7)
+#define __NR_lseek (__NR_Linux + 8)
+#define __NR_mmap (__NR_Linux + 9)
#define __NR_mprotect (__NR_Linux + 10)
#define __NR_munmap (__NR_Linux + 11)
#define __NR_brk (__NR_Linux + 12)
@@ -711,16 +711,16 @@
* Linux N32 syscalls are in the range from 6000 to 6999.
*/
#define __NR_Linux 6000
-#define __NR_read (__NR_Linux + 0)
-#define __NR_write (__NR_Linux + 1)
-#define __NR_open (__NR_Linux + 2)
-#define __NR_close (__NR_Linux + 3)
-#define __NR_stat (__NR_Linux + 4)
-#define __NR_fstat (__NR_Linux + 5)
-#define __NR_lstat (__NR_Linux + 6)
-#define __NR_poll (__NR_Linux + 7)
-#define __NR_lseek (__NR_Linux + 8)
-#define __NR_mmap (__NR_Linux + 9)
+#define __NR_read (__NR_Linux + 0)
+#define __NR_write (__NR_Linux + 1)
+#define __NR_open (__NR_Linux + 2)
+#define __NR_close (__NR_Linux + 3)
+#define __NR_stat (__NR_Linux + 4)
+#define __NR_fstat (__NR_Linux + 5)
+#define __NR_lstat (__NR_Linux + 6)
+#define __NR_poll (__NR_Linux + 7)
+#define __NR_lseek (__NR_Linux + 8)
+#define __NR_mmap (__NR_Linux + 9)
#define __NR_mprotect (__NR_Linux + 10)
#define __NR_munmap (__NR_Linux + 11)
#define __NR_brk (__NR_Linux + 12)
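A quick illustration of the three numbering bases shown in the hunks above: the same symbolic name resolves to a different absolute number per MIPS ABI (write is +4 under o32 but +1 under 64-bit and N32), so raw syscall(2) users must match their own ABI. The program below only demonstrates the arithmetic taken from these defines.

/* Sketch: per-ABI absolute syscall numbers, straight from the hunks above. */
#include <stdio.h>

int main(void)
{
	printf("o32 __NR_write = %d\n", 4000 + 4);	/* __NR_Linux = 4000 */
	printf("n64 __NR_write = %d\n", 5000 + 1);	/* __NR_Linux = 5000 */
	printf("n32 __NR_write = %d\n", 6000 + 1);	/* __NR_Linux = 6000 */
	return 0;
}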
diff --git a/arch/mips/jazz/Makefile b/arch/mips/jazz/Makefile
index dd9d99bfcf7..624b0ee3e5d 100644
--- a/arch/mips/jazz/Makefile
+++ b/arch/mips/jazz/Makefile
@@ -2,4 +2,4 @@
# Makefile for the Jazz family specific parts of the kernel
#
-obj-y := irq.o jazzdma.o reset.o setup.o
+obj-y := irq.o jazzdma.o reset.o setup.o
diff --git a/arch/mips/jazz/irq.c b/arch/mips/jazz/irq.c
index f21868b28b2..e1ea4f625f7 100644
--- a/arch/mips/jazz/irq.c
+++ b/arch/mips/jazz/irq.c
@@ -111,7 +111,7 @@ asmlinkage void plat_irq_dispatch(void)
}
static void r4030_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
/* Nothing to do ... */
}
@@ -146,7 +146,7 @@ void __init plat_time_init(void)
BUG_ON(HZ != 100);
- cd->cpumask = cpumask_of(cpu);
+ cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
action->dev_id = cd;
setup_irq(JAZZ_TIMER_IRQ, action);
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index 2d8e447cb82..db6f5afff4f 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -63,7 +63,7 @@ static inline void vdma_pgtbl_init(void)
static int __init vdma_init(void)
{
/*
- * Allocate 32k of memory for DMA page tables. This needs to be page
+ * Allocate 32k of memory for DMA page tables. This needs to be page
* aligned and should be uncached to avoid cache flushing after every
* update.
*/
@@ -218,14 +218,14 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
printk
("vdma_map: Invalid logical address: %08lx\n",
laddr);
- return -EINVAL; /* invalid logical address */
+ return -EINVAL; /* invalid logical address */
}
if (paddr > 0x1fffffff) {
if (vdma_debug)
printk
("vdma_map: Invalid physical address: %08lx\n",
paddr);
- return -EINVAL; /* invalid physical address */
+ return -EINVAL; /* invalid physical address */
}
pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
diff --git a/arch/mips/jazz/setup.c b/arch/mips/jazz/setup.c
index 820e926dacb..e4374a5651c 100644
--- a/arch/mips/jazz/setup.c
+++ b/arch/mips/jazz/setup.c
@@ -137,9 +137,9 @@ static struct resource jazz_esp_rsrc[] = {
};
static struct platform_device jazz_esp_pdev = {
- .name = "jazz_esp",
- .num_resources = ARRAY_SIZE(jazz_esp_rsrc),
- .resource = jazz_esp_rsrc
+ .name = "jazz_esp",
+ .num_resources = ARRAY_SIZE(jazz_esp_rsrc),
+ .resource = jazz_esp_rsrc
};
static struct resource jazz_sonic_rsrc[] = {
@@ -156,9 +156,9 @@ static struct resource jazz_sonic_rsrc[] = {
};
static struct platform_device jazz_sonic_pdev = {
- .name = "jazzsonic",
- .num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
- .resource = jazz_sonic_rsrc
+ .name = "jazzsonic",
+ .num_resources = ARRAY_SIZE(jazz_sonic_rsrc),
+ .resource = jazz_sonic_rsrc
};
static struct resource jazz_cmos_rsrc[] = {
@@ -175,13 +175,13 @@ static struct resource jazz_cmos_rsrc[] = {
};
static struct platform_device jazz_cmos_pdev = {
- .name = "rtc_cmos",
- .num_resources = ARRAY_SIZE(jazz_cmos_rsrc),
- .resource = jazz_cmos_rsrc
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(jazz_cmos_rsrc),
+ .resource = jazz_cmos_rsrc
};
static struct platform_device pcspeaker_pdev = {
- .name = "pcspkr",
+ .name = "pcspkr",
.id = -1,
};
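For context, static platform_device definitions like the ones touched above are normally brought up in a single platform_add_devices() call from an arch initcall. The sketch below is illustrative only: the array and initcall names are assumptions, and the real registration code sits outside this hunk (kernel context, <linux/platform_device.h> assumed available).

/* Sketch (names illustrative): register the board devices in one call. */
static struct platform_device *jazz_devices[] __initdata = {
	&jazz_esp_pdev,
	&jazz_sonic_pdev,
	&jazz_cmos_pdev,
	&pcspeaker_pdev,
};

static int __init jazz_platform_devinit(void)
{
	return platform_add_devices(jazz_devices, ARRAY_SIZE(jazz_devices));
}
arch_initcall(jazz_platform_devinit);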
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 43d964d3628..be2b3deeef1 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -52,7 +52,7 @@ static bool is_avt2;
static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
.eccbytes = 36,
.eccpos = {
- 6, 7, 8, 9, 10, 11, 12, 13,
+ 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37,
@@ -210,7 +210,7 @@ static const uint32_t qi_lb60_keymap[] = {
KEY(6, 7, KEY_RIGHT), /* S57 */
KEY(7, 0, KEY_LEFTSHIFT), /* S58 */
- KEY(7, 1, KEY_LEFTALT), /* S59 */
+ KEY(7, 1, KEY_LEFTALT), /* S59 */
KEY(7, 2, KEY_QI_FN), /* S60 */
};
@@ -317,7 +317,7 @@ static struct spi_board_info qi_lb60_spi_board_info[] = {
/* Battery */
static struct jz_battery_platform_data qi_lb60_battery_pdata = {
- .gpio_charge = JZ_GPIO_PORTC(27),
+ .gpio_charge = JZ_GPIO_PORTC(27),
.gpio_charge_active_low = 1,
.info = {
.name = "battery",
@@ -344,7 +344,7 @@ static struct gpio_keys_platform_data qi_lb60_gpio_keys_data = {
};
static struct platform_device qi_lb60_gpio_keys = {
- .name = "gpio-keys",
+ .name = "gpio-keys",
.id = -1,
.dev = {
.platform_data = &qi_lb60_gpio_keys_data,
diff --git a/arch/mips/jz4740/clock-debugfs.c b/arch/mips/jz4740/clock-debugfs.c
index 330a0f2bf17..a8acdeff267 100644
--- a/arch/mips/jz4740/clock-debugfs.c
+++ b/arch/mips/jz4740/clock-debugfs.c
@@ -3,7 +3,7 @@
* JZ4740 SoC clock support debugfs entries
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/clock.c b/arch/mips/jz4740/clock.c
index 118a8a5562d..484d38a0864 100644
--- a/arch/mips/jz4740/clock.c
+++ b/arch/mips/jz4740/clock.c
@@ -3,7 +3,7 @@
* JZ4740 SoC clock support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -31,7 +31,7 @@
#define JZ_REG_CLOCK_LOW_POWER 0x04
#define JZ_REG_CLOCK_PLL 0x10
#define JZ_REG_CLOCK_GATE 0x20
-#define JZ_REG_CLOCK_SLEEP_CTRL 0x24
+#define JZ_REG_CLOCK_SLEEP_CTRL 0x24
#define JZ_REG_CLOCK_I2S 0x60
#define JZ_REG_CLOCK_LCD 0x64
#define JZ_REG_CLOCK_MMC 0x68
diff --git a/arch/mips/jz4740/dma.c b/arch/mips/jz4740/dma.c
index d7feb898692..317ec6fffb1 100644
--- a/arch/mips/jz4740/dma.c
+++ b/arch/mips/jz4740/dma.c
@@ -3,7 +3,7 @@
* JZ4740 SoC DMA support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index e1ddb95c05e..00b798d2fb7 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -3,7 +3,7 @@
* JZ4740 platform GPIO support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/irq.c b/arch/mips/jz4740/irq.c
index fc57ded326d..2531da1d3ad 100644
--- a/arch/mips/jz4740/irq.c
+++ b/arch/mips/jz4740/irq.c
@@ -3,7 +3,7 @@
* JZ4740 platform IRQ support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/irq.h b/arch/mips/jz4740/irq.h
index f75e39d6288..0f48720b5b6 100644
--- a/arch/mips/jz4740/irq.h
+++ b/arch/mips/jz4740/irq.h
@@ -2,7 +2,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index 6d14dcdbd90..e9348fd26a3 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -3,7 +3,7 @@
* JZ4740 platform devices
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -102,7 +102,7 @@ struct platform_device jz4740_mmc_device = {
.dma_mask = &jz4740_mmc_device.dev.coherent_dma_mask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
- .num_resources = ARRAY_SIZE(jz4740_mmc_resources),
+ .num_resources = ARRAY_SIZE(jz4740_mmc_resources),
.resource = jz4740_mmc_resources,
};
@@ -114,7 +114,7 @@ static struct resource jz4740_rtc_resources[] = {
.flags = IORESOURCE_MEM,
},
{
- .start = JZ4740_IRQ_RTC,
+ .start = JZ4740_IRQ_RTC,
.end = JZ4740_IRQ_RTC,
.flags = IORESOURCE_IRQ,
},
@@ -144,7 +144,7 @@ static struct resource jz4740_i2c_resources[] = {
struct platform_device jz4740_i2c_device = {
.name = "jz4740-i2c",
.id = 0,
- .num_resources = ARRAY_SIZE(jz4740_i2c_resources),
+ .num_resources = ARRAY_SIZE(jz4740_i2c_resources),
.resource = jz4740_i2c_resources,
};
@@ -318,8 +318,8 @@ static struct resource jz4740_wdt_resources[] = {
};
struct platform_device jz4740_wdt_device = {
- .name = "jz4740-wdt",
- .id = -1,
+ .name = "jz4740-wdt",
+ .id = -1,
.num_resources = ARRAY_SIZE(jz4740_wdt_resources),
.resource = jz4740_wdt_resources,
};
diff --git a/arch/mips/jz4740/pm.c b/arch/mips/jz4740/pm.c
index 6744fa723f7..d8e21301016 100644
--- a/arch/mips/jz4740/pm.c
+++ b/arch/mips/jz4740/pm.c
@@ -3,7 +3,7 @@
* JZ4740 SoC power management support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/prom.c b/arch/mips/jz4740/prom.c
index 4a70407f55b..5a93f381590 100644
--- a/arch/mips/jz4740/prom.c
+++ b/arch/mips/jz4740/prom.c
@@ -3,7 +3,7 @@
* JZ4740 SoC prom code
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/reset.c b/arch/mips/jz4740/reset.c
index 6c0da5afcf1..b6c6343d283 100644
--- a/arch/mips/jz4740/reset.c
+++ b/arch/mips/jz4740/reset.c
@@ -2,7 +2,7 @@
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index d97cfbf882f..76eafcb79c8 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -4,7 +4,7 @@
* JZ4740 setup code
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 39bb4bbf43e..5e430ce9ac7 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -3,7 +3,7 @@
* JZ4740 platform time support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/jz4740/timer.c b/arch/mips/jz4740/timer.c
index 22f11d73a17..4992461787a 100644
--- a/arch/mips/jz4740/timer.c
+++ b/arch/mips/jz4740/timer.c
@@ -3,7 +3,7 @@
* JZ4740 platform timer support
*
* This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 007c33d7371..f81d98f6184 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o
obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_CSRC_GIC) += csrc-gic.o
obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -39,7 +40,7 @@ obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_R3000) += r2300_fpu.o r2300_switch.o
obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o
obj-$(CONFIG_CPU_TX39XX) += r2300_fpu.o r2300_switch.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
@@ -53,7 +54,7 @@ obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
-obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_I8259) += i8259.o
obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
@@ -98,4 +99,35 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+#
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
+# to enable DSP assembler support here even if the MIPS Release 2 CPU we
+# are targeting does not support DSP because all code-paths making use of
+# it properly check that the running CPU *actually does* support these
+# instructions.
+#
+ifeq ($(CONFIG_CPU_MIPSR2), y)
+CFLAGS_DSP = -DHAVE_AS_DSP
+
+#
+# Check if assembler supports DSP ASE
+#
+ifeq ($(call cc-option-yn,-mdsp), y)
+CFLAGS_DSP += -mdsp
+endif
+
+#
+# Check if assembler supports DSP ASE Rev2
+#
+ifeq ($(call cc-option-yn,-mdspr2), y)
+CFLAGS_DSP += -mdspr2
+endif
+
+CFLAGS_signal.o = $(CFLAGS_DSP)
+CFLAGS_signal32.o = $(CFLAGS_DSP)
+CFLAGS_process.o = $(CFLAGS_DSP)
+CFLAGS_branch.o = $(CFLAGS_DSP)
+CFLAGS_ptrace.o = $(CFLAGS_DSP)
+endif
+
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 9fdd8bcdd21..e06f777e9c4 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -48,7 +48,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
#include <linux/module.h>
@@ -67,8 +67,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -88,7 +88,7 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index ff448233dab..556a4357d7f 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -6,7 +6,7 @@
*
* Heavily inspired by the 32-bit Sparc compat code which is
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
- * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
+ * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#define ELF_ARCH EM_MIPS
@@ -50,7 +50,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
-#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+#define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
#include <asm/processor.h>
@@ -86,8 +86,8 @@ struct elf_prstatus32
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct compat_timeval pr_utime; /* User time */
- struct compat_timeval pr_stime; /* System time */
+ struct compat_timeval pr_utime; /* User time */
+ struct compat_timeval pr_stime; /* System time */
struct compat_timeval pr_cutime;/* Cumulative user time */
struct compat_timeval pr_cstime;/* Cumulative system time */
elf_gregset_t pr_reg; /* GP registers */
@@ -107,7 +107,7 @@ struct elf_prpsinfo32
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
char pr_fname[16]; /* filename of executable */
- char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
+ char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
#define elf_caddr_t u32
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index e908e81330b..64c4fd62cf0 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -170,7 +170,7 @@ bmips_smp_entry:
/* switch to permanent stack and continue booting */
- .global bmips_secondary_reentry
+ .global bmips_secondary_reentry
bmips_secondary_reentry:
la k0, bmips_smp_boot_sp
lw sp, 0(k0)
@@ -182,7 +182,7 @@ bmips_secondary_reentry:
#endif /* CONFIG_SMP */
.align 4
- .global bmips_reset_nmi_vec_end
+ .global bmips_reset_nmi_vec_end
bmips_reset_nmi_vec_end:
END(bmips_reset_nmi_vec)
@@ -206,7 +206,7 @@ LEAF(bmips_smp_int_vec)
eret
.align 4
- .global bmips_smp_int_vec_end
+ .global bmips_smp_int_vec_end
bmips_smp_int_vec_end:
END(bmips_smp_int_vec)
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 4d735d0e58f..83ffe950f71 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -57,7 +57,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
*/
case bcond_op:
switch (insn.i_format.rt) {
- case bltz_op:
+ case bltz_op:
case bltzl_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -197,8 +197,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
bit += (bit != 0);
bit += 23;
switch (insn.i_format.rt & 3) {
- case 0: /* bc1f */
- case 2: /* bc1fl */
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
if (~fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == 2)
@@ -208,8 +208,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
regs->cp0_epc = epc;
break;
- case 1: /* bc1t */
- case 3: /* bc1tl */
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
if (fcr31 & (1 << bit)) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == 3)
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
index 69bbfae183b..15f618b40cf 100644
--- a/arch/mips/kernel/cevt-bcm1480.c
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -41,7 +41,7 @@
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -144,7 +144,7 @@ void __cpuinit sb1480_clockevent_init(void)
bcm1480_unmask_irq(cpu, irq);
- action->handler = sibyte_counter_handler;
+ action->handler = sibyte_counter_handler;
action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
index ed648cb5a69..ff1f01b7227 100644
--- a/arch/mips/kernel/cevt-ds1287.c
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -1,7 +1,7 @@
/*
* DS1287 clockevent driver
*
- * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -89,7 +89,7 @@ static void ds1287_event_handler(struct clock_event_device *dev)
static struct clock_event_device ds1287_clockevent = {
.name = "ds1287",
.features = CLOCK_EVT_FEAT_PERIODIC,
- .set_next_event = ds1287_set_next_event,
+ .set_next_event = ds1287_set_next_event,
.set_mode = ds1287_set_mode,
.event_handler = ds1287_event_handler,
};
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index 831b47585b7..f069460751a 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx clockevent routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -98,7 +98,7 @@ static struct clock_event_device gt641xx_timer0_clockevent = {
.name = "gt641xx-timer0",
.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
.irq = GT641XX_TIMER0_IRQ,
- .set_next_event = gt641xx_timer0_set_next_event,
+ .set_next_event = gt641xx_timer0_set_next_event,
.set_mode = gt641xx_timer0_set_mode,
.event_handler = gt641xx_timer0_event_handler,
};
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 75323925e53..07b847d77f5 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -25,7 +25,7 @@
#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cnt;
int res;
@@ -66,7 +66,7 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
goto out;
/*
- * The same applies to performance counter interrupts. But with the
+ * The same applies to performance counter interrupts. But with the
* above we now know that the reason we got here must be a timer
* interrupt. Being the paranoiacs we are we check anyway.
*/
@@ -119,7 +119,7 @@ int c0_compare_int_usable(void)
unsigned int cnt;
/*
- * IP7 already pending? Try to clear it by acking the timer.
+ * IP7 already pending? Try to clear it by acking the timer.
*/
if (c0_compare_int_pending()) {
cnt = read_c0_count();
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
index e73439fd685..200f2778bf3 100644
--- a/arch/mips/kernel/cevt-sb1250.c
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -39,7 +39,7 @@
* the rest of the system
*/
static void sibyte_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
void __iomem *cfg, *init;
@@ -143,7 +143,7 @@ void __cpuinit sb1250_clockevent_init(void)
sb1250_unmask_irq(cpu, irq);
- action->handler = sibyte_counter_handler;
+ action->handler = sibyte_counter_handler;
action->flags = IRQF_PERCPU | IRQF_TIMER;
action->name = name;
action->dev_id = cd;
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
index 2e72d30b2f0..9de5ed7ef1a 100644
--- a/arch/mips/kernel/cevt-smtc.c
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -49,7 +49,7 @@ static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
- * into Count register. Valid timestamps will never be zero.
+ * into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
* to be a 1, which will introduce 1 or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index e5c30b1d086..2ae08462e46 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -4,7 +4,7 @@
* for more details.
*
* Based on linux/arch/mips/kernel/cevt-r4k.c,
- * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
*
* Copyright 2001 MontaVista Software Inc.
* Copyright (C) 2000-2001 Toshiba Corporation
@@ -129,7 +129,7 @@ static struct txx9_clock_event_device txx9_clock_event_device = {
CLOCK_EVT_FEAT_ONESHOT,
.rating = 200,
.set_mode = txx9tmr_set_mode,
- .set_next_event = txx9tmr_set_next_event,
+ .set_next_event = txx9tmr_set_next_event,
},
};
@@ -139,7 +139,7 @@ static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
struct clock_event_device *cd = &txx9_cd->cd;
struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
- __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
cd->event_handler(cd);
return IRQ_HANDLED;
}
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index d6a18644365..de3c25ffd9f 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -84,9 +84,9 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
".set noreorder\n\t"
".set nomacro\n\t"
"mult %2, %3\n\t"
- "dsll32 %0, %4, %5\n\t"
+ "dsll32 %0, %4, %5\n\t"
"mflo $0\n\t"
- "dsll32 %1, %4, %5\n\t"
+ "dsll32 %1, %4, %5\n\t"
"nop\n\t"
".set pop"
: "=&r" (lv1), "=r" (lw)
@@ -239,7 +239,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = -1;
+int daddiu_bug = -1;
static inline void check_daddiu(void)
{
@@ -273,7 +273,7 @@ static inline void check_daddiu(void)
#ifdef HAVE_AS_SET_DADDI
".set daddi\n\t"
#endif
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2\n\t"
".set pop"
@@ -292,7 +292,7 @@ static inline void check_daddiu(void)
asm volatile(
"addiu %2, $0, %3\n\t"
"dsrl %2, %2, 1\n\t"
- "daddiu %0, %2, %4\n\t"
+ "daddiu %0, %2, %4\n\t"
"addiu %1, $0, %4\n\t"
"daddu %1, %2"
: "=&r" (v), "=&r" (w), "=&r" (tmp)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index cce3782c96c..6bfccc227a9 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -4,7 +4,7 @@
* Copyright (C) xxxx the Anonymous
* Copyright (C) 1994 - 2006 Ralf Baechle
* Copyright (C) 2003, 2004 Maciej W. Rozycki
- * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -69,12 +69,12 @@ void r4k_wait_irqoff(void)
" wait \n"
" .set pop \n");
local_irq_enable();
- __asm__(" .globl __pastwait \n"
+ __asm__(" .globl __pastwait \n"
"__pastwait: \n");
}
/*
- * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
* have any pending stores when the WAIT instruction is executed.
*/
static void rm7k_wait_irqoff(void)
@@ -201,6 +201,7 @@ void __init check_wait(void)
break;
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_1004K:
@@ -331,6 +332,34 @@ static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
#endif
}
+static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+ switch (isa) {
+ case MIPS_CPU_ISA_M64R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ case MIPS_CPU_ISA_M64R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ case MIPS_CPU_ISA_V:
+ c->isa_level |= MIPS_CPU_ISA_V;
+ case MIPS_CPU_ISA_IV:
+ c->isa_level |= MIPS_CPU_ISA_IV;
+ case MIPS_CPU_ISA_III:
+ c->isa_level |= MIPS_CPU_ISA_I | MIPS_CPU_ISA_II |
+ MIPS_CPU_ISA_III;
+ break;
+
+ case MIPS_CPU_ISA_M32R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2;
+ case MIPS_CPU_ISA_M32R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1;
+ case MIPS_CPU_ISA_II:
+ c->isa_level |= MIPS_CPU_ISA_II;
+ case MIPS_CPU_ISA_I:
+ c->isa_level |= MIPS_CPU_ISA_I;
+ break;
+ }
+}
+
static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
@@ -348,10 +377,10 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
case 0:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ set_isa(c, MIPS_CPU_ISA_M32R1);
break;
case 1:
- c->isa_level = MIPS_CPU_ISA_M32R2;
+ set_isa(c, MIPS_CPU_ISA_M32R2);
break;
default:
goto unknown;
@@ -360,10 +389,10 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
case 2:
switch ((config0 & MIPS_CONF_AR) >> 10) {
case 0:
- c->isa_level = MIPS_CPU_ISA_M64R1;
+ set_isa(c, MIPS_CPU_ISA_M64R1);
break;
case 1:
- c->isa_level = MIPS_CPU_ISA_M64R2;
+ set_isa(c, MIPS_CPU_ISA_M64R2);
break;
default:
goto unknown;
@@ -439,6 +468,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
c->options |= MIPS_CPU_ULRI;
+ if (config3 & MIPS_CONF3_ISA)
+ c->options |= MIPS_CPU_MICROMIPS;
+ if (config3 & MIPS_CONF3_VZ)
+ c->ases |= MIPS_ASE_VZ;
return config3 & MIPS_CONF_M;
}
@@ -469,7 +502,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
ok = decode_config0(c); /* Read Config registers. */
- BUG_ON(!ok); /* Arch spec violation! */
+ BUG_ON(!ok); /* Arch spec violation! */
if (ok)
ok = decode_config1(c);
if (ok)
@@ -494,7 +527,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R2000:
c->cputype = CPU_R2000;
__cpu_name[cpu] = "R2000";
- c->isa_level = MIPS_CPU_ISA_I;
+ set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -514,7 +547,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_R3000;
__cpu_name[cpu] = "R3000";
}
- c->isa_level = MIPS_CPU_ISA_I;
+ set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
MIPS_CPU_NOFPUEX;
if (__cpu_has_fpu())
@@ -540,7 +573,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
}
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_VCE |
MIPS_CPU_LLSC;
@@ -580,14 +613,14 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC Vr41xx";
break;
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS;
c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -595,7 +628,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -610,13 +643,13 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
*/
c->cputype = CPU_R4650;
__cpu_name[cpu] = "R4650";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
c->tlbsize = 48;
break;
#endif
case PRID_IMP_TX39:
- c->isa_level = MIPS_CPU_ISA_I;
+ set_isa(c, MIPS_CPU_ISA_I);
c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE;
if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) {
@@ -641,7 +674,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R4700:
c->cputype = CPU_R4700;
__cpu_name[cpu] = "R4700";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -649,7 +682,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_TX49:
c->cputype = CPU_TX49XX;
__cpu_name[cpu] = "R49XX";
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS | MIPS_CPU_LLSC;
if (!(c->processor_id & 0x08))
c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
@@ -658,7 +691,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R5000:
c->cputype = CPU_R5000;
__cpu_name[cpu] = "R5000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -666,7 +699,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R5432:
c->cputype = CPU_R5432;
__cpu_name[cpu] = "R5432";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -674,7 +707,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R5500:
c->cputype = CPU_R5500;
__cpu_name[cpu] = "R5500";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_WATCH | MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -682,7 +715,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_NEVADA:
c->cputype = CPU_NEVADA;
__cpu_name[cpu] = "Nevada";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
c->tlbsize = 48;
@@ -690,7 +723,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R6000:
c->cputype = CPU_R6000;
__cpu_name[cpu] = "R6000";
- c->isa_level = MIPS_CPU_ISA_II;
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -698,7 +731,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R6000A:
c->cputype = CPU_R6000A;
__cpu_name[cpu] = "R6000A";
- c->isa_level = MIPS_CPU_ISA_II;
+ set_isa(c, MIPS_CPU_ISA_II);
c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
MIPS_CPU_LLSC;
c->tlbsize = 32;
@@ -706,38 +739,38 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_RM7000:
c->cputype = CPU_RM7000;
__cpu_name[cpu] = "RM7000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
- * Undocumented RM7000: Bit 29 in the info register of
+ * Undocumented RM7000: Bit 29 in the info register of
* the RM7000 v2.0 indicates if the TLB has 48 or 64
* entries.
*
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
+ * 29 1 => 64 entry JTLB
+ * 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_RM9000:
c->cputype = CPU_RM9000;
__cpu_name[cpu] = "RM9000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
/*
* Bit 29 in the info register of the RM9000
* indicates if the TLB has 48 or 64 entries.
*
- * 29 1 => 64 entry JTLB
- * 0 => 48 entry JTLB
+ * 29 1 => 64 entry JTLB
+ * 0 => 48 entry JTLB
*/
c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
break;
case PRID_IMP_R8000:
c->cputype = CPU_R8000;
__cpu_name[cpu] = "RM8000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
@@ -746,7 +779,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R10000:
c->cputype = CPU_R10000;
__cpu_name[cpu] = "R10000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -756,7 +789,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R12000:
c->cputype = CPU_R12000;
__cpu_name[cpu] = "R12000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -766,7 +799,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_R14000:
c->cputype = CPU_R14000;
__cpu_name[cpu] = "R14000";
- c->isa_level = MIPS_CPU_ISA_IV;
+ set_isa(c, MIPS_CPU_ISA_IV);
c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
@@ -786,7 +819,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
}
- c->isa_level = MIPS_CPU_ISA_III;
+ set_isa(c, MIPS_CPU_ISA_III);
c->options = R4K_OPTS |
MIPS_CPU_FPU | MIPS_CPU_LLSC |
MIPS_CPU_32FPR;
@@ -838,10 +871,13 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "MIPS 20Kc";
break;
case PRID_IMP_24K:
- case PRID_IMP_24KE:
c->cputype = CPU_24K;
__cpu_name[cpu] = "MIPS 24Kc";
break;
+ case PRID_IMP_24KE:
+ c->cputype = CPU_24K;
+ __cpu_name[cpu] = "MIPS 24KEc";
+ break;
case PRID_IMP_25KF:
c->cputype = CPU_25KF;
__cpu_name[cpu] = "MIPS 25Kc";
@@ -858,6 +894,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_M14KC;
__cpu_name[cpu] = "MIPS M14Kc";
break;
+ case PRID_IMP_M14KEC:
+ c->cputype = CPU_M14KEC;
+ __cpu_name[cpu] = "MIPS M14KEc";
+ break;
case PRID_IMP_1004K:
c->cputype = CPU_1004K;
__cpu_name[cpu] = "MIPS 1004Kc";
@@ -946,7 +986,7 @@ static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_PR4450:
c->cputype = CPU_PR4450;
__cpu_name[cpu] = "Philips PR4450";
- c->isa_level = MIPS_CPU_ISA_M32R1;
+ set_isa(c, MIPS_CPU_ISA_M32R1);
break;
}
}
@@ -1053,12 +1093,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
return;
}
- c->options = (MIPS_CPU_TLB |
- MIPS_CPU_4KEX |
+ c->options = (MIPS_CPU_TLB |
+ MIPS_CPU_4KEX |
MIPS_CPU_COUNTER |
- MIPS_CPU_DIVEC |
- MIPS_CPU_WATCH |
- MIPS_CPU_EJTAG |
+ MIPS_CPU_DIVEC |
+ MIPS_CPU_WATCH |
+ MIPS_CPU_EJTAG |
MIPS_CPU_LLSC);
switch (c->processor_id & 0xff00) {
@@ -1105,12 +1145,12 @@ static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu)
}
if (c->cputype == CPU_XLP) {
- c->isa_level = MIPS_CPU_ISA_M64R2;
+ set_isa(c, MIPS_CPU_ISA_M64R2);
c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK);
/* This will be updated again after all threads are woken up */
c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
} else {
- c->isa_level = MIPS_CPU_ISA_M64R1;
+ set_isa(c, MIPS_CPU_ISA_M64R1);
c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1;
}
}
@@ -1129,7 +1169,7 @@ __cpuinit void cpu_probe(void)
struct cpuinfo_mips *c = &current_cpu_data;
unsigned int cpu = smp_processor_id();
- c->processor_id = PRID_IMP_UNKNOWN;
+ c->processor_id = PRID_IMP_UNKNOWN;
c->fpu_id = FPIR_IMP_NONE;
c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
index e7c98e2b78b..3237c5235f9 100644
--- a/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
+++ b/arch/mips/kernel/cpufreq/loongson2_cpufreq.c
@@ -107,6 +107,8 @@ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int i;
+ unsigned long rate;
+ int ret;
if (!cpu_online(policy->cpu))
return -ENODEV;
@@ -117,15 +119,22 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
return PTR_ERR(cpuclk);
}
- cpuclk->rate = cpu_clock_freq / 1000;
- if (!cpuclk->rate)
+ rate = cpu_clock_freq / 1000;
+ if (!rate) {
+ clk_put(cpuclk);
return -EINVAL;
+ }
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
/* clock table init */
for (i = 2;
(loongson2_clockmod_table[i].frequency != CPUFREQ_TABLE_END);
i++)
- loongson2_clockmod_table[i].frequency = (cpuclk->rate * i) / 8;
+ loongson2_clockmod_table[i].frequency = (rate * i) / 8;
policy->cur = loongson2_cpufreq_get(policy->cpu);
@@ -195,8 +204,8 @@ static void loongson2_cpu_wait(void)
spin_lock_irqsave(&loongson2_wait_lock, flags);
cpu_freq = LOONGSON_CHIPCFG0;
- LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
- LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
+ LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */
+ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */
spin_unlock_irqrestore(&loongson2_wait_lock, flags);
}
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index 0f53c39324b..93aa302948d 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -59,7 +59,7 @@ static void crash_kexec_prepare_cpus(void)
#else /* !defined(CONFIG_SMP) */
static void crash_kexec_prepare_cpus(void) {}
-#endif /* !defined(CONFIG_SMP) */
+#endif /* !defined(CONFIG_SMP) */
void default_machine_crash_shutdown(struct pt_regs *regs)
{
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index f96f99c794a..468f3eba413 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -35,7 +35,7 @@ static cycle_t bcm1480_hpt_read(struct clocksource *cs)
struct clocksource bcm1480_clocksource = {
.name = "zbbus-cycles",
- .rating = 200,
+ .rating = 200,
.read = bcm1480_hpt_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
new file mode 100644
index 00000000000..5dca24bce51
--- /dev/null
+++ b/arch/mips/kernel/csrc-gic.c
@@ -0,0 +1,49 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/gic.h>
+
+static cycle_t gic_hpt_read(struct clocksource *cs)
+{
+ unsigned int hi, hi2, lo;
+
+ do {
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
+ } while (hi2 != hi);
+
+ return (((cycle_t) hi) << 32) + lo;
+}
+
+static struct clocksource gic_clocksource = {
+ .name = "GIC",
+ .read = gic_hpt_read,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init gic_clocksource_init(unsigned int frequency)
+{
+ unsigned int config, bits;
+
+ /* Calculate the clocksource mask. */
+ GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
+ bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
+ (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
+
+ /* Set clocksource mask. */
+ gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
+
+ /* Calculate a somewhat reasonable rating value. */
+ gic_clocksource.rating = 200 + frequency / 10000000;
+
+ clocksource_register_hz(&gic_clocksource, frequency);
+}
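The gic_hpt_read() loop added above is the standard way to read a free-running 64-bit counter exposed as two 32-bit registers: re-read the high word and retry if the low word wrapped in between. A generic sketch of the same idiom follows; read_hi()/read_lo() and the backing variables are illustrative stand-ins, not kernel APIs.

/* Sketch of the "high, low, re-read high" idiom used by gic_hpt_read(). */
#include <stdint.h>

static volatile uint32_t counter_hi, counter_lo;	/* stand-ins for MMIO */

static uint32_t read_hi(void) { return counter_hi; }
static uint32_t read_lo(void) { return counter_lo; }

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, hi2;

	do {
		hi  = read_hi();
		lo  = read_lo();
		hi2 = read_hi();	/* low word wrapped? then retry */
	} while (hi2 != hi);

	return ((uint64_t)hi << 32) | lo;
}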
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 46bd7fa98d6..0654bff9b69 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -1,7 +1,7 @@
/*
* DEC I/O ASIC's counter clocksource
*
- * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/kernel/csrc-powertv.c b/arch/mips/kernel/csrc-powertv.c
index 2e7c5232da8..abd99ea911a 100644
--- a/arch/mips/kernel/csrc-powertv.c
+++ b/arch/mips/kernel/csrc-powertv.c
@@ -45,7 +45,7 @@ unsigned int __init mips_get_pll_freq(void)
m = PLL_GET_M(pll_reg);
n = PLL_GET_N(pll_reg);
p = PLL_GET_P(pll_reg);
- pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
+ pr_info("MIPS PLL Register:0x%x M=%d N=%d P=%d\n", pll_reg, m, n, p);
/* Calculate clock frequency = (2 * N * 54MHz) / (M * (2**P)) */
fout = ((2 * n * fin) / (m * (0x01 << p)));
@@ -83,8 +83,8 @@ static void __init powertv_c0_hpt_clocksource_init(void)
/**
* struct tim_c - free running counter
- * @hi: High 16 bits of the counter
- * @lo: Low 32 bits of the counter
+ * @hi: High 16 bits of the counter
+ * @lo: Low 32 bits of the counter
*
* Lays out the structure of the free running counter in memory. This counter
* increments at a rate of 27 MHz/8 on all platforms.
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index e9606d90768..6ecb77d8206 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -44,7 +44,7 @@ static cycle_t sb1250_hpt_read(struct clocksource *cs)
struct clocksource bcm1250_clocksource = {
.name = "bcm1250-counter-3",
- .rating = 200,
+ .rating = 200,
.read = sb1250_hpt_read,
.mask = CLOCKSOURCE_MASK(23),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
index 9ae813eb782..9e6440eaa45 100644
--- a/arch/mips/kernel/early_printk.c
+++ b/arch/mips/kernel/early_printk.c
@@ -14,8 +14,7 @@
extern void prom_putchar(char);
-static void __init
-early_console_write(struct console *con, const char *s, unsigned n)
+static void early_console_write(struct console *con, const char *s, unsigned n)
{
while (n-- && *s) {
if (*s == '\n')
@@ -25,7 +24,7 @@ early_console_write(struct console *con, const char *s, unsigned n)
}
}
-static struct console early_console __initdata = {
+static struct console early_console = {
.name = "early",
.write = early_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
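Dropping __initdata here lets the boot console object outlive early init. For completeness, a hedged sketch of how such a console is typically activated very early in boot; this mirrors the usual setup_early_printk() pattern and is not part of this hunk (kernel context, <linux/console.h> assumed).

/* Sketch: hand the static boot console to the console layer once. */
void __init setup_early_printk(void)
{
	register_console(&early_console);
}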
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 83fa1460e29..cf5509f13dd 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -125,21 +125,21 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
*
* 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
*
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
* addiu v1, v1, low_16bit_of_mcount
* move at, ra
* move $12, ra_address
* jalr v1
* sub sp, sp, 8
- * 1: offset = 5 instructions
+ * 1: offset = 5 instructions
* 2.2 For the Other situations
*
- * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
* addiu v1, v1, low_16bit_of_mcount
* move at, ra
* jalr v1
* nop | move $12, ra_address | sub sp, sp, 8
- * 1: offset = 4 instructions
+ * 1: offset = 4 instructions
*/
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
@@ -228,8 +228,8 @@ int ftrace_disable_ftrace_graph_caller(void)
#ifndef KBUILD_MCOUNT_RA_ADDRESS
-#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
-#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 8a0096d6281..ecb347ce1b3 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -160,7 +160,7 @@ LEAF(r4k_wait)
.set pop
.endm
- .align 5
+ .align 5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -362,7 +362,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
SAVE_ALL
- move a0, sp
+ move a0, sp
jal nmi_exception_handler
RESTORE_ALL
.set mips3
@@ -409,7 +409,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
string escapes and emits bogus warnings if it believes to
recognize an unknown escape code. So make the arguments
start with an n and gas will believe \n is ok ... */
- .macro __BUILD_verbose nexception
+ .macro __BUILD_verbose nexception
LONG_L a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
PRINT("Got \nexception at %08lx\012")
@@ -442,7 +442,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.endm
.macro BUILD_HANDLER exception handler clear verbose
- __BUILD_HANDLER \exception \handler \clear \verbose _int
+ __BUILD_HANDLER \exception \handler \clear \verbose _int
.endm
BUILD_HANDLER adel ade ade silent /* #4 */
@@ -456,7 +456,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
BUILD_HANDLER tr tr sti silent /* #13 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
-#ifdef CONFIG_HARDWARE_WATCHPOINTS
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
/*
* For watch, interrupts will be enabled after the watch
* registers are read.
@@ -482,8 +482,8 @@ NESTED(nmi_handler, PT_SIZE, sp)
MFC0 k1, CP0_ENTRYHI
andi k1, 0xff /* ASID_MASK */
MFC0 k0, CP0_EPC
- PTR_SRL k0, _PAGE_SHIFT + 1
- PTR_SLL k0, _PAGE_SHIFT + 1
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index fcf97312f32..c61cdaed2b1 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -133,7 +133,7 @@ EXPORT(_stext)
#ifdef CONFIG_BOOT_RAW
/*
* Give us a fighting chance of running if execution beings at the
- * kernel load address. This is needed because this platform does
+ * kernel load address. This is needed because this platform does
* not have a ELF loader yet.
*/
FEXPORT(__kernel_entry)
@@ -201,7 +201,7 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
#ifdef CONFIG_SMP
/*
- * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
NESTED(smp_bootstrap, 16, sp)
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 32b397b646e..2b91fe80c43 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -178,7 +178,7 @@ handle_real_irq:
} else {
inb(PIC_MASTER_IMR); /* DUMMY - (do we need this?) */
outb(cached_master_mask, PIC_MASTER_IMR);
- outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
+ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
}
smtc_im_ack_irq(irq);
raw_spin_unlock_irqrestore(&i8259A_lock, flags);
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 883fc6cead3..44a1f792e39 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -1,7 +1,7 @@
/*
* GT641xx IRQ routines.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,7 +25,7 @@
#include <asm/gt64120.h>
-#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 14ac52c5ae8..fab40f7d2e0 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -86,7 +86,7 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
*/
void ll_msc_irq(void)
{
- unsigned int irq;
+ unsigned int irq;
/* read the interrupt vector register */
MSCIC_READ(MSC01_IC_VEC, irq);
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index b0662cf97ea..26f4e4c9db1 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -1,8 +1,8 @@
/*
* Copyright (C) 2003 Ralf Baechle
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a5aa43d07c8..d1fea7a054b 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -48,7 +48,7 @@ again:
}
/*
- * Allocate the 16 legacy interrupts for i8259 devices. This happens early
+ * Allocate the 16 legacy interrupts for i8259 devices. This happens early
* in the kernel initialization so treating allocation failure as BUG() is
* ok.
*/
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 972263bcf40..72ef2d25cbf 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -3,13 +3,13 @@
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
* Copyright (C) 2001 Ralf Baechle
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- * Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Author: Maciej W. Rozycki <macro@mips.com>
*
* This file define the irq handler for MIPS CPU interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -31,6 +31,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
@@ -113,3 +114,44 @@ void __init mips_cpu_irq_init(void)
irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
handle_percpu_irq);
}
+
+#ifdef CONFIG_IRQ_DOMAIN
+static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ static struct irq_chip *chip;
+
+ if (hw < 2 && cpu_has_mipsmt) {
+ /* Software interrupts are used for MT/CMT IPI */
+ chip = &mips_mt_cpu_irq_controller;
+ } else {
+ chip = &mips_cpu_irq_controller;
+ }
+
+ irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
+ .map = mips_cpu_intc_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int __init mips_cpu_intc_init(struct device_node *of_node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain;
+
+ /* Mask interrupts. */
+ clear_c0_status(ST0_IM);
+ clear_c0_cause(CAUSEF_IP);
+
+ domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
+ &mips_cpu_intc_irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain for MIPS CPU\n");
+
+ return 0;
+}
+#endif /* CONFIG_IRQ_DOMAIN */
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index b0c55b50218..ab00e490482 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -1,12 +1,12 @@
/*
* Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
- * linux/arch/mips/tx4927/common/tx4927_irq.c,
- * linux/arch/mips/tx4938/common/irq.c
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- * source@mvista.com
+ * ahennessy@mvista.com
+ * source@mvista.com
* Copyright (C) 2000-2001 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
@@ -122,7 +122,7 @@ static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
switch (flow_type & IRQF_TRIGGER_MASK) {
case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
- case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
default:
return -EINVAL;
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 23817a6e32b..fcaac2f132f 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -40,7 +40,7 @@ static struct hard_trap_info {
{ 6, SIGBUS }, /* instruction bus error */
{ 7, SIGBUS }, /* data bus error */
{ 9, SIGTRAP }, /* break */
-/* { 11, SIGILL }, */ /* CPU unusable */
+/* { 11, SIGILL }, */ /* CPU unusable */
{ 12, SIGFPE }, /* overflow */
{ 13, SIGTRAP }, /* trap */
{ 14, SIGSEGV }, /* virtual instruction cache coherency */
@@ -321,7 +321,7 @@ int kgdb_ll_trap(int cmd, const char *str,
.regs = regs,
.str = str,
.err = err,
- .trapnr = trap,
+ .trapnr = trap,
.signr = sig,
};
@@ -371,7 +371,7 @@ int kgdb_arch_init(void)
union mips_instruction insn = {
.r_format = {
.opcode = spec_op,
- .func = break_op,
+ .func = break_op,
}
};
memcpy(arch_kgdb_ops.gdb_bpt_instr, insn.byte, BREAK_INSTR_SIZE);
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index ce3f0807ad1..12bc4ebdf55 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -307,7 +307,7 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
/*
* Called after single-stepping. p->addr is the address of the
* instruction whose first byte has been replaced by the "break 0"
- * instruction. To avoid the SMP problems that can occur when we
+ * instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
@@ -535,7 +535,7 @@ void jprobe_return_end(void);
void __kprobes jprobe_return(void)
{
- /* Assembler quirk necessitates this '0,code' business. */
+ /* Assembler quirk necessitates this '0,code' business. */
asm volatile(
"break 0,%0\n\t"
".globl jprobe_return_end\n"
@@ -614,9 +614,9 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
- * function, the first instance's ret_addr will point to the
- * real return address, and all the rest will point to
- * kretprobe_trampoline
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 253bd8ad744..8eeee1c860c 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -76,7 +76,7 @@ out:
return error;
}
-#define RLIM_INFINITY32 0x7fffffff
+#define RLIM_INFINITY32 0x7fffffff
#define RESOURCE32(x) ((x > RLIM_INFINITY32) ? RLIM_INFINITY32 : x)
struct rlimit32 {
@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
lseek back to original location. They fail just like lseek does on
- non-seekable files. */
+ non-seekable files. */
SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
unsigned long, unused, unsigned long, a4, unsigned long, a5)
@@ -247,7 +247,7 @@ SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
}
asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
- size_t count)
+ size_t count)
{
return sys_readahead(fd, merge_64(a2, a3), count);
}
@@ -276,7 +276,7 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
unsigned offset_a3, unsigned len_a4, unsigned len_a5)
{
return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
- merge_64(len_a4, len_a5));
+ merge_64(len_a4, len_a5));
}
asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
@@ -286,7 +286,7 @@ asmlinkage long sys32_lookup_dcookie(u32 a0, u32 a1, char __user *buf,
}
SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
- u64, a3, u64, a4, int, dfd, const char __user *, pathname)
+ u64, a3, u64, a4, int, dfd, const char __user *, pathname)
{
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index df1e3e455f9..6e58e97fcd3 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -17,9 +17,9 @@
extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_user_nocheck_asm(char *__to,
- const char *__from, long __len);
+ const char *__from, long __len);
extern long __strncpy_from_user_asm(char *__to, const char *__from,
- long __len);
+ long __len);
extern long __strlen_user_nocheck_asm(const char *s);
extern long __strlen_user_asm(const char *s);
extern long __strnlen_user_nocheck_asm(const char *s);
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 61d60028b88..2b70723071c 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -55,7 +55,7 @@ static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v)
{
*location = (*location & 0xffff0000) |
- ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
return 0;
}
@@ -78,7 +78,7 @@ static int apply_r_mips_higher_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
- ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
+ ((((long long) v + 0x80008000LL) >> 32) & 0xffff);
return 0;
}
@@ -87,7 +87,7 @@ static int apply_r_mips_highest_rela(struct module *me, u32 *location,
Elf_Addr v)
{
*location = (*location & 0xffff0000) |
- ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
+ ((((long long) v + 0x800080008000LL) >> 48) & 0xffff);
return 0;
}
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 07ff5812ffa..977a623d925 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -79,7 +79,7 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
}
*location = (*location & ~0x03ffffff) |
- ((*location + (v >> 2)) & 0x03ffffff);
+ ((*location + (v >> 2)) & 0x03ffffff);
return 0;
}
@@ -122,7 +122,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
struct mips_hi16 *l;
Elf_Addr val, vallo;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (me->arch.r_mips_hi16_list != NULL) {
@@ -165,7 +165,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v)
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -230,7 +230,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
}
/*
- * Normally the hi16 list should be deallocated at this point. A
+ * Normally the hi16 list should be deallocated at this point. A
* malformed binary however could contain a series of R_MIPS_HI16
* relocations not followed by a R_MIPS_LO16 relocation. In that
* case, free up the list and return an error.
@@ -261,7 +261,7 @@ const struct exception_table_entry *search_module_dbetables(unsigned long addr)
spin_unlock_irqrestore(&dbe_lock, flags);
/* Now, if we found one, we are running inside it now, hence
- we cannot unload the module, hence no refcnt needed. */
+ we cannot unload the module, hence no refcnt needed. */
return e;
}
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
index 207f1341578..0e23343eb0a 100644
--- a/arch/mips/kernel/octeon_switch.S
+++ b/arch/mips/kernel/octeon_switch.S
@@ -30,7 +30,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 7
LEAF(resume)
@@ -69,7 +69,7 @@
1:
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
/* Check if we need to store CVMSEG state */
- mfc0 t0, $11,7 /* CvmMemCtl */
+ mfc0 t0, $11,7 /* CvmMemCtl */
bbit0 t0, 6, 3f /* Is user access enabled? */
/* Store the CVMSEG state */
@@ -77,8 +77,8 @@
andi t0, 0x3f
/* Multiply * (cache line size/sizeof(long)/2) */
sll t0, 7-LONGLOG-1
- li t1, -32768 /* Base address of CVMSEG */
- LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
synciobdma
2:
.set noreorder
@@ -89,13 +89,13 @@
LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
bnez t0, 2b /* Loop until we've copied it all */
- LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
+ LONG_S t9, -LONGSIZE(t2)/* Store CVMSEG to thread storage */
.set reorder
/* Disable access to CVMSEG */
- mfc0 t0, $11,7 /* CvmMemCtl */
+ mfc0 t0, $11,7 /* CvmMemCtl */
xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
- mtc0 t0, $11,7 /* CvmMemCtl */
+ mtc0 t0, $11,7 /* CvmMemCtl */
#endif
3:
/*
@@ -133,7 +133,7 @@
dmfc0 t9, $9,7 /* CvmCtl register. */
- /* Save the COP2 CRC state */
+ /* Save the COP2 CRC state */
dmfc2 t0, 0x0201
dmfc2 t1, 0x0202
dmfc2 t2, 0x0200
@@ -149,30 +149,30 @@
sd t0, OCTEON_CP2_LLM_DAT(a0)
sd t1, OCTEON_CP2_LLM_DAT+8(a0)
-1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
/* Save the COP2 crypto state */
- /* this part is mostly common to both pass 1 and later revisions */
- dmfc2 t0, 0x0084
- dmfc2 t1, 0x0080
- dmfc2 t2, 0x0081
- dmfc2 t3, 0x0082
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
sd t0, OCTEON_CP2_3DES_IV(a0)
- dmfc2 t0, 0x0088
+ dmfc2 t0, 0x0088
sd t1, OCTEON_CP2_3DES_KEY(a0)
- dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
sd t2, OCTEON_CP2_3DES_KEY+8(a0)
- dmfc2 t2, 0x0102
+ dmfc2 t2, 0x0102
sd t3, OCTEON_CP2_3DES_KEY+16(a0)
- dmfc2 t3, 0x0103
+ dmfc2 t3, 0x0103
sd t0, OCTEON_CP2_3DES_RESULT(a0)
- dmfc2 t0, 0x0104
- sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
- dmfc2 t1, 0x0105
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
sd t2, OCTEON_CP2_AES_IV(a0)
dmfc2 t2, 0x0106
sd t3, OCTEON_CP2_AES_IV+8(a0)
- dmfc2 t3, 0x0107
+ dmfc2 t3, 0x0107
sd t0, OCTEON_CP2_AES_KEY(a0)
dmfc2 t0, 0x0110
sd t1, OCTEON_CP2_AES_KEY+8(a0)
@@ -180,7 +180,7 @@
sd t2, OCTEON_CP2_AES_KEY+16(a0)
dmfc2 t2, 0x0101
sd t3, OCTEON_CP2_AES_KEY+24(a0)
- mfc0 t3, $15,0 /* Get the processor ID register */
+ mfc0 t3, $15,0 /* Get the processor ID register */
sd t0, OCTEON_CP2_AES_KEYLEN(a0)
li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
sd t1, OCTEON_CP2_AES_RESULT(a0)
@@ -188,7 +188,7 @@
/* Skip to the Pass1 version of the remainder of the COP2 state */
beq t3, t0, 2f
- /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
dmfc2 t1, 0x0240
dmfc2 t2, 0x0241
dmfc2 t3, 0x0242
@@ -214,7 +214,7 @@
sd t2, OCTEON_CP2_HSH_DATW+72(a0)
dmfc2 t2, 0x024D
sd t3, OCTEON_CP2_HSH_DATW+80(a0)
- dmfc2 t3, 0x024E
+ dmfc2 t3, 0x024E
sd t0, OCTEON_CP2_HSH_DATW+88(a0)
dmfc2 t0, 0x0250
sd t1, OCTEON_CP2_HSH_DATW+96(a0)
@@ -232,9 +232,9 @@
sd t3, OCTEON_CP2_HSH_IVW+24(a0)
dmfc2 t3, 0x0257
sd t0, OCTEON_CP2_HSH_IVW+32(a0)
- dmfc2 t0, 0x0258
+ dmfc2 t0, 0x0258
sd t1, OCTEON_CP2_HSH_IVW+40(a0)
- dmfc2 t1, 0x0259
+ dmfc2 t1, 0x0259
sd t2, OCTEON_CP2_HSH_IVW+48(a0)
dmfc2 t2, 0x025E
sd t3, OCTEON_CP2_HSH_IVW+56(a0)
@@ -247,7 +247,7 @@
sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
jr ra
-2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
dmfc2 t3, 0x0040
dmfc2 t0, 0x0041
dmfc2 t1, 0x0042
@@ -269,7 +269,7 @@
sd t3, OCTEON_CP2_HSH_IVW+8(a0)
sd t0, OCTEON_CP2_HSH_IVW+16(a0)
-3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
jr ra
END(octeon_cop2_save)
@@ -280,19 +280,19 @@
.set push
.set noreorder
LEAF(octeon_cop2_restore)
- /* First cache line was prefetched before the call */
- pref 4, 128(a0)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
dmfc0 t9, $9,7 /* CvmCtl register. */
- pref 4, 256(a0)
+ pref 4, 256(a0)
ld t0, OCTEON_CP2_CRC_IV(a0)
- pref 4, 384(a0)
+ pref 4, 384(a0)
ld t1, OCTEON_CP2_CRC_LENGTH(a0)
ld t2, OCTEON_CP2_CRC_POLY(a0)
/* Restore the COP2 CRC state */
dmtc2 t0, 0x0201
- dmtc2 t1, 0x1202
+ dmtc2 t1, 0x1202
bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
dmtc2 t2, 0x4200
@@ -310,19 +310,19 @@
ld t0, OCTEON_CP2_3DES_IV(a0)
ld t1, OCTEON_CP2_3DES_KEY(a0)
ld t2, OCTEON_CP2_3DES_KEY+8(a0)
- dmtc2 t0, 0x0084
+ dmtc2 t0, 0x0084
ld t0, OCTEON_CP2_3DES_KEY+16(a0)
- dmtc2 t1, 0x0080
+ dmtc2 t1, 0x0080
ld t1, OCTEON_CP2_3DES_RESULT(a0)
- dmtc2 t2, 0x0081
+ dmtc2 t2, 0x0081
ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
dmtc2 t0, 0x0082
ld t0, OCTEON_CP2_AES_IV(a0)
- dmtc2 t1, 0x0098
+ dmtc2 t1, 0x0098
ld t1, OCTEON_CP2_AES_IV+8(a0)
- dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
ld t2, OCTEON_CP2_AES_KEY(a0)
- dmtc2 t0, 0x0102
+ dmtc2 t0, 0x0102
ld t0, OCTEON_CP2_AES_KEY+8(a0)
dmtc2 t1, 0x0103
ld t1, OCTEON_CP2_AES_KEY+16(a0)
@@ -334,14 +334,14 @@
ld t1, OCTEON_CP2_AES_RESULT(a0)
dmtc2 t2, 0x0107
ld t2, OCTEON_CP2_AES_RESULT+8(a0)
- mfc0 t3, $15,0 /* Get the processor ID register */
+ mfc0 t3, $15,0 /* Get the processor ID register */
dmtc2 t0, 0x0110
li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
dmtc2 t1, 0x0100
bne t0, t3, 3f /* Skip the next stuff for non-pass1 */
dmtc2 t2, 0x0101
- /* this code is specific for pass 1 */
+ /* this code is specific for pass 1 */
ld t0, OCTEON_CP2_HSH_DATW(a0)
ld t1, OCTEON_CP2_HSH_DATW+8(a0)
ld t2, OCTEON_CP2_HSH_DATW+16(a0)
@@ -361,10 +361,10 @@
ld t0, OCTEON_CP2_HSH_IVW+16(a0)
dmtc2 t1, 0x0048
dmtc2 t2, 0x0049
- b done_restore /* unconditional branch */
+ b done_restore /* unconditional branch */
dmtc2 t0, 0x004A
-3: /* this is post-pass1 code */
+3: /* this is post-pass1 code */
ld t2, OCTEON_CP2_HSH_DATW(a0)
ld t0, OCTEON_CP2_HSH_DATW+8(a0)
ld t1, OCTEON_CP2_HSH_DATW+16(a0)
@@ -433,7 +433,7 @@ done_restore:
* sp is assumed to point to a struct pt_regs
*
* NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- * safely modify k0 and k1.
+ * safely modify k0 and k1.
*/
.align 7
.set push
@@ -446,14 +446,14 @@ done_restore:
/* Save the multiplier state */
v3mulu k0, $0, $0
v3mulu k1, $0, $0
- sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
v3mulu k0, $0, $0
sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
ori k1, $0, 1
v3mulu k1, k1, $0
sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
v3mulu k0, $0, $0
- sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
v3mulu k1, $0, $0
sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
jr ra
@@ -475,19 +475,19 @@ done_restore:
.set noreorder
LEAF(octeon_mult_restore)
dmfc0 k1, $9,7 /* CvmCtl register. */
- ld v0, PT_MPL(sp) /* MPL0 */
- ld v1, PT_MPL+8(sp) /* MPL1 */
- ld k0, PT_MPL+16(sp) /* MPL2 */
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */
/* Normally falls through, so no time wasted here */
nop
/* Restore the multiplier state */
- ld k1, PT_MTP+16(sp) /* P2 */
+ ld k1, PT_MTP+16(sp) /* P2 */
MTM0 v0 /* MPL0 */
ld v0, PT_MTP+8(sp) /* P1 */
MTM1 v1 /* MPL1 */
- ld v1, PT_MTP(sp) /* P0 */
+ ld v1, PT_MTP(sp) /* P0 */
MTM2 k0 /* MPL2 */
MTP2 k1 /* P2 */
MTP1 v0 /* P1 */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d9c81c5a6c9..45f1ffcf1a4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -103,13 +103,13 @@ static struct mips_pmu mipspmu;
#define M_CONFIG1_PC (1 << 4)
-#define M_PERFCTL_EXL (1 << 0)
-#define M_PERFCTL_KERNEL (1 << 1)
-#define M_PERFCTL_SUPERVISOR (1 << 2)
-#define M_PERFCTL_USER (1 << 3)
-#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
+#define M_PERFCTL_EXL (1 << 0)
+#define M_PERFCTL_KERNEL (1 << 1)
+#define M_PERFCTL_SUPERVISOR (1 << 2)
+#define M_PERFCTL_USER (1 << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
-#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter) 0
@@ -117,13 +117,13 @@ static struct mips_pmu mipspmu;
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
#endif /* CONFIG_CPU_BMIPS5000 */
-#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
-#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
-#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
-#define M_PERFCTL_WIDE (1 << 30)
-#define M_PERFCTL_MORE (1 << 31)
-#define M_PERFCTL_TC (1 << 30)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1 << 30)
+#define M_PERFCTL_MORE (1 << 31)
+#define M_PERFCTL_TC (1 << 30)
#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
M_PERFCTL_KERNEL | \
@@ -827,7 +827,7 @@ static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
- [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
@@ -1371,7 +1371,7 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
(b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
(r) == 176 || ((b) >= 50 && (b) <= 55) || \
((b) >= 64 && (b) <= 67))
-#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
#endif
/* 74K */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 07dff54f2ce..135c4aadccb 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1995, 1996, 2001 Ralf Baechle
* Copyright (C) 2001, 2004 MIPS Technologies, Inc.
- * Copyright (C) 2004 Maciej W. Rozycki
+ * Copyright (C) 2004 Maciej W. Rozycki
*/
#include <linux/delay.h>
#include <linux/kernel.h>
@@ -64,6 +64,28 @@ static int show_cpuinfo(struct seq_file *m, void *v)
cpu_data[n].watch_reg_masks[i]);
seq_printf(m, "]\n");
}
+ if (cpu_has_mips_r) {
+ seq_printf(m, "isa\t\t\t:");
+ if (cpu_has_mips_1)
+ seq_printf(m, "%s", "mips1");
+ if (cpu_has_mips_2)
+ seq_printf(m, "%s", " mips2");
+ if (cpu_has_mips_3)
+ seq_printf(m, "%s", " mips3");
+ if (cpu_has_mips_4)
+ seq_printf(m, "%s", " mips4");
+ if (cpu_has_mips_5)
+ seq_printf(m, "%s", " mips5");
+ if (cpu_has_mips32r1)
+ seq_printf(m, "%s", " mips32r1");
+ if (cpu_has_mips32r2)
+ seq_printf(m, "%s", " mips32r2");
+ if (cpu_has_mips64r1)
+ seq_printf(m, "%s", " mips64r1");
+ if (cpu_has_mips64r2)
+ seq_printf(m, "%s", " mips64r2");
+ seq_printf(m, "\n");
+ }
seq_printf(m, "ASEs implemented\t:");
if (cpu_has_mips16) seq_printf(m, "%s", " mips16");
@@ -73,6 +95,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_dsp) seq_printf(m, "%s", " dsp");
if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2");
if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
+ if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
+ if (cpu_has_vz) seq_printf(m, "%s", " vz");
seq_printf(m, "\n");
seq_printf(m, "shadow register sets\t: %d\n",
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index a33d2ef8f27..3be4405c2d1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -154,8 +154,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
return 0;
}
*childregs = *regs;
- childregs->regs[7] = 0; /* Clear error flag */
- childregs->regs[2] = 0; /* Child gets zero as return value */
+ childregs->regs[7] = 0; /* Clear error flag */
+ childregs->regs[2] = 0; /* Child gets zero as return value */
if (usp)
childregs->regs[29] = usp;
ti->addr_limit = USER_DS;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 4812c6d916e..9c6299c733a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -50,7 +50,7 @@ void ptrace_disable(struct task_struct *child)
}
/*
- * Read a general register set. We always use the 64-bit format, even
+ * Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
*/
@@ -326,7 +326,7 @@ long arch_ptrace(struct task_struct *child, long request,
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
+ case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long irqflags;
@@ -520,10 +520,10 @@ static inline int audit_arch(void)
{
int arch = EM_MIPS;
#ifdef CONFIG_64BIT
- arch |= __AUDIT_ARCH_64BIT;
+ arch |= __AUDIT_ARCH_64BIT;
#endif
#if defined(__LITTLE_ENDIAN)
- arch |= __AUDIT_ARCH_LE;
+ arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
@@ -546,7 +546,7 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
/* The 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
@@ -581,7 +581,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
/* The 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ?
- 0x80 : 0));
+ 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index a3b017815ef..9486055ba66 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -124,7 +124,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
+ case FPC_EIR: { /* implementation / version register */
unsigned int flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned int irqflags;
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index 61c8a0f2a60..f31063dbdae 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -30,38 +30,38 @@
LEAF(_save_fp_context)
li v0, 0 # assume success
cfc1 t1,fcr31
- EX(swc1 $f0,(SC_FPREGS+0)(a0))
- EX(swc1 $f1,(SC_FPREGS+8)(a0))
- EX(swc1 $f2,(SC_FPREGS+16)(a0))
- EX(swc1 $f3,(SC_FPREGS+24)(a0))
- EX(swc1 $f4,(SC_FPREGS+32)(a0))
- EX(swc1 $f5,(SC_FPREGS+40)(a0))
- EX(swc1 $f6,(SC_FPREGS+48)(a0))
- EX(swc1 $f7,(SC_FPREGS+56)(a0))
- EX(swc1 $f8,(SC_FPREGS+64)(a0))
- EX(swc1 $f9,(SC_FPREGS+72)(a0))
- EX(swc1 $f10,(SC_FPREGS+80)(a0))
- EX(swc1 $f11,(SC_FPREGS+88)(a0))
- EX(swc1 $f12,(SC_FPREGS+96)(a0))
- EX(swc1 $f13,(SC_FPREGS+104)(a0))
- EX(swc1 $f14,(SC_FPREGS+112)(a0))
- EX(swc1 $f15,(SC_FPREGS+120)(a0))
- EX(swc1 $f16,(SC_FPREGS+128)(a0))
- EX(swc1 $f17,(SC_FPREGS+136)(a0))
- EX(swc1 $f18,(SC_FPREGS+144)(a0))
- EX(swc1 $f19,(SC_FPREGS+152)(a0))
- EX(swc1 $f20,(SC_FPREGS+160)(a0))
- EX(swc1 $f21,(SC_FPREGS+168)(a0))
- EX(swc1 $f22,(SC_FPREGS+176)(a0))
- EX(swc1 $f23,(SC_FPREGS+184)(a0))
- EX(swc1 $f24,(SC_FPREGS+192)(a0))
- EX(swc1 $f25,(SC_FPREGS+200)(a0))
- EX(swc1 $f26,(SC_FPREGS+208)(a0))
- EX(swc1 $f27,(SC_FPREGS+216)(a0))
- EX(swc1 $f28,(SC_FPREGS+224)(a0))
- EX(swc1 $f29,(SC_FPREGS+232)(a0))
- EX(swc1 $f30,(SC_FPREGS+240)(a0))
- EX(swc1 $f31,(SC_FPREGS+248)(a0))
+ EX(swc1 $f0,(SC_FPREGS+0)(a0))
+ EX(swc1 $f1,(SC_FPREGS+8)(a0))
+ EX(swc1 $f2,(SC_FPREGS+16)(a0))
+ EX(swc1 $f3,(SC_FPREGS+24)(a0))
+ EX(swc1 $f4,(SC_FPREGS+32)(a0))
+ EX(swc1 $f5,(SC_FPREGS+40)(a0))
+ EX(swc1 $f6,(SC_FPREGS+48)(a0))
+ EX(swc1 $f7,(SC_FPREGS+56)(a0))
+ EX(swc1 $f8,(SC_FPREGS+64)(a0))
+ EX(swc1 $f9,(SC_FPREGS+72)(a0))
+ EX(swc1 $f10,(SC_FPREGS+80)(a0))
+ EX(swc1 $f11,(SC_FPREGS+88)(a0))
+ EX(swc1 $f12,(SC_FPREGS+96)(a0))
+ EX(swc1 $f13,(SC_FPREGS+104)(a0))
+ EX(swc1 $f14,(SC_FPREGS+112)(a0))
+ EX(swc1 $f15,(SC_FPREGS+120)(a0))
+ EX(swc1 $f16,(SC_FPREGS+128)(a0))
+ EX(swc1 $f17,(SC_FPREGS+136)(a0))
+ EX(swc1 $f18,(SC_FPREGS+144)(a0))
+ EX(swc1 $f19,(SC_FPREGS+152)(a0))
+ EX(swc1 $f20,(SC_FPREGS+160)(a0))
+ EX(swc1 $f21,(SC_FPREGS+168)(a0))
+ EX(swc1 $f22,(SC_FPREGS+176)(a0))
+ EX(swc1 $f23,(SC_FPREGS+184)(a0))
+ EX(swc1 $f24,(SC_FPREGS+192)(a0))
+ EX(swc1 $f25,(SC_FPREGS+200)(a0))
+ EX(swc1 $f26,(SC_FPREGS+208)(a0))
+ EX(swc1 $f27,(SC_FPREGS+216)(a0))
+ EX(swc1 $f28,(SC_FPREGS+224)(a0))
+ EX(swc1 $f29,(SC_FPREGS+232)(a0))
+ EX(swc1 $f30,(SC_FPREGS+240)(a0))
+ EX(swc1 $f31,(SC_FPREGS+248)(a0))
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
jr ra
@@ -82,38 +82,38 @@ LEAF(_save_fp_context)
LEAF(_restore_fp_context)
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
- EX(lwc1 $f0,(SC_FPREGS+0)(a0))
- EX(lwc1 $f1,(SC_FPREGS+8)(a0))
- EX(lwc1 $f2,(SC_FPREGS+16)(a0))
- EX(lwc1 $f3,(SC_FPREGS+24)(a0))
- EX(lwc1 $f4,(SC_FPREGS+32)(a0))
- EX(lwc1 $f5,(SC_FPREGS+40)(a0))
- EX(lwc1 $f6,(SC_FPREGS+48)(a0))
- EX(lwc1 $f7,(SC_FPREGS+56)(a0))
- EX(lwc1 $f8,(SC_FPREGS+64)(a0))
- EX(lwc1 $f9,(SC_FPREGS+72)(a0))
- EX(lwc1 $f10,(SC_FPREGS+80)(a0))
- EX(lwc1 $f11,(SC_FPREGS+88)(a0))
- EX(lwc1 $f12,(SC_FPREGS+96)(a0))
- EX(lwc1 $f13,(SC_FPREGS+104)(a0))
- EX(lwc1 $f14,(SC_FPREGS+112)(a0))
- EX(lwc1 $f15,(SC_FPREGS+120)(a0))
- EX(lwc1 $f16,(SC_FPREGS+128)(a0))
- EX(lwc1 $f17,(SC_FPREGS+136)(a0))
- EX(lwc1 $f18,(SC_FPREGS+144)(a0))
- EX(lwc1 $f19,(SC_FPREGS+152)(a0))
- EX(lwc1 $f20,(SC_FPREGS+160)(a0))
- EX(lwc1 $f21,(SC_FPREGS+168)(a0))
- EX(lwc1 $f22,(SC_FPREGS+176)(a0))
- EX(lwc1 $f23,(SC_FPREGS+184)(a0))
- EX(lwc1 $f24,(SC_FPREGS+192)(a0))
- EX(lwc1 $f25,(SC_FPREGS+200)(a0))
- EX(lwc1 $f26,(SC_FPREGS+208)(a0))
- EX(lwc1 $f27,(SC_FPREGS+216)(a0))
- EX(lwc1 $f28,(SC_FPREGS+224)(a0))
- EX(lwc1 $f29,(SC_FPREGS+232)(a0))
- EX(lwc1 $f30,(SC_FPREGS+240)(a0))
- EX(lwc1 $f31,(SC_FPREGS+248)(a0))
+ EX(lwc1 $f0,(SC_FPREGS+0)(a0))
+ EX(lwc1 $f1,(SC_FPREGS+8)(a0))
+ EX(lwc1 $f2,(SC_FPREGS+16)(a0))
+ EX(lwc1 $f3,(SC_FPREGS+24)(a0))
+ EX(lwc1 $f4,(SC_FPREGS+32)(a0))
+ EX(lwc1 $f5,(SC_FPREGS+40)(a0))
+ EX(lwc1 $f6,(SC_FPREGS+48)(a0))
+ EX(lwc1 $f7,(SC_FPREGS+56)(a0))
+ EX(lwc1 $f8,(SC_FPREGS+64)(a0))
+ EX(lwc1 $f9,(SC_FPREGS+72)(a0))
+ EX(lwc1 $f10,(SC_FPREGS+80)(a0))
+ EX(lwc1 $f11,(SC_FPREGS+88)(a0))
+ EX(lwc1 $f12,(SC_FPREGS+96)(a0))
+ EX(lwc1 $f13,(SC_FPREGS+104)(a0))
+ EX(lwc1 $f14,(SC_FPREGS+112)(a0))
+ EX(lwc1 $f15,(SC_FPREGS+120)(a0))
+ EX(lwc1 $f16,(SC_FPREGS+128)(a0))
+ EX(lwc1 $f17,(SC_FPREGS+136)(a0))
+ EX(lwc1 $f18,(SC_FPREGS+144)(a0))
+ EX(lwc1 $f19,(SC_FPREGS+152)(a0))
+ EX(lwc1 $f20,(SC_FPREGS+160)(a0))
+ EX(lwc1 $f21,(SC_FPREGS+168)(a0))
+ EX(lwc1 $f22,(SC_FPREGS+176)(a0))
+ EX(lwc1 $f23,(SC_FPREGS+184)(a0))
+ EX(lwc1 $f24,(SC_FPREGS+192)(a0))
+ EX(lwc1 $f25,(SC_FPREGS+200)(a0))
+ EX(lwc1 $f26,(SC_FPREGS+208)(a0))
+ EX(lwc1 $f27,(SC_FPREGS+216)(a0))
+ EX(lwc1 $f28,(SC_FPREGS+224)(a0))
+ EX(lwc1 $f29,(SC_FPREGS+232)(a0))
+ EX(lwc1 $f30,(SC_FPREGS+240)(a0))
+ EX(lwc1 $f31,(SC_FPREGS+248)(a0))
jr ra
ctc1 t0,fcr31
END(_restore_fp_context)
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 8d32d5a6b46..5266c6ee2b3 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -42,7 +42,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, int usedfpu)
*/
LEAF(resume)
mfc0 t1, CP0_STATUS
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 8decdfacb44..5e51219990a 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -40,7 +40,7 @@
/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, int usedfpu)
*/
.align 5
LEAF(resume)
@@ -53,7 +53,7 @@
* check if we need to save FPU registers
*/
- beqz a3, 1f
+ beqz a3, 1f
PTR_L t3, TASK_THREAD_INFO(a0)
/*
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 804ebb2c34a..43d2d78d328 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -33,7 +33,7 @@ process_entry:
b process_entry
1:
- /* indirection page, update s0 */
+ /* indirection page, update s0 */
and s3, s2, 0x2
beq s3, zero, 1f
and s0, s2, ~0x2
@@ -69,7 +69,7 @@ done:
of kexec_flag. */
bal 1f
- 1: move t1,ra;
+ 1: move t1,ra;
PTR_LA t2,1b
PTR_LA t0,kexec_flag
PTR_SUB t0,t0,t2;
@@ -158,10 +158,10 @@ arg3: PTR 0x0
*/
secondary_kexec_args:
EXPORT(secondary_kexec_args)
-s_arg0: PTR 0x0
-s_arg1: PTR 0x0
-s_arg2: PTR 0x0
-s_arg3: PTR 0x0
+s_arg0: PTR 0x0
+s_arg1: PTR 0x0
+s_arg2: PTR 0x0
+s_arg3: PTR 0x0
.size secondary_kexec_args,PTRSIZE*4
kexec_flag:
LONG 0x1
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 88f7b50d541..93c070b41b0 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -252,12 +252,12 @@ int rtlx_release(int index)
unsigned int rtlx_read_poll(int index, int can_sleep)
{
- struct rtlx_channel *chan;
+ struct rtlx_channel *chan;
- if (rtlx == NULL)
- return 0;
+ if (rtlx == NULL)
+ return 0;
- chan = &rtlx->channel[index];
+ chan = &rtlx->channel[index];
/* data available to read? */
if (chan->lx_read == chan->lx_write) {
@@ -446,8 +446,8 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
return -EAGAIN;
__wait_event_interruptible(channel_wqs[minor].rt_queue,
- rtlx_write_poll(minor),
- ret);
+ rtlx_write_poll(minor),
+ ret);
if (ret)
return ret;
}
@@ -457,11 +457,11 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
static const struct file_operations rtlx_fops = {
.owner = THIS_MODULE,
- .open = file_open,
+ .open = file_open,
.release = file_release,
.write = file_write,
- .read = file_read,
- .poll = file_poll,
+ .read = file_read,
+ .poll = file_poll,
.llseek = noop_llseek,
};
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 80ff9422215..9ea29649fc2 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -24,7 +24,7 @@
/* Highest syscall used of any syscall flavour */
#define MAX_SYSCALL_NO __NR_O32_Linux + __NR_O32_Linux_syscalls
- .align 5
+ .align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
@@ -54,7 +54,7 @@ stack_done:
lw t0, TI_FLAGS($28) # syscall tracing enabled?
li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
and t0, t1
- bnez t0, syscall_trace_entry # -> yes
+ bnez t0, syscall_trace_entry # -> yes
jalr t2 # Do The Real Thing (TM)
@@ -126,8 +126,8 @@ stackargs:
la t1, 5f # load up to 3 arguments
subu t1, t3
1: lw t5, 16(t0) # argument #5 from usp
- .set push
- .set noreorder
+ .set push
+ .set noreorder
.set nomacro
jr t1
addiu t1, 6f - 5f
@@ -205,7 +205,7 @@ illegal_syscall:
jr t2
/* Unreached */
-einval: li v0, -ENOSYS
+einval: li v0, -ENOSYS
jr ra
END(sys_syscall)
@@ -354,7 +354,7 @@ einval: li v0, -ENOSYS
sys sys_ni_syscall 0 /* was create_module */
sys sys_init_module 5
sys sys_delete_module 1
- sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
+ sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
sys sys_quotactl 4
sys sys_getpgid 1
sys sys_fchdir 1
@@ -589,7 +589,7 @@ einval: li v0, -ENOSYS
/* We pre-compute the number of _instruction_ bytes needed to
load or store the arguments 6-8. Negative values are ignored. */
- .macro sys function, nargs
+ .macro sys function, nargs
PTR \function
LONG (\nargs << 2) - (5 << 2)
.endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 9444ad9ea57..36cfd4060e1 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -25,7 +25,7 @@
#define handle_sys64 handle_sys
#endif
- .align 5
+ .align 5
NESTED(handle_sys64, PT_SIZE, sp)
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
/*
@@ -40,7 +40,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_64_Linux # check syscall number
- sltiu t0, t0, __NR_64_Linux_syscalls + 1
+ sltiu t0, t0, __NR_64_Linux_syscalls + 1
#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
ld t1, PT_EPC(sp) # skip syscall on return
daddiu t1, 4 # skip to next instruction
@@ -290,7 +290,7 @@ sys_call_table:
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 5175 for putpmsg */
+ PTR sys_ni_syscall /* 5175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 3b18a8e2921..693d60b0855 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -22,7 +22,7 @@
#define handle_sysn32 handle_sys
#endif
- .align 5
+ .align 5
NESTED(handle_sysn32, PT_SIZE, sp)
#ifndef CONFIG_MIPS32_O32
.set noat
@@ -33,7 +33,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
#endif
dsubu t0, v0, __NR_N32_Linux # check syscall number
- sltiu t0, t0, __NR_N32_Linux_syscalls + 1
+ sltiu t0, t0, __NR_N32_Linux_syscalls + 1
#ifndef CONFIG_MIPS32_O32
ld t1, PT_EPC(sp) # skip syscall on return
@@ -279,7 +279,7 @@ EXPORT(sysn32_call_table)
PTR sys_quotactl
PTR sys_ni_syscall /* was nfsservctl */
PTR sys_ni_syscall /* res. for getpmsg */
- PTR sys_ni_syscall /* 6175 for putpmsg */
+ PTR sys_ni_syscall /* 6175 for putpmsg */
PTR sys_ni_syscall /* res. for afs_syscall */
PTR sys_ni_syscall /* res. for security */
PTR sys_gettid
@@ -402,8 +402,8 @@ EXPORT(sysn32_call_table)
PTR compat_sys_rt_tgsigqueueinfo /* 6295 */
PTR sys_perf_event_open
PTR sys_accept4
- PTR compat_sys_recvmmsg
- PTR sys_getdents64
+ PTR compat_sys_recvmmsg
+ PTR sys_getdents64
PTR sys_fanotify_init /* 6300 */
PTR sys_fanotify_mark
PTR sys_prlimit64
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 063cd0d6ddd..af8887f779f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -10,7 +10,7 @@
*
* Hairy, the userspace application uses a different argument passing
* convention than the kernel, so we have to translate things from o32
- * to ABI64 calling convention. 64-bit syscalls are also processed
+ * to ABI64 calling convention. 64-bit syscalls are also processed
* here for now.
*/
#include <linux/errno.h>
@@ -24,7 +24,7 @@
#include <asm/unistd.h>
#include <asm/sysmips.h>
- .align 5
+ .align 5
NESTED(handle_sys, PT_SIZE, sp)
.set noat
SAVE_SOME
@@ -185,7 +185,7 @@ LEAF(sys32_syscall)
jr t2
/* Unreached */
-einval: li v0, -ENOSYS
+einval: li v0, -ENOSYS
jr ra
END(sys32_syscall)
@@ -284,8 +284,8 @@ sys_call_table:
PTR compat_sys_old_readdir
PTR sys_mips_mmap /* 4090 */
PTR sys_munmap
- PTR sys_truncate
- PTR sys_ftruncate
+ PTR compat_sys_truncate
+ PTR compat_sys_ftruncate
PTR sys_fchmod
PTR sys_fchown /* 4095 */
PTR sys_getpriority
@@ -329,7 +329,7 @@ sys_call_table:
PTR sys_bdflush
PTR sys_sysfs /* 4135 */
PTR sys_32_personality
- PTR sys_ni_syscall /* for afs_syscall */
+ PTR sys_ni_syscall /* for afs_syscall */
PTR sys_setfsuid
PTR sys_setfsgid
PTR sys_32_llseek /* 4140 */
@@ -352,12 +352,12 @@ sys_call_table:
PTR sys_munlockall
PTR sys_sched_setparam
PTR sys_sched_getparam
- PTR sys_sched_setscheduler /* 4160 */
+ PTR sys_sched_setscheduler /* 4160 */
PTR sys_sched_getscheduler
PTR sys_sched_yield
PTR sys_sched_get_priority_max
PTR sys_sched_get_priority_min
- PTR compat_sys_sched_rr_get_interval /* 4165 */
+ PTR compat_sys_sched_rr_get_interval /* 4165 */
PTR compat_sys_nanosleep
PTR sys_mremap
PTR sys_accept
@@ -387,7 +387,7 @@ sys_call_table:
PTR sys_prctl
PTR sys32_rt_sigreturn
PTR compat_sys_rt_sigaction
- PTR compat_sys_rt_sigprocmask /* 4195 */
+ PTR compat_sys_rt_sigprocmask /* 4195 */
PTR compat_sys_rt_sigpending
PTR compat_sys_rt_sigtimedwait
PTR compat_sys_rt_sigqueueinfo
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8c41187801c..4c774d5d508 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -8,7 +8,7 @@
* Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
* Copyright (C) 1996 Stoned Elipot
* Copyright (C) 1999 Silicon Graphics, Inc.
- * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <linux/ioport.h>
@@ -449,7 +449,7 @@ static void __init bootmem_init(void)
* At this stage the bootmem allocator is ready to use.
*
* NOTE: historically plat_mem_setup did the entire platform initialization.
- * This was rather impractical because it meant plat_mem_setup had to
+ * This was rather impractical because it meant plat_mem_setup had to
* get away without any kind of memory allocator. To keep old code from
 * breaking plat_setup was just renamed to plat_mem_setup and a second platform
* initialization hook for anything else was introduced.
@@ -469,7 +469,7 @@ static int __init early_parse_mem(char *p)
if (usermem == 0) {
boot_mem_map.nr_map = 0;
usermem = 1;
- }
+ }
start = 0;
size = memparse(p, &p);
if (*p == '@')
@@ -480,34 +480,75 @@ static int __init early_parse_mem(char *p)
}
early_param("mem", early_parse_mem);
-static void __init arch_mem_init(char **cmdline_p)
+#ifdef CONFIG_PROC_VMCORE
+unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
+static int __init early_parse_elfcorehdr(char *p)
+{
+ int i;
+
+ setup_elfcorehdr = memparse(p, &p);
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ unsigned long start = boot_mem_map.map[i].addr;
+ unsigned long end = (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size);
+ if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment, that should all be kdump
+ * reserved memory.
+ */
+ setup_elfcorehdr_size = end - setup_elfcorehdr;
+ break;
+ }
+ }
+ /*
+ * If we don't find it in the memory map, then we shouldn't
+ * have to worry about it, as the new kernel won't use it.
+ */
+ return 0;
+}
+early_param("elfcorehdr", early_parse_elfcorehdr);
+#endif
+
+static void __init arch_mem_addpart(phys_t mem, phys_t end, int type)
{
- phys_t init_mem, init_end, init_size;
+ phys_t size;
+ int i;
+
+ size = end - mem;
+ if (!size)
+ return;
+
+ /* Make sure it is in the boot_mem_map */
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ if (mem >= boot_mem_map.map[i].addr &&
+ mem < (boot_mem_map.map[i].addr +
+ boot_mem_map.map[i].size))
+ return;
+ }
+ add_memory_region(mem, size, type);
+}
+static void __init arch_mem_init(char **cmdline_p)
+{
extern void plat_mem_setup(void);
/* call board setup routine */
plat_mem_setup();
- init_mem = PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT;
- init_end = PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT;
- init_size = init_end - init_mem;
- if (init_size) {
- /* Make sure it is in the boot_mem_map */
- int i, found;
- found = 0;
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- if (init_mem >= boot_mem_map.map[i].addr &&
- init_mem < (boot_mem_map.map[i].addr +
- boot_mem_map.map[i].size)) {
- found = 1;
- break;
- }
- }
- if (!found)
- add_memory_region(init_mem, init_size,
- BOOT_MEM_INIT_RAM);
- }
+ /*
+ * Make sure all kernel memory is in the maps. The "UP" and
+ * "DOWN" are opposite for initdata since if it crosses over
+ * into another memory section you don't want that to be
+ * freed when the initdata is freed.
+ */
+ arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
+ PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
+ BOOT_MEM_RAM);
+ arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
+ PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
+ BOOT_MEM_INIT_RAM);
pr_info("Determined physical RAM map:\n");
print_memory_map();
@@ -537,6 +578,14 @@ static void __init arch_mem_init(char **cmdline_p)
}
bootmem_init();
+#ifdef CONFIG_PROC_VMCORE
+ if (setup_elfcorehdr && setup_elfcorehdr_size) {
+ printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
+ setup_elfcorehdr, setup_elfcorehdr_size);
+ reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
+ BOOTMEM_DEFAULT);
+ }
+#endif
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
reserve_bootmem(crashk_res.start,
@@ -571,7 +620,7 @@ static void __init mips_parse_crashkernel(void)
return;
crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
+ crashk_res.end = crash_base + crash_size - 1;
}
static void __init request_crashkernel(struct resource *res)
@@ -585,7 +634,7 @@ static void __init request_crashkernel(struct resource *res)
crashk_res.start + 1) >> 20),
(unsigned long)(crashk_res.start >> 20));
}
-#else /* !defined(CONFIG_KEXEC) */
+#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 95b019d92f5..b5e88fd8327 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -412,7 +412,7 @@ give_sigsegv:
#endif
static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
- struct pt_regs *regs, int signr, sigset_t *set,
+ struct pt_regs *regs, int signr, sigset_t *set,
siginfo_t *info)
{
struct rt_sigframe __user *frame;
@@ -425,7 +425,7 @@ static int setup_rt_frame(void *sig_return, struct k_sigaction *ka,
/* Create siginfo. */
err |= copy_siginfo_to_user(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(NULL, &frame->rs_uc.uc_link);
err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -468,7 +468,7 @@ struct mips_abi mips_abi = {
.setup_frame = setup_frame,
.signal_return_offset = offsetof(struct mips_vdso, signal_trampoline),
#endif
- .setup_rt_frame = setup_rt_frame,
+ .setup_rt_frame = setup_rt_frame,
.rt_signal_return_offset =
offsetof(struct mips_vdso, rt_signal_trampoline),
.restart = __NR_restart_syscall
@@ -500,7 +500,7 @@ static void handle_signal(unsigned long sig, siginfo_t *info,
regs->cp0_epc -= 4;
}
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
if (sig_uses_siginfo(ka))
@@ -524,7 +524,7 @@ static void do_signal(struct pt_regs *regs)
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
- /* Whee! Actually deliver the signal. */
+ /* Whee! Actually deliver the signal. */
handle_signal(signr, &info, &ka, regs);
return;
}
@@ -545,7 +545,7 @@ static void do_signal(struct pt_regs *regs)
regs->cp0_epc -= 4;
break;
}
- regs->regs[0] = 0; /* Don't deal with this again. */
+ regs->regs[0] = 0; /* Don't deal with this again. */
}
/*
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index ad7c2be0c33..57de8b75162 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -48,7 +48,7 @@ extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
*/
-#define __NR_O32_restart_syscall 4253
+#define __NR_O32_restart_syscall 4253
/* 32-bit compatibility types */
@@ -56,11 +56,11 @@ typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
+ u32 uc_flags;
+ s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct sigframe32 {
@@ -302,7 +302,7 @@ SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *,
return -EFAULT;
err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
- &oact->sa_handler);
+ &oact->sa_handler);
err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
err |= __put_user(0, &oact->sa_mask.sig[1]);
err |= __put_user(0, &oact->sa_mask.sig[2]);
@@ -507,7 +507,7 @@ static int setup_rt_frame_32(void *sig_return, struct k_sigaction *ka,
/* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -552,7 +552,7 @@ struct mips_abi mips_abi_32 = {
.setup_frame = setup_frame_32,
.signal_return_offset =
offsetof(struct mips_vdso, o32_signal_trampoline),
- .setup_rt_frame = setup_rt_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, o32_rt_signal_trampoline),
.restart = __NR_O32_restart_syscall
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 5f4ef2ae619..b2241bb9cac 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -51,11 +51,11 @@ extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
struct ucontextn32 {
- u32 uc_flags;
- s32 uc_link;
+ u32 uc_flags;
+ s32 uc_link;
compat_stack_t uc_stack;
struct sigcontext uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
};
struct rt_sigframe_n32 {
@@ -115,7 +115,7 @@ static int setup_rt_frame_n32(void *sig_return, struct k_sigaction *ka,
/* Create siginfo. */
err |= copy_siginfo_to_user32(&frame->rs_info, info);
- /* Create the ucontext. */
+ /* Create the ucontext. */
err |= __put_user(0, &frame->rs_uc.uc_flags);
err |= __put_user(0, &frame->rs_uc.uc_link);
err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
@@ -154,7 +154,7 @@ give_sigsegv:
}
struct mips_abi mips_abi_n32 = {
- .setup_rt_frame = setup_rt_frame_n32,
+ .setup_rt_frame = setup_rt_frame_n32,
.rt_signal_return_offset =
offsetof(struct mips_vdso, n32_rt_signal_trampoline),
.restart = __NR_N32_restart_syscall
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index 06cd0c610f4..c2e5d74739b 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -172,7 +172,7 @@ void __init cmp_smp_setup(void)
if (amon_cpu_avail(i)) {
set_cpu_possible(i, true);
__cpu_number_map[i] = ++ncpu;
- __cpu_logical_map[ncpu] = i;
+ __cpu_logical_map[ncpu] = i;
}
}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2defa2bbdaa..bfede063d96 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -71,7 +71,7 @@ static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
/* Record this as available CPU */
set_cpu_possible(tc, true);
__cpu_number_map[tc] = ++ncpu;
- __cpu_logical_map[ncpu] = tc;
+ __cpu_logical_map[ncpu] = tc;
}
/* Disable multi-threading with TC's */
@@ -215,7 +215,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
write_tc_gpr_gp((unsigned long)gp);
flush_icache_range((unsigned long)gp,
- (unsigned long)(gp + sizeof(struct thread_info)));
+ (unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 20938a4cb52..76016ac0a9c 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -65,7 +65,7 @@ FEXPORT(__smtc_ipi_vector)
1:
/*
* The IPI sender has put some information on the anticipated
- * kernel stack frame. If we were in user mode, this will be
+ * kernel stack frame. If we were in user mode, this will be
* built above the saved kernel SP. If we were already in the
* kernel, it will be built above the current CPU SP.
*
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
index 145771c0ed7..aee7c8177b5 100644
--- a/arch/mips/kernel/smtc-proc.c
+++ b/arch/mips/kernel/smtc-proc.c
@@ -35,7 +35,7 @@ static struct proc_dir_entry *smtc_stats;
atomic_t smtc_fpu_recoveries;
static int proc_read_smtc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
int totalen = 0;
int len;
@@ -68,7 +68,7 @@ static int proc_read_smtc(char *page, char **start, off_t off,
page += len;
}
len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
- atomic_read(&smtc_fpu_recoveries));
+ atomic_read(&smtc_fpu_recoveries));
totalen += len;
page += len;
@@ -87,5 +87,5 @@ void init_smtc_stats(void)
atomic_set(&smtc_fpu_recoveries, 0);
smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
- proc_read_smtc, NULL);
+ proc_read_smtc, NULL);
}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 1d47843d3cc..7186222dc5b 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -41,6 +41,7 @@
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_proc.h>
+#include <asm/setup.h>
/*
* SMTC Kernel needs to manipulate low-level CPU interrupt mask
@@ -235,7 +236,7 @@ static void smtc_configure_tlb(void)
mips_ihb();
/* No need to un-Halt - that happens later anyway */
for (i=0; i < vpes; i++) {
- write_tc_c0_tcbind(i);
+ write_tc_c0_tcbind(i);
/*
* To be 100% sure we're really getting the right
* information, we exit the configuration state
@@ -286,7 +287,7 @@ static void smtc_configure_tlb(void)
/*
* Incrementally build the CPU map out of constituent MIPS MT cores,
- * using the specified available VPEs and TCs. Plaform code needs
+ * using the specified available VPEs and TCs. Plaform code needs
* to ensure that each MIPS MT core invokes this routine on reset,
* one at a time(!).
*
@@ -348,7 +349,7 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
{
/*
* FIXME: Multi-core SMTC hasn't been tested and the
- * maximum number of VPEs may change.
+ * maximum number of VPEs may change.
*/
cp1contexts[0] = smtc_nconf1[0] - 1;
cp1contexts[1] = smtc_nconf1[1];
@@ -761,9 +762,9 @@ void smtc_forward_irq(struct irq_data *d)
* mask has been purged of bits corresponding to nonexistent and
* offline "CPUs", and to TCs bound to VPEs other than the VPE
* connected to the physical interrupt input for the interrupt
- * in question. Otherwise we have a nasty problem with interrupt
+ * in question. Otherwise we have a nasty problem with interrupt
* mask management. This is best handled in non-performance-critical
- * platform IRQ affinity setting code, to minimize interrupt-time
+ * platform IRQ affinity setting code, to minimize interrupt-time
* checks.
*/
@@ -899,10 +900,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
mips_ihb();
/*
- * Inspect TCStatus - if IXMT is set, we have to queue
+ * Inspect TCStatus - if IXMT is set, we have to queue
* a message. Otherwise, we set up the "interrupt"
* of the other TC
- */
+ */
tcstatus = read_tc_c0_tcstatus();
if ((tcstatus & TCSTATUS_IXMT) != 0) {
@@ -964,7 +965,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
* CU bit of Status is indicator that TC was
* already running on a kernel stack...
*/
- if (tcstatus & ST0_CU0) {
+ if (tcstatus & ST0_CU0) {
/* Note that this "- 1" is pointer arithmetic */
kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
} else {
@@ -1288,7 +1289,7 @@ void smtc_idle_loop_hook(void)
for (tc = 0; tc < hook_ntcs; tc++) {
tcnoprog[tc] = 0;
clock_hang_reported[tc] = 0;
- }
+ }
for (vpe = 0; vpe < 2; vpe++)
for (im = 0; im < 8; im++)
imstuckcount[vpe][im] = 0;
@@ -1485,7 +1486,7 @@ static int halt_state_save[NR_CPUS];
/*
* To really, really be sure that nothing is being done
- * by other TCs, halt them all. This code assumes that
+ * by other TCs, halt them all. This code assumes that
* a DVPE has already been done, so while their Halted
* state is theoretically architecturally unstable, in
* practice, it's not going to change while we're looking
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
index 7f1eca3858d..1ff43d5ac2c 100644
--- a/arch/mips/kernel/sync-r4k.c
+++ b/arch/mips/kernel/sync-r4k.c
@@ -25,7 +25,7 @@ static atomic_t __cpuinitdata count_count_start = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_count_stop = ATOMIC_INIT(0);
static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0);
-#define COUNTON 100
+#define COUNTON 100
#define NR_LOOPS 5
void __cpuinit synchronise_count_master(int cpu)
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b32466a1a1d..b79d13f95bf 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -41,9 +41,9 @@
/*
* For historic reasons the pipe(2) syscall on MIPS has an unusual calling
- * convention. It returns results in registers $v0 / $v1 which means there
+ * convention. It returns results in registers $v0 / $v1 which means there
* is no need for it to do verify the validity of a userspace pointer
- * argument. Historically that used to be expensive in Linux. These days
+ * argument. Historically that used to be expensive in Linux. These days
* the performance advantage is negligible.
*/
asmlinkage int sysm_pipe(void)
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 99d73b72b00..9d686bf97b0 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -5,8 +5,8 @@
*
* Common time service routines for MIPS machines.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -62,8 +62,8 @@ EXPORT_SYMBOL(perf_irq);
* time_init() - it does the following things.
*
* 1) plat_time_init() -
- * a) (optional) set up RTC routines,
- * b) (optional) calibrate and set the mips_hpt_frequency
+ * a) (optional) set up RTC routines,
+ * b) (optional) calibrate and set the mips_hpt_frequency
* (only needed if you intended to use cpu counter as timer interrupt
* source)
* 2) calculate a couple of cached variables for later usage
@@ -75,7 +75,7 @@ unsigned int mips_hpt_frequency;
* This function exists in order to cause an error due to a duplicate
* definition if platform code should have its own implementation. The hook
* to use instead is plat_time_init. plat_time_init does not receive the
- * irqaction pointer argument anymore. This is because any function which
+ * irqaction pointer argument anymore. This is because any function which
* initializes an interrupt timer now takes care of its own request_irq rsp.
* setup_irq calls and each clock_event_device should use its own
* struct irqrequest.
@@ -93,7 +93,7 @@ static __init int cpu_has_mfc0_count_bug(void)
case CPU_R4000MC:
/*
* V3.0 is documented as suffering from the mfc0 from count bug.
- * Afaik this is the last version of the R4000. Later versions
+ * Afaik this is the last version of the R4000. Later versions
* were marketed as R4400.
*/
return 1;
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9007966d56d..a200b5bdbb8 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -164,7 +164,7 @@ static void show_stacktrace(struct task_struct *task,
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
- printk("\n ");
+ printk("\n ");
if (i > 39) {
printk(" ...");
break;
@@ -279,7 +279,7 @@ static void __show_regs(const struct pt_regs *regs)
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
- printk("Status: %08x ", (uint32_t) regs->cp0_status);
+ printk("Status: %08x ", (uint32_t) regs->cp0_status);
if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (regs->cp0_status & ST0_KUO)
@@ -441,7 +441,7 @@ asmlinkage void do_be(struct pt_regs *regs)
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
- /* XXX For now. Fixme, this searches the wrong table ... */
+ /* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
@@ -518,7 +518,7 @@ static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
offset >>= 16;
vaddr = (unsigned long __user *)
- ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
@@ -558,7 +558,7 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
offset >>= 16;
vaddr = (unsigned long __user *)
- ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
@@ -739,7 +739,7 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
- own_fpu(1); /* Using the FPU again. */
+ own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
@@ -966,7 +966,7 @@ int cu2_notifier_call_chain(unsigned long val, void *v)
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
- void *data)
+ void *data)
{
struct pt_regs *regs = data;
@@ -974,7 +974,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
default:
die_if_kernel("Unhandled kernel unaligned access or invalid "
"instruction", regs);
- /* Fall through */
+ /* Fall through */
case CU2_EXCEPTION:
force_sig(SIGILL, current);
@@ -1029,10 +1029,10 @@ asmlinkage void do_cpu(struct pt_regs *regs)
/*
* Old (MIPS I and MIPS II) processors will set this code
* for COP1X opcode instructions that replaced the original
- * COP3 space. We don't limit COP1 space instructions in
+ * COP3 space. We don't limit COP1 space instructions in
* the emulator according to the CPU ISA, so we want to
* treat COP1X instructions consistently regardless of which
- * code the CPU chose. Therefore we redirect this trap to
+ * code the CPU chose. Therefore we redirect this trap to
* the FP emulator too.
*
* Then some newer FPU-less processors use this code
@@ -1044,9 +1044,9 @@ asmlinkage void do_cpu(struct pt_regs *regs)
/* Fall through. */
case 1:
- if (used_math()) /* Using the FPU again. */
+ if (used_math()) /* Using the FPU again. */
own_fpu(1);
- else { /* First time FPU user. */
+ else { /* First time FPU user. */
init_fpu();
set_used_math();
}
@@ -1114,7 +1114,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
show_regs(regs);
if (multi_match) {
- printk("Index : %0x\n", read_c0_index());
+ printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
@@ -1181,7 +1181,7 @@ asmlinkage void do_dsp(struct pt_regs *regs)
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
- * Game over - no way to handle this if it ever occurs. Most probably
+ * Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
@@ -1705,7 +1705,7 @@ void __init trap_init(void)
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
- return; /* Already done */
+ return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
@@ -1799,7 +1799,7 @@ void __init trap_init(void)
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
- * written yet. Well, anyway there is no R6000 machine on the
+ * written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
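
The simulate_ll()/simulate_sc() hunks above rebuild the faulting effective address straight from the instruction word: the base register index sits in bits 25..21 and the 16-bit offset is sign-extended before being added to that register. A minimal, stand-alone sketch of the same I-type decode (names are local to this sketch; standard MIPS32 field positions assumed):

#include <stdint.h>

struct itype {
	unsigned int base;	/* rs field, bits 25..21 */
	unsigned int rt;	/* rt field, bits 20..16 */
	int32_t offset;		/* sign-extended immediate, bits 15..0 */
};

static struct itype decode_itype(uint32_t opcode)
{
	struct itype d;

	d.base   = (opcode >> 21) & 0x1f;
	d.rt     = (opcode >> 16) & 0x1f;
	d.offset = (int16_t)(opcode & 0xffff);
	/* the effective address is then regs[d.base] + d.offset */
	return d;
}
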
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 9c58bdf58f2..6087a54c86a 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -21,11 +21,11 @@
*
* For now I enable fixing of address errors by default to make life easier.
* I however intend to disable this somewhen in the future when the alignment
- * problems with user programs have been fixed. For programmers this is the
+ * problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per process option. The option is inherited
- * across fork(2) and execve(2) calls. If you really want to use the
+ * across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
@@ -43,34 +43,34 @@
* #include <sys/sysmips.h>
*
* struct foo {
- * unsigned char bar[8];
+ * unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
- * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
- * unsigned int *p = (unsigned int *) (x.bar + 3);
- * int i;
+ * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
+ * unsigned int *p = (unsigned int *) (x.bar + 3);
+ * int i;
*
- * if (argc > 1)
- * sysmips(MIPS_FIXADE, atoi(argv[1]));
+ * if (argc > 1)
+ * sysmips(MIPS_FIXADE, atoi(argv[1]));
*
- * printf("*p = %08lx\n", *p);
+ * printf("*p = %08lx\n", *p);
*
- * *p = 0xdeadface;
+ * *p = 0xdeadface;
*
- * for(i = 0; i <= 7; i++)
- * printf("%02x ", x.bar[i]);
- * printf("\n");
+ * for(i = 0; i <= 7; i++)
+ * printf("%02x ", x.bar[i]);
+ * printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in the practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
- * exception for the R6000.
- * A store crossing a page boundary might be executed only partially.
- * Undo the partial store in this case.
+ * exception for the R6000.
+ * A store crossing a page boundary might be executed only partially.
+ * Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/signal.h>
@@ -86,7 +86,7 @@
#include <asm/inst.h>
#include <asm/uaccess.h>
-#define STR(x) __STR(x)
+#define STR(x) __STR(x)
#define __STR(x) #x
enum {
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 0a4336b803e..05826d20a79 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -22,12 +22,12 @@ PHDRS {
#ifdef CONFIG_32BIT
#ifdef CONFIG_CPU_LITTLE_ENDIAN
- jiffies = jiffies_64;
+ jiffies = jiffies_64;
#else
- jiffies = jiffies_64 + 4;
+ jiffies = jiffies_64 + 4;
#endif
#else
- jiffies = jiffies_64;
+ jiffies = jiffies_64;
#endif
SECTIONS
@@ -139,7 +139,7 @@ SECTIONS
/*
* Force .bss to 64K alignment so that .bss..swapper_pg_dir
- * gets that alignment. .sbss should be empty, so there will be
+ * gets that alignment. .sbss should be empty, so there will be
* no holes after __init_end. */
BSS_SECTION(0, 0x10000, 0)
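
The jiffies aliases above exist because 32-bit kernels expose jiffies as the low word of the 64-bit jiffies_64 counter, and on big-endian that word starts 4 bytes into the object. A small host-side illustration of the offset, assuming GCC's predefined byte-order macros:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t jiffies_64 = 0x0000000100000002ull;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	/* big-endian: the low 32 bits live at byte offset 4, hence "+ 4" above */
	uint32_t low = *(uint32_t *)((char *)&jiffies_64 + 4);
#else
	/* little-endian: the low 32 bits start at offset 0 */
	uint32_t low = *(uint32_t *)&jiffies_64;
#endif
	printf("%u\n", low);	/* prints 2 either way */
	return 0;
}
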
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 29933652ff4..1765bab000a 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -254,7 +254,7 @@ static void __maybe_unused dump_mtregs(void)
val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
}
-/* Find some VPE program space */
+/* Find some VPE program space */
static void *alloc_progmem(unsigned long len)
{
void *addr;
@@ -292,7 +292,7 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr)
}
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
- might -- code, read-only data, read-write data, small data. Tally
+ might -- code, read-only data, read-write data, small data. Tally
sizes, and place the offsets into sh_entsize fields: high bit means it
belongs in init. */
static void layout_sections(struct module *mod, const Elf_Ehdr * hdr,
@@ -386,7 +386,7 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
if( (rel > 32768) || (rel < -32768) ) {
printk(KERN_DEBUG "VPE loader: "
- "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
+ "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
return -ENOEXEC;
}
@@ -458,7 +458,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
Elf32_Addr val, vallo;
struct mips_hi16 *l, *next;
- /* Sign extend the addend we extract from the lo insn. */
+ /* Sign extend the addend we extract from the lo insn. */
vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
if (mips_hi16_list != NULL) {
@@ -470,7 +470,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
/*
* The value for the HI16 had best be the same.
*/
- if (v != l->value) {
+ if (v != l->value) {
printk(KERN_DEBUG "VPE loader: "
"apply_r_mips_lo16/hi16: \t"
"inconsistent value information\n");
@@ -505,7 +505,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
}
/*
- * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
*/
val = v + vallo;
insnlo = (insnlo & ~0xffff) | (val & 0xffff);
@@ -579,7 +579,7 @@ static int apply_relocations(Elf32_Shdr *sechdrs,
res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
if( res ) {
char *r = rstrs[ELF32_R_TYPE(r_info)];
- printk(KERN_WARNING "VPE loader: .text+0x%x "
+ printk(KERN_WARNING "VPE loader: .text+0x%x "
"relocation type %s for symbol \"%s\" failed\n",
rel[i].r_offset, r ? r : "UNKNOWN",
strtab + sym->st_name);
@@ -697,18 +697,7 @@ static int vpe_run(struct vpe * v)
dmt_flag = dmt();
vpeflags = dvpe();
- if (!list_empty(&v->tc)) {
- if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
- evpe(vpeflags);
- emt(dmt_flag);
- local_irq_restore(flags);
-
- printk(KERN_WARNING
- "VPE loader: TC %d is already in use.\n",
- v->tc->index);
- return -ENOEXEC;
- }
- } else {
+ if (list_empty(&v->tc)) {
evpe(vpeflags);
emt(dmt_flag);
local_irq_restore(flags);
@@ -720,6 +709,8 @@ static int vpe_run(struct vpe * v)
return -ENOEXEC;
}
+ t = list_first_entry(&v->tc, struct tc, tc);
+
/* Put MVPE's into 'configuration state' */
set_c0_mvpcontrol(MVPCONTROL_VPC);
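
The vpe_run() rework above removes a check that could never trigger: list_entry() on the first node of a non-empty list is plain container_of() arithmetic and cannot yield NULL, so testing list_empty() up front is enough. A sketch of the resulting idiom on a made-up structure (not the VPE types):

#include <linux/list.h>

struct demo_item {
	int value;
	struct list_head node;
};

static struct demo_item *demo_first(struct list_head *head)
{
	if (list_empty(head))
		return NULL;			/* the only real failure mode */

	/* never NULL once the list is known to be non-empty */
	return list_first_entry(head, struct demo_item, node);
}
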
@@ -772,7 +763,7 @@ static int vpe_run(struct vpe * v)
/* Set up the XTC bit in vpeconf0 to point at our tc */
write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
- | (t->index << VPECONF0_XTC_SHIFT));
+ | (t->index << VPECONF0_XTC_SHIFT));
back_to_back_c0_hazard();
@@ -926,34 +917,34 @@ static int vpe_elfload(struct vpe * v)
secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
}
- /* Fix up syms, so that st_value is a pointer to location. */
- simplify_symbols(sechdrs, symindex, strtab, secstrings,
- hdr->e_shnum, &mod);
-
- /* Now do relocations. */
- for (i = 1; i < hdr->e_shnum; i++) {
- const char *strtab = (char *)sechdrs[strindex].sh_addr;
- unsigned int info = sechdrs[i].sh_info;
-
- /* Not a valid relocation section? */
- if (info >= hdr->e_shnum)
- continue;
-
- /* Don't bother with non-allocated sections */
- if (!(sechdrs[info].sh_flags & SHF_ALLOC))
- continue;
-
- if (sechdrs[i].sh_type == SHT_REL)
- err = apply_relocations(sechdrs, strtab, symindex, i,
- &mod);
- else if (sechdrs[i].sh_type == SHT_RELA)
- err = apply_relocate_add(sechdrs, strtab, symindex, i,
- &mod);
- if (err < 0)
- return err;
-
- }
- } else {
+ /* Fix up syms, so that st_value is a pointer to location. */
+ simplify_symbols(sechdrs, symindex, strtab, secstrings,
+ hdr->e_shnum, &mod);
+
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ if (sechdrs[i].sh_type == SHT_REL)
+ err = apply_relocations(sechdrs, strtab, symindex, i,
+ &mod);
+ else if (sechdrs[i].sh_type == SHT_RELA)
+ err = apply_relocate_add(sechdrs, strtab, symindex, i,
+ &mod);
+ if (err < 0)
+ return err;
+
+ }
+ } else {
struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff);
for (i = 0; i < hdr->e_phnum; i++) {
@@ -968,16 +959,16 @@ static int vpe_elfload(struct vpe * v)
}
for (i = 0; i < hdr->e_shnum; i++) {
- /* Internal symbols and strings. */
- if (sechdrs[i].sh_type == SHT_SYMTAB) {
- symindex = i;
- strindex = sechdrs[i].sh_link;
- strtab = (char *)hdr + sechdrs[strindex].sh_offset;
-
- /* mark the symtab's address for when we try to find the
- magic symbols */
- sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
- }
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr + sechdrs[strindex].sh_offset;
+
+ /* mark the symtab's address for when we try to find the
+ magic symbols */
+ sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
+ }
}
}
@@ -1049,7 +1040,7 @@ static int getcwd(char *buff, int size)
return ret;
}
-/* checks VPE is unused and gets ready to load program */
+/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
enum vpe_state state;
@@ -1121,11 +1112,11 @@ static int vpe_release(struct inode *inode, struct file *filp)
if (vpe_elfload(v) >= 0) {
vpe_run(v);
} else {
- printk(KERN_WARNING "VPE loader: ELF load failed.\n");
+ printk(KERN_WARNING "VPE loader: ELF load failed.\n");
ret = -ENOEXEC;
}
} else {
- printk(KERN_WARNING "VPE loader: only elf files are supported\n");
+ printk(KERN_WARNING "VPE loader: only elf files are supported\n");
ret = -ENOEXEC;
}
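
The apply_r_mips_lo16() hunks above recover the 16-bit addend with the sign-extension trick ((insnlo & 0xffff) ^ 0x8000) - 0x8000, add the symbol value, and then rewrite the paired HI16/LO16 immediates so that (hi << 16) plus the sign-extended lo reproduces the full address. A simplified sketch of just that split, ignoring the pending-HI16 list kept by the real code:

#include <stdint.h>

static void split_hi_lo(uint32_t val, uint16_t *hi, uint16_t *lo)
{
	*lo = val & 0xffff;
	/* the +1 bias compensates for the lo half being sign-extended at run time;
	 * e.g. val = 0x00018000 -> hi = 0x0002, lo = 0x8000,
	 * and 0x20000 + (int16_t)0x8000 == 0x18000 again */
	*hi = (uint16_t)((val >> 16) + ((val & 0x8000) != 0));
}
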
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index c1540696803..7726f6157d9 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -12,7 +12,7 @@
#include <asm/watch.h>
/*
- * Install the watch registers for the current thread. A maximum of
+ * Install the watch registers for the current thread. A maximum of
* four registers are installed although the machine may have more.
*/
void mips_install_watch_registers(void)
@@ -72,7 +72,7 @@ void mips_read_watch_registers(void)
}
/*
- * Disable all watch registers. Although only four registers are
+ * Disable all watch registers. Although only four registers are
* installed, all are cleared to eliminate the possibility of endless
* looping in the watch handler.
*/
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index ce2f129b081..3fc2e6d70c7 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -26,13 +26,15 @@
#include "prom.h"
/* lantiq socs have 3 static clocks */
-static struct clk cpu_clk_generic[3];
+static struct clk cpu_clk_generic[4];
-void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
+void clkdev_add_static(unsigned long cpu, unsigned long fpi,
+ unsigned long io, unsigned long ppe)
{
cpu_clk_generic[0].rate = cpu;
cpu_clk_generic[1].rate = fpi;
cpu_clk_generic[2].rate = io;
+ cpu_clk_generic[3].rate = ppe;
}
struct clk *clk_get_cpu(void)
@@ -51,6 +53,12 @@ struct clk *clk_get_io(void)
return &cpu_clk_generic[2];
}
+struct clk *clk_get_ppe(void)
+{
+ return &cpu_clk_generic[3];
+}
+EXPORT_SYMBOL_GPL(clk_get_ppe);
+
static inline int clk_good(struct clk *clk)
{
return clk && !IS_ERR(clk);
@@ -145,9 +153,9 @@ static inline u32 get_counter_resolution(void)
u32 res;
__asm__ __volatile__(
- ".set push\n"
- ".set mips32r2\n"
- "rdhwr %0, $3\n"
+ ".set push\n"
+ ".set mips32r2\n"
+ "rdhwr %0, $3\n"
".set pop\n"
: "=&r" (res)
: /* no input */
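
With the fourth static clock registered above, consumers can read the PPE rate through the new accessor. A hedged usage sketch (the declaration normally comes from the SoC header, assumed here; the static lantiq clocks are plain struct clk pointers, never ERR_PTR values):

#include <linux/clk.h>
#include <linux/kernel.h>

extern struct clk *clk_get_ppe(void);	/* exported by clk.c above */

static void demo_report_ppe_rate(void)
{
	struct clk *ppe = clk_get_ppe();

	pr_info("PPE clock: %lu Hz\n", clk_get_rate(ppe));
}
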
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index fa670602b91..77e4bdb1fe8 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -27,12 +27,15 @@
#define CLOCK_167M 166666667
#define CLOCK_196_608M 196608000
#define CLOCK_200M 200000000
+#define CLOCK_222M 222000000
+#define CLOCK_240M 240000000
#define CLOCK_250M 250000000
#define CLOCK_266M 266666666
#define CLOCK_300M 300000000
#define CLOCK_333M 333333333
#define CLOCK_393M 393215332
#define CLOCK_400M 400000000
+#define CLOCK_450M 450000000
#define CLOCK_500M 500000000
#define CLOCK_600M 600000000
@@ -64,15 +67,17 @@ struct clk {
};
extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
- unsigned long io);
+ unsigned long io, unsigned long ppe);
extern unsigned long ltq_danube_cpu_hz(void);
extern unsigned long ltq_danube_fpi_hz(void);
+extern unsigned long ltq_danube_pp32_hz(void);
extern unsigned long ltq_ar9_cpu_hz(void);
extern unsigned long ltq_ar9_fpi_hz(void);
extern unsigned long ltq_vr9_cpu_hz(void);
extern unsigned long ltq_vr9_fpi_hz(void);
+extern unsigned long ltq_vr9_pp32_hz(void);
#endif
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
index 3a4520f009c..d4c59e00370 100644
--- a/arch/mips/lantiq/dts/danube.dtsi
+++ b/arch/mips/lantiq/dts/danube.dtsi
@@ -97,7 +97,7 @@
compatible = "lantiq,pci-xway";
bus-range = <0x0 0x0>;
ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
- 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
+ 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
reg = <0x7000000 0x8000 /* config space */
0xE105400 0x400>; /* pci bridge */
};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
index 68c17310bc8..fac1f5b178e 100644
--- a/arch/mips/lantiq/dts/easy50712.dts
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -103,7 +103,7 @@
lantiq,bus-clock = <33333333>;
interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
interrupt-map = <
- 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
+ 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
>;
gpios-reset = <&gpio 21 0>;
req-mask = <0x1>; /* GNT1 */
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 2d4ced332b3..ff4894a833e 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -241,9 +241,9 @@ void __init ltq_soc_init(void)
/* get our 3 static rates for cpu, fpi and io clocks */
if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
- clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
+ clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M, 0);
else
- clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
+ clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M, 0);
/* add our clock domains */
clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index a7935bf0fec..51194875f15 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -33,17 +33,10 @@
/* register definitions - external irqs */
#define LTQ_EIU_EXIN_C 0x0000
#define LTQ_EIU_EXIN_INIC 0x0004
+#define LTQ_EIU_EXIN_INC 0x0008
#define LTQ_EIU_EXIN_INEN 0x000C
-/* irq numbers used by the external interrupt unit (EIU) */
-#define LTQ_EIU_IR0 (INT_NUM_IM4_IRL0 + 30)
-#define LTQ_EIU_IR1 (INT_NUM_IM3_IRL0 + 31)
-#define LTQ_EIU_IR2 (INT_NUM_IM1_IRL0 + 26)
-#define LTQ_EIU_IR3 INT_NUM_IM1_IRL0
-#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1)
-#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2)
-#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30)
-#define XWAY_EXIN_COUNT 3
+/* number of external interrupts */
#define MAX_EIU 6
/* the performance counter */
@@ -72,20 +65,19 @@
int gic_present;
#endif
-static unsigned short ltq_eiu_irq[MAX_EIU] = {
- LTQ_EIU_IR0,
- LTQ_EIU_IR1,
- LTQ_EIU_IR2,
- LTQ_EIU_IR3,
- LTQ_EIU_IR4,
- LTQ_EIU_IR5,
-};
-
static int exin_avail;
+static struct resource ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
+int ltq_eiu_get_irq(int exin)
+{
+ if (exin < exin_avail)
+ return ltq_eiu_irq[exin].start;
+ return -1;
+}
+
void ltq_disable_irq(struct irq_data *d)
{
u32 ier = LTQ_ICU_IM0_IER;
@@ -128,19 +120,65 @@ void ltq_enable_irq(struct irq_data *d)
ltq_icu_w32(im, ltq_icu_r32(im, ier) | BIT(offset), ier);
}
+static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < MAX_EIU; i++) {
+ if (d->hwirq == ltq_eiu_irq[i].start) {
+ int val = 0;
+ int edge = 0;
+
+ switch (type) {
+ case IRQF_TRIGGER_NONE:
+ break;
+ case IRQF_TRIGGER_RISING:
+ val = 1;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ val = 2;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING:
+ val = 3;
+ edge = 1;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ val = 5;
+ break;
+ case IRQF_TRIGGER_LOW:
+ val = 6;
+ break;
+ default:
+ pr_err("invalid type %d for irq %ld\n",
+ type, d->hwirq);
+ return -EINVAL;
+ }
+
+ if (edge)
+ irq_set_handler(d->hwirq, handle_edge_irq);
+
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
+ (val << (i * 4)), LTQ_EIU_EXIN_C);
+ }
+ }
+
+ return 0;
+}
+
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
int i;
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i]) {
- /* low level - we should really handle set_type */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
- (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
+ if (d->hwirq == ltq_eiu_irq[i].start) {
+ /* by default we are low level triggered */
+ ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
/* clear all pending */
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
- LTQ_EIU_EXIN_INIC);
+ ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INC) & ~BIT(i),
+ LTQ_EIU_EXIN_INC);
/* enable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -157,7 +195,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i]) {
+ if (d->hwirq == ltq_eiu_irq[i].start) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -186,6 +224,7 @@ static struct irq_chip ltq_eiu_type = {
.irq_ack = ltq_ack_irq,
.irq_mask = ltq_disable_irq,
.irq_mask_ack = ltq_mask_and_ack_irq,
+ .irq_set_type = ltq_eiu_settype,
};
static void ltq_hw_irqdispatch(int module)
@@ -301,7 +340,7 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
return 0;
for (i = 0; i < exin_avail; i++)
- if (hw == ltq_eiu_irq[i])
+ if (hw == ltq_eiu_irq[i].start)
chip = &ltq_eiu_type;
irq_set_chip_and_handler(hw, chip, handle_level_irq);
@@ -323,7 +362,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
{
struct device_node *eiu_node;
struct resource res;
- int i;
+ int i, ret;
for (i = 0; i < MAX_IM; i++) {
if (of_address_to_resource(node, i, &res))
@@ -340,17 +379,19 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
}
/* the external interrupts are optional and xway only */
- eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
+ eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
/* find out how many external irq sources we have */
- const __be32 *count = of_get_property(node,
- "lantiq,count", NULL);
+ exin_avail = of_irq_count(eiu_node);
- if (count)
- exin_avail = *count;
if (exin_avail > MAX_EIU)
exin_avail = MAX_EIU;
+ ret = of_irq_to_resource_table(eiu_node,
+ ltq_eiu_irq, exin_avail);
+ if (ret != exin_avail)
+ panic("failed to load external irq resources\n");
+
if (request_mem_region(res.start, resource_size(&res),
res.name) < 0)
pr_err("Failed to request eiu memory");
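
Because the EIU chip now implements .irq_set_type, a driver can request one of the external lines with an explicit trigger instead of relying on the old hard-coded low-level setting. A hedged sketch (ltq_eiu_get_irq() is the helper added above; its declaration is assumed to live in the SoC header):

#include <linux/interrupt.h>

static irqreturn_t demo_exin_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_request_exin0(void)
{
	int irq = ltq_eiu_get_irq(0);

	if (irq < 0)
		return -ENODEV;

	/* the trigger flag reaches ltq_eiu_settype() via .irq_set_type */
	return request_irq(irq, demo_exin_handler, IRQF_TRIGGER_FALLING,
			   "demo-exin0", NULL);
}
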
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index a3fa1a2bfaa..8e07b5f28ef 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -10,7 +10,7 @@
#define _LTQ_PROM_H__
#define LTQ_SYS_TYPE_LEN 0x100
-#define LTQ_SYS_REV_LEN 0x10
+#define LTQ_SYS_REV_LEN 0x10
struct ltq_soc_info {
unsigned char *name;
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
index 9aa17f79a74..1ab576dc9bd 100644
--- a/arch/mips/lantiq/xway/clk.c
+++ b/arch/mips/lantiq/xway/clk.c
@@ -53,6 +53,29 @@ unsigned long ltq_danube_cpu_hz(void)
}
}
+unsigned long ltq_danube_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 7) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_240M;
+ break;
+ case 2:
+ clk = CLOCK_222M;
+ break;
+ case 3:
+ clk = CLOCK_133M;
+ break;
+ default:
+ clk = CLOCK_266M;
+ break;
+ }
+
+ return clk;
+}
+
unsigned long ltq_ar9_sys_hz(void)
{
if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
@@ -149,3 +172,23 @@ unsigned long ltq_vr9_fpi_hz(void)
return clk;
}
+
+unsigned long ltq_vr9_pp32_hz(void)
+{
+ unsigned int clksys = (ltq_cgu_r32(CGU_SYS) >> 16) & 3;
+ unsigned long clk;
+
+ switch (clksys) {
+ case 1:
+ clk = CLOCK_450M;
+ break;
+ case 2:
+ clk = CLOCK_300M;
+ break;
+ default:
+ clk = CLOCK_500M;
+ break;
+ }
+
+ return clk;
+}
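
Both PP32 helpers above decode a two-bit CGU_SYS field (bits 8:7 on Danube, 17:16 on VR9) into one of a few fixed rates. An equivalent lookup-table formulation of the Danube variant, shown only to make the mapping explicit (same CLOCK_* constants and ltq_cgu_r32() as above):

static const unsigned long danube_pp32_rates[4] = {
	CLOCK_266M,	/* 0 - default */
	CLOCK_240M,	/* 1 */
	CLOCK_222M,	/* 2 */
	CLOCK_133M,	/* 3 */
};

static unsigned long demo_danube_pp32_hz(void)
{
	return danube_pp32_rates[(ltq_cgu_r32(CGU_SYS) >> 7) & 3];
}
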
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 544dbb7fb42..1fa0f175357 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -78,10 +78,19 @@ static struct ltq_xrx200_gphy_reset {
/* reset and boot a gphy. these phys only exist on xrx200 SoC */
int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr)
{
+ struct clk *clk;
+
if (!of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200")) {
dev_err(dev, "this SoC has no GPHY\n");
return -EINVAL;
}
+
+ clk = clk_get_sys("1f203000.rcu", "gphy");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_enable(clk);
+
if (id > 1) {
dev_err(dev, "%u is an invalid gphy id\n", id);
return -EINVAL;
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3925e6609ac..c24924fe087 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -305,7 +305,7 @@ void __init ltq_soc_init(void)
/* check if all the core register ranges are available */
if (!np_pmu || !np_cgu || !np_ebu)
- panic("Failed to load core nodess from devicetree");
+ panic("Failed to load core nodes from devicetree");
if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
of_address_to_resource(np_cgu, 0, &res_cgu) ||
@@ -356,14 +356,16 @@ void __init ltq_soc_init(void)
if (of_machine_is_compatible("lantiq,ase")) {
if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
- clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_static(CLOCK_266M, CLOCK_133M,
+ CLOCK_133M, CLOCK_266M);
else
- clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
+ clkdev_add_static(CLOCK_133M, CLOCK_133M,
+ CLOCK_133M, CLOCK_133M);
clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY),
clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
} else if (of_machine_is_compatible("lantiq,vr9")) {
clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
- ltq_vr9_fpi_hz());
+ ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
@@ -374,12 +376,13 @@ void __init ltq_soc_init(void)
PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
PMU_PPE_QSB | PMU_PPE_TOP);
+ clkdev_add_pmu("1f203000.rcu", "gphy", 0, PMU_GPHY);
} else if (of_machine_is_compatible("lantiq,ar9")) {
clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
- ltq_ar9_fpi_hz());
+ ltq_ar9_fpi_hz(), CLOCK_250M);
clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
} else {
clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
- ltq_danube_fpi_hz());
+ ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
}
}
diff --git a/arch/mips/lasat/Makefile b/arch/mips/lasat/Makefile
index 9cc4e4db8b9..869bd3b37c1 100644
--- a/arch/mips/lasat/Makefile
+++ b/arch/mips/lasat/Makefile
@@ -2,7 +2,7 @@
# Makefile for the LASAT specific kernel interface routines under Linux.
#
-obj-y += reset.o setup.o prom.o lasat_board.o \
+obj-y += reset.o setup.o prom.o lasat_board.o \
at93c.o interrupt.o serial.o
obj-$(CONFIG_LASAT_SYSCTL) += sysctl.o
diff --git a/arch/mips/lasat/ds1603.h b/arch/mips/lasat/ds1603.h
index 2da3704044f..3e718b1cca0 100644
--- a/arch/mips/lasat/ds1603.h
+++ b/arch/mips/lasat/ds1603.h
@@ -25,7 +25,7 @@ void ds1603_enable(void);
void ds1603_disable(void);
void ds1603_init(struct ds_defs *);
-#define TRIMMER_DEFAULT 3
+#define TRIMMER_DEFAULT 3
#define TRIMMER_DISABLE_RTC 0
#endif
diff --git a/arch/mips/lasat/image/Makefile b/arch/mips/lasat/image/Makefile
index 460626b6d62..dfb509d21d8 100644
--- a/arch/mips/lasat/image/Makefile
+++ b/arch/mips/lasat/image/Makefile
@@ -28,7 +28,7 @@ $(obj)/head.o: $(obj)/head.S $(KERNEL_IMAGE)
OBJECTS = head.o kImage.o
-rom.sw: $(obj)/rom.sw
+rom.sw: $(obj)/rom.sw
rom.bin: $(obj)/rom.bin
$(obj)/rom.sw: $(obj)/rom.bin
diff --git a/arch/mips/lasat/image/head.S b/arch/mips/lasat/image/head.S
index e0ecda92c40..41babbe43a8 100644
--- a/arch/mips/lasat/image/head.S
+++ b/arch/mips/lasat/image/head.S
@@ -7,7 +7,7 @@
/* Magic words identifying a software image */
.word LASAT_K_MAGIC0_VAL
- .word LASAT_K_MAGIC1_VAL
+ .word LASAT_K_MAGIC1_VAL
/* Image header version */
.word 0x00000002
diff --git a/arch/mips/lasat/picvue.c b/arch/mips/lasat/picvue.c
index d3d04c392e2..7eb33489269 100644
--- a/arch/mips/lasat/picvue.c
+++ b/arch/mips/lasat/picvue.c
@@ -163,12 +163,12 @@ int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE])
}
#define FUNC_SET_CMD 0x20
-#define EIGHT_BYTE (1 << 4)
-#define FOUR_BYTE 0
-#define TWO_LINES (1 << 3)
-#define ONE_LINE 0
-#define LARGE_FONT (1 << 2)
-#define SMALL_FONT 0
+#define EIGHT_BYTE (1 << 4)
+#define FOUR_BYTE 0
+#define TWO_LINES (1 << 3)
+#define ONE_LINE 0
+#define LARGE_FONT (1 << 2)
+#define SMALL_FONT 0
static void pvc_funcset(u8 cmd)
{
@@ -177,9 +177,9 @@ static void pvc_funcset(u8 cmd)
}
#define ENTRYMODE_CMD 0x4
-#define AUTO_INC (1 << 1)
-#define AUTO_DEC 0
-#define CURSOR_FOLLOWS_DISP (1 << 0)
+#define AUTO_INC (1 << 1)
+#define AUTO_DEC 0
+#define CURSOR_FOLLOWS_DISP (1 << 0)
static void pvc_entrymode(u8 cmd)
{
@@ -188,20 +188,20 @@ static void pvc_entrymode(u8 cmd)
}
#define DISP_CNT_CMD 0x08
-#define DISP_OFF 0
-#define DISP_ON (1 << 2)
-#define CUR_ON (1 << 1)
-#define CUR_BLINK (1 << 0)
+#define DISP_OFF 0
+#define DISP_ON (1 << 2)
+#define CUR_ON (1 << 1)
+#define CUR_BLINK (1 << 0)
void pvc_dispcnt(u8 cmd)
{
pvc_write(DISP_CNT_CMD | (cmd & (DISP_ON|CUR_ON|CUR_BLINK)), MODE_INST);
}
#define MOVE_CMD 0x10
-#define DISPLAY (1 << 3)
-#define CURSOR 0
-#define RIGHT (1 << 2)
-#define LEFT 0
+#define DISPLAY (1 << 3)
+#define CURSOR 0
+#define RIGHT (1 << 2)
+#define LEFT 0
void pvc_move(u8 cmd)
{
pvc_write(MOVE_CMD | (cmd & (DISPLAY|RIGHT)), MODE_INST);
diff --git a/arch/mips/lasat/picvue.h b/arch/mips/lasat/picvue.h
index 2f0757738fd..d0119fca386 100644
--- a/arch/mips/lasat/picvue.h
+++ b/arch/mips/lasat/picvue.h
@@ -29,16 +29,16 @@ void pvc_dump_string(const unsigned char *str);
int pvc_program_cg(int charnum, u8 bitmap[BM_SIZE]);
void pvc_dispcnt(u8 cmd);
-#define DISP_OFF 0
-#define DISP_ON (1 << 2)
-#define CUR_ON (1 << 1)
-#define CUR_BLINK (1 << 0)
+#define DISP_OFF 0
+#define DISP_ON (1 << 2)
+#define CUR_ON (1 << 1)
+#define CUR_BLINK (1 << 0)
void pvc_move(u8 cmd);
-#define DISPLAY (1 << 3)
-#define CURSOR 0
-#define RIGHT (1 << 2)
-#define LEFT 0
+#define DISPLAY (1 << 3)
+#define CURSOR 0
+#define RIGHT (1 << 2)
+#define LEFT 0
void pvc_clear(void);
void pvc_home(void);
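
The realigned display-control masks above (the layout matches the common HD44780 command set) are meant to be or-ed together by callers. A hypothetical caller, just to show how the bits compose:

	pvc_dispcnt(DISP_ON | CUR_ON | CUR_BLINK);	/* display on, blinking cursor */
	pvc_move(DISPLAY | RIGHT);			/* shift the whole display right */
	pvc_move(CURSOR | LEFT);			/* move only the cursor left */
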
diff --git a/arch/mips/lasat/serial.c b/arch/mips/lasat/serial.c
index 5bcb6e89ab7..2e5fbed8120 100644
--- a/arch/mips/lasat/serial.c
+++ b/arch/mips/lasat/serial.c
@@ -1,7 +1,7 @@
/*
* Registration of Lasat UART platform device.
*
- * Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
+ * Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c
index d87ffd04cb0..f27694fb2ad 100644
--- a/arch/mips/lasat/sysctl.c
+++ b/arch/mips/lasat/sysctl.c
@@ -134,8 +134,8 @@ int proc_lasat_ip(ctl_table *table, int write,
} else {
ip = *(unsigned int *)(table->data);
sprintf(ipbuf, "%d.%d.%d.%d",
- (ip) & 0xff,
- (ip >> 8) & 0xff,
+ (ip) & 0xff,
+ (ip >> 8) & 0xff,
(ip >> 16) & 0xff,
(ip >> 24) & 0xff);
len = strlen(ipbuf);
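
The sprintf() above turns a u32 into a dotted quad with the lowest byte as the first octet. A tiny stand-alone illustration of the same extraction:

#include <stdio.h>
#include <stdint.h>

static void print_ip(uint32_t ip)
{
	/* same byte extraction as the sysctl handler above */
	printf("%u.%u.%u.%u\n",
	       ip & 0xff, (ip >> 8) & 0xff,
	       (ip >> 16) & 0xff, (ip >> 24) & 0xff);
}

int main(void)
{
	print_ip(0x0100007f);	/* prints 127.0.0.1 */
	return 0;
}
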
diff --git a/arch/mips/lib/bitops.c b/arch/mips/lib/bitops.c
index 239a9c957b0..81f1dcfdcab 100644
--- a/arch/mips/lib/bitops.c
+++ b/arch/mips/lib/bitops.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(__mips_clear_bit);
/**
- * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
+ * __mips_change_bit - Toggle a bit in memory. This is called by change_bit()
* if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to start counting from
@@ -155,7 +155,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit);
/**
- * __mips_test_and_change_bit - Change a bit and return its old value. This is
+ * __mips_test_and_change_bit - Change a bit and return its old value. This is
* called by test_and_change_bit() if it cannot find a faster solution.
* @nr: Bit to change
* @addr: Address to count from
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 6b876ca299e..507147aebd4 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -67,8 +67,8 @@
#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
LOAD _t0, (offset + UNIT(0))(src); \
LOAD _t1, (offset + UNIT(1))(src); \
- LOAD _t2, (offset + UNIT(2))(src); \
- LOAD _t3, (offset + UNIT(3))(src); \
+ LOAD _t2, (offset + UNIT(2))(src); \
+ LOAD _t3, (offset + UNIT(3))(src); \
ADDC(sum, _t0); \
ADDC(sum, _t1); \
ADDC(sum, _t2); \
@@ -285,7 +285,7 @@ LEAF(csum_partial)
1:
#endif
.set reorder
- /* Add the passed partial csum. */
+ /* Add the passed partial csum. */
ADDC32(sum, a2)
jr ra
.set noreorder
@@ -298,7 +298,7 @@ LEAF(csum_partial)
* csum_partial_copy_nocheck(src, dst, len, sum)
* __csum_partial_copy_user(src, dst, len, sum, errp)
*
- * See "Spec" in memcpy.S for details. Unlike __copy_user, all
+ * See "Spec" in memcpy.S for details. Unlike __copy_user, all
* function in this file use the standard calling convention.
*/
@@ -371,16 +371,16 @@ LEAF(csum_partial)
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif
@@ -430,7 +430,7 @@ FEXPORT(csum_partial_copy_nocheck)
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
nop
SUB len, 8*NBYTES # subtract here for bgez loop
@@ -518,7 +518,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -532,7 +532,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
SHIFT_DISCARD_REVERT t0, t0, bits
@@ -551,7 +551,7 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -568,9 +568,9 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -578,13 +578,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
ADD src, src, 4*NBYTES
@@ -634,7 +634,7 @@ EXC( STORE t0, 0(dst), .Ls_exc)
#define SHIFT_INC -8
#endif
move t2, zero # partial word
- li t3, SHIFT_START # shift
+ li t3, SHIFT_START # shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N) \
EXC( lbu t0, N(src), .Ll_exc_copy); \
@@ -642,7 +642,7 @@ EXC( lbu t0, N(src), .Ll_exc_copy); \
EXC( sb t0, N(dst), .Ls_exc); \
SLLV t0, t0, t3; \
addu t3, SHIFT_INC; \
- beqz len, .Lcopy_bytes_done; \
+ beqz len, .Lcopy_bytes_done; \
or t2, t0
COPY_BYTE(0)
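
The checksum code above accumulates word-sized chunks with the ADDC() macro, which performs a ones'-complement add: any carry out of the top bit is folded back into the sum. A C sketch of that single step, under the assumption that ADDC() behaves as in the mainline implementation:

#include <stdint.h>

static inline uint32_t addc(uint32_t sum, uint32_t word)
{
	sum += word;
	if (sum < word)		/* unsigned overflow means a carry popped out */
		sum += 1;	/* fold it back in (end-around carry) */
	return sum;
}
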
diff --git a/arch/mips/lib/delay.c b/arch/mips/lib/delay.c
index 288f7954988..44713af15a6 100644
--- a/arch/mips/lib/delay.c
+++ b/arch/mips/lib/delay.c
@@ -36,7 +36,7 @@ EXPORT_SYMBOL(__delay);
* Division by multiplication: you don't have to worry about
* loss of precision.
*
- * Use only for very small delays ( < 1 msec). Should probably use a
+ * Use only for very small delays ( < 1 msec). Should probably use a
* lookup table, really, as the multiplications take much too long with
* short delays. This is a "reasonable" implementation, though (and the
* first constant multiplications gets optimized away if the delay is
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index a99c1d3fc56..32b9f21bfd8 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -63,7 +63,7 @@ static void dump_tlb(int first, int last)
tlb_read();
BARRIER();
pagemask = read_c0_pagemask();
- entryhi = read_c0_entryhi();
+ entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 65192c06781..c5c40dad0bb 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -156,15 +156,15 @@
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
@@ -235,7 +235,7 @@ __copy_user_common:
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
PREF( 0, 3*32(src) )
@@ -313,7 +313,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -327,7 +327,7 @@ EXC( STORE t0, 0(dst), .Ls_exc_p1u)
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
jr ra
@@ -343,7 +343,7 @@ EXC( STREST t0, -1(t1), .Ls_exc)
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -357,10 +357,10 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
PREF( 0, 3*32(src) )
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
PREF( 1, 3*32(dst) )
1:
/*
@@ -370,13 +370,13 @@ EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
* are to the same unit (unless src is aligned, but it's not).
*/
R10KCBARRIER(0(ra))
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
@@ -388,7 +388,7 @@ EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
- PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
@@ -502,7 +502,7 @@ EXC( lb t1, 0(src), .Ll_exc)
#define SEXC(n) \
- .set reorder; /* DADDI_WAR */ \
+ .set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u: \
ADD len, len, n*NBYTES; \
jr ra; \
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 606c8a9efe3..053d3b0b031 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -21,8 +21,8 @@
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
- .section __ex_table,"a"; \
- PTR 9b, handler; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
.previous
.macro f_fill64 dst, offset, val, fixup
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 9cee907975a..91615c2ef0c 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -30,7 +30,7 @@ static void dump_tlb(int first, int last)
"tlbr\n\t"
"nop\n\t"
".set\treorder");
- entryhi = read_c0_entryhi();
+ entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
/* Unused entries have a virtual address of KSEG0. */
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 7201b2ff08c..bad53948750 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -23,7 +23,7 @@
/*
* Ugly special case have to check: we might get passed a user space
- * pointer which wraps into the kernel space. We don't deal with that. If
+ * pointer which wraps into the kernel space. We don't deal with that. If
* it happens at most some bytes of the exceptions handlers will be copied.
*/
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 64457162f7e..beea03c8c0c 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -21,9 +21,9 @@
* maximum of a1 or 0 in case of error.
*
* Note: for performance reasons we deliberately accept that a user may
- * make strlen_user and strnlen_user access the first few KSEG0
- * bytes. There's nothing secret there. On 64-bit accessing beyond
- * the maximum is a tad hairier ...
+ * make strlen_user and strnlen_user access the first few KSEG0
+ * bytes. There's nothing secret there. On 64-bit accessing beyond
+ * the maximum is a tad hairier ...
*/
LEAF(__strnlen_user_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
index a6d1c77034d..65e3dfc4e58 100644
--- a/arch/mips/lib/uncached.c
+++ b/arch/mips/lib/uncached.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2005 Thiemo Seufer
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
* Author: Maciej W. Rozycki <macro@mips.com>
*/
diff --git a/arch/mips/loongson/Makefile b/arch/mips/loongson/Makefile
index 2b76cb0fb07..0dc0055754c 100644
--- a/arch/mips/loongson/Makefile
+++ b/arch/mips/loongson/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_MACH_LOONGSON) += common/
# Lemote Fuloong mini-PC (Loongson 2E-based)
#
-obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
+obj-$(CONFIG_LEMOTE_FULOONG2E) += fuloong-2e/
#
# Lemote loongson2f family machines
diff --git a/arch/mips/loongson/common/bonito-irq.c b/arch/mips/loongson/common/bonito-irq.c
index f27d7ccca92..cc0e4fd548e 100644
--- a/arch/mips/loongson/common/bonito-irq.c
+++ b/arch/mips/loongson/common/bonito-irq.c
@@ -6,9 +6,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/interrupt.h>
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 353e1d2e41a..72fed003a53 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -12,8 +12,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_acc.c b/arch/mips/loongson/common/cs5536/cs5536_acc.c
index b3fd5eab654..ab4d6cc5738 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_acc.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_acc.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ehci.c b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
index 5b5cbba699b..ec2e360267a 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ehci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ehci.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ide.c b/arch/mips/loongson/common/cs5536/cs5536_ide.c
index 681d1291a2c..a73414d9ee5 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ide.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ide.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_isa.c b/arch/mips/loongson/common/cs5536/cs5536_isa.c
index 4d9f65abeaf..a6eb2e853d9 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_isa.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_isa.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
index 5d1f48fa1a5..c639b9db001 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_mfgpt.c
@@ -9,9 +9,9 @@
*
* Reference: AMD Geode(TM) CS5536 Companion Device Data Book
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_ohci.c b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
index bdedf512baf..f7c905e50dc 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_ohci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_ohci.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/cs5536/cs5536_pci.c b/arch/mips/loongson/common/cs5536/cs5536_pci.c
index 6dfeab11af0..81bed9d1806 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_pci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_pci.c
@@ -7,8 +7,8 @@
* Copyright (C) 2009 Lemote, Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
diff --git a/arch/mips/loongson/common/early_printk.c b/arch/mips/loongson/common/early_printk.c
index a71736f0044..ced461b3906 100644
--- a/arch/mips/loongson/common/early_printk.c
+++ b/arch/mips/loongson/common/early_printk.c
@@ -4,9 +4,9 @@
* Copyright (c) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/serial_reg.h>
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index d93830ad611..0a18fcf2d37 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -12,8 +12,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
index e8a0ffa935b..21869908aaa 100644
--- a/arch/mips/loongson/common/gpio.c
+++ b/arch/mips/loongson/common/gpio.c
@@ -1,7 +1,7 @@
/*
* STLS2F GPIO Support
*
- * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
+ * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com>
* Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -123,13 +123,13 @@ static void ls2f_gpio_set_value(struct gpio_chip *chip,
}
static struct gpio_chip ls2f_chip = {
- .label = "ls2f",
- .direction_input = ls2f_gpio_direction_input,
- .get = ls2f_gpio_get_value,
- .direction_output = ls2f_gpio_direction_output,
- .set = ls2f_gpio_set_value,
- .base = 0,
- .ngpio = STLS2F_N_GPIO,
+ .label = "ls2f",
+ .direction_input = ls2f_gpio_direction_input,
+ .get = ls2f_gpio_get_value,
+ .direction_output = ls2f_gpio_direction_output,
+ .set = ls2f_gpio_set_value,
+ .base = 0,
+ .ngpio = STLS2F_N_GPIO,
};
static int __init ls2f_gpio_setup(void)
diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c
index 19d34159125..ae7af1fd5d5 100644
--- a/arch/mips/loongson/common/init.c
+++ b/arch/mips/loongson/common/init.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/irq.c b/arch/mips/loongson/common/irq.c
index 5897471dedc..687003b19b4 100644
--- a/arch/mips/loongson/common/irq.c
+++ b/arch/mips/loongson/common/irq.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/delay.h>
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 2efd5d9dee2..4becd4f9ef2 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -4,8 +4,8 @@
*
* Copyright (c) 2009 Zhang Le <r0bertz@gentoo.org>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -19,15 +19,15 @@
#define MACHTYPE_LEN 50
static const char *system_types[] = {
- [MACH_LOONGSON_UNKNOWN] "unknown loongson machine",
- [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box",
- [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box",
- [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches",
- [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches",
- [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
+ [MACH_LOONGSON_UNKNOWN] "unknown loongson machine",
+ [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box",
+ [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box",
+ [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches",
+ [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches",
+ [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f",
[MACH_LEMOTE_NAS] "lemote-nas-2f",
- [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
- [MACH_LOONGSON_END] NULL,
+ [MACH_LEMOTE_LL2F] "lemote-lynloong-2f",
+ [MACH_LOONGSON_END] NULL,
};
const char *get_system_type(void)
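(Editorial aside, not part of the patch: the table above maps the machine-type enum to human-readable names, and the accessor that follows presumably just indexes it with the detected machine id. A minimal sketch along those lines, assuming the usual MIPS bootinfo-style mips_machtype index variable:)

    const char *get_system_type(void)
    {
            /* illustrative only: return the name recorded for the probed machine */
            return system_types[mips_machtype];
    }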
diff --git a/arch/mips/loongson/common/mem.c b/arch/mips/loongson/common/mem.c
index 30eba600120..8626a42f5b9 100644
--- a/arch/mips/loongson/common/mem.c
+++ b/arch/mips/loongson/common/mem.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/pci.c b/arch/mips/loongson/common/pci.c
index 31d8c5ecd16..fa778445972 100644
--- a/arch/mips/loongson/common/pci.c
+++ b/arch/mips/loongson/common/pci.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/pci.h>
@@ -13,25 +13,25 @@
#include <loongson.h>
static struct resource loongson_pci_mem_resource = {
- .name = "pci memory space",
- .start = LOONGSON_PCI_MEM_START,
- .end = LOONGSON_PCI_MEM_END,
- .flags = IORESOURCE_MEM,
+ .name = "pci memory space",
+ .start = LOONGSON_PCI_MEM_START,
+ .end = LOONGSON_PCI_MEM_END,
+ .flags = IORESOURCE_MEM,
};
static struct resource loongson_pci_io_resource = {
- .name = "pci io space",
- .start = LOONGSON_PCI_IO_START,
- .end = IO_SPACE_LIMIT,
- .flags = IORESOURCE_IO,
+ .name = "pci io space",
+ .start = LOONGSON_PCI_IO_START,
+ .end = IO_SPACE_LIMIT,
+ .flags = IORESOURCE_IO,
};
static struct pci_controller loongson_pci_controller = {
- .pci_ops = &loongson_pci_ops,
- .io_resource = &loongson_pci_io_resource,
- .mem_resource = &loongson_pci_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_offset = 0x00000000UL,
+ .pci_ops = &loongson_pci_ops,
+ .io_resource = &loongson_pci_io_resource,
+ .mem_resource = &loongson_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_offset = 0x00000000UL,
};
static void __init setup_pcimap(void)
@@ -42,7 +42,7 @@ static void __init setup_pcimap(void)
* we set pcimap_lo[0,1,2] to map it to pci space[0M,64M], [320M,448M]
*
* pcimap: PCI_MAP2 PCI_Mem_Lo2 PCI_Mem_Lo1 PCI_Mem_Lo0
- * [<2G] [384M,448M] [320M,384M] [0M,64M]
+ * [<2G] [384M,448M] [320M,384M] [0M,64M]
*/
LOONGSON_PCIMAP = LOONGSON_PCIMAP_PCIMAP_2 |
LOONGSON_PCIMAP_WIN(2, LOONGSON_PCILO2_BASE) |
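(Editorial aside, not part of the patch: the resources and pci_controller defined above are the pieces a MIPS board normally hands to the generic PCI layer during setup. A rough sketch of that registration step, assuming the usual arch/mips register_pci_controller() entry point:)

    static int __init loongson_pci_init_sketch(void)
    {
            setup_pcimap();         /* program the CPU-to-PCI address windows first */
            register_pci_controller(&loongson_pci_controller);
            return 0;
    }
    arch_initcall(loongson_pci_init_sketch);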
diff --git a/arch/mips/loongson/common/platform.c b/arch/mips/loongson/common/platform.c
index 502b059de42..0ed38321a9a 100644
--- a/arch/mips/loongson/common/platform.c
+++ b/arch/mips/loongson/common/platform.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c
index 9e10d6225d9..35c8c646849 100644
--- a/arch/mips/loongson/common/reset.c
+++ b/arch/mips/loongson/common/reset.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -26,9 +26,9 @@ static inline void loongson_reboot(void)
func = (void *)ioremap_nocache(LOONGSON_BOOT_BASE, 4);
__asm__ __volatile__(
- " .set noat \n"
- " jr %[func] \n"
- " .set at \n"
+ " .set noat \n"
+ " jr %[func] \n"
+ " .set at \n"
: /* No outputs */
: [func] "r" (func));
#endif
diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c
index 7580873143c..5f2b78ae97c 100644
--- a/arch/mips/loongson/common/serial.c
+++ b/arch/mips/loongson/common/serial.c
@@ -39,15 +39,15 @@
}
static struct plat_serial8250_port uart8250_data[][2] = {
- [MACH_LOONGSON_UNKNOWN] {},
- [MACH_LEMOTE_FL2E] {PORT(4), {} },
- [MACH_LEMOTE_FL2F] {PORT(3), {} },
- [MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
- [MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
- [MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
- [MACH_LEMOTE_NAS] {PORT_M(3), {} },
- [MACH_LEMOTE_LL2F] {PORT(3), {} },
- [MACH_LOONGSON_END] {},
+ [MACH_LOONGSON_UNKNOWN] {},
+ [MACH_LEMOTE_FL2E] {PORT(4), {} },
+ [MACH_LEMOTE_FL2F] {PORT(3), {} },
+ [MACH_LEMOTE_ML2F7] {PORT_M(3), {} },
+ [MACH_LEMOTE_YL2F89] {PORT_M(3), {} },
+ [MACH_DEXXON_GDIUM2F10] {PORT_M(3), {} },
+ [MACH_LEMOTE_NAS] {PORT_M(3), {} },
+ [MACH_LEMOTE_LL2F] {PORT(3), {} },
+ [MACH_LOONGSON_END] {},
};
static struct platform_device uart8250_device = {
diff --git a/arch/mips/loongson/common/setup.c b/arch/mips/loongson/common/setup.c
index 27d826bc710..8223f8acfd5 100644
--- a/arch/mips/loongson/common/setup.c
+++ b/arch/mips/loongson/common/setup.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
diff --git a/arch/mips/loongson/common/time.c b/arch/mips/loongson/common/time.c
index 9fdd01f6c56..262a1f65b05 100644
--- a/arch/mips/loongson/common/time.c
+++ b/arch/mips/loongson/common/time.c
@@ -5,9 +5,9 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <asm/mc146818-time.h>
diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c
index d69ea54bc3d..e192ad021ed 100644
--- a/arch/mips/loongson/common/uart_base.c
+++ b/arch/mips/loongson/common/uart_base.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c
index 3cf1fef29f0..ef5ec8f3de5 100644
--- a/arch/mips/loongson/fuloong-2e/irq.c
+++ b/arch/mips/loongson/fuloong-2e/irq.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc. & Insititute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/interrupt.h>
@@ -48,9 +48,9 @@ static struct irqaction cascade_irqaction = {
void __init mach_init_irq(void)
{
/* init all controller
- * 0-15 ------> i8259 interrupt
- * 16-23 ------> mips cpu interrupt
- * 32-63 ------> bonito irq
+ * 0-15 ------> i8259 interrupt
+ * 16-23 ------> mips cpu interrupt
+ * 32-63 ------> bonito irq
*/
/* most bonito irq should be level triggered */
diff --git a/arch/mips/loongson/fuloong-2e/reset.c b/arch/mips/loongson/fuloong-2e/reset.c
index bc39ec62c8c..da4d2ae2a1f 100644
--- a/arch/mips/loongson/fuloong-2e/reset.c
+++ b/arch/mips/loongson/fuloong-2e/reset.c
@@ -4,8 +4,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson/lemote-2f/ec_kb3310b.h b/arch/mips/loongson/lemote-2f/ec_kb3310b.h
index 1595a21b315..5a3f1860d4d 100644
--- a/arch/mips/loongson/lemote-2f/ec_kb3310b.h
+++ b/arch/mips/loongson/lemote-2f/ec_kb3310b.h
@@ -30,141 +30,141 @@ extern sci_handler yeeloong_report_lid_status;
* 2, fill the PORT_LOW as EC register low part.
* 3, fill the PORT_DATA as EC register write data or get the data from it.
*/
-#define EC_IO_PORT_HIGH 0x0381
-#define EC_IO_PORT_LOW 0x0382
-#define EC_IO_PORT_DATA 0x0383
+#define EC_IO_PORT_HIGH 0x0381
+#define EC_IO_PORT_LOW 0x0382
+#define EC_IO_PORT_DATA 0x0383
/*
* EC delay time is 500us for register and status access
*/
-#define EC_REG_DELAY 500 /* unit : us */
-#define EC_CMD_TIMEOUT 0x1000
+#define EC_REG_DELAY 500 /* unit : us */
+#define EC_CMD_TIMEOUT 0x1000
/*
* EC access port for SCI communication
*/
-#define EC_CMD_PORT 0x66
-#define EC_STS_PORT 0x66
-#define EC_DAT_PORT 0x62
-#define CMD_INIT_IDLE_MODE 0xdd
-#define CMD_EXIT_IDLE_MODE 0xdf
-#define CMD_INIT_RESET_MODE 0xd8
-#define CMD_REBOOT_SYSTEM 0x8c
-#define CMD_GET_EVENT_NUM 0x84
-#define CMD_PROGRAM_PIECE 0xda
+#define EC_CMD_PORT 0x66
+#define EC_STS_PORT 0x66
+#define EC_DAT_PORT 0x62
+#define CMD_INIT_IDLE_MODE 0xdd
+#define CMD_EXIT_IDLE_MODE 0xdf
+#define CMD_INIT_RESET_MODE 0xd8
+#define CMD_REBOOT_SYSTEM 0x8c
+#define CMD_GET_EVENT_NUM 0x84
+#define CMD_PROGRAM_PIECE 0xda
/* temperature & fan registers */
-#define REG_TEMPERATURE_VALUE 0xF458
-#define REG_FAN_AUTO_MAN_SWITCH 0xF459
-#define BIT_FAN_AUTO 0
-#define BIT_FAN_MANUAL 1
-#define REG_FAN_CONTROL 0xF4D2
-#define BIT_FAN_CONTROL_ON (1 << 0)
-#define BIT_FAN_CONTROL_OFF (0 << 0)
-#define REG_FAN_STATUS 0xF4DA
-#define BIT_FAN_STATUS_ON (1 << 0)
-#define BIT_FAN_STATUS_OFF (0 << 0)
-#define REG_FAN_SPEED_HIGH 0xFE22
-#define REG_FAN_SPEED_LOW 0xFE23
-#define REG_FAN_SPEED_LEVEL 0xF4CC
+#define REG_TEMPERATURE_VALUE 0xF458
+#define REG_FAN_AUTO_MAN_SWITCH 0xF459
+#define BIT_FAN_AUTO 0
+#define BIT_FAN_MANUAL 1
+#define REG_FAN_CONTROL 0xF4D2
+#define BIT_FAN_CONTROL_ON (1 << 0)
+#define BIT_FAN_CONTROL_OFF (0 << 0)
+#define REG_FAN_STATUS 0xF4DA
+#define BIT_FAN_STATUS_ON (1 << 0)
+#define BIT_FAN_STATUS_OFF (0 << 0)
+#define REG_FAN_SPEED_HIGH 0xFE22
+#define REG_FAN_SPEED_LOW 0xFE23
+#define REG_FAN_SPEED_LEVEL 0xF4CC
/* fan speed divider */
-#define FAN_SPEED_DIVIDER 480000 /* (60*1000*1000/62.5/2)*/
+#define FAN_SPEED_DIVIDER 480000 /* (60*1000*1000/62.5/2)*/
/* battery registers */
-#define REG_BAT_DESIGN_CAP_HIGH 0xF77D
-#define REG_BAT_DESIGN_CAP_LOW 0xF77E
-#define REG_BAT_FULLCHG_CAP_HIGH 0xF780
-#define REG_BAT_FULLCHG_CAP_LOW 0xF781
-#define REG_BAT_DESIGN_VOL_HIGH 0xF782
-#define REG_BAT_DESIGN_VOL_LOW 0xF783
-#define REG_BAT_CURRENT_HIGH 0xF784
-#define REG_BAT_CURRENT_LOW 0xF785
-#define REG_BAT_VOLTAGE_HIGH 0xF786
-#define REG_BAT_VOLTAGE_LOW 0xF787
-#define REG_BAT_TEMPERATURE_HIGH 0xF788
-#define REG_BAT_TEMPERATURE_LOW 0xF789
-#define REG_BAT_RELATIVE_CAP_HIGH 0xF492
-#define REG_BAT_RELATIVE_CAP_LOW 0xF493
-#define REG_BAT_VENDOR 0xF4C4
-#define FLAG_BAT_VENDOR_SANYO 0x01
-#define FLAG_BAT_VENDOR_SIMPLO 0x02
-#define REG_BAT_CELL_COUNT 0xF4C6
-#define FLAG_BAT_CELL_3S1P 0x03
-#define FLAG_BAT_CELL_3S2P 0x06
-#define REG_BAT_CHARGE 0xF4A2
-#define FLAG_BAT_CHARGE_DISCHARGE 0x01
-#define FLAG_BAT_CHARGE_CHARGE 0x02
-#define FLAG_BAT_CHARGE_ACPOWER 0x00
-#define REG_BAT_STATUS 0xF4B0
-#define BIT_BAT_STATUS_LOW (1 << 5)
-#define BIT_BAT_STATUS_DESTROY (1 << 2)
-#define BIT_BAT_STATUS_FULL (1 << 1)
-#define BIT_BAT_STATUS_IN (1 << 0)
-#define REG_BAT_CHARGE_STATUS 0xF4B1
-#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
-#define BIT_BAT_CHARGE_STATUS_PRECHG (1 << 1)
-#define REG_BAT_STATE 0xF482
-#define BIT_BAT_STATE_CHARGING (1 << 1)
-#define BIT_BAT_STATE_DISCHARGING (1 << 0)
-#define REG_BAT_POWER 0xF440
-#define BIT_BAT_POWER_S3 (1 << 2)
-#define BIT_BAT_POWER_ON (1 << 1)
-#define BIT_BAT_POWER_ACIN (1 << 0)
+#define REG_BAT_DESIGN_CAP_HIGH 0xF77D
+#define REG_BAT_DESIGN_CAP_LOW 0xF77E
+#define REG_BAT_FULLCHG_CAP_HIGH 0xF780
+#define REG_BAT_FULLCHG_CAP_LOW 0xF781
+#define REG_BAT_DESIGN_VOL_HIGH 0xF782
+#define REG_BAT_DESIGN_VOL_LOW 0xF783
+#define REG_BAT_CURRENT_HIGH 0xF784
+#define REG_BAT_CURRENT_LOW 0xF785
+#define REG_BAT_VOLTAGE_HIGH 0xF786
+#define REG_BAT_VOLTAGE_LOW 0xF787
+#define REG_BAT_TEMPERATURE_HIGH 0xF788
+#define REG_BAT_TEMPERATURE_LOW 0xF789
+#define REG_BAT_RELATIVE_CAP_HIGH 0xF492
+#define REG_BAT_RELATIVE_CAP_LOW 0xF493
+#define REG_BAT_VENDOR 0xF4C4
+#define FLAG_BAT_VENDOR_SANYO 0x01
+#define FLAG_BAT_VENDOR_SIMPLO 0x02
+#define REG_BAT_CELL_COUNT 0xF4C6
+#define FLAG_BAT_CELL_3S1P 0x03
+#define FLAG_BAT_CELL_3S2P 0x06
+#define REG_BAT_CHARGE 0xF4A2
+#define FLAG_BAT_CHARGE_DISCHARGE 0x01
+#define FLAG_BAT_CHARGE_CHARGE 0x02
+#define FLAG_BAT_CHARGE_ACPOWER 0x00
+#define REG_BAT_STATUS 0xF4B0
+#define BIT_BAT_STATUS_LOW (1 << 5)
+#define BIT_BAT_STATUS_DESTROY (1 << 2)
+#define BIT_BAT_STATUS_FULL (1 << 1)
+#define BIT_BAT_STATUS_IN (1 << 0)
+#define REG_BAT_CHARGE_STATUS 0xF4B1
+#define BIT_BAT_CHARGE_STATUS_OVERTEMP (1 << 2)
+#define BIT_BAT_CHARGE_STATUS_PRECHG (1 << 1)
+#define REG_BAT_STATE 0xF482
+#define BIT_BAT_STATE_CHARGING (1 << 1)
+#define BIT_BAT_STATE_DISCHARGING (1 << 0)
+#define REG_BAT_POWER 0xF440
+#define BIT_BAT_POWER_S3 (1 << 2)
+#define BIT_BAT_POWER_ON (1 << 1)
+#define BIT_BAT_POWER_ACIN (1 << 0)
/* other registers */
/* Audio: rd/wr */
-#define REG_AUDIO_VOLUME 0xF46C
-#define REG_AUDIO_MUTE 0xF4E7
-#define REG_AUDIO_BEEP 0xF4D0
+#define REG_AUDIO_VOLUME 0xF46C
+#define REG_AUDIO_MUTE 0xF4E7
+#define REG_AUDIO_BEEP 0xF4D0
/* USB port power or not: rd/wr */
-#define REG_USB0_FLAG 0xF461
-#define REG_USB1_FLAG 0xF462
-#define REG_USB2_FLAG 0xF463
-#define BIT_USB_FLAG_ON 1
-#define BIT_USB_FLAG_OFF 0
+#define REG_USB0_FLAG 0xF461
+#define REG_USB1_FLAG 0xF462
+#define REG_USB2_FLAG 0xF463
+#define BIT_USB_FLAG_ON 1
+#define BIT_USB_FLAG_OFF 0
/* LID */
-#define REG_LID_DETECT 0xF4BD
-#define BIT_LID_DETECT_ON 1
-#define BIT_LID_DETECT_OFF 0
+#define REG_LID_DETECT 0xF4BD
+#define BIT_LID_DETECT_ON 1
+#define BIT_LID_DETECT_OFF 0
/* CRT */
-#define REG_CRT_DETECT 0xF4AD
-#define BIT_CRT_DETECT_PLUG 1
-#define BIT_CRT_DETECT_UNPLUG 0
+#define REG_CRT_DETECT 0xF4AD
+#define BIT_CRT_DETECT_PLUG 1
+#define BIT_CRT_DETECT_UNPLUG 0
/* LCD backlight brightness adjust: 9 levels */
-#define REG_DISPLAY_BRIGHTNESS 0xF4F5
+#define REG_DISPLAY_BRIGHTNESS 0xF4F5
/* Black screen Status */
-#define BIT_DISPLAY_LCD_ON 1
-#define BIT_DISPLAY_LCD_OFF 0
+#define BIT_DISPLAY_LCD_ON 1
+#define BIT_DISPLAY_LCD_OFF 0
/* LCD backlight control: off/restore */
-#define REG_BACKLIGHT_CTRL 0xF7BD
-#define BIT_BACKLIGHT_ON 1
-#define BIT_BACKLIGHT_OFF 0
+#define REG_BACKLIGHT_CTRL 0xF7BD
+#define BIT_BACKLIGHT_ON 1
+#define BIT_BACKLIGHT_OFF 0
/* Reset the machine auto-clear: rd/wr */
-#define REG_RESET 0xF4EC
-#define BIT_RESET_ON 1
+#define REG_RESET 0xF4EC
+#define BIT_RESET_ON 1
/* Light the led: rd/wr */
-#define REG_LED 0xF4C8
-#define BIT_LED_RED_POWER (1 << 0)
-#define BIT_LED_ORANGE_POWER (1 << 1)
-#define BIT_LED_GREEN_CHARGE (1 << 2)
-#define BIT_LED_RED_CHARGE (1 << 3)
-#define BIT_LED_NUMLOCK (1 << 4)
+#define REG_LED 0xF4C8
+#define BIT_LED_RED_POWER (1 << 0)
+#define BIT_LED_ORANGE_POWER (1 << 1)
+#define BIT_LED_GREEN_CHARGE (1 << 2)
+#define BIT_LED_RED_CHARGE (1 << 3)
+#define BIT_LED_NUMLOCK (1 << 4)
/* Test led mode, all led on/off */
-#define REG_LED_TEST 0xF4C2
-#define BIT_LED_TEST_IN 1
-#define BIT_LED_TEST_OUT 0
+#define REG_LED_TEST 0xF4C2
+#define BIT_LED_TEST_IN 1
+#define BIT_LED_TEST_OUT 0
/* Camera on/off */
-#define REG_CAMERA_STATUS 0xF46A
-#define BIT_CAMERA_STATUS_ON 1
-#define BIT_CAMERA_STATUS_OFF 0
-#define REG_CAMERA_CONTROL 0xF7B7
-#define BIT_CAMERA_CONTROL_OFF 0
-#define BIT_CAMERA_CONTROL_ON 1
+#define REG_CAMERA_STATUS 0xF46A
+#define BIT_CAMERA_STATUS_ON 1
+#define BIT_CAMERA_STATUS_OFF 0
+#define REG_CAMERA_CONTROL 0xF7B7
+#define BIT_CAMERA_CONTROL_OFF 0
+#define BIT_CAMERA_CONTROL_ON 1
/* Wlan Status */
-#define REG_WLAN 0xF4FA
-#define BIT_WLAN_ON 1
-#define BIT_WLAN_OFF 0
-#define REG_DISPLAY_LCD 0xF79F
+#define REG_WLAN 0xF4FA
+#define BIT_WLAN_ON 1
+#define BIT_WLAN_OFF 0
+#define REG_DISPLAY_LCD 0xF79F
/* SCI Event Number from EC */
enum {
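(Editorial aside, not part of the patch: the comment at the top of this header describes a classic indexed-port protocol: write the EC register's high byte to EC_IO_PORT_HIGH, its low byte to EC_IO_PORT_LOW, then move the value through EC_IO_PORT_DATA. A minimal sketch of a read helper built on that description; the helper name is hypothetical:)

    #include <asm/io.h>     /* outb() / inb() */

    static u8 ec_read_sketch(u16 reg)
    {
            outb((reg >> 8) & 0xff, EC_IO_PORT_HIGH);  /* register index, high byte */
            outb(reg & 0xff, EC_IO_PORT_LOW);          /* register index, low byte */
            return inb(EC_IO_PORT_DATA);               /* data moves through the data port */
    }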
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 14b081841b6..6f8682e4448 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -2,9 +2,9 @@
* Copyright (C) 2007 Lemote Inc.
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -18,10 +18,10 @@
#include <loongson.h>
#include <machine.h>
-#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
-#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
-#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
-#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
+#define LOONGSON_TIMER_IRQ (MIPS_CPU_IRQ_BASE + 7) /* cpu timer */
+#define LOONGSON_NORTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 6) /* bonito */
+#define LOONGSON_UART_IRQ (MIPS_CPU_IRQ_BASE + 3) /* cpu serial port */
+#define LOONGSON_SOUTH_BRIDGE_IRQ (MIPS_CPU_IRQ_BASE + 2) /* i8259 */
#define LOONGSON_INT_BIT_INT0 (1 << 11)
#define LOONGSON_INT_BIT_INT1 (1 << 12)
@@ -108,9 +108,9 @@ struct irqaction cascade_irqaction = {
void __init mach_init_irq(void)
{
/* init all controller
- * 0-15 ------> i8259 interrupt
- * 16-23 ------> mips cpu interrupt
- * 32-63 ------> bonito irq
+ * 0-15 ------> i8259 interrupt
+ * 16-23 ------> mips cpu interrupt
+ * 32-63 ------> bonito irq
*/
/* setup cs5536 as high level trigger */
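(Editorial aside, not part of the patch: the init comment above spells out how the IRQ number space is partitioned, 0-15 for the i8259, 16-23 for the MIPS CPU interrupts, 32-63 for Bonito. A small illustrative helper that encodes that partitioning, handy when reading the dispatch code:)

    static const char *loongson_irq_owner(unsigned int irq)
    {
            if (irq < 16)
                    return "i8259";     /* south bridge legacy PIC */
            if (irq < 24)
                    return "mips-cpu";  /* CP0 Cause IP bits */
            if (irq >= 32 && irq < 64)
                    return "bonito";    /* north bridge */
            return "unknown";
    }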
diff --git a/arch/mips/loongson/lemote-2f/machtype.c b/arch/mips/loongson/lemote-2f/machtype.c
index e860a2705c2..b55e6eece5e 100644
--- a/arch/mips/loongson/lemote-2f/machtype.c
+++ b/arch/mips/loongson/lemote-2f/machtype.c
@@ -2,8 +2,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -22,11 +22,11 @@ void __init mach_prom_init_machtype(void)
* machines, this will help the users a lot.
*
* If no "machtype=" passed, get machine type from "PMON_VER=".
- * PMON_VER=LM8089 Lemote 8.9'' netbook
- * LM8101 Lemote 10.1'' netbook
- * (The above two netbooks have the same kernel support)
- * LM6XXX Lemote FuLoong(2F) box series
- * LM9XXX Lemote LynLoong PC series
+ * PMON_VER=LM8089 Lemote 8.9'' netbook
+ * LM8101 Lemote 10.1'' netbook
+ * (The above two netbooks have the same kernel support)
+ * LM6XXX Lemote FuLoong(2F) box series
+ * LM9XXX Lemote LynLoong PC series
*/
if (strstr(arcs_cmdline, "PMON_VER=LM")) {
if (strstr(arcs_cmdline, "PMON_VER=LM8"))
diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c
index 36020a07e18..90962a3a173 100644
--- a/arch/mips/loongson/lemote-2f/reset.c
+++ b/arch/mips/loongson/lemote-2f/reset.c
@@ -5,8 +5,8 @@
* Copyright (C) 2009 Lemote Inc.
* Author: Wu Zhangjin, wuzhangjin@gmail.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -90,9 +90,9 @@ void ml2f_reboot(void)
#define EC_SHUTDOWN_IO_PORT_HIGH 0xff2d
#define EC_SHUTDOWN_IO_PORT_LOW 0xff2e
#define EC_SHUTDOWN_IO_PORT_DATA 0xff2f
-#define REG_SHUTDOWN_HIGH 0xFC
-#define REG_SHUTDOWN_LOW 0x29
-#define BIT_SHUTDOWN_ON (1 << 1)
+#define REG_SHUTDOWN_HIGH 0xFC
+#define REG_SHUTDOWN_LOW 0x29
+#define BIT_SHUTDOWN_ON (1 << 1)
static void ml2f_shutdown(void)
{
diff --git a/arch/mips/loongson1/Platform b/arch/mips/loongson1/Platform
index 99bdefe627a..11863441dea 100644
--- a/arch/mips/loongson1/Platform
+++ b/arch/mips/loongson1/Platform
@@ -1,4 +1,4 @@
-cflags-$(CONFIG_CPU_LOONGSON1) += \
+cflags-$(CONFIG_CPU_LOONGSON1) += \
$(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32r2 -Wa,--trap
diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c
index 07133defa14..b4437f19c3d 100644
--- a/arch/mips/loongson1/common/clock.c
+++ b/arch/mips/loongson1/common/clock.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/common/irq.c b/arch/mips/loongson1/common/irq.c
index 41bc8ffe7bb..455a7704a90 100644
--- a/arch/mips/loongson1/common/irq.c
+++ b/arch/mips/loongson1/common/irq.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c
index 69dad4cfaaf..fdf8cb5987a 100644
--- a/arch/mips/loongson1/common/platform.c
+++ b/arch/mips/loongson1/common/platform.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -23,7 +23,7 @@
.mapbase = LS1X_UART ## _id ## _BASE, \
.irq = LS1X_UART ## _id ## _IRQ, \
.iotype = UPIO_MEM, \
- .flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
+ .flags = UPF_IOREMAP | UPF_FIXED_TYPE, \
.type = PORT_16550A, \
}
diff --git a/arch/mips/loongson1/common/prom.c b/arch/mips/loongson1/common/prom.c
index 1f8e49f9886..2a47af5a55c 100644
--- a/arch/mips/loongson1/common/prom.c
+++ b/arch/mips/loongson1/common/prom.c
@@ -3,8 +3,8 @@
*
* Modified from arch/mips/pnx833x/common/prom.c.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -73,7 +73,7 @@ void __init prom_free_prom_memory(void)
#define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset))
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
int timeout;
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
index fb979a784ec..d4f610f9604 100644
--- a/arch/mips/loongson1/common/reset.c
+++ b/arch/mips/loongson1/common/reset.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/common/setup.c b/arch/mips/loongson1/common/setup.c
index 62128cc27e6..62f41afee24 100644
--- a/arch/mips/loongson1/common/setup.c
+++ b/arch/mips/loongson1/common/setup.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c
index 1fbd5264f66..b26b10dac70 100644
--- a/arch/mips/loongson1/ls1b/board.c
+++ b/arch/mips/loongson1/ls1b/board.c
@@ -1,8 +1,8 @@
/*
* Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 96607230d9e..121a848a359 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -9,4 +9,3 @@ obj-y := cp1emu.o ieee754m.o ieee754d.o ieee754dp.o ieee754sp.o ieee754.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
-
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 47c77e7ffbf..afb5a0bcf7a 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -169,7 +169,7 @@ static int isBranchInstr(mips_instruction * i)
/*
* In the Linux kernel, we support selection of FPR format on the
- * basis of the Status.FR bit. If an FPU is not present, the FR bit
+ * basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
* 64-bit CPUs so we rather look at TIF_32BIT_REGS.
* FPU emu is slow and bulky and optimizing this function offers fairly
@@ -234,7 +234,7 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
if (xcp->cp0_cause & CAUSEF_BD) {
/*
* The instruction to be emulated is in a branch delay slot
- * which means that we have to emulate the branch instruction
+ * which means that we have to emulate the branch instruction
* BEFORE we do the cop1 instruction.
*
* This branch could be a COP1 branch, but in that case we
@@ -1335,8 +1335,8 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
else {
/*
* The 'ieee754_csr' is an alias of
- * ctx->fcr31. No need to copy ctx->fcr31 to
- * ieee754_csr. But ieee754_csr.rm is ieee
+ * ctx->fcr31. No need to copy ctx->fcr31 to
+ * ieee754_csr. But ieee754_csr.rm is ieee
* library modes. (not mips rounding mode)
*/
/* convert to ieee library modes */
diff --git a/arch/mips/math-emu/dp_add.c b/arch/mips/math-emu/dp_add.c
index b422fcad852..c57c8adc42c 100644
--- a/arch/mips/math-emu/dp_add.c
+++ b/arch/mips/math-emu/dp_add.c
@@ -153,7 +153,7 @@ ieee754dp ieee754dp_add(ieee754dp x, ieee754dp y)
xe = xe;
xs = xs;
- if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm);
xe++;
}
diff --git a/arch/mips/math-emu/dp_sqrt.c b/arch/mips/math-emu/dp_sqrt.c
index a2a51b87ae8..b874d60a942 100644
--- a/arch/mips/math-emu/dp_sqrt.c
+++ b/arch/mips/math-emu/dp_sqrt.c
@@ -87,7 +87,7 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
if (xe > 512) { /* x > 2**-512? */
xe -= 512; /* x = x / 2**512 */
scalx += 256;
- } else if (xe < -512) { /* x < 2**-512? */
+ } else if (xe < -512) { /* x < 2**-512? */
xe += 512; /* x = x * 2**512 */
scalx -= 256;
}
@@ -108,13 +108,13 @@ ieee754dp ieee754dp_sqrt(ieee754dp x)
y.bits &= 0xffffffff00000000LL;
/* triple to almost 56 sig. bits: y ~= sqrt(x) to within 1 ulp */
- /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
+ /* t=y*y; z=t; pt[n0]+=0x00100000; t+=z; z=(x-z)*y; */
z = t = ieee754dp_mul(y, y);
t.parts.bexp += 0x001;
t = ieee754dp_add(t, z);
z = ieee754dp_mul(ieee754dp_sub(x, z), y);
- /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */
+ /* t=z/(t+x) ; pt[n0]+=0x00100000; y+=t; */
t = ieee754dp_div(z, ieee754dp_add(t, x));
t.parts.bexp += 0x001;
y = ieee754dp_add(y, t);
diff --git a/arch/mips/math-emu/dp_sub.c b/arch/mips/math-emu/dp_sub.c
index 0de098cbc77..91e0a4b5cbc 100644
--- a/arch/mips/math-emu/dp_sub.c
+++ b/arch/mips/math-emu/dp_sub.c
@@ -158,7 +158,7 @@ ieee754dp ieee754dp_sub(ieee754dp x, ieee754dp y)
xe = xe;
xs = xs;
- if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (DP_MBITS + 1 + 3)) { /* carry out */
xm = XDPSRS1(xm); /* shift preserving sticky */
xe++;
}
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 30554e1c67b..0015cf1989d 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -56,21 +56,21 @@
#endif
const struct ieee754dp_konst __ieee754dp_spcvals[] = {
- DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */
- DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */
+ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* + zero */
+ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 0), /* - zero */
DPSTR(0, DP_EBIAS, 0, 0), /* + 1.0 */
DPSTR(1, DP_EBIAS, 0, 0), /* - 1.0 */
DPSTR(0, 3 + DP_EBIAS, 0x40000, 0), /* + 10.0 */
DPSTR(1, 3 + DP_EBIAS, 0x40000, 0), /* - 10.0 */
- DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
- DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
+ DPSTR(0, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* + infinity */
+ DPSTR(1, DP_EMAX + 1 + DP_EBIAS, 0, 0), /* - infinity */
DPSTR(0, DP_EMAX+1+DP_EBIAS, 0x7FFFF, 0xFFFFFFFF), /* + indef quiet Nan */
DPSTR(0, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* + max */
DPSTR(1, DP_EMAX + DP_EBIAS, 0xFFFFF, 0xFFFFFFFF), /* - max */
DPSTR(0, DP_EMIN + DP_EBIAS, 0, 0), /* + min normal */
DPSTR(1, DP_EMIN + DP_EBIAS, 0, 0), /* - min normal */
- DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
- DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
+ DPSTR(0, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* + min denormal */
+ DPSTR(1, DP_EMIN - 1 + DP_EBIAS, 0, 1), /* - min denormal */
DPSTR(0, 31 + DP_EBIAS, 0, 0), /* + 1.0e31 */
DPSTR(0, 63 + DP_EBIAS, 0, 0), /* + 1.0e63 */
};
@@ -84,9 +84,9 @@ const struct ieee754sp_konst __ieee754sp_spcvals[] = {
SPSTR(1, 3 + SP_EBIAS, 0x200000), /* - 10.0 */
SPSTR(0, SP_EMAX + 1 + SP_EBIAS, 0), /* + infinity */
SPSTR(1, SP_EMAX + 1 + SP_EBIAS, 0), /* - infinity */
- SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */
- SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */
- SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */
+ SPSTR(0, SP_EMAX+1+SP_EBIAS, 0x3FFFFF), /* + indef quiet Nan */
+ SPSTR(0, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* + max normal */
+ SPSTR(1, SP_EMAX + SP_EBIAS, 0x7FFFFF), /* - max normal */
SPSTR(0, SP_EMIN + SP_EBIAS, 0), /* + min normal */
SPSTR(1, SP_EMIN + SP_EBIAS, 0), /* - min normal */
SPSTR(0, SP_EMIN - 1 + SP_EBIAS, 1), /* + min denormal */
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index 080b5ca03fc..068e56be8de 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -116,7 +116,7 @@ static u64 get_rounding(int sn, u64 xm)
xm += 0x8;
break;
case IEEE754_RD: /* toward -Infinity */
- if (sn) /* ?? */
+ if (sn) /* ?? */
xm += 0x8;
break;
}
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2a7d43f4f16..4b6c6fb3530 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -56,7 +56,7 @@
#define CLPAIR(x, y) ((x)*6+(y))
-#define CLEARCX \
+#define CLEARCX \
(ieee754_csr.cx = 0)
#define SETCX(x) \
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index 271d00d6113..15d1e36cfe6 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -117,7 +117,7 @@ static unsigned get_rounding(int sn, unsigned xm)
xm += 0x8;
break;
case IEEE754_RD: /* toward -Infinity */
- if (sn) /* ?? */
+ if (sn) /* ?? */
xm += 0x8;
break;
}
diff --git a/arch/mips/math-emu/ieee754xcpt.c b/arch/mips/math-emu/ieee754xcpt.c
index b99a693c05a..967167116ae 100644
--- a/arch/mips/math-emu/ieee754xcpt.c
+++ b/arch/mips/math-emu/ieee754xcpt.c
@@ -25,7 +25,7 @@
* Added preprocessor hacks to map to Linux kernel diagnostics.
*
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*************************************************************************/
#include <linux/kernel.h>
diff --git a/arch/mips/math-emu/kernel_linkage.c b/arch/mips/math-emu/kernel_linkage.c
index 52e6c58c8de..1c586575fe1 100644
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -1,6 +1,6 @@
/*
* Kevin D. Kissell, kevink@mips and Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index ae1a327ccac..c446e64637e 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -148,7 +148,7 @@ ieee754sp ieee754sp_add(ieee754sp x, ieee754sp y)
xe = xe;
xs = xs;
- if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
SPXSRSX1();
}
} else {
diff --git a/arch/mips/math-emu/sp_mul.c b/arch/mips/math-emu/sp_mul.c
index 2722a2570ea..fa4675cf2aa 100644
--- a/arch/mips/math-emu/sp_mul.c
+++ b/arch/mips/math-emu/sp_mul.c
@@ -131,7 +131,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
hrm = hxm * hym; /* 16 * 16 => 32 */
{
- unsigned t = lxm * hym; /* 16 * 16 => 32 */
+ unsigned t = lxm * hym; /* 16 * 16 => 32 */
{
unsigned at = lrm + (t << 16);
hrm += at < lrm;
@@ -141,7 +141,7 @@ ieee754sp ieee754sp_mul(ieee754sp x, ieee754sp y)
}
{
- unsigned t = hxm * lym; /* 16 * 16 => 32 */
+ unsigned t = hxm * lym; /* 16 * 16 => 32 */
{
unsigned at = lrm + (t << 16);
hrm += at < lrm;
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index 886ed5bcfef..e595c6f3d0b 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -153,7 +153,7 @@ ieee754sp ieee754sp_sub(ieee754sp x, ieee754sp y)
xe = xe;
xs = xs;
- if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
+ if (xm >> (SP_MBITS + 1 + 3)) { /* carry out */
SPXSRSX1(); /* shift preserving sticky */
}
} else {
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 90ceb963aaf..1dcec30ad1c 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -16,9 +16,9 @@ obj-$(CONFIG_CPU_R3000) += c-r3k.o tlb-r3k.o
obj-$(CONFIG_CPU_R8000) += c-r4k.o cex-gen.o tlb-r8k.o
obj-$(CONFIG_CPU_SB1) += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
obj-$(CONFIG_CPU_TX39XX) += c-tx39.o tlb-r3k.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += c-octeon.o cex-oct.o tlb-r4k.o
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
-obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
-obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
+obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
+obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 6ec04daf423..8557fb55286 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -106,7 +106,7 @@ static void octeon_flush_icache_all(void)
* Called to flush all memory associated with a memory
* context.
*
- * @mm: Memory context to flush
+ * @mm: Memory context to flush
*/
static void octeon_flush_cache_mm(struct mm_struct *mm)
{
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 031c4c2cdf2..704dc735a59 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -119,7 +119,7 @@ static void r3k_flush_icache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -176,7 +176,7 @@ static void r3k_flush_dcache_range(unsigned long start, unsigned long end)
write_c0_status((ST0_ISC|flags)&~ST0_IEC);
for (i = 0; i < size; i += 0x080) {
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
"sb\t$0, 0x008(%0)\n\t"
"sb\t$0, 0x00c(%0)\n\t"
@@ -285,13 +285,13 @@ static void r3k_flush_cache_sigtramp(unsigned long addr)
write_c0_status(flags&~ST0_IEC);
/* Fill the TLB to avoid an exception with caches isolated. */
- asm( "lw\t$0, 0x000(%0)\n\t"
+ asm( "lw\t$0, 0x000(%0)\n\t"
"lw\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
write_c0_status((ST0_ISC|ST0_SWC|flags)&~ST0_IEC);
- asm( "sb\t$0, 0x000(%0)\n\t"
+ asm( "sb\t$0, 0x000(%0)\n\t"
"sb\t$0, 0x004(%0)\n\t"
: : "r" (addr) );
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 0f7d788e881..ecca559b8d7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -160,7 +160,7 @@ static void __cpuinit r4k_blast_dcache_setup(void)
"1:\n\t" \
)
#define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
-#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
+#define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11)
static inline void blast_r4600_v1_icache32(void)
{
@@ -177,7 +177,7 @@ static inline void tx49_blast_icache32(void)
unsigned long end = start + current_cpu_data.icache.waysize;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -208,7 +208,7 @@ static inline void tx49_blast_icache32_page_indexed(unsigned long page)
unsigned long end = start + PAGE_SIZE;
unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
unsigned long ws_end = current_cpu_data.icache.ways <<
- current_cpu_data.icache.waybit;
+ current_cpu_data.icache.waybit;
unsigned long ws, addr;
CACHE32_UNROLL32_ALIGN2;
@@ -637,7 +637,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
* for the cache instruction on MIPS processors and
* some processors, among them the RM5200 and RM7000
* QED processors will throw an address error for cache
- * hit ops with insufficient alignment. Solved by
+ * hit ops with insufficient alignment. Solved by
* aligning the address to cache line size.
*/
blast_inv_scache_range(addr, addr + size);
@@ -864,7 +864,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -923,7 +923,7 @@ static void __cpuinit probe_pcache(void)
icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
- c->icache.waybit = 0; /* doesn't matter */
+ c->icache.waybit = 0; /* doesn't matter */
dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
@@ -986,8 +986,8 @@ static void __cpuinit probe_pcache(void)
c->icache.ways = 1 + ((config1 >> 16) & 7);
icache_size = c->icache.sets *
- c->icache.ways *
- c->icache.linesz;
+ c->icache.ways *
+ c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
if (config & 0x8) /* VI bit */
@@ -1006,8 +1006,8 @@ static void __cpuinit probe_pcache(void)
c->dcache.ways = 1 + ((config1 >> 7) & 7);
dcache_size = c->dcache.sets *
- c->dcache.ways *
- c->dcache.linesz;
+ c->dcache.ways *
+ c->dcache.linesz;
c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);
c->options |= MIPS_CPU_PREFETCH;
@@ -1016,7 +1016,7 @@ static void __cpuinit probe_pcache(void)
/*
* Processor configuration sanity check for the R4000SC erratum
- * #5. With page sizes larger than 32kB there is no possibility
+ * #5. With page sizes larger than 32kB there is no possibility
* to get a VCE exception anymore so we don't care about this
* misconfiguration. The case is rather theoretical anyway;
* presumably no vendor is shipping his hardware in the "bad"
@@ -1057,6 +1057,7 @@ static void __cpuinit probe_pcache(void)
break;
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_24K:
case CPU_34K:
case CPU_74K:
@@ -1088,7 +1089,7 @@ static void __cpuinit probe_pcache(void)
break;
}
-#ifdef CONFIG_CPU_LOONGSON2
+#ifdef CONFIG_CPU_LOONGSON2
/*
* LOONGSON2 has 4 way icache, but when using indexed cache op,
* one op will act on all 4 ways
@@ -1228,7 +1229,7 @@ static void __cpuinit setup_scache(void)
#ifdef CONFIG_R5000_CPU_SCACHE
r5k_sc_init();
#endif
- return;
+ return;
case CPU_RM7000:
#ifdef CONFIG_RM7000_CPU_SCACHE
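(Editorial aside, not part of the patch: the probe code touched above derives each cache's size as sets * ways * linesz, all decoded from Config1, and then takes the way bit from the per-way size. A worked example with made-up numbers, just to show the arithmetic:)

    /* illustrative values only, not taken from the patch */
    unsigned int sets = 64, ways = 4, linesz = 32;
    unsigned int icache_size = sets * ways * linesz;      /* 64 * 4 * 32 = 8192 bytes */
    unsigned int waybit = __ffs(icache_size / ways);      /* __ffs(2048) = 11 */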
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index 87d23cada6d..ba9da270289 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -33,9 +33,9 @@ extern int r3k_have_wired_reg; /* in r3k-tlb.c */
/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
- ".set push\n\t" \
- ".set noreorder\n\t" \
- "b 1f\n\t" \
+ ".set push\n\t" \
+ ".set noreorder\n\t" \
+ "b 1f\n\t" \
"nop\n\t" \
"1:\n\t" \
".set pop" \
@@ -361,7 +361,7 @@ void __cpuinit tx39_cache_init(void)
/* TX39/H core (writethru direct-map cache) */
__flush_cache_vmap = tx39__flush_cache_vmap;
__flush_cache_vunmap = tx39__flush_cache_vunmap;
- flush_cache_all = tx39h_flush_icache_all;
+ flush_cache_all = tx39h_flush_icache_all;
__flush_cache_all = tx39h_flush_icache_all;
flush_cache_mm = (void *) tx39h_flush_icache_all;
flush_cache_range = (void *) tx39h_flush_icache_all;
@@ -409,8 +409,8 @@ void __cpuinit tx39_cache_init(void)
_dma_cache_inv = tx39_dma_cache_inv;
shm_align_mask = max_t(unsigned long,
- (dcache_size / current_cpu_data.dcache.ways) - 1,
- PAGE_SIZE - 1);
+ (dcache_size / current_cpu_data.dcache.ways) - 1,
+ PAGE_SIZE - 1);
break;
}
diff --git a/arch/mips/mm/cerr-sb1.c b/arch/mips/mm/cerr-sb1.c
index 3571090ba17..576add33bf5 100644
--- a/arch/mips/mm/cerr-sb1.c
+++ b/arch/mips/mm/cerr-sb1.c
@@ -27,7 +27,7 @@
/*
* We'd like to dump the L2_ECC_TAG register on errors, but errata make
- * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
+ * that unsafe... So for now we don't. (BCM1250/BCM112x erratum SOC-48.)
*/
#undef DUMP_L2_ECC_TAG_ON_ERROR
@@ -48,7 +48,7 @@
#define CP0_CERRI_EXTERNAL (1 << 26)
#define CP0_CERRI_IDX_VALID(c) (!((c) & CP0_CERRI_EXTERNAL))
-#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
+#define CP0_CERRI_DATA (CP0_CERRI_DATA_PARITY)
#define CP0_CERRD_MULTIPLE (1 << 31)
#define CP0_CERRD_TAG_STATE (1 << 30)
@@ -56,8 +56,8 @@
#define CP0_CERRD_DATA_SBE (1 << 28)
#define CP0_CERRD_DATA_DBE (1 << 27)
#define CP0_CERRD_EXTERNAL (1 << 26)
-#define CP0_CERRD_LOAD (1 << 25)
-#define CP0_CERRD_STORE (1 << 24)
+#define CP0_CERRD_LOAD (1 << 25)
+#define CP0_CERRD_STORE (1 << 24)
#define CP0_CERRD_FILLWB (1 << 23)
#define CP0_CERRD_COHERENCY (1 << 22)
#define CP0_CERRD_DUPTAG (1 << 21)
@@ -69,10 +69,10 @@
(CP0_CERRD_LOAD | CP0_CERRD_STORE | CP0_CERRD_FILLWB | CP0_CERRD_COHERENCY | CP0_CERRD_DUPTAG)
#define CP0_CERRD_TYPES \
(CP0_CERRD_TAG_STATE | CP0_CERRD_TAG_ADDRESS | CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE | CP0_CERRD_EXTERNAL)
-#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
+#define CP0_CERRD_DATA (CP0_CERRD_DATA_SBE | CP0_CERRD_DATA_DBE)
-static uint32_t extract_ic(unsigned short addr, int data);
-static uint32_t extract_dc(unsigned short addr, int data);
+static uint32_t extract_ic(unsigned short addr, int data);
+static uint32_t extract_dc(unsigned short addr, int data);
static inline void breakout_errctl(unsigned int val)
{
@@ -209,11 +209,11 @@ asmlinkage void sb1_cache_error(void)
"=r" (dpahi), "=r" (dpalo), "=r" (eepc));
cerr_dpa = (((uint64_t)dpahi) << 32) | dpalo;
- printk(" c0_errorepc == %08x\n", eepc);
- printk(" c0_errctl == %08x", errctl);
+ printk(" c0_errorepc == %08x\n", eepc);
+ printk(" c0_errctl == %08x", errctl);
breakout_errctl(errctl);
if (errctl & CP0_ERRCTL_ICACHE) {
- printk(" c0_cerr_i == %08x", cerr_i);
+ printk(" c0_cerr_i == %08x", cerr_i);
breakout_cerri(cerr_i);
if (CP0_CERRI_IDX_VALID(cerr_i)) {
/* Check index of EPC, allowing for delay slot */
@@ -229,7 +229,7 @@ asmlinkage void sb1_cache_error(void)
}
}
if (errctl & CP0_ERRCTL_DCACHE) {
- printk(" c0_cerr_d == %08x", cerr_d);
+ printk(" c0_cerr_d == %08x", cerr_d);
breakout_cerrd(cerr_d);
if (CP0_CERRD_DPA_VALID(cerr_d)) {
printk(" c0_cerr_dpa == %010llx\n", cerr_dpa);
@@ -256,7 +256,7 @@ asmlinkage void sb1_cache_error(void)
/*
* Calling panic() when a fatal cache error occurs scrambles the
* state of the system (and the cache), making it difficult to
- * investigate after the fact. However, if you just stall the CPU,
+ * investigate after the fact. However, if you just stall the CPU,
* the other CPU may keep on running, which is typically very
* undesirable.
*/
@@ -411,7 +411,7 @@ static uint32_t extract_ic(unsigned short addr, int data)
" dmfc0 $1, $28, 1\n\t"
" dsrl32 %1, $1, 0 \n\t"
" sll %2, $1, 0 \n\t"
- " .set pop \n"
+ " .set pop \n"
: "=r" (datahi), "=r" (insta), "=r" (instb)
: "r" ((way << 13) | addr | (offset << 3)));
predecode = (datahi >> 8) & 0xff;
@@ -441,8 +441,8 @@ static uint8_t dc_ecc(uint64_t dword)
{
uint64_t t;
uint32_t w;
- uint8_t p;
- int i;
+ uint8_t p;
+ int i;
p = 0;
for (i = 7; i >= 0; i--)
diff --git a/arch/mips/mm/cex-gen.S b/arch/mips/mm/cex-gen.S
index e743622fd24..45dff5cd4b8 100644
--- a/arch/mips/mm/cex-gen.S
+++ b/arch/mips/mm/cex-gen.S
@@ -14,17 +14,17 @@
#include <asm/stackframe.h>
/*
- * Game over. Go to the button. Press gently. Swear where allowed by
+ * Game over. Go to the button. Press gently. Swear where allowed by
* legislation.
*/
LEAF(except_vec2_generic)
.set noreorder
.set noat
- .set mips0
+ .set mips0
/*
* This is a very bad place to be. Our cache error
* detection has triggered. If we have write-back data
- * in the cache, we may not be able to recover. As a
+ * in the cache, we may not be able to recover. As a
* first-order desperate measure, turn off KSEG0 cacheing.
*/
mfc0 k0,CP0_CONFIG
diff --git a/arch/mips/mm/cex-oct.S b/arch/mips/mm/cex-oct.S
index 3db8553fcd3..9029092aa74 100644
--- a/arch/mips/mm/cex-oct.S
+++ b/arch/mips/mm/cex-oct.S
@@ -18,7 +18,7 @@
*/
LEAF(except_vec2_octeon)
- .set push
+ .set push
.set mips64r2
.set noreorder
.set noat
@@ -27,19 +27,19 @@
/* due to an errata we need to read the COP0 CacheErr (Dcache)
* before any cache/DRAM access */
- rdhwr k0, $0 /* get core_id */
- PTR_LA k1, cache_err_dcache
- sll k0, k0, 3
+ rdhwr k0, $0 /* get core_id */
+ PTR_LA k1, cache_err_dcache
+ sll k0, k0, 3
PTR_ADDU k1, k0, k1 /* k1 = &cache_err_dcache[core_id] */
- dmfc0 k0, CP0_CACHEERR, 1
- sd k0, (k1)
- dmtc0 $0, CP0_CACHEERR, 1
+ dmfc0 k0, CP0_CACHEERR, 1
+ sd k0, (k1)
+ dmtc0 $0, CP0_CACHEERR, 1
- /* check whether this is a nested exception */
- mfc0 k1, CP0_STATUS
- andi k1, k1, ST0_EXL
- beqz k1, 1f
+ /* check whether this is a nested exception */
+ mfc0 k1, CP0_STATUS
+ andi k1, k1, ST0_EXL
+ beqz k1, 1f
nop
j cache_parity_error_octeon_non_recoverable
nop
@@ -48,22 +48,22 @@
1: j handle_cache_err
nop
- .set pop
+ .set pop
END(except_vec2_octeon)
/* We need to jump to handle_cache_err so that the previous handler
* can fit within 0x80 bytes. We also move from 0xFFFFFFFFAXXXXXXX
- * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
+ * space (uncached) to the 0xFFFFFFFF8XXXXXXX space (cached). */
LEAF(handle_cache_err)
- .set push
- .set noreorder
- .set noat
+ .set push
+ .set noreorder
+ .set noat
SAVE_ALL
KMODE
- jal cache_parity_error_octeon_recoverable
+ jal cache_parity_error_octeon_recoverable
nop
- j ret_from_exception
+ j ret_from_exception
nop
.set pop
diff --git a/arch/mips/mm/cex-sb1.S b/arch/mips/mm/cex-sb1.S
index 89c412bc4b6..fe1d887e8d7 100644
--- a/arch/mips/mm/cex-sb1.S
+++ b/arch/mips/mm/cex-sb1.S
@@ -24,9 +24,9 @@
#include <asm/cacheops.h>
#include <asm/sibyte/board.h>
-#define C0_ERRCTL $26 /* CP0: Error info */
-#define C0_CERR_I $27 /* CP0: Icache error */
-#define C0_CERR_D $27,1 /* CP0: Dcache error */
+#define C0_ERRCTL $26 /* CP0: Error info */
+#define C0_CERR_I $27 /* CP0: Icache error */
+#define C0_CERR_D $27,1 /* CP0: Dcache error */
/*
* Based on SiByte sample software cache-err/cerr.S
@@ -88,7 +88,7 @@ attempt_recovery:
/*
* k0 has C0_ERRCTL << 1, which puts 'DC' at bit 31. Any
* Dcache errors we can recover from will take more extensive
- * processing. For now, they are considered "unrecoverable".
+ * processing. For now, they are considered "unrecoverable".
* Note that 'DC' becoming set (outside of ERL mode) will
* cause 'IC' to clear; so if there's an Icache error, we'll
* only find out about it if we recover from this error and
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 3fab2046c8a..f9ef83829a5 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -4,7 +4,7 @@
* for more details.
*
* Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
- * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
*/
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index ddcec1e1a0c..0fead53d1c2 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -52,7 +52,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
#ifdef CONFIG_KPROBES
/*
- * This is to notify the fault handler of the kprobes. The
+ * This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
@@ -216,7 +216,7 @@ bad_area_nosemaphore:
}
no_context:
- /* Are we prepared to handle this kernel fault? */
+ /* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index dcfd573871c..d4ea5c9c4a9 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -249,7 +249,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* @nr_pages: number of pages from start to pin
* @write: whether pages will be written to
* @pages: array that receives pointers to the pages pinned.
- * Should be at least nr_pages long.
+ * Should be at least nr_pages long.
*
* Attempt to pin user pages in memory without taking mm->mmap_sem.
* If not successful, it will fall back to taking the lock and
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index be9acb2b959..67929251286 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -66,7 +66,7 @@
/*
* We have up to 8 empty zeroed pages so we can map one of the right colour
- * when needed. This is necessary only on R4000 / R4400 SC and MC versions
+ * when needed. This is necessary only on R4000 / R4400 SC and MC versions
* where we have to avoid VCED / VECI exceptions for good performance at
* any price. Since page is never written to after the initialization we
* don't have to care about aliases on other CPUs.
@@ -380,7 +380,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
totalram_pages += free_all_bootmem();
- totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
+ totalram_pages -= setup_zero_pages(); /* Setup zeroed pages. */
reservedpages = ram = 0;
for (tmp = 0; tmp < max_low_pfn; tmp++)
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index cacfd31e8ec..7f840bc08ab 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -22,7 +22,7 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
phys_t end;
unsigned long pfn;
pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
- | __WRITEABLE | flags);
+ | __WRITEABLE | flags);
address &= ~PMD_MASK;
end = address + size;
@@ -185,7 +185,7 @@ void __iounmap(const volatile void __iomem *addr)
if (!p)
printk(KERN_ERR "iounmap: bad address %p\n", addr);
- kfree(p);
+ kfree(p);
}
EXPORT_SYMBOL(__ioremap);
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 8e666c55f4d..a29fba55b53 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -271,7 +271,7 @@ void __cpuinit build_clear_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
- * cache_line_size : 0;
+ * cache_line_size : 0;
while (off) {
build_clear_pref(&buf, -off);
off -= cache_line_size;
@@ -417,13 +417,13 @@ void __cpuinit build_copy_page(void)
uasm_i_lui(&buf, AT, 0xa000);
off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_load_pref(&buf, -off);
off -= cache_line_size;
}
off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
- cache_line_size : 0;
+ cache_line_size : 0;
while (off) {
build_copy_store_pref(&buf, -off);
off -= cache_line_size;
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index ee331bbd8f8..e8adc0069d6 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -24,7 +24,7 @@ void pgd_init(unsigned long page)
entry = (unsigned long)invalid_pmd_table;
#endif
- p = (unsigned long *) page;
+ p = (unsigned long *) page;
end = p + PTRS_PER_PGD;
do {
@@ -45,7 +45,7 @@ void pmd_init(unsigned long addr, unsigned long pagetable)
{
unsigned long *p, *end;
- p = (unsigned long *) addr;
+ p = (unsigned long *) addr;
end = p + PTRS_PER_PMD;
do {
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index 1eb708ef75f..c6aaed934d5 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -159,7 +159,7 @@ static inline int __init indy_sc_probe(void)
}
/* XXX Check with wje if the Indy caches can differenciate between
- writeback + invalidate and just invalidate. */
+ writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
.bc_disable = indy_sc_disable,
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 8d90ff25b12..8bc67720e14 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -58,7 +58,7 @@ static void r5k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
static void r5k_sc_enable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
set_c0_config(R5K_CONF_SE);
@@ -68,7 +68,7 @@ static void r5k_sc_enable(void)
static void r5k_sc_disable(void)
{
- unsigned long flags;
+ unsigned long flags;
local_irq_save(flags);
blast_r5000_scache();
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 2a7c9725b2a..493131c81a2 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -424,7 +424,7 @@ void __cpuinit tlb_init(void)
write_c0_pagegrain(pg);
}
- /* From this point on the ARC firmware is dead. */
+ /* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
/* Did I tell you that ARC SUCKS? */
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 1c8ac49ec72..820e6612d74 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -5,8 +5,8 @@
*
* Synthesize TLB refill handlers at runtime.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
- * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2005, 2007, 2008, 2009 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2008, 2009 Cavium Networks, Inc.
* Copyright (C) 2011 MIPS Technologies, Inc.
@@ -212,7 +212,7 @@ static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
/*
* pgtable bits are assigned dynamically depending on processor feature
* and statically based on kernel configuration. This spits out the actual
- * values the kernel is using. Required to make sense from disassembled
+ * values the kernel is using. Required to make sense from disassembled
* TLB exception handlers.
*/
static void output_pgtable_bits_defines(void)
@@ -464,8 +464,8 @@ static u32 final_handler[64] __cpuinitdata;
* From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
* 2. A timing hazard exists for the TLBP instruction.
*
- * stalling_instruction
- * TLBP
+ * stalling_instruction
+ * TLBP
*
* The JTLB is being read for the TLBP throughout the stall generated by the
* previous instruction. This is not really correct as the stalling instruction
@@ -476,7 +476,7 @@ static u32 final_handler[64] __cpuinitdata;
* The software work-around is to not allow the instruction preceding the TLBP
* to stall - make it an NOP or some other instruction guaranteed not to stall.
*
- * Errata 2 will not be fixed. This errata is also on the R5000.
+ * Errata 2 will not be fixed. This errata is also on the R5000.
*
* As if we MIPS hackers wouldn't know how to nop pipelines happy ...
*/
@@ -581,6 +581,7 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
case CPU_4KC:
case CPU_4KEC:
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -748,7 +749,7 @@ static __cpuinit void build_huge_update_entries(u32 **p,
*/
small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
- /* We can clobber tmp. It isn't used after this.*/
+ /* We can clobber tmp. It isn't used after this.*/
if (!small_sequence)
uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
@@ -830,12 +831,12 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
/* Clear lower 23 bits of context. */
uasm_i_dins(p, ptr, 0, 0, 23);
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
#elif defined(CONFIG_SMP)
-# ifdef CONFIG_MIPS_MT_SMTC
+# ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -955,7 +956,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
-#ifdef CONFIG_MIPS_MT_SMTC
+#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC uses TCBind value as "CPU" index
*/
@@ -965,7 +966,7 @@ build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
#else
/*
* smp_processor_id() << 3 is stored in CONTEXT.
- */
+ */
uasm_i_mfc0(p, ptr, C0_CONTEXT);
UASM_i_LA_mostly(p, tmp, pgdc);
uasm_i_srl(p, ptr, ptr, 23);
@@ -1153,7 +1154,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
if (pgd_reg == -1) {
vmalloc_branch_delay_filled = 1;
- /* 1 0 1 0 1 << 6 xkphys cached */
+ /* 1 0 1 0 1 << 6 xkphys cached */
uasm_i_ori(p, ptr, ptr, 0x540);
uasm_i_drotr(p, ptr, ptr, 11);
}
@@ -1171,9 +1172,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_l_vmalloc_done(l, *p);
/*
- * tmp ptr
- * fall-through case = badvaddr *pgd_current
- * vmalloc case = badvaddr swapper_pg_dir
+ * tmp ptr
+ * fall-through case = badvaddr *pgd_current
+ * vmalloc case = badvaddr swapper_pg_dir
*/
if (vmalloc_branch_delay_filled)
@@ -1212,7 +1213,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
/*
* The in the LWX case we don't want to do the load in the
- * delay slot. It cannot issue in the same cycle and may be
+ * delay slot. It cannot issue in the same cycle and may be
* speculative and unneeded.
*/
if (use_lwx_insns())
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 39b89105622..942ff6c2eba 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -7,7 +7,7 @@
* support a subset of instructions, and does not try to hide pipeline
* effects like branch delay slots.
*
- * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
+ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
* Copyright (C) 2005, 2007 Maciej W. Rozycki
* Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
*/
@@ -119,30 +119,30 @@ static struct insn insn_table[] __uasminitdata = {
{ insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE },
{ insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
- { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+ { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
- { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
- { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
+ { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
- { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
+ { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+ { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
- { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
+ { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
@@ -345,7 +345,7 @@ Ip_u2u1msbu3(op) \
} \
UASM_EXPORT_SYMBOL(uasm_i##op);
-#define I_u2u1msbdu3(op) \
+#define I_u2u1msbdu3(op) \
Ip_u2u1msbu3(op) \
{ \
build_insn(buf, insn##op, b, a, d-1, c); \
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 469d9b0cee6..1e478445801 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -70,12 +70,12 @@ void amon_cpu_start(int cpu,
launch->sp = sp;
launch->a0 = a0;
- smp_wmb(); /* Target must see parameters before go */
+ smp_wmb(); /* Target must see parameters before go */
launch->flags |= LAUNCH_FGO;
- smp_wmb(); /* Target must see go before we poll */
+ smp_wmb(); /* Target must see go before we poll */
while ((launch->flags & LAUNCH_FGONE) == 0)
;
- smp_rmb(); /* Target will be updating flags soon */
+ smp_rmb(); /* Target will be updating flags soon */
pr_debug("launch: cpu%d gone!\n", cpu);
}
diff --git a/arch/mips/mti-malta/malta-cmdline.c b/arch/mips/mti-malta/malta-cmdline.c
index 1871c30ed2e..5576a306a14 100644
--- a/arch/mips/mti-malta/malta-cmdline.c
+++ b/arch/mips/mti-malta/malta-cmdline.c
@@ -46,7 +46,7 @@ void __init prom_init_cmdline(void)
cp = &(arcs_cmdline[0]);
while(actr < prom_argc) {
- strcpy(cp, prom_argv(actr));
+ strcpy(cp, prom_argv(actr));
cp += strlen(prom_argv(actr));
*cp++ = ' ';
actr++;
diff --git a/arch/mips/mti-malta/malta-display.c b/arch/mips/mti-malta/malta-display.c
index 7c8828fcb0a..9bc58a24e80 100644
--- a/arch/mips/mti-malta/malta-display.c
+++ b/arch/mips/mti-malta/malta-display.c
@@ -37,10 +37,10 @@ void mips_display_message(const char *str)
display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int));
for (i = 0; i <= 14; i=i+2) {
- if (*str)
- __raw_writel(*str++, display + i);
+ if (*str)
+ __raw_writel(*str++, display + i);
else
- __raw_writel(' ', display + i);
+ __raw_writel(' ', display + i);
}
}
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 27a6cdb36e3..c2cbce9e435 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
@@ -110,20 +110,20 @@ static inline void str2eaddr(unsigned char *ea, unsigned char *str)
int get_ethernet_addr(char *ethernet_addr)
{
- char *ethaddr_str;
+ char *ethaddr_str;
- ethaddr_str = prom_getenv("ethaddr");
+ ethaddr_str = prom_getenv("ethaddr");
if (!ethaddr_str) {
- printk("ethaddr not set in boot prom\n");
+ printk("ethaddr not set in boot prom\n");
return -1;
}
str2eaddr(ethernet_addr, ethaddr_str);
if (init_debug > 1) {
- int i;
+ int i;
printk("get_ethernet_addr: ");
- for (i=0; i<5; i++)
- printk("%02x:", (unsigned char)*(ethernet_addr+i));
+ for (i=0; i<5; i++)
+ printk("%02x:", (unsigned char)*(ethernet_addr+i));
printk("%02x\n", *(ethernet_addr+i));
}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 647b8638318..e364af70e6c 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -84,10 +84,10 @@ static inline int mips_pcibios_iack(void)
/* Flush Bonito register block */
(void) BONITO_PCIMAP_CFG;
- iob(); /* sync */
+ iob(); /* sync */
irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg);
- iob(); /* sync */
+ iob(); /* sync */
irq &= 0xff;
BONITO_PCIMAP_CFG = 0;
break;
@@ -136,7 +136,7 @@ static void malta_ipi_irqdispatch(void)
irq = gic_get_int();
if (irq < 0)
- return; /* interrupt has already been cleared */
+ return; /* interrupt has already been cleared */
do_IRQ(MIPS_GIC_IRQ_BASE + irq);
}
@@ -149,7 +149,7 @@ static void corehi_irqdispatch(void)
struct pt_regs *regs = get_irq_regs();
printk(KERN_EMERG "CoreHI interrupt, shouldn't happen, we die here!\n");
- printk(KERN_EMERG "epc : %08lx\nStatus: %08lx\n"
+ printk(KERN_EMERG "epc : %08lx\nStatus: %08lx\n"
"Cause : %08lx\nbadVaddr : %08lx\n",
regs->cp0_epc, regs->cp0_status,
regs->cp0_cause, regs->cp0_badvaddr);
@@ -249,20 +249,20 @@ static inline unsigned int irq_ffs(unsigned int pending)
* on hardware interrupt 0 (MIPS IRQ 2)) like:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Combined hardware interrupt (hw0)
- * 3 Hardware (ignored)
- * 4 Hardware (ignored)
- * 5 Hardware (ignored)
- * 6 Hardware (ignored)
- * 7 R4k timer (what we use)
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Combined hardware interrupt (hw0)
+ * 3 Hardware (ignored)
+ * 4 Hardware (ignored)
+ * 5 Hardware (ignored)
+ * 6 Hardware (ignored)
+ * 7 R4k timer (what we use)
*
* We handle the IRQ according to _our_ priority which is:
*
- * Highest ---- R4k Timer
- * Lowest ---- Combined hardware interrupt
+ * Highest ---- R4k Timer
+ * Lowest ---- Combined hardware interrupt
*
* then we just return, if multiple IRQs are pending then we will just take
* another exception, big deal.
@@ -396,7 +396,7 @@ static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ X, X, X, X, 0 },
- { X, X, X, X, 0 },
+ { X, X, X, X, 0 },
{ X, X, X, X, 0 },
{ 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
{ 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
@@ -410,7 +410,7 @@ static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
{ 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
{ 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
{ 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
- { X, X, X, X, 0 },
+ { X, X, X, X, 0 },
/* The remainder of this table is initialised by fill_ipi_map */
};
#undef X
@@ -634,7 +634,7 @@ void malta_be_init(void)
static char *tr[8] = {
"mem", "gcr", "gic", "mmio",
- "0x04", "0x05", "0x06", "0x07"
+ "0x04", "0x05", "0x06", "0x07"
};
static char *mcmd[32] = {
@@ -673,10 +673,10 @@ static char *mcmd[32] = {
};
static char *core[8] = {
- "Invalid/OK", "Invalid/Data",
+ "Invalid/OK", "Invalid/Data",
"Shared/OK", "Shared/Data",
"Modified/OK", "Modified/Data",
- "Exclusive/OK", "Exclusive/Data"
+ "Exclusive/OK", "Exclusive/Data"
};
static char *causes[32] = {
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index a96d281f922..f3d43aa023a 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -47,7 +47,7 @@ static char *mtypes[3] = {
};
#endif
-/* determined physical memory size, not overridden by command line args */
+/* determined physical memory size, not overridden by command line args */
unsigned long physical_memsize = 0L;
static struct prom_pmemblock * __init prom_getmdesc(void)
@@ -158,7 +158,7 @@ void __init prom_meminit(void)
size = p->size;
add_memory_region(base, size, type);
- p++;
+ p++;
}
}
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 2147cb34e70..37134ddfeaa 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
@@ -127,7 +127,7 @@ void __init mips_pcibios_init(void)
map = map1;
}
mask = ~(start ^ end);
- /* We don't support remapping with a discontiguous mask. */
+ /* We don't support remapping with a discontiguous mask. */
BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
mask != ~((mask & -mask) - 1));
gt64120_mem_resource.start = start;
@@ -144,7 +144,7 @@ void __init mips_pcibios_init(void)
map = GT_READ(GT_PCI0IOREMAP_OFS);
end = (end & GT_PCI_HD_MSK) | (start & ~GT_PCI_HD_MSK);
mask = ~(start ^ end);
- /* We don't support remapping with a discontiguous mask. */
+ /* We don't support remapping with a discontiguous mask. */
BUG_ON((start & GT_PCI_HD_MSK) != (map & GT_PCI_HD_MSK) &&
mask != ~((mask & -mask) - 1));
gt64120_io_resource.start = map & mask;
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 74732177851..132f8663825 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -93,7 +93,7 @@ static struct mtd_partition malta_mtd_partitions[] = {
.mask_flags = MTD_WRITEABLE
}, {
.name = "User FS",
- .offset = 0x100000,
+ .offset = 0x100000,
.size = 0x2e0000
}, {
.name = "Board Config",
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 2e28f653f66..200f64df2c9 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -78,9 +78,9 @@ const char *get_system_type(void)
}
#if defined(CONFIG_MIPS_MT_SMTC)
-const char display_string[] = " SMTC LINUX ON MALTA ";
+const char display_string[] = " SMTC LINUX ON MALTA ";
#else
-const char display_string[] = " LINUX ON MALTA ";
+const char display_string[] = " LINUX ON MALTA ";
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_BLK_DEV_FD
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 1efc8c39448..becbf47506a 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -126,7 +126,7 @@ int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
* to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
* that signal is brought to IP2 of both VPEs. To avoid racing
* concurrent interrupt service events, IP2 is enabled only on
- * one VPE, by convention VPE0. So long as no bits are ever
+ * one VPE, by convention VPE0. So long as no bits are ever
* cleared in the affinity mask, there will never be any
* interrupt forwarding. But as soon as a program or operator
* sets affinity for one of the related IRQs, we need to make
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 115f5bc0600..a144b89cf9b 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -17,7 +17,6 @@
*
* Setting up the clock on the MIPS boards.
*/
-
#include <linux/types.h>
#include <linux/i8253.h>
#include <linux/init.h>
@@ -25,7 +24,6 @@
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/time.h>
#include <linux/timex.h>
#include <linux/mc146818rtc.h>
@@ -34,11 +32,11 @@
#include <asm/hardirq.h>
#include <asm/irq.h>
#include <asm/div64.h>
-#include <asm/cpu.h>
#include <asm/setup.h>
#include <asm/time.h>
#include <asm/mc146818-time.h>
#include <asm/msc01_ic.h>
+#include <asm/gic.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/prom.h>
@@ -46,6 +44,7 @@
#include <asm/mips-boards/maltaint.h>
unsigned long cpu_khz;
+int gic_frequency;
static int mips_cpu_timer_irq;
static int mips_cpu_perf_irq;
@@ -61,44 +60,50 @@ static void mips_perf_dispatch(void)
do_IRQ(mips_cpu_perf_irq);
}
+static unsigned int freqround(unsigned int freq, unsigned int amount)
+{
+ freq += amount;
+ freq -= freq % (amount*2);
+ return freq;
+}
+
/*
- * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
+ * Estimate CPU and GIC frequencies.
*/
-static unsigned int __init estimate_cpu_frequency(void)
+static void __init estimate_frequencies(void)
{
- unsigned int prid = read_c0_prid() & 0xffff00;
- unsigned int count;
-
unsigned long flags;
- unsigned int start;
+ unsigned int count, start;
+ unsigned int giccount = 0, gicstart = 0;
local_irq_save(flags);
- /* Start counter exactly on falling edge of update flag */
+ /* Start counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
- /* Start r4k counter. */
+ /* Initialize counters. */
start = read_c0_count();
+ if (gic_present)
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart);
- /* Read counter exactly on falling edge of update flag */
+ /* Read counter exactly on falling edge of update flag. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
- count = read_c0_count() - start;
+ count = read_c0_count();
+ if (gic_present)
+ GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount);
- /* restore interrupts */
local_irq_restore(flags);
- mips_hpt_frequency = count;
- if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
- (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
- count *= 2;
-
- count += 5000; /* round */
- count -= count%10000;
+ count -= start;
+ if (gic_present)
+ giccount -= gicstart;
- return count;
+ mips_hpt_frequency = count;
+ if (gic_present)
+ gic_frequency = giccount;
}
void read_persistent_clock(struct timespec *ts)
@@ -144,22 +149,34 @@ unsigned int __cpuinit get_c0_compare_int(void)
void __init plat_time_init(void)
{
- unsigned int est_freq;
-
- /* Set Data mode - binary. */
- CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL);
-
- est_freq = estimate_cpu_frequency();
+ unsigned int prid = read_c0_prid() & 0xffff00;
+ unsigned int freq;
- printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
- (est_freq%1000000)*100/1000000);
+ estimate_frequencies();
- cpu_khz = est_freq / 1000;
+ freq = mips_hpt_frequency;
+ if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+ (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+ freq *= 2;
+ freq = freqround(freq, 5000);
+ pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+ cpu_khz = freq / 1000;
+
+ if (gic_present) {
+ freq = freqround(gic_frequency, 5000);
+ pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000,
+ (freq%1000000)*100/1000000);
+ gic_clocksource_init(gic_frequency);
+ } else
+ init_r4k_clocksource();
- mips_scroll_message();
-#ifdef CONFIG_I8253 /* Only Malta has a PIT */
+#ifdef CONFIG_I8253
+ /* Only Malta has a PIT. */
setup_pit_timer();
#endif
+ mips_scroll_message();
+
plat_perf_setup();
}
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index 626afeac438..10ec701ce6c 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -5,10 +5,12 @@
# Copyright (C) 2008 Wind River Systems, Inc.
# written by Ralf Baechle <ralf@linux-mips.org>
#
+# Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+# Steven J. Hill <sjhill@mips.com>
+#
obj-y := sead3-lcd.o sead3-cmdline.o \
sead3-display.o sead3-init.o sead3-int.o \
- sead3-mtd.o sead3-net.o \
- sead3-memory.o sead3-platform.o \
+ sead3-mtd.o sead3-net.o sead3-platform.o \
sead3-reset.o sead3-setup.o sead3-time.o
obj-y += sead3-i2c-dev.o sead3-i2c.o \
@@ -17,3 +19,7 @@ obj-y += sead3-i2c-dev.o sead3-i2c.o \
obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o
obj-$(CONFIG_USB_EHCI_HCD) += sead3-ehci.o
+obj-$(CONFIG_OF) += sead3.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+ $(call if_changed,dtc)
diff --git a/arch/mips/mti-sead3/leds-sead3.c b/arch/mips/mti-sead3/leds-sead3.c
index a95ac598520..322148c353e 100644
--- a/arch/mips/mti-sead3/leds-sead3.c
+++ b/arch/mips/mti-sead3/leds-sead3.c
@@ -33,12 +33,12 @@ static void sead3_fled_set(struct led_classdev *led_cdev,
static struct led_classdev sead3_pled = {
.name = "sead3::pled",
- .brightness_set = sead3_pled_set,
+ .brightness_set = sead3_pled_set,
};
static struct led_classdev sead3_fled = {
.name = "sead3::fled",
- .brightness_set = sead3_fled_set,
+ .brightness_set = sead3_fled_set,
};
#ifdef CONFIG_PM
@@ -125,4 +125,3 @@ module_exit(sead3_led_exit);
MODULE_AUTHOR("Kristian Kielhofner <kris@krisk.org>");
MODULE_DESCRIPTION("SEAD3 LED driver");
MODULE_LICENSE("GPL");
-
diff --git a/arch/mips/mti-sead3/sead3-console.c b/arch/mips/mti-sead3/sead3-console.c
index b36739108a0..2ddef19a9ad 100644
--- a/arch/mips/mti-sead3/sead3-console.c
+++ b/arch/mips/mti-sead3/sead3-console.c
@@ -10,8 +10,8 @@
#include <linux/serial_reg.h>
#include <linux/io.h>
-#define SEAD_UART1_REGS_BASE 0xbf000800 /* ttyS1 = DB9 port */
-#define SEAD_UART0_REGS_BASE 0xbf000900 /* ttyS0 = USB port */
+#define SEAD_UART1_REGS_BASE 0xbf000800 /* ttyS1 = DB9 port */
+#define SEAD_UART0_REGS_BASE 0xbf000900 /* ttyS0 = USB port */
#define PORT(base_addr, offset) ((unsigned int __iomem *)(base_addr+(offset)*4))
static char console_port = 1;
diff --git a/arch/mips/mti-sead3/sead3-display.c b/arch/mips/mti-sead3/sead3-display.c
index 8308c7fc188..e389326cfa4 100644
--- a/arch/mips/mti-sead3/sead3-display.c
+++ b/arch/mips/mti-sead3/sead3-display.c
@@ -21,7 +21,7 @@ static unsigned int max_display_count;
#define LCD_SETDDRAM 0x80
#define LCD_IR_BF 0x80
-const char display_string[] = " LINUX ON SEAD3 ";
+const char display_string[] = " LINUX ON SEAD3 ";
static void scroll_display_message(unsigned long data);
static DEFINE_TIMER(mips_scroll_timer, scroll_display_message, HZ, 0);
diff --git a/arch/mips/mti-sead3/sead3-i2c-drv.c b/arch/mips/mti-sead3/sead3-i2c-drv.c
index 7aa2225e75b..1f787a6a787 100644
--- a/arch/mips/mti-sead3/sead3-i2c-drv.c
+++ b/arch/mips/mti-sead3/sead3-i2c-drv.c
@@ -13,32 +13,32 @@
#include <linux/platform_device.h>
#define PIC32_I2CxCON 0x0000
-#define PIC32_I2CCON_ON (1<<15)
-#define PIC32_I2CCON_ACKDT (1<<5)
-#define PIC32_I2CCON_ACKEN (1<<4)
-#define PIC32_I2CCON_RCEN (1<<3)
-#define PIC32_I2CCON_PEN (1<<2)
-#define PIC32_I2CCON_RSEN (1<<1)
-#define PIC32_I2CCON_SEN (1<<0)
+#define PIC32_I2CCON_ON (1<<15)
+#define PIC32_I2CCON_ACKDT (1<<5)
+#define PIC32_I2CCON_ACKEN (1<<4)
+#define PIC32_I2CCON_RCEN (1<<3)
+#define PIC32_I2CCON_PEN (1<<2)
+#define PIC32_I2CCON_RSEN (1<<1)
+#define PIC32_I2CCON_SEN (1<<0)
#define PIC32_I2CxCONCLR 0x0004
#define PIC32_I2CxCONSET 0x0008
#define PIC32_I2CxSTAT 0x0010
#define PIC32_I2CxSTATCLR 0x0014
-#define PIC32_I2CSTAT_ACKSTAT (1<<15)
-#define PIC32_I2CSTAT_TRSTAT (1<<14)
-#define PIC32_I2CSTAT_BCL (1<<10)
-#define PIC32_I2CSTAT_IWCOL (1<<7)
-#define PIC32_I2CSTAT_I2COV (1<<6)
+#define PIC32_I2CSTAT_ACKSTAT (1<<15)
+#define PIC32_I2CSTAT_TRSTAT (1<<14)
+#define PIC32_I2CSTAT_BCL (1<<10)
+#define PIC32_I2CSTAT_IWCOL (1<<7)
+#define PIC32_I2CSTAT_I2COV (1<<6)
#define PIC32_I2CxBRG 0x0040
#define PIC32_I2CxTRN 0x0050
#define PIC32_I2CxRCV 0x0060
static DEFINE_SPINLOCK(pic32_bus_lock);
-static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
+static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
static void __iomem *bus_status = (void __iomem *)0xbf000060;
-#define DELAY() udelay(100)
+#define DELAY() udelay(100)
static inline unsigned int ioready(void)
{
diff --git a/arch/mips/mti-sead3/sead3-init.c b/arch/mips/mti-sead3/sead3-init.c
index a958cad6fff..f95abaa1aa5 100644
--- a/arch/mips/mti-sead3/sead3-init.c
+++ b/arch/mips/mti-sead3/sead3-init.c
@@ -77,7 +77,6 @@ void __init prom_init(void)
board_ejtag_handler_setup = mips_ejtag_setup;
prom_init_cmdline();
- prom_meminit();
#ifdef CONFIG_EARLY_PRINTK
if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL)
prom_init_early_console(0);
@@ -89,3 +88,7 @@ void __init prom_init(void)
strcat(prom_getcmdline(), " console=ttyS0,38400n8r");
#endif
}
+
+void prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/mti-sead3/sead3-memory.c b/arch/mips/mti-sead3/sead3-memory.c
deleted file mode 100644
index da9244106f8..00000000000
--- a/arch/mips/mti-sead3/sead3-memory.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- */
-#include <linux/bootmem.h>
-
-#include <asm/bootinfo.h>
-#include <asm/sections.h>
-#include <asm/mips-boards/prom.h>
-
-enum yamon_memtypes {
- yamon_dontuse,
- yamon_prom,
- yamon_free,
-};
-
-static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
-
-/* determined physical memory size, not overridden by command line args */
-unsigned long physical_memsize = 0L;
-
-struct prom_pmemblock * __init prom_getmdesc(void)
-{
- char *memsize_str, *ptr;
- unsigned int memsize;
- static char cmdline[COMMAND_LINE_SIZE] __initdata;
- long val;
- int tmp;
-
- /* otherwise look in the environment */
- memsize_str = prom_getenv("memsize");
- if (!memsize_str) {
- pr_warn("memsize not set in boot prom, set to default 32Mb\n");
- physical_memsize = 0x02000000;
- } else {
- tmp = kstrtol(memsize_str, 0, &val);
- physical_memsize = (unsigned long)val;
- }
-
-#ifdef CONFIG_CPU_BIG_ENDIAN
- /* SOC-it swaps, or perhaps doesn't swap, when DMA'ing the last
- word of physical memory */
- physical_memsize -= PAGE_SIZE;
-#endif
-
- /* Check the command line for a memsize directive that overrides
- the physical/default amount */
- strcpy(cmdline, arcs_cmdline);
- ptr = strstr(cmdline, "memsize=");
- if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
- ptr = strstr(ptr, " memsize=");
-
- if (ptr)
- memsize = memparse(ptr + 8, &ptr);
- else
- memsize = physical_memsize;
-
- memset(mdesc, 0, sizeof(mdesc));
-
- mdesc[0].type = yamon_dontuse;
- mdesc[0].base = 0x00000000;
- mdesc[0].size = 0x00001000;
-
- mdesc[1].type = yamon_prom;
- mdesc[1].base = 0x00001000;
- mdesc[1].size = 0x000ef000;
-
- /*
- * The area 0x000f0000-0x000fffff is allocated for BIOS memory by the
- * south bridge and PCI access always forwarded to the ISA Bus and
- * BIOSCS# is always generated.
- * This mean that this area can't be used as DMA memory for PCI
- * devices.
- */
- mdesc[2].type = yamon_dontuse;
- mdesc[2].base = 0x000f0000;
- mdesc[2].size = 0x00010000;
-
- mdesc[3].type = yamon_dontuse;
- mdesc[3].base = 0x00100000;
- mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
- mdesc[3].base;
-
- mdesc[4].type = yamon_free;
- mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
- mdesc[4].size = memsize - mdesc[4].base;
-
- return &mdesc[0];
-}
-
-static int __init prom_memtype_classify(unsigned int type)
-{
- switch (type) {
- case yamon_free:
- return BOOT_MEM_RAM;
- case yamon_prom:
- return BOOT_MEM_ROM_DATA;
- default:
- return BOOT_MEM_RESERVED;
- }
-}
-
-void __init prom_meminit(void)
-{
- struct prom_pmemblock *p;
-
- p = prom_getmdesc();
-
- while (p->size) {
- long type;
- unsigned long base, size;
-
- type = prom_memtype_classify(p->type);
- base = p->base;
- size = p->size;
-
- add_memory_region(base, size, type);
- p++;
- }
-}
-
-void __init prom_free_prom_memory(void)
-{
- unsigned long addr;
- int i;
-
- for (i = 0; i < boot_mem_map.nr_map; i++) {
- if (boot_mem_map.map[i].type != BOOT_MEM_ROM_DATA)
- continue;
-
- addr = boot_mem_map.map[i].addr;
- free_init_pages("prom memory",
- addr, addr + boot_mem_map.map[i].size);
- }
-}
diff --git a/arch/mips/mti-sead3/sead3-net.c b/arch/mips/mti-sead3/sead3-net.c
index 04d704df609..dd11e7eb771 100644
--- a/arch/mips/mti-sead3/sead3-net.c
+++ b/arch/mips/mti-sead3/sead3-net.c
@@ -19,8 +19,8 @@ static struct smsc911x_platform_config sead3_smsc911x_data = {
struct resource sead3_net_resourcess[] = {
{
- .start = 0x1f010000,
- .end = 0x1f01ffff,
+ .start = 0x1f010000,
+ .end = 0x1f01ffff,
.flags = IORESOURCE_MEM
},
{
diff --git a/arch/mips/mti-sead3/sead3-pic32-bus.c b/arch/mips/mti-sead3/sead3-pic32-bus.c
index 9f0d89bc800..eb2bf936d10 100644
--- a/arch/mips/mti-sead3/sead3-pic32-bus.c
+++ b/arch/mips/mti-sead3/sead3-pic32-bus.c
@@ -17,16 +17,16 @@
#define PIC32_SYSRD 0x02
#define PIC32_WR 0x10
#define PIC32_SYSWR 0x20
-#define PIC32_IRQ_CLR 0x40
+#define PIC32_IRQ_CLR 0x40
#define PIC32_STATUS 0x80
-#define DELAY() udelay(100) /* FIXME: needed? */
+#define DELAY() udelay(100) /* FIXME: needed? */
/* spinlock to ensure atomic access to PIC32 */
static DEFINE_SPINLOCK(pic32_bus_lock);
/* FIXME: io_remap these */
-static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
+static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
static void __iomem *bus_status = (void __iomem *)0xbf000060;
static inline unsigned int ioready(void)
diff --git a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
index 514675ed0cd..b921e5ec507 100644
--- a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
+++ b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
@@ -19,40 +19,40 @@
#define PIC32_I2CxCONCLR 0x0004
#define PIC32_I2CxCONSET 0x0008
#define PIC32_I2CxCONINV 0x000C
-#define I2CCON_ON (1<<15)
-#define I2CCON_FRZ (1<<14)
-#define I2CCON_SIDL (1<<13)
-#define I2CCON_SCLREL (1<<12)
-#define I2CCON_STRICT (1<<11)
-#define I2CCON_A10M (1<<10)
-#define I2CCON_DISSLW (1<<9)
-#define I2CCON_SMEN (1<<8)
-#define I2CCON_GCEN (1<<7)
-#define I2CCON_STREN (1<<6)
-#define I2CCON_ACKDT (1<<5)
-#define I2CCON_ACKEN (1<<4)
-#define I2CCON_RCEN (1<<3)
-#define I2CCON_PEN (1<<2)
-#define I2CCON_RSEN (1<<1)
-#define I2CCON_SEN (1<<0)
+#define I2CCON_ON (1<<15)
+#define I2CCON_FRZ (1<<14)
+#define I2CCON_SIDL (1<<13)
+#define I2CCON_SCLREL (1<<12)
+#define I2CCON_STRICT (1<<11)
+#define I2CCON_A10M (1<<10)
+#define I2CCON_DISSLW (1<<9)
+#define I2CCON_SMEN (1<<8)
+#define I2CCON_GCEN (1<<7)
+#define I2CCON_STREN (1<<6)
+#define I2CCON_ACKDT (1<<5)
+#define I2CCON_ACKEN (1<<4)
+#define I2CCON_RCEN (1<<3)
+#define I2CCON_PEN (1<<2)
+#define I2CCON_RSEN (1<<1)
+#define I2CCON_SEN (1<<0)
#define PIC32_I2CxSTAT 0x0010
#define PIC32_I2CxSTATCLR 0x0014
#define PIC32_I2CxSTATSET 0x0018
#define PIC32_I2CxSTATINV 0x001C
-#define I2CSTAT_ACKSTAT (1<<15)
-#define I2CSTAT_TRSTAT (1<<14)
-#define I2CSTAT_BCL (1<<10)
-#define I2CSTAT_GCSTAT (1<<9)
-#define I2CSTAT_ADD10 (1<<8)
-#define I2CSTAT_IWCOL (1<<7)
-#define I2CSTAT_I2COV (1<<6)
-#define I2CSTAT_DA (1<<5)
-#define I2CSTAT_P (1<<4)
-#define I2CSTAT_S (1<<3)
-#define I2CSTAT_RW (1<<2)
-#define I2CSTAT_RBF (1<<1)
-#define I2CSTAT_TBF (1<<0)
+#define I2CSTAT_ACKSTAT (1<<15)
+#define I2CSTAT_TRSTAT (1<<14)
+#define I2CSTAT_BCL (1<<10)
+#define I2CSTAT_GCSTAT (1<<9)
+#define I2CSTAT_ADD10 (1<<8)
+#define I2CSTAT_IWCOL (1<<7)
+#define I2CSTAT_I2COV (1<<6)
+#define I2CSTAT_DA (1<<5)
+#define I2CSTAT_P (1<<4)
+#define I2CSTAT_S (1<<3)
+#define I2CSTAT_RW (1<<2)
+#define I2CSTAT_RBF (1<<1)
+#define I2CSTAT_TBF (1<<0)
#define PIC32_I2CxADD 0x0020
#define PIC32_I2CxADDCLR 0x0024
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index 8ad46ad31b4..f012fd164ce 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -6,6 +6,12 @@
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/bootmem.h>
+
+#include <asm/mips-boards/generic.h>
+#include <asm/prom.h>
int coherentio; /* 0 => no DMA cache coherency (may be set by user) */
int hw_coherentio; /* 0 => no HW DMA cache coherency (reflects real HW) */
@@ -17,4 +23,25 @@ const char *get_system_type(void)
void __init plat_mem_setup(void)
{
+ /*
+	 * Load the builtin devicetree. This causes the chosen node to be
+	 * parsed, resulting in our memory appearing.
+ */
+ __dt_setup_arch(&__dtb_start);
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+	/* Before we do anything, let's reserve the dt blob */
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+ unflatten_device_tree();
}
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 048e781a17a..239e4e32757 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -43,11 +43,11 @@ static unsigned int __init estimate_cpu_frequency(void)
local_irq_save(flags);
- orig = readl(status_reg) & 0x2; /* get original sample */
+ orig = readl(status_reg) & 0x2; /* get original sample */
/* wait for transition */
while ((readl(status_reg) & 0x2) == orig)
;
- orig = orig ^ 0x2; /* flip the bit */
+ orig = orig ^ 0x2; /* flip the bit */
write_c0_count(0);
@@ -56,7 +56,7 @@ static unsigned int __init estimate_cpu_frequency(void)
/* wait for transition */
while ((readl(status_reg) & 0x2) == orig)
;
- orig = orig ^ 0x2; /* flip the bit */
+ orig = orig ^ 0x2; /* flip the bit */
tick++;
}
@@ -71,7 +71,7 @@ static unsigned int __init estimate_cpu_frequency(void)
(prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
freq *= 2;
- freq += 5000; /* rounding */
+ freq += 5000; /* rounding */
freq -= freq%10000;
return freq ;
diff --git a/arch/mips/mti-sead3/sead3.dts b/arch/mips/mti-sead3/sead3.dts
new file mode 100644
index 00000000000..658f4378705
--- /dev/null
+++ b/arch/mips/mti-sead3/sead3.dts
@@ -0,0 +1,26 @@
+/dts-v1/;
+
+/memreserve/ 0x00000000 0x00001000; // reserved
+/memreserve/ 0x00001000 0x000ef000; // ROM data
+/memreserve/ 0x000f0000 0x004cc000; // reserved
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "mti,sead-3";
+
+ cpus {
+ cpu@0 {
+ compatible = "mti,mips14KEc", "mti,mips14Kc";
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS1,38400 rootdelay=10 root=/dev/sda3";
+ };
+
+ memory {
+ device_type = "memory";
+ reg = <0x0 0x08000000>;
+ };
+};
diff --git a/arch/mips/netlogic/Platform b/arch/mips/netlogic/Platform
index cdfc9abbbb7..fb8eb4c0c6e 100644
--- a/arch/mips/netlogic/Platform
+++ b/arch/mips/netlogic/Platform
@@ -13,5 +13,5 @@ cflags-$(CONFIG_CPU_XLP) += $(call cc-option,-march=xlp,-march=mips64r2)
#
# NETLOGIC processor support
#
-platform-$(CONFIG_NLM_COMMON) += netlogic/
-load-$(CONFIG_NLM_COMMON) += 0xffffffff80100000
+platform-$(CONFIG_NLM_COMMON) += netlogic/
+load-$(CONFIG_NLM_COMMON) += 0xffffffff80100000
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 00dcc7a2bc5..9f84c60bf53 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -69,7 +69,7 @@
#else
#define SMP_IRQ_MASK 0
#endif
-#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
(1ull << IRQ_FMN))
struct nlm_pic_irq {
@@ -105,21 +105,23 @@ static void xlp_pic_disable(struct irq_data *d)
static void xlp_pic_mask_ack(struct irq_data *d)
{
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- uint64_t mask = 1ull << pd->picirq;
- write_c0_eirr(mask); /* ack by writing EIRR */
+ clear_c0_eimr(pd->picirq);
+ ack_c0_eirr(pd->picirq);
}
static void xlp_pic_unmask(struct irq_data *d)
{
struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- if (!pd)
- return;
+ BUG_ON(!pd);
if (pd->extra_ack)
pd->extra_ack(d);
+ /* re-enable the intr on this cpu */
+ set_c0_eimr(pd->picirq);
+
/* Ack is a single write, no need to lock */
nlm_pic_ack(pd->node->picbase, pd->irt);
}
@@ -134,32 +136,17 @@ static struct irq_chip xlp_pic = {
static void cpuintr_disable(struct irq_data *d)
{
- uint64_t eimr;
- uint64_t mask = 1ull << d->irq;
-
- eimr = read_c0_eimr();
- write_c0_eimr(eimr & ~mask);
+ clear_c0_eimr(d->irq);
}
static void cpuintr_enable(struct irq_data *d)
{
- uint64_t eimr;
- uint64_t mask = 1ull << d->irq;
-
- eimr = read_c0_eimr();
- write_c0_eimr(eimr | mask);
+ set_c0_eimr(d->irq);
}
static void cpuintr_ack(struct irq_data *d)
{
- uint64_t mask = 1ull << d->irq;
-
- write_c0_eirr(mask);
-}
-
-static void cpuintr_nop(struct irq_data *d)
-{
- WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq);
+ ack_c0_eirr(d->irq);
}
/*
@@ -170,9 +157,9 @@ struct irq_chip nlm_cpu_intr = {
.name = "XLP-CPU-INTR",
.irq_enable = cpuintr_enable,
.irq_disable = cpuintr_disable,
- .irq_mask = cpuintr_nop,
- .irq_ack = cpuintr_nop,
- .irq_eoi = cpuintr_ack,
+ .irq_mask = cpuintr_disable,
+ .irq_ack = cpuintr_ack,
+ .irq_eoi = cpuintr_enable,
};
static void __init nlm_init_percpu_irqs(void)
@@ -230,7 +217,7 @@ static void nlm_init_node_irqs(int node)
nlm_setup_pic_irq(node, i, i, irt);
/* set interrupts to first cpu in node */
nlm_pic_init_irt(nodep->picbase, irt, i,
- node * NLM_CPUS_PER_NODE);
+ node * NLM_CPUS_PER_NODE, 0);
irqmask |= (1ull << i);
}
nodep->irqmask = irqmask;
@@ -265,7 +252,7 @@ asmlinkage void plat_irq_dispatch(void)
int i, node;
node = nlm_nodeid();
- eirr = read_c0_eirr() & read_c0_eimr();
+ eirr = read_c0_eirr_and_eimr();
i = __ilog2_u64(eirr);
if (i == -1)
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index a080d9ee3cd..2bb95dcfe20 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -84,15 +84,19 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
/* IRQ_IPI_SMP_FUNCTION Handler */
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
- write_c0_eirr(1ull << irq);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
smp_call_function_interrupt();
+ set_c0_eimr(irq);
}
/* IRQ_IPI_SMP_RESCHEDULE handler */
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
- write_c0_eirr(1ull << irq);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
scheduler_ipi();
+ set_c0_eimr(irq);
}
/*
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index a0b74874beb..02651748858 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -49,12 +49,12 @@
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>
-#define CP0_EBASE $15
+#define CP0_EBASE $15
#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
SYS_CPU_NONCOHERENT_MODE * 4
-#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
+#define XLP_AX_WORKAROUND /* enable Ax silicon workarounds */
/* Enable XLP features and workarounds in the LSU */
.macro xlp_config_lsu
@@ -69,6 +69,12 @@
#endif
mtcr t1, t0
+ li t0, ICU_DEFEATURE
+ mfcr t1, t0
+ ori t1, 0x1000 /* Enable Icache partitioning */
+ mtcr t1, t0
+
+
#ifdef XLP_AX_WORKAROUND
li t0, SCHED_DEFEATURE
lui t1, 0x0100 /* Disable BRU accepting ALU ops */
@@ -85,7 +91,7 @@
li t0, LSU_DEBUG_DATA0
li t1, LSU_DEBUG_ADDR
li t2, 0 /* index */
- li t3, 0x1000 /* loop count */
+ li t3, 0x1000 /* loop count */
1:
sll v0, t2, 5
mtcr zero, t0
@@ -134,7 +140,7 @@ FEXPORT(nlm_reset_entry)
and k1, k0, k1
beqz k1, 1f /* go to real reset entry */
nop
- li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
ld k0, BOOT_NMI_HANDLER(k1)
jr k0
nop
@@ -235,7 +241,7 @@ EXPORT(nlm_reset_entry_end)
FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
xlp_config_lsu
- dmtc0 sp, $4, 2 /* SP saved in UserLocal */
+ dmtc0 sp, $4, 2 /* SP saved in UserLocal */
SAVE_ALL
sync
/* find the location to which nlm_boot_siblings was relocated */
@@ -301,13 +307,13 @@ NESTED(nlm_rmiboot_preboot, 16, sp)
*/
li t0, 0x400
mfcr t1, t0
- li t2, 6 /* XLR thread mode mask */
+ li t2, 6 /* XLR thread mode mask */
nor t3, t2, zero
and t2, t1, t2 /* t2 - current thread mode */
li v0, CKSEG1ADDR(RESET_DATA_PHYS)
lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
sll v1, 1
- beq v1, t2, 1f /* same as request value */
+ beq v1, t2, 1f /* same as request value */
nop /* nothing to do */
and t2, t1, t3 /* mask out old thread mode */
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index bd3e498157f..5c56555380b 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -35,17 +35,73 @@
#include <linux/init.h>
#include <asm/time.h>
+#include <asm/cpu-features.h>
+
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
unsigned int __cpuinit get_c0_compare_int(void)
{
return IRQ_TIMER;
}
+static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
+
+static struct clocksource csrc_pic = {
+ .name = "PIC",
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
+ if (current_cpu_data.cputype == CPU_XLR) {
+ csrc_pic.mask = CLOCKSOURCE_MASK(32);
+ csrc_pic.read = nlm_get_pic_timer32;
+ } else {
+ csrc_pic.mask = CLOCKSOURCE_MASK(64);
+ csrc_pic.read = nlm_get_pic_timer;
+ }
+ csrc_pic.rating = 1000;
+ clocksource_register_hz(&csrc_pic, PIC_CLK_HZ);
+}
+
void __init plat_time_init(void)
{
+ nlm_init_pic_timer();
mips_hpt_frequency = nlm_get_cpu_frequency();
+ if (current_cpu_type() == CPU_XLR)
+ preset_lpj = mips_hpt_frequency / (3 * HZ);
+ else
+ preset_lpj = mips_hpt_frequency / (2 * HZ);
pr_info("MIPS counter frequency [%ld]\n",
(unsigned long)mips_hpt_frequency);
}
diff --git a/arch/mips/netlogic/dts/xlp_evp.dts b/arch/mips/netlogic/dts/xlp_evp.dts
index e14f4230806..7628b5464fc 100644
--- a/arch/mips/netlogic/dts/xlp_evp.dts
+++ b/arch/mips/netlogic/dts/xlp_evp.dts
@@ -20,7 +20,7 @@
#address-cells = <2>;
#size-cells = <1>;
compatible = "simple-bus";
- ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
+ ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG
1 0 0 0x16000000 0x01000000>; // GBU chipselects
serial0: serial@30000 {
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 529e74742d9..c68fd402610 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -111,8 +111,8 @@ unsigned int nlm_get_core_frequency(int node, int core)
dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
pll_divf = ((rstval >> 10) & 0x7f) + 1;
pll_divr = ((rstval >> 8) & 0x3) + 1;
- ext_div = ((rstval >> 30) & 0x3) + 1;
- dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
+ ext_div = ((rstval >> 30) & 0x3) + 1;
+ dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
num = 800000000ULL * pll_divf;
denom = 3 * pll_divr * ext_div * dfs_div;
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
index dbe083a9353..1d0b66c62fd 100644
--- a/arch/mips/netlogic/xlp/usb-init.c
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -52,7 +52,7 @@ static void nlm_usb_intr_en(int node, int port)
port_addr = nlm_get_usb_regbase(node, port);
val = nlm_read_usb_reg(port_addr, USB_INT_EN);
val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
- USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN |
+ USB_OHCI_INTERRUPT1_EN | USB_CTRL_INTERRUPT_EN |
USB_OHCI_INTERRUPT_EN | USB_OHCI_INTERRUPT2_EN;
nlm_write_usb_reg(port_addr, USB_INT_EN, val);
}
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index cb9010642ac..abb3e08cc05 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -51,7 +51,7 @@
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/sys.h>
-static int xlp_wakeup_core(uint64_t sysbase, int core)
+static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
{
uint32_t coremask, value;
int count;
@@ -82,36 +82,51 @@ static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
struct nlm_soc_info *nodep;
uint64_t syspcibase;
uint32_t syscoremask;
- int core, n, cpu;
+ int core, n, cpu, count, val;
for (n = 0; n < NLM_NR_NODES; n++) {
syspcibase = nlm_get_sys_pcibase(n);
if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
break;
- /* read cores in reset from SYS and account for boot cpu */
- nlm_node_init(n);
+ /* read cores in reset from SYS */
+ if (n != 0)
+ nlm_node_init(n);
nodep = nlm_get_node(n);
syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
- if (n == 0)
+ /* The boot cpu */
+ if (n == 0) {
syscoremask |= 1;
+ nodep->coremask = 1;
+ }
for (core = 0; core < NLM_CORES_PER_NODE; core++) {
+ /* we will be on node 0 core 0 */
+ if (n == 0 && core == 0)
+ continue;
+
/* see if the core exists */
if ((syscoremask & (1 << core)) == 0)
continue;
- /* see if at least the first thread is enabled */
+ /* see if at least the first hw thread is enabled */
cpu = (n * NLM_CORES_PER_NODE + core)
* NLM_THREADS_PER_CORE;
if (!cpumask_test_cpu(cpu, wakeup_mask))
continue;
/* wake up the core */
- if (xlp_wakeup_core(nodep->sysbase, core))
- nodep->coremask |= 1u << core;
- else
- pr_err("Failed to enable core %d\n", core);
+ if (!xlp_wakeup_core(nodep->sysbase, n, core))
+ continue;
+
+ /* core is up */
+ nodep->coremask |= 1u << core;
+
+ /* spin until the first hw thread sets its ready */
+ count = 0x20000000;
+ do {
+ val = *(volatile int *)&nlm_cpu_ready[cpu];
+ } while (val == 0 && --count > 0);
}
}
}
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
index bed2cffa100..ed3bf0e3f30 100644
--- a/arch/mips/netlogic/xlr/fmn-config.c
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -164,8 +164,8 @@ static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
int i, j;
for (i = 0; i < num_core; i++) {
- cpu[i].start_stn_id = (8 * i);
- cpu[i].end_stn_id = (8 * i + 8);
+ cpu[i].start_stn_id = (8 * i);
+ cpu[i].end_stn_id = (8 * i + 8);
for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
xlr_board_fmn_config.bucket_size[j] = 32;
@@ -216,6 +216,8 @@ void xlr_board_info_setup(void)
case PRID_IMP_NETLOGIC_XLS404B:
case PRID_IMP_NETLOGIC_XLS408B:
case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS616B:
setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
FMN_STNID_GMAC0_TX3, 8, 8, 32);
setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c
index 340ab1601c4..6d3c727e0ef 100644
--- a/arch/mips/netlogic/xlr/platform-flash.c
+++ b/arch/mips/netlogic/xlr/platform-flash.c
@@ -36,7 +36,7 @@ static struct mtd_partition xlr_nor_parts[] = {
{
.name = "User FS",
.offset = 0x800000,
- .size = MTDPART_SIZ_FULL,
+ .size = MTDPART_SIZ_FULL,
}
};
@@ -46,13 +46,13 @@ static struct mtd_partition xlr_nor_parts[] = {
static struct mtd_partition xlr_nand_parts[] = {
{
.name = "Root Filesystem",
- .offset = 64 * 64 * 2048,
+ .offset = 64 * 64 * 2048,
.size = 432 * 64 * 2048,
},
{
.name = "Home Filesystem",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL,
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
},
};
@@ -74,8 +74,8 @@ static struct platform_device xlr_nor_dev = {
.dev = {
.platform_data = &xlr_nor_data,
},
- .num_resources = ARRAY_SIZE(xlr_nor_res),
- .resource = xlr_nor_res,
+ .num_resources = ARRAY_SIZE(xlr_nor_res),
+ .resource = xlr_nor_res,
};
const char *xlr_part_probes[] = { "cmdlinepart", NULL };
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
index 507230eeb76..7b96a91f477 100644
--- a/arch/mips/netlogic/xlr/platform.c
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -64,7 +64,7 @@ void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
.iotype = UPIO_MEM32, \
.flags = (UPF_SKIP_TEST | \
UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\
- .uartclk = PIC_CLKS_PER_SEC, \
+ .uartclk = PIC_CLK_HZ, \
.type = PORT_16550A, \
.serial_in = nlm_xlr_uart_in, \
.serial_out = nlm_xlr_uart_out, \
@@ -162,18 +162,18 @@ int xls_platform_usb_init(void)
nlm_write_reg(usb_mmio, 50, 0x1f000000);
/* Enable ports */
- nlm_write_reg(usb_mmio, 1, 0x07000500);
+ nlm_write_reg(usb_mmio, 1, 0x07000500);
val = nlm_read_reg(gpio_mmio, 21);
if (((val >> 22) & 0x01) == 0) {
pr_info("Detected USB Device mode - Not supported!\n");
- nlm_write_reg(usb_mmio, 0, 0x01000000);
+ nlm_write_reg(usb_mmio, 0, 0x01000000);
return 0;
}
pr_info("Detected USB Host mode - Adding XLS USB devices.\n");
/* Clear reset, host mode */
- nlm_write_reg(usb_mmio, 0, 0x02000000);
+ nlm_write_reg(usb_mmio, 0, 0x02000000);
/* Memory resource for various XLS usb ports */
usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET);
@@ -221,8 +221,8 @@ static struct resource i2c_resources[] = {
};
static struct platform_device nlm_xlr_i2c_1 = {
- .name = "xlr-i2cbus",
- .id = 1,
+ .name = "xlr-i2cbus",
+ .id = 1,
.num_resources = 1,
.resource = i2c_resources,
};
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index c5ce6992ac4..e3e094100e3 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -70,7 +70,7 @@ static void __init nlm_early_serial_setup(void)
s.iotype = UPIO_MEM32;
s.regshift = 2;
s.irq = PIC_UART_0_IRQ;
- s.uartclk = PIC_CLKS_PER_SEC;
+ s.uartclk = PIC_CLK_HZ;
s.serial_in = nlm_xlr_uart_in;
s.serial_out = nlm_xlr_uart_out;
s.mapbase = uart_base;
@@ -163,7 +163,7 @@ static void prom_add_memory(void)
{
struct nlm_boot_mem_map *bootm;
u64 start, size;
- u64 pref_backup = 512; /* avoid pref walking beyond end */
+ u64 pref_backup = 512; /* avoid pref walking beyond end */
int i;
bootm = (void *)(long)nlm_prom_info.psb_mem_map;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index e32db1ff02c..af763e838fd 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -27,10 +27,10 @@ static int op_mips_setup(void)
/* Pre-compute the values to stuff in the hardware registers. */
model->reg_setup(ctr);
- /* Configure the registers on all cpus. */
+ /* Configure the registers on all cpus. */
on_each_cpu(model->cpu_setup, NULL, 1);
- return 0;
+ return 0;
}
static int op_mips_create_files(struct super_block *sb, struct dentry *root)
@@ -78,6 +78,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
switch (current_cpu_type()) {
case CPU_5KC:
case CPU_M14KC:
+ case CPU_M14KEC:
case CPU_20KC:
case CPU_24K:
case CPU_25KF:
@@ -110,7 +111,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
ops->create_files = op_mips_create_files;
ops->setup = op_mips_setup;
- //ops->shutdown = op_mips_shutdown;
+ //ops->shutdown = op_mips_shutdown;
ops->start = op_mips_start;
ops->stop = op_mips_stop;
ops->cpu_type = lmodel->cpu_type;
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
index 60d3ea60211..b249ec0bebb 100644
--- a/arch/mips/oprofile/op_model_loongson2.c
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -18,13 +18,13 @@
#define LOONGSON2_CPU_TYPE "mips/loongson2"
-#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
+#define LOONGSON2_PERFCNT_OVERFLOW (1ULL << 31)
#define LOONGSON2_PERFCTRL_EXL (1UL << 0)
-#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
-#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
-#define LOONGSON2_PERFCTRL_USER (1UL << 3)
-#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
+#define LOONGSON2_PERFCTRL_KERNEL (1UL << 1)
+#define LOONGSON2_PERFCTRL_SUPERVISOR (1UL << 2)
+#define LOONGSON2_PERFCTRL_USER (1UL << 3)
+#define LOONGSON2_PERFCTRL_ENABLE (1UL << 4)
#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
(((event) & 0x0f) << ((idx) ? 9 : 5))
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 78625463040..1fd361462c0 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -14,25 +14,25 @@
#include "op_impl.h"
-#define M_PERFCTL_EXL (1UL << 0)
-#define M_PERFCTL_KERNEL (1UL << 1)
-#define M_PERFCTL_SUPERVISOR (1UL << 2)
-#define M_PERFCTL_USER (1UL << 3)
-#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
+#define M_PERFCTL_EXL (1UL << 0)
+#define M_PERFCTL_KERNEL (1UL << 1)
+#define M_PERFCTL_SUPERVISOR (1UL << 2)
+#define M_PERFCTL_USER (1UL << 3)
+#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
-#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
-#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
-#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
-#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
-#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
-#define M_PERFCTL_WIDE (1UL << 30)
-#define M_PERFCTL_MORE (1UL << 31)
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
+#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
+#define M_PERFCTL_WIDE (1UL << 30)
+#define M_PERFCTL_MORE (1UL << 31)
-#define M_COUNTER_OVERFLOW (1UL << 31)
+#define M_COUNTER_OVERFLOW (1UL << 31)
/* Netlogic XLR specific, count events in all threads in a core */
-#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
+#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
static int (*save_perf_irq)(void);
@@ -143,7 +143,7 @@ static struct mipsxx_register_config {
unsigned int counter[4];
} reg;
-/* Compute all of the registers in preparation for enabling profiling. */
+/* Compute all of the registers in preparation for enabling profiling. */
static void mipsxx_reg_setup(struct op_counter_config *ctr)
{
@@ -159,7 +159,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
continue;
reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
- M_PERFCTL_INTERRUPT_ENABLE;
+ M_PERFCTL_INTERRUPT_ENABLE;
if (ctr[i].kernel)
reg.control[i] |= M_PERFCTL_KERNEL;
if (ctr[i].user)
@@ -172,7 +172,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
}
}
-/* Program all of the registers in preparation for enabling profiling. */
+/* Program all of the registers in preparation for enabling profiling. */
static void mipsxx_cpu_setup(void *args)
{
@@ -351,6 +351,10 @@ static int __init mipsxx_init(void)
op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
break;
+ case CPU_M14KEC:
+ op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
+ break;
+
case CPU_20KC:
op_model_mipsxx_ops.cpu_type = "mips/20K";
break;
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index ce995d3d944..2cb1d315d22 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -27,7 +27,6 @@ obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
#
obj-$(CONFIG_LASAT) += pci-lasat.o
obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o
-obj-$(CONFIG_SOC_PNX8550) += fixup-pnx8550.o ops-pnx8550.o
obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o
obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
obj-$(CONFIG_MIPS_MALTA) += fixup-malta.o
@@ -55,10 +54,10 @@ obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
obj-$(CONFIG_WR_PPMC) += fixup-wrppmc.o
obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += pci-octeon.o pcie-octeon.o
obj-$(CONFIG_CPU_XLR) += pci-xlr.o
obj-$(CONFIG_CPU_XLP) += pci-xlp.o
ifdef CONFIG_PCI_MSI
-obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
+obj-$(CONFIG_CPU_CAVIUM_OCTEON) += msi-octeon.o
endif
diff --git a/arch/mips/pci/fixup-cobalt.c b/arch/mips/pci/fixup-cobalt.c
index 9553b14002d..a138e8ee5cf 100644
--- a/arch/mips/pci/fixup-cobalt.c
+++ b/arch/mips/pci/fixup-cobalt.c
@@ -94,14 +94,14 @@ static void qube_raq_galileo_fixup(struct pci_dev *dev)
* --x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--x--
*
* On all machines prior to Q2, we had the STOP line disconnected
- * from Galileo to VIA on PCI. The new Galileo does not function
+ * from Galileo to VIA on PCI. The new Galileo does not function
* correctly unless we have it connected.
*
* Therefore we must set the disconnect/retry cycle values to
* something sensible when using the new Galileo.
*/
- printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
+ printk(KERN_INFO "Galileo: revision %u\n", dev->revision);
#if 0
if (dev->revision >= 0x10) {
@@ -149,30 +149,30 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
qube_raq_via_board_id_fixup);
static char irq_tab_qube1[] __initdata = {
- [COBALT_PCICONF_CPU] = 0,
- [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = QUBE1_ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
- [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
- [COBALT_PCICONF_ETH1] = 0
+ [COBALT_PCICONF_ETH1] = 0
};
static char irq_tab_cobalt[] __initdata = {
- [COBALT_PCICONF_CPU] = 0,
- [COBALT_PCICONF_ETH0] = ETH0_IRQ,
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = SCSI_IRQ,
- [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
- [COBALT_PCICONF_ETH1] = ETH1_IRQ
+ [COBALT_PCICONF_ETH1] = ETH1_IRQ
};
static char irq_tab_raq2[] __initdata = {
- [COBALT_PCICONF_CPU] = 0,
- [COBALT_PCICONF_ETH0] = ETH0_IRQ,
+ [COBALT_PCICONF_CPU] = 0,
+ [COBALT_PCICONF_ETH0] = ETH0_IRQ,
[COBALT_PCICONF_RAQSCSI] = RAQ2_SCSI_IRQ,
- [COBALT_PCICONF_VIA] = 0,
+ [COBALT_PCICONF_VIA] = 0,
[COBALT_PCICONF_PCISLOT] = PCISLOT_IRQ,
- [COBALT_PCICONF_ETH1] = ETH1_IRQ
+ [COBALT_PCICONF_ETH1] = ETH1_IRQ
};
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/mips/pci/fixup-emma2rh.c b/arch/mips/pci/fixup-emma2rh.c
index beaec32b02e..19caf775c20 100644
--- a/arch/mips/pci/fixup-emma2rh.c
+++ b/arch/mips/pci/fixup-emma2rh.c
@@ -42,7 +42,7 @@
*
*/
-#define MAX_SLOT_NUM 10
+#define MAX_SLOT_NUM 10
static unsigned char irq_map[][5] __initdata = {
[3] = {0, MARKEINS_PCI_IRQ_INTB, MARKEINS_PCI_IRQ_INTC,
MARKEINS_PCI_IRQ_INTD, 0,},
diff --git a/arch/mips/pci/fixup-fuloong2e.c b/arch/mips/pci/fixup-fuloong2e.c
index 63ab4a042cd..50da773faed 100644
--- a/arch/mips/pci/fixup-fuloong2e.c
+++ b/arch/mips/pci/fixup-fuloong2e.c
@@ -6,9 +6,9 @@
* Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
* Author: Fuxin Zhang, zhangfx@lemote.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/init.h>
@@ -152,7 +152,7 @@ static void loongson2e_686b_func1_fixup(struct pci_dev *pdev)
/* disable read prefetch/write post buffers */
pci_write_config_byte(pdev, 0x41, 0x02);
- /* use 3/4 as fifo thresh hold */
+ /* use 3/4 as fifo threshold */
pci_write_config_byte(pdev, 0x43, 0x0a);
pci_write_config_byte(pdev, 0x44, 0x00);
diff --git a/arch/mips/pci/fixup-ip32.c b/arch/mips/pci/fixup-ip32.c
index 190fffd08d3..133685e215e 100644
--- a/arch/mips/pci/fixup-ip32.c
+++ b/arch/mips/pci/fixup-ip32.c
@@ -22,13 +22,13 @@
#define INTC MACEPCI_SHARED1_IRQ
#define INTD MACEPCI_SHARED2_IRQ
static char irq_tab_mace[][5] __initdata = {
- /* Dummy INT#A INT#B INT#C INT#D */
- {0, 0, 0, 0, 0}, /* This is placeholder row - never used */
- {0, SCSI0, SCSI0, SCSI0, SCSI0},
- {0, SCSI1, SCSI1, SCSI1, SCSI1},
- {0, INTA0, INTB, INTC, INTD},
- {0, INTA1, INTC, INTD, INTB},
- {0, INTA2, INTD, INTB, INTC},
+ /* Dummy INT#A INT#B INT#C INT#D */
+ {0, 0, 0, 0, 0}, /* This is placeholder row - never used */
+ {0, SCSI0, SCSI0, SCSI0, SCSI0},
+ {0, SCSI1, SCSI1, SCSI1, SCSI1},
+ {0, INTA0, INTB, INTC, INTD},
+ {0, INTA1, INTC, INTD, INTB},
+ {0, INTA2, INTD, INTB, INTC},
};
diff --git a/arch/mips/pci/fixup-lemote2f.c b/arch/mips/pci/fixup-lemote2f.c
index 519daaebb5d..95ab9a1bd01 100644
--- a/arch/mips/pci/fixup-lemote2f.c
+++ b/arch/mips/pci/fixup-lemote2f.c
@@ -31,7 +31,7 @@
/* all the pci device has the PCIA pin, check the datasheet. */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
+ /* INTA INTB INTC INTD */
{0, 0, 0, 0, 0}, /* 11: Unused */
{0, 0, 0, 0, 0}, /* 12: Unused */
{0, 0, 0, 0, 0}, /* 13: Unused */
@@ -69,15 +69,15 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
case 2:
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_IDE_INTR);
- return CS5536_IDE_INTR; /* for IDE */
+ return CS5536_IDE_INTR; /* for IDE */
case 3:
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_ACC_INTR);
- return CS5536_ACC_INTR; /* for AUDIO */
- case 4: /* for OHCI */
- case 5: /* for EHCI */
- case 6: /* for UDC */
- case 7: /* for OTG */
+ return CS5536_ACC_INTR; /* for AUDIO */
+ case 4: /* for OHCI */
+ case 5: /* for EHCI */
+ case 6: /* for UDC */
+ case 7: /* for OTG */
pci_write_config_byte(dev, PCI_INTERRUPT_LINE,
CS5536_USB_INTR);
return CS5536_USB_INTR;
diff --git a/arch/mips/pci/fixup-malta.c b/arch/mips/pci/fixup-malta.c
index 75d03f6be3b..07ada7f8441 100644
--- a/arch/mips/pci/fixup-malta.c
+++ b/arch/mips/pci/fixup-malta.c
@@ -12,7 +12,7 @@ static char pci_irq[5] = {
};
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
+ /* INTA INTB INTC INTD */
{0, 0, 0, 0, 0 }, /* 0: GT64120 PCI bridge */
{0, 0, 0, 0, 0 }, /* 1: Unused */
{0, 0, 0, 0, 0 }, /* 2: Unused */
@@ -23,7 +23,7 @@ static char irq_tab[][5] __initdata = {
{0, 0, 0, 0, 0 }, /* 7: Unused */
{0, 0, 0, 0, 0 }, /* 8: Unused */
{0, 0, 0, 0, 0 }, /* 9: Unused */
- {0, 0, 0, 0, PCID }, /* 10: PIIX4 USB */
+ {0, 0, 0, 0, PCID }, /* 10: PIIX4 USB */
{0, PCIB, 0, 0, 0 }, /* 11: AMD 79C973 Ethernet */
{0, PCIC, 0, 0, 0 }, /* 12: Crystal 4281 Sound */
{0, 0, 0, 0, 0 }, /* 13: Unused */
@@ -31,9 +31,9 @@ static char irq_tab[][5] __initdata = {
{0, 0, 0, 0, 0 }, /* 15: Unused */
{0, 0, 0, 0, 0 }, /* 16: Unused */
{0, 0, 0, 0, 0 }, /* 17: Bonito/SOC-it PCI Bridge*/
- {0, PCIA, PCIB, PCIC, PCID }, /* 18: PCI Slot 1 */
- {0, PCIB, PCIC, PCID, PCIA }, /* 19: PCI Slot 2 */
- {0, PCIC, PCID, PCIA, PCIB }, /* 20: PCI Slot 3 */
+ {0, PCIA, PCIB, PCIC, PCID }, /* 18: PCI Slot 1 */
+ {0, PCIB, PCIC, PCID, PCIA }, /* 19: PCI Slot 2 */
+ {0, PCIC, PCID, PCIA, PCIB }, /* 20: PCI Slot 3 */
{0, PCID, PCIA, PCIB, PCIC } /* 21: PCI Slot 4 */
};
@@ -54,8 +54,8 @@ static void malta_piix_func0_fixup(struct pci_dev *pdev)
{
unsigned char reg_val;
static int piixirqmap[16] = { /* PIIX PIRQC[A:D] irq mappings */
- 0, 0, 0, 3,
- 4, 5, 6, 7,
+ 0, 0, 0, 3,
+ 4, 5, 6, 7,
0, 9, 10, 11,
12, 0, 14, 15
};
diff --git a/arch/mips/pci/fixup-pmcmsp.c b/arch/mips/pci/fixup-pmcmsp.c
index 65735b1b766..fab405c21c2 100644
--- a/arch/mips/pci/fixup-pmcmsp.c
+++ b/arch/mips/pci/fixup-pmcmsp.c
@@ -48,117 +48,117 @@
#if defined(CONFIG_PMC_MSP7120_GW)
/* Garibaldi Board IRQ wiring to PCI slots */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
- {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
- {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
- {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
- {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
- {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
- {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
- {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
- {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
- {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
- {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
- {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
- {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
- {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
- {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
- {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
- {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
- {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
- {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
- {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
- {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */
- {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
- {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */
- {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
+ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
+ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
+ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
+ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
+ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
+ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
+ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
+ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
+ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
+ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
+ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
+ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
+ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
+ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
+ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
+ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
+ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
+ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
+ {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */
+ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
+ {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */
+ {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */
};
#elif defined(CONFIG_PMC_MSP7120_EVAL)
/* MSP7120 Eval Board IRQ wiring to PCI slots */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
- {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
- {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
- {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
- {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
- {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
- {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
- {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
- {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */
- {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */
- {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */
- {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */
- {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
- {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
- {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
- {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
- {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
- {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
- {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
- {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
- {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
- {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
- {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
- {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
+ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
+ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
+ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
+ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
+ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
+ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
+ {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */
+ {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */
+ {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */
+ {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */
+ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
+ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
+ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
+ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
+ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
+ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
+ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
+ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
+ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
+ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
+ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
+ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
};
#else
/* Unknown board -- don't assign any IRQs */
static char irq_tab[][5] __initdata = {
- /* INTA INTB INTC INTD */
- {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
- {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
- {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
- {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
- {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
- {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
- {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
- {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
- {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
- {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
- {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
- {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
- {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
- {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
- {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
- {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
- {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
- {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
- {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
- {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
- {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
- {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
- {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
- {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
+ /* INTA INTB INTC INTD */
+ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */
+ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */
+ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */
+ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */
+ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */
+ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */
+ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */
+ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */
+ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */
+ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */
+ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */
+ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */
+ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */
+ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */
+ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */
+ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */
+ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */
+ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */
+ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */
+ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */
+ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */
+ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */
+ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */
+ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */
};
#endif
@@ -168,14 +168,14 @@ static char irq_tab[][5] __initdata = {
* _________________________________________________________________________
*
* DESCRIPTION: Perform platform specific device initialization at
- * pci_enable_device() time.
- * None are needed for the MSP7120 PCI Controller.
+ * pci_enable_device() time.
+ * None are needed for the MSP7120 PCI Controller.
*
- * INPUTS: dev - structure describing the PCI device
+ * INPUTS: dev - structure describing the PCI device
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: PCIBIOS_SUCCESSFUL
+ * RETURNS: PCIBIOS_SUCCESSFUL
*
****************************************************************************/
int pcibios_plat_dev_init(struct pci_dev *dev)
@@ -190,16 +190,16 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
*
* DESCRIPTION: Perform board supplied PCI IRQ mapping routine.
*
- * INPUTS: dev - unused
- * slot - PCI slot. Identified by which bit of the AD[] bus
- * drives the IDSEL line. AD[10] is 0, AD[31] is
- * slot 21.
- * pin - numbered using the scheme of the PCI_INTERRUPT_PIN
- * field of the config header.
+ * INPUTS: dev - unused
+ * slot - PCI slot. Identified by which bit of the AD[] bus
+ * drives the IDSEL line. AD[10] is 0, AD[31] is
+ * slot 21.
+ * pin - numbered using the scheme of the PCI_INTERRUPT_PIN
+ * field of the config header.
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: IRQ number
+ * RETURNS: IRQ number
*
****************************************************************************/
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
diff --git a/arch/mips/pci/fixup-pnx8550.c b/arch/mips/pci/fixup-pnx8550.c
deleted file mode 100644
index 96857ac63bf..00000000000
--- a/arch/mips/pci/fixup-pnx8550.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Philips PNX8550 pci fixups.
- *
- * Copyright 2005 Embedded Alley Solutions, Inc
- * source@embeddealley.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/mach-pnx8550/pci.h>
-#include <asm/mach-pnx8550/int.h>
-
-
-#undef DEBUG
-#ifdef DEBUG
-#define DBG(x...) printk(x)
-#else
-#define DBG(x...)
-#endif
-
-extern char pnx8550_irq_tab[][5];
-
-void __init pcibios_fixup_resources(struct pci_dev *dev)
-{
- /* no need to fixup IO resources */
-}
-
-void __init pcibios_fixup(void)
-{
- /* nothing to do here */
-}
-
-int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- return pnx8550_irq_tab[slot][pin];
-}
-
-/* Do platform specific device initialization at pci_enable_device() time */
-int pcibios_plat_dev_init(struct pci_dev *dev)
-{
- return 0;
-}
diff --git a/arch/mips/pci/fixup-sni.c b/arch/mips/pci/fixup-sni.c
index 5c8a79bb266..f67ebeeb420 100644
--- a/arch/mips/pci/fixup-sni.c
+++ b/arch/mips/pci/fixup-sni.c
@@ -41,12 +41,12 @@
* Logic CL-GD5434 VGA is device 3.
*/
static char irq_tab_rm200[8][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* EISA bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
- { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
+ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
{ INTB, INTB, INTB, INTB, INTB }, /* VGA */
- { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
@@ -58,20 +58,20 @@ static char irq_tab_rm200[8][5] __initdata = {
* The VGA card is optional for RM300 systems.
*/
static char irq_tab_rm300d[8][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* EISA bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* EISA bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 1 */
{ INTB, INTB, INTB, INTB, INTB }, /* VGA */
- { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
{ 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
{ 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
};
static char irq_tab_rm300e[5][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */
{ 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */
{ 0, INTD, INTA, INTB, INTC }, /* Slot 1 */
@@ -97,30 +97,30 @@ static char irq_tab_rm300e[5][5] __initdata = {
#define INTD PCIT_IRQ_INTD
static char irq_tab_pcit[13][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
{ SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */
{ SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */
- { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
- { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */
- { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
- { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
- { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
- { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
+ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */
+ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */
+ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */
+ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */
+ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */
+ { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */
};
static char irq_tab_pcit_cplus[13][5] __initdata = {
- /* INTA INTB INTC INTD */
- { 0, 0, 0, 0, 0 }, /* HOST bridge */
- { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */
- { 0, 0, 0, 0, 0 }, /* PCI-EISA */
- { 0, 0, 0, 0, 0 }, /* Unused */
- { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
- { 0, INTB, INTC, INTD, INTA }, /* fixup */
+ /* INTA INTB INTC INTD */
+ { 0, 0, 0, 0, 0 }, /* HOST bridge */
+ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */
+ { 0, 0, 0, 0, 0 }, /* PCI-EISA */
+ { 0, 0, 0, 0, 0 }, /* Unused */
+ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */
+ { 0, INTB, INTC, INTD, INTA }, /* fixup */
};
static inline int is_rm300_revd(void)
@@ -146,18 +146,18 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
}
return irq_tab_pcit_cplus[slot][pin];
case SNI_BRD_PCI_TOWER:
- return irq_tab_pcit[slot][pin];
+ return irq_tab_pcit[slot][pin];
case SNI_BRD_PCI_MTOWER:
- if (is_rm300_revd())
- return irq_tab_rm300d[slot][pin];
- /* fall through */
+ if (is_rm300_revd())
+ return irq_tab_rm300d[slot][pin];
+ /* fall through */
case SNI_BRD_PCI_DESKTOP:
- return irq_tab_rm200[slot][pin];
+ return irq_tab_rm200[slot][pin];
case SNI_BRD_PCI_MTOWER_CPLUS:
- return irq_tab_rm300e[slot][pin];
+ return irq_tab_rm300e[slot][pin];
}
return 0;
diff --git a/arch/mips/pci/fixup-tb0219.c b/arch/mips/pci/fixup-tb0219.c
index 8084b17d440..d0b0083fbd2 100644
--- a/arch/mips/pci/fixup-tb0219.c
+++ b/arch/mips/pci/fixup-tb0219.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0219.c, The TANBAC TB0219 specific PCI fixups.
*
- * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
+ * Copyright (C) 2003 Megasolution Inc. <matsu@megasolution.jp>
* Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/arch/mips/pci/fixup-tb0287.c b/arch/mips/pci/fixup-tb0287.c
index 2fe29db4372..8c5039ed75d 100644
--- a/arch/mips/pci/fixup-tb0287.c
+++ b/arch/mips/pci/fixup-tb0287.c
@@ -1,7 +1,7 @@
/*
* fixup-tb0287.c, The TANBAC TB0287 specific PCI fixups.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pci/fixup-wrppmc.c b/arch/mips/pci/fixup-wrppmc.c
index 3d277549d5d..29737edd121 100644
--- a/arch/mips/pci/fixup-wrppmc.c
+++ b/arch/mips/pci/fixup-wrppmc.c
@@ -20,7 +20,7 @@
#define PCI_SLOT_MAXNR 32 /* Each PCI bus has 32 physical slots */
static char pci_irq_tab[PCI_SLOT_MAXNR][5] __initdata = {
- /* 0 INTA INTB INTC INTD */
+ /* 0 INTA INTB INTC INTD */
[0] = {0, 0, 0, 0, 0}, /* Slot 0: GT64120 PCI bridge */
[6] = {0, WRPPMC_PCI_INTA_IRQ, 0, 0, 0},
};
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index 4a156629e95..6144bb337e4 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -174,8 +174,8 @@ static int bcm63xx_pci_write(struct pci_bus *bus, unsigned int devfn,
}
struct pci_ops bcm63xx_pci_ops = {
- .read = bcm63xx_pci_read,
- .write = bcm63xx_pci_write
+ .read = bcm63xx_pci_read,
+ .write = bcm63xx_pci_write
};
#ifdef CONFIG_CARDBUS
@@ -370,8 +370,8 @@ static int bcm63xx_cb_read(struct pci_bus *bus, unsigned int devfn,
return fake_cb_bridge_read(where, size, val);
}
- /* a configuration cycle for the device behind the cardbus
- * bridge is actually done as a type 0 cycle on the primary
+ /* a configuration cycle for the device behind the cardbus
+ * bridge is actually done as a type 0 cycle on the primary
* bus. This means that only one device can be on the cardbus
* bus */
if (fake_cb_bridge_regs.bus_assigned &&
@@ -403,8 +403,8 @@ static int bcm63xx_cb_write(struct pci_bus *bus, unsigned int devfn,
}
struct pci_ops bcm63xx_cb_ops = {
- .read = bcm63xx_cb_read,
- .write = bcm63xx_cb_write,
+ .read = bcm63xx_cb_read,
+ .write = bcm63xx_cb_write,
};
/*
@@ -523,6 +523,6 @@ static int bcm63xx_pcie_write(struct pci_bus *bus, unsigned int devfn,
struct pci_ops bcm63xx_pcie_ops = {
- .read = bcm63xx_pcie_read,
- .write = bcm63xx_pcie_write
+ .read = bcm63xx_pcie_read,
+ .write = bcm63xx_pcie_write
};
diff --git a/arch/mips/pci/ops-bonito64.c b/arch/mips/pci/ops-bonito64.c
index 1b3e03f20c5..830352e3aed 100644
--- a/arch/mips/pci/ops-bonito64.c
+++ b/arch/mips/pci/ops-bonito64.c
@@ -26,7 +26,7 @@
#include <asm/mips-boards/bonito64.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define CFG_SPACE_REG(offset) (void *)CKSEG1ADDR(_pcictrl_bonito_pcicfg + (offset))
@@ -137,7 +137,7 @@ static int bonito64_pcibios_write(struct pci_bus *bus, unsigned int devfn,
data = val;
else {
if (bonito64_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
- where, &data))
+ where, &data))
return -1;
if (size == 1)
diff --git a/arch/mips/pci/ops-gt64xxx_pci0.c b/arch/mips/pci/ops-gt64xxx_pci0.c
index 3d896c5f413..effcbda9f52 100644
--- a/arch/mips/pci/ops-gt64xxx_pci0.c
+++ b/arch/mips/pci/ops-gt64xxx_pci0.c
@@ -23,21 +23,21 @@
#include <asm/gt64120.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF 0
-#define PCI_CFG_TYPE0_FUNC_SHF 8
+#define PCI_CFG_TYPE0_REG_SHF 0
+#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF 0
-#define PCI_CFG_TYPE1_FUNC_SHF 8
-#define PCI_CFG_TYPE1_DEV_SHF 11
-#define PCI_CFG_TYPE1_BUS_SHF 16
+#define PCI_CFG_TYPE1_REG_SHF 0
+#define PCI_CFG_TYPE1_FUNC_SHF 8
+#define PCI_CFG_TYPE1_DEV_SHF 11
+#define PCI_CFG_TYPE1_BUS_SHF 16
static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
@@ -50,7 +50,7 @@ static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
/* Clear cause register bits */
GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
- GT_INTRCAUSE_TARABORT0_BIT));
+ GT_INTRCAUSE_TARABORT0_BIT));
/* Setup address */
GT_WRITE(GT_PCI0_CFGADDR_OFS,
@@ -87,7 +87,7 @@ static int gt64xxx_pci0_pcibios_config_access(unsigned char access_type,
/* Clear bits */
GT_WRITE(GT_INTRCAUSE_OFS, ~(GT_INTRCAUSE_MASABORT0_BIT |
- GT_INTRCAUSE_TARABORT0_BIT));
+ GT_INTRCAUSE_TARABORT0_BIT));
return -1;
}
@@ -106,7 +106,7 @@ static int gt64xxx_pci0_pcibios_read(struct pci_bus *bus, unsigned int devfn,
u32 data = 0;
if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
- where, &data))
+ where, &data))
return PCIBIOS_DEVICE_NOT_FOUND;
if (size == 1)
@@ -128,7 +128,7 @@ static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
data = val;
else {
if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_READ, bus,
- devfn, where, &data))
+ devfn, where, &data))
return PCIBIOS_DEVICE_NOT_FOUND;
if (size == 1)
@@ -140,7 +140,7 @@ static int gt64xxx_pci0_pcibios_write(struct pci_bus *bus, unsigned int devfn,
}
if (gt64xxx_pci0_pcibios_config_access(PCI_ACCESS_WRITE, bus, devfn,
- where, &data))
+ where, &data))
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
index 1f2afb55cc7..16e7c2526d7 100644
--- a/arch/mips/pci/ops-lantiq.c
+++ b/arch/mips/pci/ops-lantiq.c
@@ -23,7 +23,7 @@
#define LTQ_PCI_CFG_DEVNUM_SHF 11
#define LTQ_PCI_CFG_FUNNUM_SHF 8
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
static int ltq_pci_config_access(unsigned char access_type, struct pci_bus *bus,
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index afd221122d2..98254afa028 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -24,7 +24,7 @@
#include <cs5536/cs5536.h>
#endif
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define CFG_SPACE_REG(offset) \
diff --git a/arch/mips/pci/ops-msc.c b/arch/mips/pci/ops-msc.c
index 5d9fbb0f467..92a8543361b 100644
--- a/arch/mips/pci/ops-msc.c
+++ b/arch/mips/pci/ops-msc.c
@@ -1,8 +1,8 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
- * Maciej W. Rozycki <macro@mips.com>
+ * Maciej W. Rozycki <macro@mips.com>
* Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
*
* This program is free software; you can distribute it and/or modify it
@@ -28,21 +28,21 @@
#include <asm/mips-boards/msc01_pci.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/*
* PCI configuration cycle AD bus definition
*/
/* Type 0 */
-#define PCI_CFG_TYPE0_REG_SHF 0
-#define PCI_CFG_TYPE0_FUNC_SHF 8
+#define PCI_CFG_TYPE0_REG_SHF 0
+#define PCI_CFG_TYPE0_FUNC_SHF 8
/* Type 1 */
-#define PCI_CFG_TYPE1_REG_SHF 0
-#define PCI_CFG_TYPE1_FUNC_SHF 8
-#define PCI_CFG_TYPE1_DEV_SHF 11
-#define PCI_CFG_TYPE1_BUS_SHF 16
+#define PCI_CFG_TYPE1_REG_SHF 0
+#define PCI_CFG_TYPE1_FUNC_SHF 8
+#define PCI_CFG_TYPE1_DEV_SHF 11
+#define PCI_CFG_TYPE1_BUS_SHF 16
static int msc_pcibios_config_access(unsigned char access_type,
struct pci_bus *bus, unsigned int devfn, int where, u32 * data)
@@ -97,7 +97,7 @@ static int msc_pcibios_read(struct pci_bus *bus, unsigned int devfn,
return PCIBIOS_BAD_REGISTER_NUMBER;
if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
- &data))
+ &data))
return -1;
if (size == 1)
@@ -124,7 +124,7 @@ static int msc_pcibios_write(struct pci_bus *bus, unsigned int devfn,
data = val;
else {
if (msc_pcibios_config_access(PCI_ACCESS_READ, bus, devfn,
- where, &data))
+ where, &data))
return -1;
if (size == 1)
diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c
index 99929cf8841..499e35c3eb3 100644
--- a/arch/mips/pci/ops-nile4.c
+++ b/arch/mips/pci/ops-nile4.c
@@ -6,7 +6,7 @@
#include <asm/lasat/lasat.h>
#include <asm/nile4.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
#define LO(reg) (reg / 4)
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 389bf669d56..d0b6f8399b0 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -9,8 +9,8 @@
* Much of the code is derived from the original DDB5074 port by
* Geert Uytterhoeven <geert@sonycom.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -57,18 +57,18 @@ static void pci_proc_init(void);
* _________________________________________________________________________
*
* DESCRIPTION: Prints the count of how many times each PCI
- * interrupt has asserted. Can be invoked by the
- * /proc filesystem.
+ * interrupt has asserted. Can be invoked by the
+ * /proc filesystem.
*
- * INPUTS: page - part of STDOUT calculation
- * off - part of STDOUT calculation
- * count - part of STDOUT calculation
- * data - unused
+ * INPUTS: page - part of STDOUT calculation
+ * off - part of STDOUT calculation
+ * count - part of STDOUT calculation
+ * data - unused
*
- * OUTPUTS: start - new start location
- * eof - end of file pointer
+ * OUTPUTS: start - new start location
+ * eof - end of file pointer
*
- * RETURNS: len - STDOUT length
+ * RETURNS: len - STDOUT length
*
****************************************************************************/
static int read_msp_pci_counts(char *page, char **start, off_t off,
@@ -106,21 +106,21 @@ static int read_msp_pci_counts(char *page, char **start, off_t off,
* _________________________________________________________________________
*
* DESCRIPTION: Generates a configuration write cycle for debug purposes.
- * The IDSEL line asserted and location and data written are
- * immaterial. Just want to be able to prove that a
- * configuration write can be correctly generated on the
- * PCI bus. Intent is that this function by invocable from
- * the /proc filesystem.
+ * The IDSEL line asserted and location and data written are
+ * immaterial. Just want to be able to prove that a
+ * configuration write can be correctly generated on the
+ * PCI bus. Intent is that this function be invocable from
+ * the /proc filesystem.
*
- * INPUTS: page - part of STDOUT calculation
- * off - part of STDOUT calculation
- * count - part of STDOUT calculation
- * data - unused
+ * INPUTS: page - part of STDOUT calculation
+ * off - part of STDOUT calculation
+ * count - part of STDOUT calculation
+ * data - unused
*
- * OUTPUTS: start - new start location
- * eof - end of file pointer
+ * OUTPUTS: start - new start location
+ * eof - end of file pointer
*
- * RETURNS: len - STDOUT length
+ * RETURNS: len - STDOUT length
*
****************************************************************************/
static int gen_pci_cfg_wr(char *page, char **start, off_t off,
@@ -190,11 +190,11 @@ static int gen_pci_cfg_wr(char *page, char **start, off_t off,
*
* DESCRIPTION: Create entries in the /proc filesystem for debug access.
*
- * INPUTS: none
+ * INPUTS: none
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: none
+ * RETURNS: none
*
****************************************************************************/
static void pci_proc_init(void)
@@ -214,44 +214,44 @@ static DEFINE_SPINLOCK(bpci_lock);
* _________________________________________________________________________
*
* DESCRIPTION: Defines the address range that pciauto() will use to
- * assign to the I/O BARs of PCI devices.
- *
- * Use the start and end addresses of the MSP7120 PCI Host
- * Controller I/O space, in the form that they appear on the
- * PCI bus AFTER MSP7120 has performed address translation.
- *
- * For I/O accesses, MSP7120 ignores OATRAN and maps I/O
- * accesses into the bottom 0xFFF region of address space,
- * so that is the range to put into the pci_io_resource
- * struct.
- *
- * In MSP4200, the start address was 0x04 instead of the
- * expected 0x00. Will just assume there was a good reason
- * for this!
- *
- * NOTES: Linux, by default, will assign I/O space to the lowest
- * region of address space. Since MSP7120 and Linux,
- * by default, have no offset in between how they map, the
- * io_offset element of pci_controller struct should be set
- * to zero.
+ * assign to the I/O BARs of PCI devices.
+ *
+ * Use the start and end addresses of the MSP7120 PCI Host
+ * Controller I/O space, in the form that they appear on the
+ * PCI bus AFTER MSP7120 has performed address translation.
+ *
+ * For I/O accesses, MSP7120 ignores OATRAN and maps I/O
+ * accesses into the bottom 0xFFF region of address space,
+ * so that is the range to put into the pci_io_resource
+ * struct.
+ *
+ * In MSP4200, the start address was 0x04 instead of the
+ * expected 0x00. Will just assume there was a good reason
+ * for this!
+ *
+ * NOTES: Linux, by default, will assign I/O space to the lowest
+ * region of address space. Since MSP7120 and Linux,
+ * by default, have no offset in between how they map, the
+ * io_offset element of pci_controller struct should be set
+ * to zero.
* ELEMENTS:
- * name - String used for a meaningful name.
+ * name - String used for a meaningful name.
*
- * start - Start address of MSP7120's I/O space, as MSP7120 presents
- * the address on the PCI bus.
+ * start - Start address of MSP7120's I/O space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * end - End address of MSP7120's I/O space, as MSP7120 presents
- * the address on the PCI bus.
+ * end - End address of MSP7120's I/O space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * flags - Attributes indicating the type of resource. In this case,
- * indicate I/O space.
+ * flags - Attributes indicating the type of resource. In this case,
+ * indicate I/O space.
*
****************************************************************************/
static struct resource pci_io_resource = {
.name = "pci IO space",
.start = 0x04,
.end = 0x0FFF,
- .flags = IORESOURCE_IO /* I/O space */
+ .flags = IORESOURCE_IO /* I/O space */
};
/*****************************************************************************
@@ -260,26 +260,26 @@ static struct resource pci_io_resource = {
* _________________________________________________________________________
*
* DESCRIPTION: Defines the address range that pciauto() will use to
- * assign to the memory BARs of PCI devices.
+ * assign to the memory BARs of PCI devices.
*
- * The .start and .end values are dependent upon how address
- * translation is performed by the OATRAN regiser.
+ * The .start and .end values are dependent upon how address
+ * translation is performed by the OATRAN register.
*
- * The values to use for .start and .end are the values
- * in the form they appear on the PCI bus AFTER MSP7120 has
- * performed OATRAN address translation.
+ * The values to use for .start and .end are the values
+ * in the form they appear on the PCI bus AFTER MSP7120 has
+ * performed OATRAN address translation.
*
* ELEMENTS:
- * name - String used for a meaningful name.
+ * name - String used for a meaningful name.
*
- * start - Start address of MSP7120's memory space, as MSP7120 presents
- * the address on the PCI bus.
+ * start - Start address of MSP7120's memory space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * end - End address of MSP7120's memory space, as MSP7120 presents
- * the address on the PCI bus.
+ * end - End address of MSP7120's memory space, as MSP7120 presents
+ * the address on the PCI bus.
*
- * flags - Attributes indicating the type of resource. In this case,
- * indicate memory space.
+ * flags - Attributes indicating the type of resource. In this case,
+ * indicate memory space.
*
****************************************************************************/
static struct resource pci_mem_resource = {
@@ -295,17 +295,17 @@ static struct resource pci_mem_resource = {
* _________________________________________________________________________
*
* DESCRIPTION: PCI status interrupt handler. Updates the count of how
- * many times each status bit has been set, then clears
- * the status bits. If the appropriate macros are defined,
- * these counts can be viewed via the /proc filesystem.
+ * many times each status bit has been set, then clears
+ * the status bits. If the appropriate macros are defined,
+ * these counts can be viewed via the /proc filesystem.
*
- * INPUTS: irq - unused
- * dev_id - unused
- * pt_regs - unused
+ * INPUTS: irq - unused
+ * dev_id - unused
+ * pt_regs - unused
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
*
****************************************************************************/
static irqreturn_t bpci_interrupt(int irq, void *dev_id)
@@ -335,41 +335,41 @@ static irqreturn_t bpci_interrupt(int irq, void *dev_id)
* _________________________________________________________________________
*
* DESCRIPTION: Performs a PCI configuration access (rd or wr), then
- * checks that the access succeeded by querying MSP7120's
- * PCI status bits.
+ * checks that the access succeeded by querying MSP7120's
+ * PCI status bits.
*
* INPUTS:
- * access_type - kind of PCI configuration cycle to perform
- * (read or write). Legal values are
- * PCI_ACCESS_WRITE and PCI_ACCESS_READ.
- *
- * bus - pointer to the bus number of the device to
- * be targeted for the configuration cycle.
- * The only element of the pci_bus structure
- * used is bus->number. This argument determines
- * if the configuration access will be Type 0 or
- * Type 1. Since MSP7120 assumes itself to be the
- * PCI Host, any non-zero bus->number generates
- * a Type 1 access.
- *
- * devfn - this is an 8-bit field. The lower three bits
- * specify the function number of the device to
- * be targeted for the configuration cycle, with
- * all three-bit combinations being legal. The
- * upper five bits specify the device number,
- * with legal values being 10 to 31.
- *
- * where - address within the Configuration Header
- * space to access.
- *
- * data - for write accesses, contains the data to
- * write.
+ * access_type - kind of PCI configuration cycle to perform
+ * (read or write). Legal values are
+ * PCI_ACCESS_WRITE and PCI_ACCESS_READ.
+ *
+ * bus - pointer to the bus number of the device to
+ * be targeted for the configuration cycle.
+ * The only element of the pci_bus structure
+ * used is bus->number. This argument determines
+ * if the configuration access will be Type 0 or
+ * Type 1. Since MSP7120 assumes itself to be the
+ * PCI Host, any non-zero bus->number generates
+ * a Type 1 access.
+ *
+ * devfn - this is an 8-bit field. The lower three bits
+ * specify the function number of the device to
+ * be targeted for the configuration cycle, with
+ * all three-bit combinations being legal. The
+ * upper five bits specify the device number,
+ * with legal values being 10 to 31.
+ *
+ * where - address within the Configuration Header
+ * space to access.
+ *
+ * data - for write accesses, contains the data to
+ * write.
*
* OUTPUTS:
- * data - for read accesses, contains the value read.
+ * data - for read accesses, contains the value read.
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - access failure
*
****************************************************************************/
int msp_pcibios_config_access(unsigned char access_type,
@@ -429,7 +429,7 @@ int msp_pcibios_config_access(unsigned char access_type,
* for this Block Copy, called Block Copy 0 Fault (BC0F) and
* Block Copy 1 Fault (BC1F). MSP4200 and MSP7120 don't have this
* dedicated Block Copy block, so these two interrupts are now
- * marked reserved. In case the Block Copy is resurrected in a
+ * marked reserved. In case the Block Copy is resurrected in a
* future design, maintain the code that treats these two interrupts
* specially.
*
@@ -439,7 +439,7 @@ int msp_pcibios_config_access(unsigned char access_type,
preg->if_status = ~(BPCI_IFSTATUS_BC0F | BPCI_IFSTATUS_BC1F);
/* Setup address that is to appear on PCI bus */
- preg->config_addr = BPCI_CFGADDR_ENABLE |
+ preg->config_addr = BPCI_CFGADDR_ENABLE |
(bus_num << BPCI_CFGADDR_BUSNUM_SHF) |
(dev_fn << BPCI_CFGADDR_FUNCTNUM_SHF) |
(where & 0xFC);
@@ -494,21 +494,21 @@ int msp_pcibios_config_access(unsigned char access_type,
* _________________________________________________________________________
*
* DESCRIPTION: Read a byte from PCI configuration address space.
- * Since the hardware can't address 8 bit chunks
- * directly, read a 32-bit chunk, then mask off extraneous
- * bits.
+ * Since the hardware can't address 8 bit chunks
+ * directly, read a 32-bit chunk, then mask off extraneous
+ * bits.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the read is destined for.
- * devfn - device/function combination that the read is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
*
- * OUTPUTS val - read data
+ * OUTPUTS val - read data
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - read access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - read access failure
*
****************************************************************************/
static int
@@ -541,22 +541,22 @@ msp_pcibios_read_config_byte(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Read a word (16 bits) from PCI configuration address space.
- * Since the hardware can't address 16 bit chunks
- * directly, read a 32-bit chunk, then mask off extraneous
- * bits.
+ * Since the hardware can't address 16 bit chunks
+ * directly, read a 32-bit chunk, then mask off extraneous
+ * bits.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the read is destined for.
- * devfn - device/function combination that the read is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
*
- * OUTPUTS val - read data
+ * OUTPUTS val - read data
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - read access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - read access failure
*
****************************************************************************/
static int
@@ -600,20 +600,20 @@ msp_pcibios_read_config_word(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Read a double word (32 bits) from PCI configuration
- * address space.
+ * address space.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the read is destined for.
- * devfn - device/function combination that the read is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
*
- * OUTPUTS val - read data
+ * OUTPUTS val - read data
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - read access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - read access failure
*
****************************************************************************/
static int
@@ -652,21 +652,21 @@ msp_pcibios_read_config_dword(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Write a byte to PCI configuration address space.
- * Since the hardware can't address 8 bit chunks
- * directly, a read-modify-write is performed.
+ * Since the hardware can't address 8 bit chunks
+ * directly, a read-modify-write is performed.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * val - value to write
*
- * OUTPUTS none
+ * OUTPUTS none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - write access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - write access failure
*
****************************************************************************/
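For writes the same lane arithmetic runs in reverse: read the surrounding 32-bit word, splice in the new byte, write the word back. An illustrative sketch of that read-modify-write; cfg_read32() and cfg_write32() are hypothetical 32-bit accessors, not functions from this driver:

/* Sketch of the read-modify-write described above. */
static void cfg_rmw_byte(unsigned long addr, int where, u8 val)
{
	int shift = 8 * (where & 3);
	u32 data = cfg_read32(addr);		/* fetch the full dword   */

	data &= ~(0xffu << shift);		/* clear the target lane  */
	data |= (u32)val << shift;		/* splice in the new byte */
	cfg_write32(addr, data);		/* write the dword back   */
}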
static int
@@ -700,22 +700,22 @@ msp_pcibios_write_config_byte(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Write a word (16-bits) to PCI configuration address space.
- * Since the hardware can't address 16 bit chunks
- * directly, a read-modify-write is performed.
+ * Since the hardware can't address 16 bit chunks
+ * directly, a read-modify-write is performed.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * val - value to write
*
- * OUTPUTS none
+ * OUTPUTS none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - write access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - write access failure
*
****************************************************************************/
static int
@@ -753,21 +753,21 @@ msp_pcibios_write_config_word(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Write a double word (32-bits) to PCI configuration address
- * space.
+ * space.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * val - value to write
*
- * OUTPUTS none
+ * OUTPUTS none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
- * -1 - write access failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * PCIBIOS_BAD_REGISTER_NUMBER - bad register address
+ * -1 - write access failure
*
****************************************************************************/
static int
@@ -794,22 +794,22 @@ msp_pcibios_write_config_dword(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Interface the PCI configuration read request with
- * the appropriate function, based on how many bytes
- * the read request is.
+ * the appropriate function, based on how many bytes
+ * the read request is.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * size - in units of bytes, should be 1, 2, or 4.
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the read is destined for.
+ * devfn - device/function combination that the read is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * size - in units of bytes, should be 1, 2, or 4.
*
- * OUTPUTS val - value read, with any extraneous bytes masked
- * to zero.
+ * OUTPUTS val - value read, with any extraneous bytes masked
+ * to zero.
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - failure
*
****************************************************************************/
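The generic entry point above simply fans out on the access size and narrows the result. A compact sketch of that dispatch; read_byte()/read_word()/read_dword() are placeholders for the per-width helpers documented earlier, not the exact names in this file:

/* Sketch only: size-based dispatch for a pci_ops ->read hook. */
static int sketch_read_config(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	u8 b;
	u16 w;
	int rc;

	switch (size) {
	case 1:
		rc = read_byte(bus, devfn, where, &b);
		*val = b;
		return rc;
	case 2:
		rc = read_word(bus, devfn, where, &w);
		*val = w;
		return rc;
	default:
		return read_dword(bus, devfn, where, val);
	}
}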
int
@@ -845,22 +845,22 @@ msp_pcibios_read_config(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: Interface the PCI configuration write request with
- * the appropriate function, based on how many bytes
- * the read request is.
+ * the appropriate function, based on how many bytes
+ * the write request is.
*
- * INPUTS bus - structure containing attributes for the PCI bus
- * that the write is destined for.
- * devfn - device/function combination that the write is
- * destined for.
- * where - register within the Configuration Header space
- * to access.
- * size - in units of bytes, should be 1, 2, or 4.
- * val - value to write
+ * INPUTS bus - structure containing attributes for the PCI bus
+ * that the write is destined for.
+ * devfn - device/function combination that the write is
+ * destined for.
+ * where - register within the Configuration Header space
+ * to access.
+ * size - in units of bytes, should be 1, 2, or 4.
+ * val - value to write
*
- * OUTPUTS: none
+ * OUTPUTS: none
*
- * RETURNS: PCIBIOS_SUCCESSFUL - success
- * -1 - failure
+ * RETURNS: PCIBIOS_SUCCESSFUL - success
+ * -1 - failure
*
****************************************************************************/
int
@@ -897,11 +897,11 @@ msp_pcibios_write_config(struct pci_bus *bus,
* _________________________________________________________________________
*
* DESCRIPTION: structure to abstract the hardware specific PCI
- * configuration accesses.
+ * configuration accesses.
*
* ELEMENTS:
- * read - function for Linux to generate PCI Configuration reads.
- * write - function for Linux to generate PCI Configuration writes.
+ * read - function for Linux to generate PCI Configuration reads.
+ * write - function for Linux to generate PCI Configuration writes.
*
****************************************************************************/
struct pci_ops msp_pci_ops = {
@@ -917,27 +917,27 @@ struct pci_ops msp_pci_ops = {
* Describes the attributes of the MSP7120 PCI Host Controller
*
* ELEMENTS:
- * pci_ops - abstracts the hardware specific PCI configuration
- * accesses.
+ * pci_ops - abstracts the hardware specific PCI configuration
+ * accesses.
*
* mem_resource - address range pciauto() uses to assign to PCI device
- * memory BARs.
+ * memory BARs.
*
* mem_offset - offset between how MSP7120 outbound PCI memory
- * transaction addresses appear on the PCI bus and how Linux
- * wants to configure memory BARs of the PCI devices.
- * MSP7120 does nothing funky, so just set to zero.
+ * transaction addresses appear on the PCI bus and how Linux
+ * wants to configure memory BARs of the PCI devices.
+ * MSP7120 does nothing funky, so just set to zero.
*
* io_resource - address range pciauto() uses to assign to PCI device
- * I/O BARs.
+ * I/O BARs.
*
- * io_offset - offset between how MSP7120 outbound PCI I/O
- * transaction addresses appear on the PCI bus and how
- * Linux defaults to configure I/O BARs of the PCI devices.
- * MSP7120 maps outbound I/O accesses into the bottom
- * bottom 4K of PCI address space (and ignores OATRAN).
- * Since the Linux default is to configure I/O BARs to the
- * bottom 4K, no special offset is needed. Just set to zero.
+ * io_offset - offset between how MSP7120 outbound PCI I/O
+ * transaction addresses appear on the PCI bus and how
+ * Linux defaults to configure I/O BARs of the PCI devices.
+ * MSP7120 maps outbound I/O accesses into the
+ * bottom 4K of PCI address space (and ignores OATRAN).
+ * Since the Linux default is to configure I/O BARs to the
+ * bottom 4K, no special offset is needed. Just set to zero.
*
****************************************************************************/
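Tying the pieces together, a host controller like this one only has to describe its windows and offsets and hand the PCI core a struct pci_controller. A hedged sketch of that shape follows; the address ranges are placeholders, not MSP7120 values, and only msp_pci_ops is taken from the code above:

/* Sketch: the shape of a MIPS pci_controller registration. */
static struct resource sketch_mem_res = {
	.name	= "PCI memory space",
	.start	= 0x10000000,
	.end	= 0x17ffffff,
	.flags	= IORESOURCE_MEM,
};

static struct resource sketch_io_res = {
	.name	= "PCI IO space",
	.start	= 0x00000000,
	.end	= 0x00000fff,
	.flags	= IORESOURCE_IO,
};

static struct pci_controller sketch_controller = {
	.pci_ops	= &msp_pci_ops,
	.mem_resource	= &sketch_mem_res,
	.mem_offset	= 0,	/* bus addresses match BAR programming */
	.io_resource	= &sketch_io_res,
	.io_offset	= 0,	/* I/O already lands in the bottom 4K  */
};

Calling register_pci_controller(&sketch_controller) would then hand the bus over to the PCI core, which is the point of the init routine described below.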
static struct pci_controller msp_pci_controller = {
@@ -955,7 +955,7 @@ static struct pci_controller msp_pci_controller = {
* _________________________________________________________________________
*
* DESCRIPTION: Initialize the PCI Host Controller and register it with
- * Linux so Linux can seize control of the PCI bus.
+ * Linux so Linux can seize control of the PCI bus.
*
****************************************************************************/
void __init msp_pci_init(void)
@@ -979,7 +979,7 @@ void __init msp_pci_init(void)
*(unsigned long *)QFLUSH_REG_1 = 3;
/* Configure PCI Host Controller. */
- preg->if_status = ~0; /* Clear cause register bits */
+ preg->if_status = ~0; /* Clear cause register bits */
preg->config_addr = 0; /* Clear config access */
preg->oatran = MSP_PCI_OATRAN; /* PCI outbound addr translation */
preg->if_mask = 0xF8BF87C0; /* Enable all PCI status interrupts */
diff --git a/arch/mips/pci/ops-pnx8550.c b/arch/mips/pci/ops-pnx8550.c
deleted file mode 100644
index 1e6213fa7bd..00000000000
--- a/arch/mips/pci/ops-pnx8550.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *
- * 2.6 port, Embedded Alley Solutions, Inc
- *
- * Based on:
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/delay.h>
-
-#include <asm/mach-pnx8550/pci.h>
-#include <asm/mach-pnx8550/glb.h>
-
-static inline void clear_status(void)
-{
- unsigned long pci_stat;
-
- pci_stat = inl(PCI_BASE | PCI_GPPM_STATUS);
- outl(pci_stat, PCI_BASE | PCI_GPPM_ICLR);
-}
-
-static inline unsigned int
-calc_cfg_addr(struct pci_bus *bus, unsigned int devfn, int where)
-{
- unsigned int addr;
-
- addr = ((bus->number > 0) ? (((bus->number & 0xff) << PCI_CFG_BUS_SHIFT) | 1) : 0);
- addr |= ((devfn & 0xff) << PCI_CFG_FUNC_SHIFT) | (where & 0xfc);
-
- return addr;
-}
-
-static int
-config_access(unsigned int pci_cmd, struct pci_bus *bus, unsigned int devfn, int where, unsigned int pci_mode, unsigned int *val)
-{
- unsigned int flags;
- unsigned long loops = 0;
- unsigned long ioaddr = calc_cfg_addr(bus, devfn, where);
-
- local_irq_save(flags);
- /*Clear pending interrupt status */
- if (inl(PCI_BASE | PCI_GPPM_STATUS)) {
- clear_status();
- while (!(inl(PCI_BASE | PCI_GPPM_STATUS) == 0)) ;
- }
-
- outl(ioaddr, PCI_BASE | PCI_GPPM_ADDR);
-
- if ((pci_cmd == PCI_CMD_IOW) || (pci_cmd == PCI_CMD_CONFIG_WRITE))
- outl(*val, PCI_BASE | PCI_GPPM_WDAT);
-
- outl(INIT_PCI_CYCLE | pci_cmd | (pci_mode & PCI_BYTE_ENABLE_MASK),
- PCI_BASE | PCI_GPPM_CTRL);
-
- loops =
- ((loops_per_jiffy *
- PCI_IO_JIFFIES_TIMEOUT) >> (PCI_IO_JIFFIES_SHIFT));
- while (1) {
- if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_DONE) {
- if ((pci_cmd == PCI_CMD_IOR) ||
- (pci_cmd == PCI_CMD_CONFIG_READ))
- *val = inl(PCI_BASE | PCI_GPPM_RDAT);
- clear_status();
- local_irq_restore(flags);
- return PCIBIOS_SUCCESSFUL;
- } else if (inl(PCI_BASE | PCI_GPPM_STATUS) & GPPM_R_MABORT) {
- break;
- }
-
- loops--;
- if (loops == 0) {
- printk("%s : Arbiter Locked.\n", __func__);
- }
- }
-
- clear_status();
- if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_IOW)) {
- printk("%s timeout (GPPM_CTRL=%X) ioaddr %lX pci_cmd %X\n",
- __func__, inl(PCI_BASE | PCI_GPPM_CTRL), ioaddr,
- pci_cmd);
- }
-
- if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_CONFIG_READ))
- *val = 0xffffffff;
- local_irq_restore(flags);
- return PCIBIOS_DEVICE_NOT_FOUND;
-}
-
-/*
- * We can't address 8 and 16 bit words directly. Instead we have to
- * read/write a 32bit word and mask/modify the data we actually want.
- */
-static int
-read_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 * val)
-{
- unsigned int data = 0;
- int err;
-
- if (bus == NULL)
- return -1;
-
- err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(1 << (where & 3)), &data);
- switch (where & 0x03) {
- case 0:
- *val = (unsigned char)(data & 0x000000ff);
- break;
- case 1:
- *val = (unsigned char)((data & 0x0000ff00) >> 8);
- break;
- case 2:
- *val = (unsigned char)((data & 0x00ff0000) >> 16);
- break;
- case 3:
- *val = (unsigned char)((data & 0xff000000) >> 24);
- break;
- }
-
- return err;
-}
-
-static int
-read_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 * val)
-{
- unsigned int data = 0;
- int err;
-
- if (bus == NULL)
- return -1;
-
- if (where & 0x01)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, ~(3 << (where & 3)), &data);
- switch (where & 0x02) {
- case 0:
- *val = (unsigned short)(data & 0x0000ffff);
- break;
- case 2:
- *val = (unsigned short)((data & 0xffff0000) >> 16);
- break;
- }
-
- return err;
-}
-
-static int
-read_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 * val)
-{
- int err;
- if (bus == NULL)
- return -1;
-
- if (where & 0x03)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = config_access(PCI_CMD_CONFIG_READ, bus, devfn, where, 0, val);
-
- return err;
-}
-
-static int
-write_config_byte(struct pci_bus *bus, unsigned int devfn, int where, u8 val)
-{
- unsigned int data = (unsigned int)val;
- int err;
-
- if (bus == NULL)
- return -1;
-
- switch (where & 0x03) {
- case 1:
- data = (data << 8);
- break;
- case 2:
- data = (data << 16);
- break;
- case 3:
- data = (data << 24);
- break;
- default:
- break;
- }
-
- err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(1 << (where & 3)), &data);
-
- return err;
-}
-
-static int
-write_config_word(struct pci_bus *bus, unsigned int devfn, int where, u16 val)
-{
- unsigned int data = (unsigned int)val;
- int err;
-
- if (bus == NULL)
- return -1;
-
- if (where & 0x01)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- switch (where & 0x02) {
- case 2:
- data = (data << 16);
- break;
- default:
- break;
- }
- err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, ~(3 << (where & 3)), &data);
-
- return err;
-}
-
-static int
-write_config_dword(struct pci_bus *bus, unsigned int devfn, int where, u32 val)
-{
- int err;
- if (bus == NULL)
- return -1;
-
- if (where & 0x03)
- return PCIBIOS_BAD_REGISTER_NUMBER;
-
- err = config_access(PCI_CMD_CONFIG_WRITE, bus, devfn, where, 0, &val);
-
- return err;
-}
-
-static int config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 * val)
-{
- switch (size) {
- case 1: {
- u8 _val;
- int rc = read_config_byte(bus, devfn, where, &_val);
- *val = _val;
- return rc;
- }
- case 2: {
- u16 _val;
- int rc = read_config_word(bus, devfn, where, &_val);
- *val = _val;
- return rc;
- }
- default:
- return read_config_dword(bus, devfn, where, val);
- }
-}
-
-static int config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
-{
- switch (size) {
- case 1:
- return write_config_byte(bus, devfn, where, (u8) val);
- case 2:
- return write_config_word(bus, devfn, where, (u16) val);
- default:
- return write_config_dword(bus, devfn, where, val);
- }
-}
-
-struct pci_ops pnx8550_pci_ops = {
- config_read,
- config_write
-};
diff --git a/arch/mips/pci/ops-rc32434.c b/arch/mips/pci/ops-rc32434.c
index d1f8fa210ca..7c7182e2350 100644
--- a/arch/mips/pci/ops-rc32434.c
+++ b/arch/mips/pci/ops-rc32434.c
@@ -35,7 +35,7 @@
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
diff --git a/arch/mips/pci/ops-sni.c b/arch/mips/pci/ops-sni.c
index 97ed25b92ed..35daa7fe657 100644
--- a/arch/mips/pci/ops-sni.c
+++ b/arch/mips/pci/ops-sni.c
@@ -14,8 +14,8 @@
/*
* It seems that on the RM200 only lower 3 bits of the 5 bit PCI device
- * address are decoded. We therefore manually have to reject attempts at
- * reading outside this range. Being on the paranoid side we only do this
+ * address are decoded. We therefore manually have to reject attempts at
+ * reading outside this range. Being on the paranoid side we only do this
* test for bus 0 and hope forwarding and decoding work properly for any
* subordinated busses.
*
@@ -31,8 +31,8 @@ static int set_config_address(unsigned int busno, unsigned int devfn, int reg)
*(volatile u32 *)PCIMT_CONFIG_ADDRESS =
((busno & 0xff) << 16) |
- ((devfn & 0xff) << 8) |
- (reg & 0xfc);
+ ((devfn & 0xff) << 8) |
+ (reg & 0xfc);
return PCIBIOS_SUCCESSFUL;
}
diff --git a/arch/mips/pci/ops-tx4927.c b/arch/mips/pci/ops-tx4927.c
index 0d69d6f4ea4..3d5df514d02 100644
--- a/arch/mips/pci/ops-tx4927.c
+++ b/arch/mips/pci/ops-tx4927.c
@@ -2,16 +2,16 @@
* Define the pci_ops for the PCIC on Toshiba TX4927, TX4938, etc.
*
* Based on linux/arch/mips/pci/ops-tx4938.c,
- * linux/arch/mips/pci/fixup-rbtx4938.c,
- * linux/arch/mips/txx9/rbtx4938/setup.c,
+ * linux/arch/mips/pci/fixup-rbtx4938.c,
+ * linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* 2003-2005 (c) MontaVista Software, Inc.
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/pci/ops-vr41xx.c b/arch/mips/pci/ops-vr41xx.c
index 28962a7c660..551128c7d92 100644
--- a/arch/mips/pci/ops-vr41xx.c
+++ b/arch/mips/pci/ops-vr41xx.c
@@ -33,7 +33,7 @@
#define PCICONFAREG (void __iomem *)KSEG1ADDR(0x0f000c18)
static inline int set_pci_configuration_address(unsigned char number,
- unsigned int devfn, int where)
+ unsigned int devfn, int where)
{
if (number == 0) {
/*
@@ -59,7 +59,7 @@ static inline int set_pci_configuration_address(unsigned char number,
}
static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t *val)
+ int size, uint32_t *val)
{
uint32_t data;
@@ -87,7 +87,7 @@ static int pci_config_read(struct pci_bus *bus, unsigned int devfn, int where,
}
static int pci_config_write(struct pci_bus *bus, unsigned int devfn, int where,
- int size, uint32_t val)
+ int size, uint32_t val)
{
uint32_t data;
int shift;
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index c4ea6cc55f9..38a80c83fd6 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -29,7 +29,7 @@
#define PCI_ACCESS_WRITE 1
struct alchemy_pci_context {
- struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
+ struct pci_controller alchemy_pci_ctrl; /* leave as first member! */
void __iomem *regs; /* ctrl base */
/* tools for wired entry for config space access */
unsigned long last_elo0;
@@ -381,7 +381,7 @@ static int alchemy_pci_probe(struct platform_device *pdev)
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
- dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
+ dev_err(&pdev->dev, "no pcictl ctrl regs resource\n");
ret = -ENODEV;
goto out1;
}
@@ -482,7 +482,7 @@ out:
static struct platform_driver alchemy_pcictl_driver = {
.probe = alchemy_pci_probe,
- .driver = {
+ .driver = {
.name = "alchemy-pci",
.owner = THIS_MODULE,
},
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index 6eaa4f2d0e3..412ec025cf5 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -18,26 +18,11 @@
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79.h>
-#include <asm/mach-ath79/pci.h>
-
-#define AR71XX_PCI_MEM_BASE 0x10000000
-#define AR71XX_PCI_MEM_SIZE 0x07000000
-
-#define AR71XX_PCI_WIN0_OFFS 0x10000000
-#define AR71XX_PCI_WIN1_OFFS 0x11000000
-#define AR71XX_PCI_WIN2_OFFS 0x12000000
-#define AR71XX_PCI_WIN3_OFFS 0x13000000
-#define AR71XX_PCI_WIN4_OFFS 0x14000000
-#define AR71XX_PCI_WIN5_OFFS 0x15000000
-#define AR71XX_PCI_WIN6_OFFS 0x16000000
-#define AR71XX_PCI_WIN7_OFFS 0x07000000
-
-#define AR71XX_PCI_CFG_BASE \
- (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
-#define AR71XX_PCI_CFG_SIZE 0x100
#define AR71XX_PCI_REG_CRP_AD_CBE 0x00
#define AR71XX_PCI_REG_CRP_WRDATA 0x04
@@ -63,8 +48,15 @@
#define AR71XX_PCI_IRQ_COUNT 5
-static DEFINE_SPINLOCK(ar71xx_pci_lock);
-static void __iomem *ar71xx_pcicfg_base;
+struct ar71xx_pci_controller {
+ void __iomem *cfg_base;
+ spinlock_t lock;
+ int irq;
+ int irq_base;
+ struct pci_controller pci_ctrl;
+ struct resource io_res;
+ struct resource mem_res;
+};
/* Byte lane enable bits */
static const u8 ar71xx_pci_ble_table[4][4] = {
@@ -107,9 +99,18 @@ static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
return ret;
}
-static int ar71xx_pci_check_error(int quiet)
+static inline struct ar71xx_pci_controller *
+pci_bus_to_ar71xx_controller(struct pci_bus *bus)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct ar71xx_pci_controller, pci_ctrl);
+}
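The probe-time allocation plus container_of() lookup added above is the usual way to move from a global singleton to per-controller state: the pci_controller is embedded in the driver's private struct, bus->sysdata points at the embedded member, and container_of() walks back out to the containing struct. A generic sketch of the same pattern, with illustrative type and field names:

/* Sketch of the sysdata -> private-struct recovery used above. */
struct sketch_pci_controller {
	void __iomem *cfg_base;
	spinlock_t lock;
	struct pci_controller hose;	/* what the PCI core registers/sees */
};

static inline struct sketch_pci_controller *
bus_to_sketch(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return container_of(hose, struct sketch_pci_controller, hose);
}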
+
+static int ar71xx_pci_check_error(struct ar71xx_pci_controller *apc, int quiet)
+{
+ void __iomem *base = apc->cfg_base;
u32 pci_err;
u32 ahb_err;
@@ -144,9 +145,10 @@ static int ar71xx_pci_check_error(int quiet)
return !!(ahb_err | pci_err);
}
-static inline void ar71xx_pci_local_write(int where, int size, u32 value)
+static inline void ar71xx_pci_local_write(struct ar71xx_pci_controller *apc,
+ int where, int size, u32 value)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ void __iomem *base = apc->cfg_base;
u32 ad_cbe;
value = value << (8 * (where & 3));
@@ -162,7 +164,8 @@ static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 cmd)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
u32 addr;
addr = ar71xx_pci_bus_addr(bus, devfn, where);
@@ -171,13 +174,14 @@ static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
__raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
base + AR71XX_PCI_REG_CFG_CBE);
- return ar71xx_pci_check_error(1);
+ return ar71xx_pci_check_error(apc, 1);
}
static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *value)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
unsigned long flags;
u32 data;
int err;
@@ -186,7 +190,7 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
ret = PCIBIOS_SUCCESSFUL;
data = ~0;
- spin_lock_irqsave(&ar71xx_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
AR71XX_PCI_CFG_CMD_READ);
@@ -195,7 +199,7 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
else
data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
- spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
*value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
@@ -205,7 +209,8 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 value)
{
- void __iomem *base = ar71xx_pcicfg_base;
+ struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
+ void __iomem *base = apc->cfg_base;
unsigned long flags;
int err;
int ret;
@@ -213,7 +218,7 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
value = value << (8 * (where & 3));
ret = PCIBIOS_SUCCESSFUL;
- spin_lock_irqsave(&ar71xx_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
AR71XX_PCI_CFG_CMD_WRITE);
@@ -222,7 +227,7 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
else
__raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
- spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return ret;
}
@@ -232,45 +237,28 @@ static struct pci_ops ar71xx_pci_ops = {
.write = ar71xx_pci_write_config,
};
-static struct resource ar71xx_pci_io_resource = {
- .name = "PCI IO space",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IO,
-};
-
-static struct resource ar71xx_pci_mem_resource = {
- .name = "PCI memory space",
- .start = AR71XX_PCI_MEM_BASE,
- .end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM
-};
-
-static struct pci_controller ar71xx_pci_controller = {
- .pci_ops = &ar71xx_pci_ops,
- .mem_resource = &ar71xx_pci_mem_resource,
- .io_resource = &ar71xx_pci_io_resource,
-};
-
static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
{
+ struct ar71xx_pci_controller *apc;
void __iomem *base = ath79_reset_base;
u32 pending;
+ apc = irq_get_handler_data(irq);
+
pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
__raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
if (pending & AR71XX_PCI_INT_DEV0)
- generic_handle_irq(ATH79_PCI_IRQ(0));
+ generic_handle_irq(apc->irq_base + 0);
else if (pending & AR71XX_PCI_INT_DEV1)
- generic_handle_irq(ATH79_PCI_IRQ(1));
+ generic_handle_irq(apc->irq_base + 1);
else if (pending & AR71XX_PCI_INT_DEV2)
- generic_handle_irq(ATH79_PCI_IRQ(2));
+ generic_handle_irq(apc->irq_base + 2);
else if (pending & AR71XX_PCI_INT_CORE)
- generic_handle_irq(ATH79_PCI_IRQ(4));
+ generic_handle_irq(apc->irq_base + 4);
else
spurious_interrupt();
@@ -278,10 +266,14 @@ static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
static void ar71xx_pci_irq_unmask(struct irq_data *d)
{
- unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ struct ar71xx_pci_controller *apc;
+ unsigned int irq;
void __iomem *base = ath79_reset_base;
u32 t;
+ apc = irq_data_get_irq_chip_data(d);
+ irq = d->irq - apc->irq_base;
+
t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
__raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
@@ -291,10 +283,14 @@ static void ar71xx_pci_irq_unmask(struct irq_data *d)
static void ar71xx_pci_irq_mask(struct irq_data *d)
{
- unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
+ struct ar71xx_pci_controller *apc;
+ unsigned int irq;
void __iomem *base = ath79_reset_base;
u32 t;
+ apc = irq_data_get_irq_chip_data(d);
+ irq = d->irq - apc->irq_base;
+
t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
__raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
@@ -309,7 +305,7 @@ static struct irq_chip ar71xx_pci_irq_chip = {
.irq_mask_ack = ar71xx_pci_irq_mask,
};
-static __init void ar71xx_pci_irq_init(void)
+static void ar71xx_pci_irq_init(struct ar71xx_pci_controller *apc)
{
void __iomem *base = ath79_reset_base;
int i;
@@ -319,15 +315,19 @@ static __init void ar71xx_pci_irq_init(void)
BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
- for (i = ATH79_PCI_IRQ_BASE;
- i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
+ apc->irq_base = ATH79_PCI_IRQ_BASE;
+ for (i = apc->irq_base;
+ i < apc->irq_base + AR71XX_PCI_IRQ_COUNT; i++) {
irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
handle_level_irq);
+ irq_set_chip_data(i, apc);
+ }
- irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
+ irq_set_handler_data(apc->irq, apc);
+ irq_set_chained_handler(apc->irq, ar71xx_pci_irq_handler);
}
-static __init void ar71xx_pci_reset(void)
+static void ar71xx_pci_reset(void)
{
void __iomem *ddr_base = ath79_ddr_base;
@@ -349,27 +349,83 @@ static __init void ar71xx_pci_reset(void)
mdelay(100);
}
-__init int ar71xx_pcibios_init(void)
+static int ar71xx_pci_probe(struct platform_device *pdev)
{
+ struct ar71xx_pci_controller *apc;
+ struct resource *res;
u32 t;
- ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
- if (ar71xx_pcicfg_base == NULL)
+ apc = devm_kzalloc(&pdev->dev, sizeof(struct ar71xx_pci_controller),
+ GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
+
+ spin_lock_init(&apc->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!apc->cfg_base)
return -ENOMEM;
+ apc->irq = platform_get_irq(pdev, 0);
+ if (apc->irq < 0)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->io_res.parent = res;
+ apc->io_res.name = "PCI IO space";
+ apc->io_res.start = res->start;
+ apc->io_res.end = res->end;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.parent = res;
+ apc->mem_res.name = "PCI memory space";
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
ar71xx_pci_reset();
/* setup COMMAND register */
t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
| PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
- ar71xx_pci_local_write(PCI_COMMAND, 4, t);
+ ar71xx_pci_local_write(apc, PCI_COMMAND, 4, t);
/* clear bus errors */
- ar71xx_pci_check_error(1);
+ ar71xx_pci_check_error(apc, 1);
+
+ ar71xx_pci_irq_init(apc);
- ar71xx_pci_irq_init();
+ apc->pci_ctrl.pci_ops = &ar71xx_pci_ops;
+ apc->pci_ctrl.mem_resource = &apc->mem_res;
+ apc->pci_ctrl.io_resource = &apc->io_res;
- register_pci_controller(&ar71xx_pci_controller);
+ register_pci_controller(&apc->pci_ctrl);
return 0;
}
+
+static struct platform_driver ar71xx_pci_driver = {
+ .probe = ar71xx_pci_probe,
+ .driver = {
+ .name = "ar71xx-pci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar71xx_pci_init(void)
+{
+ return platform_driver_register(&ar71xx_pci_driver);
+}
+
+postcore_initcall(ar71xx_pci_init);
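With the driver converted to a platform driver, board code is expected to supply a matching "ar71xx-pci" platform device whose named resources ("cfg_base", "mem_base", "io_base") and IRQ the probe above looks up. A hedged sketch of such a registration; the addresses are derived from the #defines removed earlier in this file and the IRQ reuses ATH79_CPU_IRQ_IP2, which the old code hard-wired, but none of this board glue is part of the patch itself:

/* Sketch only: a board-level device matching ar71xx_pci_probe(). */
static struct resource sketch_ar71xx_pci_res[] = {
	{
		.name	= "cfg_base",
		.flags	= IORESOURCE_MEM,
		.start	= 0x17010000,
		.end	= 0x170100ff,
	}, {
		.name	= "mem_base",
		.flags	= IORESOURCE_MEM,
		.start	= 0x10000000,
		.end	= 0x16ffffff,
	}, {
		.name	= "io_base",
		.flags	= IORESOURCE_IO,
		.start	= 0,
		.end	= 0,
	}, {
		.flags	= IORESOURCE_IRQ,
		.start	= ATH79_CPU_IRQ_IP2,
		.end	= ATH79_CPU_IRQ_IP2,
	},
};

static struct platform_device sketch_ar71xx_pci_device = {
	.name		= "ar71xx-pci",
	.id		= -1,
	.resource	= sketch_ar71xx_pci_res,
	.num_resources	= ARRAY_SIZE(sketch_ar71xx_pci_res),
};

Board setup would then call platform_device_register(&sketch_ar71xx_pci_device) to trigger the probe.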
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index c11c75be2d7..8a0700d448f 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -9,19 +9,13 @@
* by the Free Software Foundation.
*/
+#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
-#include <asm/mach-ath79/pci.h>
-
-#define AR724X_PCI_CFG_BASE 0x14000000
-#define AR724X_PCI_CFG_SIZE 0x1000
-#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
-#define AR724X_PCI_CTRL_SIZE 0x100
-
-#define AR724X_PCI_MEM_BASE 0x10000000
-#define AR724X_PCI_MEM_SIZE 0x04000000
#define AR724X_PCI_REG_RESET 0x18
#define AR724X_PCI_REG_INT_STATUS 0x4c
@@ -35,38 +29,112 @@
#define AR7240_BAR0_WAR_VALUE 0xffff
-static DEFINE_SPINLOCK(ar724x_pci_lock);
-static void __iomem *ar724x_pci_devcfg_base;
-static void __iomem *ar724x_pci_ctrl_base;
+#define AR724X_PCI_CMD_INIT (PCI_COMMAND_MEMORY | \
+ PCI_COMMAND_MASTER | \
+ PCI_COMMAND_INVALIDATE | \
+ PCI_COMMAND_PARITY | \
+ PCI_COMMAND_SERR | \
+ PCI_COMMAND_FAST_BACK)
+
+struct ar724x_pci_controller {
+ void __iomem *devcfg_base;
+ void __iomem *ctrl_base;
+ void __iomem *crp_base;
+
+ int irq;
+ int irq_base;
+
+ bool link_up;
+ bool bar0_is_cached;
+ u32 bar0_value;
-static u32 ar724x_pci_bar0_value;
-static bool ar724x_pci_bar0_is_cached;
-static bool ar724x_pci_link_up;
+ spinlock_t lock;
-static inline bool ar724x_pci_check_link(void)
+ struct pci_controller pci_controller;
+ struct resource io_res;
+ struct resource mem_res;
+};
+
+static inline bool ar724x_pci_check_link(struct ar724x_pci_controller *apc)
{
u32 reset;
- reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET);
+ reset = __raw_readl(apc->ctrl_base + AR724X_PCI_REG_RESET);
return reset & AR724X_PCI_RESET_LINK_UP;
}
+static inline struct ar724x_pci_controller *
+pci_bus_to_ar724x_controller(struct pci_bus *bus)
+{
+ struct pci_controller *hose;
+
+ hose = (struct pci_controller *) bus->sysdata;
+ return container_of(hose, struct ar724x_pci_controller, pci_controller);
+}
+
+static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
+ int where, int size, u32 value)
+{
+ unsigned long flags;
+ void __iomem *base;
+ u32 data;
+ int s;
+
+ WARN_ON(where & (size - 1));
+
+ if (!apc->link_up)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ base = apc->crp_base;
+
+ spin_lock_irqsave(&apc->lock, flags);
+ data = __raw_readl(base + (where & ~3));
+
+ switch (size) {
+ case 1:
+ s = ((where & 3) * 8);
+ data &= ~(0xff << s);
+ data |= ((value & 0xff) << s);
+ break;
+ case 2:
+ s = ((where & 2) * 8);
+ data &= ~(0xffff << s);
+ data |= ((value & 0xffff) << s);
+ break;
+ case 4:
+ data = value;
+ break;
+ default:
+ spin_unlock_irqrestore(&apc->lock, flags);
+ return PCIBIOS_BAD_REGISTER_NUMBER;
+ }
+
+ __raw_writel(data, base + (where & ~3));
+ /* flush write */
+ __raw_readl(base + (where & ~3));
+ spin_unlock_irqrestore(&apc->lock, flags);
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t *value)
{
+ struct ar724x_pci_controller *apc;
unsigned long flags;
void __iomem *base;
u32 data;
- if (!ar724x_pci_link_up)
+ apc = pci_bus_to_ar724x_controller(bus);
+ if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn)
return PCIBIOS_DEVICE_NOT_FOUND;
- base = ar724x_pci_devcfg_base;
+ base = apc->devcfg_base;
- spin_lock_irqsave(&ar724x_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -85,17 +153,17 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
case 4:
break;
default:
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
- ar724x_pci_bar0_is_cached) {
+ apc->bar0_is_cached) {
/* use the cached value */
- *value = ar724x_pci_bar0_value;
+ *value = apc->bar0_value;
} else {
*value = data;
}
@@ -106,12 +174,14 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, uint32_t value)
{
+ struct ar724x_pci_controller *apc;
unsigned long flags;
void __iomem *base;
u32 data;
int s;
- if (!ar724x_pci_link_up)
+ apc = pci_bus_to_ar724x_controller(bus);
+ if (!apc->link_up)
return PCIBIOS_DEVICE_NOT_FOUND;
if (devfn)
@@ -129,18 +199,18 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
* BAR0 register in order to make the device memory
* accessible.
*/
- ar724x_pci_bar0_is_cached = true;
- ar724x_pci_bar0_value = value;
+ apc->bar0_is_cached = true;
+ apc->bar0_value = value;
value = AR7240_BAR0_WAR_VALUE;
} else {
- ar724x_pci_bar0_is_cached = false;
+ apc->bar0_is_cached = false;
}
}
- base = ar724x_pci_devcfg_base;
+ base = apc->devcfg_base;
- spin_lock_irqsave(&ar724x_pci_lock, flags);
+ spin_lock_irqsave(&apc->lock, flags);
data = __raw_readl(base + (where & ~3));
switch (size) {
@@ -158,7 +228,7 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
data = value;
break;
default:
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_BAD_REGISTER_NUMBER;
}
@@ -166,7 +236,7 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
__raw_writel(data, base + (where & ~3));
/* flush write */
__raw_readl(base + (where & ~3));
- spin_unlock_irqrestore(&ar724x_pci_lock, flags);
+ spin_unlock_irqrestore(&apc->lock, flags);
return PCIBIOS_SUCCESSFUL;
}
@@ -176,38 +246,20 @@ static struct pci_ops ar724x_pci_ops = {
.write = ar724x_pci_write,
};
-static struct resource ar724x_io_resource = {
- .name = "PCI IO space",
- .start = 0,
- .end = 0,
- .flags = IORESOURCE_IO,
-};
-
-static struct resource ar724x_mem_resource = {
- .name = "PCI memory space",
- .start = AR724X_PCI_MEM_BASE,
- .end = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
- .flags = IORESOURCE_MEM,
-};
-
-static struct pci_controller ar724x_pci_controller = {
- .pci_ops = &ar724x_pci_ops,
- .io_resource = &ar724x_io_resource,
- .mem_resource = &ar724x_mem_resource,
-};
-
static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
{
+ struct ar724x_pci_controller *apc;
void __iomem *base;
u32 pending;
- base = ar724x_pci_ctrl_base;
+ apc = irq_get_handler_data(irq);
+ base = apc->ctrl_base;
pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
__raw_readl(base + AR724X_PCI_REG_INT_MASK);
if (pending & AR724X_PCI_INT_DEV0)
- generic_handle_irq(ATH79_PCI_IRQ(0));
+ generic_handle_irq(apc->irq_base + 0);
else
spurious_interrupt();
@@ -215,13 +267,17 @@ static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
static void ar724x_pci_irq_unmask(struct irq_data *d)
{
+ struct ar724x_pci_controller *apc;
void __iomem *base;
+ int offset;
u32 t;
- base = ar724x_pci_ctrl_base;
+ apc = irq_data_get_irq_chip_data(d);
+ base = apc->ctrl_base;
+ offset = apc->irq_base - d->irq;
- switch (d->irq) {
- case ATH79_PCI_IRQ(0):
+ switch (offset) {
+ case 0:
t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
__raw_writel(t | AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_MASK);
@@ -232,13 +288,17 @@ static void ar724x_pci_irq_unmask(struct irq_data *d)
static void ar724x_pci_irq_mask(struct irq_data *d)
{
+ struct ar724x_pci_controller *apc;
void __iomem *base;
+ int offset;
u32 t;
- base = ar724x_pci_ctrl_base;
+ apc = irq_data_get_irq_chip_data(d);
+ base = apc->ctrl_base;
+ offset = apc->irq_base - d->irq;
- switch (d->irq) {
- case ATH79_PCI_IRQ(0):
+ switch (offset) {
+ case 0:
t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
__raw_writel(t & ~AR724X_PCI_INT_DEV0,
base + AR724X_PCI_REG_INT_MASK);
@@ -262,53 +322,123 @@ static struct irq_chip ar724x_pci_irq_chip = {
.irq_mask_ack = ar724x_pci_irq_mask,
};
-static void __init ar724x_pci_irq_init(int irq)
+static void ar724x_pci_irq_init(struct ar724x_pci_controller *apc,
+ int id)
{
void __iomem *base;
int i;
- base = ar724x_pci_ctrl_base;
+ base = apc->ctrl_base;
__raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
__raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
- BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
+ apc->irq_base = ATH79_PCI_IRQ_BASE + (id * AR724X_PCI_IRQ_COUNT);
- for (i = ATH79_PCI_IRQ_BASE;
- i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
+ for (i = apc->irq_base;
+ i < apc->irq_base + AR724X_PCI_IRQ_COUNT; i++) {
irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
handle_level_irq);
+ irq_set_chip_data(i, apc);
+ }
- irq_set_chained_handler(irq, ar724x_pci_irq_handler);
+ irq_set_handler_data(apc->irq, apc);
+ irq_set_chained_handler(apc->irq, ar724x_pci_irq_handler);
}
-int __init ar724x_pcibios_init(int irq)
+static int ar724x_pci_probe(struct platform_device *pdev)
{
- int ret;
+ struct ar724x_pci_controller *apc;
+ struct resource *res;
+ int id;
- ret = -ENOMEM;
+ id = pdev->id;
+ if (id == -1)
+ id = 0;
- ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
- AR724X_PCI_CFG_SIZE);
- if (ar724x_pci_devcfg_base == NULL)
- goto err;
+ apc = devm_kzalloc(&pdev->dev, sizeof(struct ar724x_pci_controller),
+ GFP_KERNEL);
+ if (!apc)
+ return -ENOMEM;
- ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
- AR724X_PCI_CTRL_SIZE);
- if (ar724x_pci_ctrl_base == NULL)
- goto err_unmap_devcfg;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl_base");
+ if (!res)
+ return -EINVAL;
- ar724x_pci_link_up = ar724x_pci_check_link();
- if (!ar724x_pci_link_up)
- pr_warn("ar724x: PCIe link is down\n");
+ apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (apc->ctrl_base == NULL)
+ return -EBUSY;
- ar724x_pci_irq_init(irq);
- register_pci_controller(&ar724x_pci_controller);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
+ if (!res)
+ return -EINVAL;
- return PCIBIOS_SUCCESSFUL;
+ apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!apc->devcfg_base)
+ return -EBUSY;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base");
+ if (!res)
+ return -EINVAL;
-err_unmap_devcfg:
- iounmap(ar724x_pci_devcfg_base);
-err:
- return ret;
+ apc->crp_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (apc->crp_base == NULL)
+ return -EBUSY;
+
+ apc->irq = platform_get_irq(pdev, 0);
+ if (apc->irq < 0)
+ return -EINVAL;
+
+ spin_lock_init(&apc->lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->io_res.parent = res;
+ apc->io_res.name = "PCI IO space";
+ apc->io_res.start = res->start;
+ apc->io_res.end = res->end;
+ apc->io_res.flags = IORESOURCE_IO;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem_base");
+ if (!res)
+ return -EINVAL;
+
+ apc->mem_res.parent = res;
+ apc->mem_res.name = "PCI memory space";
+ apc->mem_res.start = res->start;
+ apc->mem_res.end = res->end;
+ apc->mem_res.flags = IORESOURCE_MEM;
+
+ apc->pci_controller.pci_ops = &ar724x_pci_ops;
+ apc->pci_controller.io_resource = &apc->io_res;
+ apc->pci_controller.mem_resource = &apc->mem_res;
+
+ apc->link_up = ar724x_pci_check_link(apc);
+ if (!apc->link_up)
+ dev_warn(&pdev->dev, "PCIe link is down\n");
+
+ ar724x_pci_irq_init(apc, id);
+
+ ar724x_pci_local_write(apc, PCI_COMMAND, 4, AR724X_PCI_CMD_INIT);
+
+ register_pci_controller(&apc->pci_controller);
+
+ return 0;
}
+
+static struct platform_driver ar724x_pci_driver = {
+ .probe = ar724x_pci_probe,
+ .driver = {
+ .name = "ar724x-pci",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar724x_pci_init(void)
+{
+ return platform_driver_register(&ar724x_pci_driver);
+}
+
+postcore_initcall(ar724x_pci_init);
diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c
index 37b52dc3d27..e2e69e1e9fe 100644
--- a/arch/mips/pci/pci-bcm1480.c
+++ b/arch/mips/pci/pci-bcm1480.c
@@ -54,8 +54,8 @@
static void *cfg_space;
-#define PCI_BUS_ENABLED 1
-#define PCI_DEVICE_MODE 2
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
static int bcm1480_bus_status;
@@ -194,7 +194,7 @@ struct pci_controller bcm1480_controller = {
.pci_ops = &bcm1480_pci_ops,
.mem_resource = &bcm1480_mem_resource,
.io_resource = &bcm1480_io_resource,
- .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
+ .io_offset = A_BCM1480_PHYS_PCI_IO_MATCH_BYTES,
};
@@ -227,7 +227,7 @@ static int __init bcm1480_pcibios_init(void)
PCI_COMMAND));
if (!(cmdreg & PCI_COMMAND_MASTER)) {
printk
- ("PCI: Skipping PCI probe. Bus is not initialized.\n");
+ ("PCI: Skipping PCI probe. Bus is not initialized.\n");
iounmap(cfg_space);
return 1; /* XXX */
}
diff --git a/arch/mips/pci/pci-bcm1480ht.c b/arch/mips/pci/pci-bcm1480ht.c
index 50cc6e9e824..1263c5e7dbe 100644
--- a/arch/mips/pci/pci-bcm1480ht.c
+++ b/arch/mips/pci/pci-bcm1480ht.c
@@ -53,8 +53,8 @@
static void *ht_cfg_space;
-#define PCI_BUS_ENABLED 1
-#define PCI_DEVICE_MODE 2
+#define PCI_BUS_ENABLED 1
+#define PCI_DEVICE_MODE 2
static int bcm1480ht_bus_status;
@@ -191,7 +191,7 @@ struct pci_controller bcm1480ht_controller = {
.io_resource = &bcm1480ht_io_resource,
.index = 1,
.get_busno = bcm1480ht_pcibios_get_busno,
- .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
+ .io_offset = A_BCM1480_PHYS_HT_IO_MATCH_BYTES,
};
static int __init bcm1480ht_pcibios_init(void)
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
index c682468010c..76f16eaed0a 100644
--- a/arch/mips/pci/pci-bcm47xx.c
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -91,7 +91,7 @@ static int bcm47xx_pcibios_plat_dev_init_bcma(struct pci_dev *dev)
int pcibios_plat_dev_init(struct pci_dev *dev)
{
#ifdef CONFIG_BCM47XX_SSB
- if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
+ if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_SSB)
return bcm47xx_pcibios_plat_dev_init_ssb(dev);
else
#endif
diff --git a/arch/mips/pci/pci-bcm63xx.c b/arch/mips/pci/pci-bcm63xx.c
index ca179b6ff39..88e781c6b5b 100644
--- a/arch/mips/pci/pci-bcm63xx.c
+++ b/arch/mips/pci/pci-bcm63xx.c
@@ -25,21 +25,21 @@
int bcm63xx_pci_enabled;
static struct resource bcm_pci_mem_resource = {
- .name = "bcm63xx PCI memory space",
- .start = BCM_PCI_MEM_BASE_PA,
- .end = BCM_PCI_MEM_END_PA,
- .flags = IORESOURCE_MEM
+ .name = "bcm63xx PCI memory space",
+ .start = BCM_PCI_MEM_BASE_PA,
+ .end = BCM_PCI_MEM_END_PA,
+ .flags = IORESOURCE_MEM
};
static struct resource bcm_pci_io_resource = {
- .name = "bcm63xx PCI IO space",
- .start = BCM_PCI_IO_BASE_PA,
+ .name = "bcm63xx PCI IO space",
+ .start = BCM_PCI_IO_BASE_PA,
#ifdef CONFIG_CARDBUS
- .end = BCM_PCI_IO_HALF_PA,
+ .end = BCM_PCI_IO_HALF_PA,
#else
- .end = BCM_PCI_IO_END_PA,
+ .end = BCM_PCI_IO_END_PA,
#endif
- .flags = IORESOURCE_IO
+ .flags = IORESOURCE_IO
};
struct pci_controller bcm63xx_controller = {
@@ -55,17 +55,17 @@ struct pci_controller bcm63xx_controller = {
*/
#ifdef CONFIG_CARDBUS
static struct resource bcm_cb_mem_resource = {
- .name = "bcm63xx Cardbus memory space",
- .start = BCM_CB_MEM_BASE_PA,
- .end = BCM_CB_MEM_END_PA,
- .flags = IORESOURCE_MEM
+ .name = "bcm63xx Cardbus memory space",
+ .start = BCM_CB_MEM_BASE_PA,
+ .end = BCM_CB_MEM_END_PA,
+ .flags = IORESOURCE_MEM
};
static struct resource bcm_cb_io_resource = {
- .name = "bcm63xx Cardbus IO space",
- .start = BCM_PCI_IO_HALF_PA + 1,
- .end = BCM_PCI_IO_END_PA,
- .flags = IORESOURCE_IO
+ .name = "bcm63xx Cardbus IO space",
+ .start = BCM_PCI_IO_HALF_PA + 1,
+ .end = BCM_PCI_IO_END_PA,
+ .flags = IORESOURCE_IO
};
struct pci_controller bcm63xx_cb_controller = {
@@ -76,17 +76,17 @@ struct pci_controller bcm63xx_cb_controller = {
#endif
static struct resource bcm_pcie_mem_resource = {
- .name = "bcm63xx PCIe memory space",
- .start = BCM_PCIE_MEM_BASE_PA,
- .end = BCM_PCIE_MEM_END_PA,
- .flags = IORESOURCE_MEM,
+ .name = "bcm63xx PCIe memory space",
+ .start = BCM_PCIE_MEM_BASE_PA,
+ .end = BCM_PCIE_MEM_END_PA,
+ .flags = IORESOURCE_MEM,
};
static struct resource bcm_pcie_io_resource = {
- .name = "bcm63xx PCIe IO space",
- .start = 0,
- .end = 0,
- .flags = 0,
+ .name = "bcm63xx PCIe IO space",
+ .start = 0,
+ .end = 0,
+ .flags = 0,
};
struct pci_controller bcm63xx_pcie_controller = {
@@ -111,7 +111,7 @@ static void bcm63xx_int_cfg_writel(u32 val, u32 reg)
u32 tmp;
tmp = reg & MPI_PCICFGCTL_CFGADDR_MASK;
- tmp |= MPI_PCICFGCTL_WRITEEN_MASK;
+ tmp |= MPI_PCICFGCTL_WRITEEN_MASK;
bcm_mpi_writel(tmp, MPI_PCICFGCTL_REG);
bcm_mpi_writel(val, MPI_PCICFGDATA_REG);
}
@@ -211,7 +211,7 @@ static int __init bcm63xx_register_pci(void)
* first bytes to access it from CPU.
*
* this means that no io access from CPU should happen while
- * we do a configuration cycle, but there's no way we can add
+ * we do a configuration cycle, but there's no way we can add
* a spinlock for each io access, so this is currently kind of
* broken on SMP.
*/
@@ -244,9 +244,9 @@ static int __init bcm63xx_register_pci(void)
bcm_mpi_writel(0, MPI_L2PMEMREMAP2_REG);
#endif
- /* setup local bus to PCI access (IO memory), we have only 1
- * IO window for both PCI and cardbus, but it cannot handle
- * both at the same time, assume standard PCI for now, if
+ /* setup local bus to PCI access (IO memory), we have only 1
+ * IO window for both PCI and cardbus, but it cannot handle
+ * both at the same time, assume standard PCI for now, if
* cardbus card has IO zone, PCI fixup will change window to
* cardbus */
val = BCM_PCI_IO_BASE_PA & MPI_L2P_BASE_MASK;
@@ -284,7 +284,7 @@ static int __init bcm63xx_register_pci(void)
bcm_mpi_writel(0, MPI_SP1_RANGE_REG);
}
- /* change host bridge retry counter to infinite number of
+ /* change host bridge retry counter to infinite number of
* retry, needed for some broadcom wifi cards with Silicon
* Backplane bus where access to srom seems very slow */
val = bcm63xx_int_cfg_readl(BCMPCI_REG_TIMERS);
diff --git a/arch/mips/pci/pci-bcm63xx.h b/arch/mips/pci/pci-bcm63xx.h
index e6736d558ac..ffab4da7bd0 100644
--- a/arch/mips/pci/pci-bcm63xx.h
+++ b/arch/mips/pci/pci-bcm63xx.h
@@ -7,7 +7,7 @@
#include <bcm63xx_dev_pci.h>
/*
- * Cardbus shares the PCI bus, but has no IDSEL, so a special id is
+ * Cardbus shares the PCI bus, but has no IDSEL, so a special id is
* reserved for it. If you have a standard PCI device at this id, you
* need to change the following definition.
*/
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index 7f4f49b09b5..6eb65e44d9e 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -30,7 +30,7 @@
/*
* XXX: No kmalloc available when we do our crosstalk scan,
- * we should try to move it later in the boot process.
+ * we should try to move it later in the boot process.
*/
static struct bridge_controller bridges[MAX_PCI_BUSSES];
@@ -103,7 +103,7 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
* swap pio's to pci mem and io space (big windows)
*/
bridge->b_wid_control |= BRIDGE_CTRL_IO_SWAP |
- BRIDGE_CTRL_MEM_SWAP;
+ BRIDGE_CTRL_MEM_SWAP;
#ifdef CONFIG_PAGE_SIZE_4KB
bridge->b_wid_control &= ~BRIDGE_CTRL_PAGE_SIZE;
#else /* 16kB or larger */
@@ -123,7 +123,7 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
bridge->b_device[slot].reg |= BRIDGE_DEV_SWAP_DIR;
bc->pci_int[slot] = -1;
}
- bridge->b_wid_tflush; /* wait until Bridge PIO complete */
+ bridge->b_wid_tflush; /* wait until Bridge PIO complete */
bc->base = bridge;
@@ -184,7 +184,7 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
}
/*
- * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
+ * Device might live on a subordinate PCI bus. XXX Walk up the chain of buses
* to find the slot number in sense of the bridge device register.
* XXX This also means multiple devices might rely on conflicting bridge
* settings.
diff --git a/arch/mips/pci/pci-ip32.c b/arch/mips/pci/pci-ip32.c
index 532b561b444..b1e061f7fdc 100644
--- a/arch/mips/pci/pci-ip32.c
+++ b/arch/mips/pci/pci-ip32.c
@@ -18,9 +18,9 @@
/*
* Handle errors from the bridge. This includes master and target aborts,
- * various command and address errors, and the interrupt test. This gets
- * registered on the bridge error irq. It's conceivable that some of these
- * conditions warrant a panic. Anybody care to say which ones?
+ * various command and address errors, and the interrupt test. This gets
+ * registered on the bridge error irq. It's conceivable that some of these
+ * conditions warrant a panic. Anybody care to say which ones?
*/
static irqreturn_t macepci_error(int irq, void *dev)
{
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 910fb4c20b9..879077b0115 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -129,8 +129,16 @@ static int ltq_pci_startup(struct platform_device *pdev)
/* setup reset gpio used by pci */
reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
- if (gpio_is_valid(reset_gpio))
- devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
+ if (gpio_is_valid(reset_gpio)) {
+ int ret = devm_gpio_request(&pdev->dev,
+ reset_gpio, "pci-reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to request gpio %d\n", reset_gpio);
+ return ret;
+ }
+ gpio_direction_output(reset_gpio, 1);
+ }
/* enable auto-switching between PCI and EBU */
ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
diff --git a/arch/mips/pci/pci-lasat.c b/arch/mips/pci/pci-lasat.c
index a98e543a514..40d2797d2bc 100644
--- a/arch/mips/pci/pci-lasat.c
+++ b/arch/mips/pci/pci-lasat.c
@@ -51,15 +51,15 @@ static int __init lasat_pci_setup(void)
arch_initcall(lasat_pci_setup);
-#define LASAT_IRQ_ETH1 (LASAT_IRQ_BASE + 0)
-#define LASAT_IRQ_ETH0 (LASAT_IRQ_BASE + 1)
-#define LASAT_IRQ_HDC (LASAT_IRQ_BASE + 2)
-#define LASAT_IRQ_COMP (LASAT_IRQ_BASE + 3)
-#define LASAT_IRQ_HDLC (LASAT_IRQ_BASE + 4)
-#define LASAT_IRQ_PCIA (LASAT_IRQ_BASE + 5)
-#define LASAT_IRQ_PCIB (LASAT_IRQ_BASE + 6)
-#define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7)
-#define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8)
+#define LASAT_IRQ_ETH1 (LASAT_IRQ_BASE + 0)
+#define LASAT_IRQ_ETH0 (LASAT_IRQ_BASE + 1)
+#define LASAT_IRQ_HDC (LASAT_IRQ_BASE + 2)
+#define LASAT_IRQ_COMP (LASAT_IRQ_BASE + 3)
+#define LASAT_IRQ_HDLC (LASAT_IRQ_BASE + 4)
+#define LASAT_IRQ_PCIA (LASAT_IRQ_BASE + 5)
+#define LASAT_IRQ_PCIB (LASAT_IRQ_BASE + 6)
+#define LASAT_IRQ_PCIC (LASAT_IRQ_BASE + 7)
+#define LASAT_IRQ_PCID (LASAT_IRQ_BASE + 8)
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
@@ -69,13 +69,13 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
case 3:
return LASAT_IRQ_PCIA + (((slot-1) + (pin-1)) % 4);
case 4:
- return LASAT_IRQ_ETH1; /* Ethernet 1 (LAN 2) */
+ return LASAT_IRQ_ETH1; /* Ethernet 1 (LAN 2) */
case 5:
- return LASAT_IRQ_ETH0; /* Ethernet 0 (LAN 1) */
+ return LASAT_IRQ_ETH0; /* Ethernet 0 (LAN 1) */
case 6:
- return LASAT_IRQ_HDC; /* IDE controller */
+ return LASAT_IRQ_HDC; /* IDE controller */
default:
- return 0xff; /* Illegal */
+ return 0xff; /* Illegal */
}
return -1;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 5b5ed76c6f4..95c2ea815ca 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -30,8 +30,8 @@
* addresses. Use PCI endian swapping 1 so no address swapping is
* necessary. The Linux io routines will endian swap the data.
*/
-#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
-#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
+#define OCTEON_PCI_IOSPACE_BASE 0x80011a0400000000ull
+#define OCTEON_PCI_IOSPACE_SIZE (1ull<<32)
/* Octeon's PCI controller uses did=3, subdid=3 for PCI memory. */
#define OCTEON_PCI_MEMSPACE_OFFSET (0x00011b0000000000ull)
@@ -68,10 +68,10 @@ enum octeon_dma_bar_type octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_INVALID;
*
* @dev: The Linux PCI device structure for the device to map
* @slot: The slot number for this device on __BUS 0__. Linux
- * enumerates through all the bridges and figures out the
- * slot on Bus 0 where this device eventually hooks to.
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
* @pin: The PCI interrupt pin read from the device, then swizzled
- * as it goes through each bridge.
+ * as it goes through each bridge.
* Returns Interrupt number for the device
*/
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
@@ -120,8 +120,8 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
/* Enable the PCIe normal error reporting */
config = PCI_EXP_DEVCTL_CERE; /* Correctable Error Reporting */
config |= PCI_EXP_DEVCTL_NFERE; /* Non-Fatal Error Reporting */
- config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */
- config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */
+ config |= PCI_EXP_DEVCTL_FERE; /* Fatal Error Reporting */
+ config |= PCI_EXP_DEVCTL_URRE; /* Unsupported Request */
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, config);
/* Find the Advanced Error Reporting capability */
@@ -226,10 +226,10 @@ const char *octeon_get_pci_interrupts(void)
*
* @dev: The Linux PCI device structure for the device to map
* @slot: The slot number for this device on __BUS 0__. Linux
- * enumerates through all the bridges and figures out the
- * slot on Bus 0 where this device eventually hooks to.
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
* @pin: The PCI interrupt pin read from the device, then swizzled
- * as it goes through each bridge.
+ * as it goes through each bridge.
* Returns Interrupt number for the device
*/
int __init octeon_pci_pcibios_map_irq(const struct pci_dev *dev,
@@ -404,8 +404,8 @@ static void octeon_pci_initialize(void)
ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */
ctl_status_2.s.bb_ca = 1; /* Don't use L2 with big bars */
ctl_status_2.s.bb_es = 1; /* Big bar in byte swap mode */
- ctl_status_2.s.bb1 = 1; /* BAR1 is big */
- ctl_status_2.s.bb0 = 1; /* BAR0 is big */
+ ctl_status_2.s.bb1 = 1; /* BAR1 is big */
+ ctl_status_2.s.bb0 = 1; /* BAR0 is big */
}
octeon_npi_write32(CVMX_NPI_PCI_CTL_STATUS_2, ctl_status_2.u32);
@@ -446,7 +446,7 @@ static void octeon_pci_initialize(void)
* count. [1..31] and 0=32. NOTE: If the user
* programs these bits beyond the Designed Maximum
* outstanding count, then the designed maximum table
- * depth will be used instead. No additional
+ * depth will be used instead. No additional
* Deferred/Split transactions will be accepted if
* this outstanding maximum count is
* reached. Furthermore, no additional deferred/split
@@ -456,7 +456,7 @@ static void octeon_pci_initialize(void)
cfg19.s.tdomc = 4;
/*
* Master Deferred Read Request Outstanding Max Count
- * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
+ * (PCI only). CR4C[26:24] Max SAC cycles MAX DAC
* cycles 000 8 4 001 1 0 010 2 1 011 3 1 100 4 2 101
* 5 2 110 6 3 111 7 3 For example, if these bits are
* programmed to 100, the core can support 2 DAC
@@ -550,7 +550,7 @@ static void octeon_pci_initialize(void)
/*
* Affects PCI performance when OCTEON services reads to its
- * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
+ * BAR1/BAR2. Refer to Section 10.6.1. The recommended values are
* 0x22, 0x33, and 0x33 for PCI_READ_CMD_6, PCI_READ_CMD_C, and
* PCI_READ_CMD_E, respectively. Unfortunately due to errata DDR-700,
* these values need to be changed so they won't possibly prefetch off
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c
index 5f3a69cebad..b128cb973eb 100644
--- a/arch/mips/pci/pci-rc32434.c
+++ b/arch/mips/pci/pci-rc32434.c
@@ -33,7 +33,7 @@
#include <asm/mach-rc32434/rc32434.h>
#include <asm/mach-rc32434/pci.h>
-#define PCI_ACCESS_READ 0
+#define PCI_ACCESS_READ 0
#define PCI_ACCESS_WRITE 1
/* define an unsigned array for the PCI registers */
@@ -82,11 +82,11 @@ extern struct pci_ops rc32434_pci_ops;
#define PCI_MEM2_START (PCI_ADDR_START + CPUTOPCI_MEM_WIN)
#define PCI_MEM2_END (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) - 1)
#define PCI_IO1_START (PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN))
-#define PCI_IO1_END \
+#define PCI_IO1_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN - 1)
#define PCI_IO2_START \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + CPUTOPCI_IO_WIN)
-#define PCI_IO2_END \
+#define PCI_IO2_END \
(PCI_ADDR_START + (2 * CPUTOPCI_MEM_WIN) + (2 * CPUTOPCI_IO_WIN) - 1)
struct pci_controller rc32434_controller2;
diff --git a/arch/mips/pci/pci-sb1250.c b/arch/mips/pci/pci-sb1250.c
index dd97f3a83ba..cdefcc4cb8d 100644
--- a/arch/mips/pci/pci-sb1250.c
+++ b/arch/mips/pci/pci-sb1250.c
@@ -55,9 +55,9 @@
static void *cfg_space;
-#define PCI_BUS_ENABLED 1
-#define LDT_BUS_ENABLED 2
-#define PCI_DEVICE_MODE 4
+#define PCI_BUS_ENABLED 1
+#define LDT_BUS_ENABLED 2
+#define PCI_DEVICE_MODE 4
static int sb1250_bus_status;
@@ -239,7 +239,7 @@ static int __init sb1250_pcibios_init(void)
PCI_COMMAND));
if (!(cmdreg & PCI_COMMAND_MASTER)) {
printk
- ("PCI: Skipping PCI probe. Bus is not initialized.\n");
+ ("PCI: Skipping PCI probe. Bus is not initialized.\n");
iounmap(cfg_space);
return 0;
}
diff --git a/arch/mips/pci/pci-vr41xx.c b/arch/mips/pci/pci-vr41xx.c
index 444b8d8004a..157c7715b7c 100644
--- a/arch/mips/pci/pci-vr41xx.c
+++ b/arch/mips/pci/pci-vr41xx.c
@@ -69,17 +69,17 @@ static struct pci_target_address_window pci_target_window1 = {
};
static struct resource pci_mem_resource = {
- .name = "PCI Memory resources",
- .start = PCI_MEM_RESOURCE_START,
- .end = PCI_MEM_RESOURCE_END,
- .flags = IORESOURCE_MEM,
+ .name = "PCI Memory resources",
+ .start = PCI_MEM_RESOURCE_START,
+ .end = PCI_MEM_RESOURCE_END,
+ .flags = IORESOURCE_MEM,
};
static struct resource pci_io_resource = {
- .name = "PCI I/O resources",
- .start = PCI_IO_RESOURCE_START,
- .end = PCI_IO_RESOURCE_END,
- .flags = IORESOURCE_IO,
+ .name = "PCI I/O resources",
+ .start = PCI_IO_RESOURCE_START,
+ .end = PCI_IO_RESOURCE_END,
+ .flags = IORESOURCE_IO,
};
static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
@@ -97,7 +97,7 @@ static struct pci_controller_unit_setup vr41xx_pci_controller_unit_setup = {
};
static struct pci_controller vr41xx_pci_controller = {
- .pci_ops = &vr41xx_pci_ops,
+ .pci_ops = &vr41xx_pci_ops,
.mem_resource = &pci_mem_resource,
.io_resource = &pci_io_resource,
};
@@ -148,7 +148,7 @@ static int __init vr41xx_pciu_init(void)
else if ((vtclock / 2) < pci_clock_max)
pciu_write(PCICLKSELREG, HALF_VTCLOCK);
else if (current_cpu_data.processor_id >= PRID_VR4131_REV2_1 &&
- (vtclock / 3) < pci_clock_max)
+ (vtclock / 3) < pci_clock_max)
pciu_write(PCICLKSELREG, ONE_THIRD_VTCLOCK);
else if ((vtclock / 4) < pci_clock_max)
pciu_write(PCICLKSELREG, QUARTER_VTCLOCK);
@@ -281,7 +281,7 @@ static int __init vr41xx_pciu_init(void)
pciu_write(PCIAPCNTREG, val);
pciu_write(COMMANDREG, PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
+ PCI_COMMAND_MASTER | PCI_COMMAND_PARITY |
PCI_COMMAND_SERR);
/* Clear bus error */
diff --git a/arch/mips/pci/pci-vr41xx.h b/arch/mips/pci/pci-vr41xx.h
index 6b1ae2eb1c0..e6b4a1b969f 100644
--- a/arch/mips/pci/pci-vr41xx.h
+++ b/arch/mips/pci/pci-vr41xx.h
@@ -1,7 +1,7 @@
/*
* pci-vr41xx.h, Include file for PCI Control Unit of the NEC VR4100 series.
*
- * Copyright (C) 2002 MontaVista Software Inc.
+ * Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2004-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
diff --git a/arch/mips/pci/pci-xlp.c b/arch/mips/pci/pci-xlp.c
index 140557a2048..653d2db9e0c 100644
--- a/arch/mips/pci/pci-xlp.c
+++ b/arch/mips/pci/pci-xlp.c
@@ -46,6 +46,7 @@
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/xlp-hal/pic.h>
@@ -55,7 +56,7 @@
static void *pci_config_base;
-#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
/* PCI ops */
static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
@@ -64,8 +65,12 @@ static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
u32 data;
u32 *cfgaddr;
+ where &= ~3;
+ if (bus->number == 0 && PCI_SLOT(devfn) == 1 && where == 0x954)
+ return 0xffffffff;
+
cfgaddr = (u32 *)(pci_config_base +
- pci_cfg_addr(bus->number, devfn, where & ~3));
+ pci_cfg_addr(bus->number, devfn, where));
data = *cfgaddr;
return data;
}
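pci_cfg_addr() packs bus, devfn and register into one byte offset from the memory-mapped configuration window. Worked from the macro above: for bus 1, device 2, function 0, register 0x10, devfn = (2 << 3) | 0 = 0x10, so the offset is (1 << 20) | (0x10 << 12) | 0x10 = 0x110010 and the dword is read at pci_config_base + 0x110010. The added special case for bus 0, slot 1, offset 0x954 appears to short-circuit a single register and return all-ones instead of touching the hardware; the hunk itself does not say which quirk or errata motivates it.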
@@ -135,54 +140,60 @@ struct pci_ops nlm_pci_ops = {
};
static struct resource nlm_pci_mem_resource = {
- .name = "XLP PCI MEM",
- .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
- .end = 0xdfffffffUL,
- .flags = IORESOURCE_MEM,
+ .name = "XLP PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
};
static struct resource nlm_pci_io_resource = {
- .name = "XLP IO MEM",
- .start = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */
- .end = 0x17ffffffUL,
- .flags = IORESOURCE_IO,
+ .name = "XLP IO MEM",
+ .start = 0x14000000UL, /* 64MB PCI IO @ 0x1000_0000 */
+ .end = 0x17ffffffUL,
+ .flags = IORESOURCE_IO,
};
struct pci_controller nlm_pci_controller = {
- .index = 0,
- .pci_ops = &nlm_pci_ops,
- .mem_resource = &nlm_pci_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_resource = &nlm_pci_io_resource,
- .io_offset = 0x00000000UL,
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
};
-static int get_irq_vector(const struct pci_dev *dev)
+static struct pci_dev *xlp_get_pcie_link(const struct pci_dev *dev)
{
- /*
- * For XLP PCIe, there is an IRQ per Link, find out which
- * link the device is on to assign interrupts
- */
- if (dev->bus->self == NULL)
- return 0;
+ struct pci_bus *bus, *p;
- switch (dev->bus->self->devfn) {
- case 0x8:
- return PIC_PCIE_LINK_0_IRQ;
- case 0x9:
- return PIC_PCIE_LINK_1_IRQ;
- case 0xa:
- return PIC_PCIE_LINK_2_IRQ;
- case 0xb:
- return PIC_PCIE_LINK_3_IRQ;
- }
- WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn);
- return 0;
+ /* Find the bridge on bus 0 */
+ bus = dev->bus;
+ for (p = bus->parent; p && p->number != 0; p = p->parent)
+ bus = p;
+
+ return p ? bus->self : NULL;
+}
+
+static inline int nlm_pci_link_to_irq(int link)
+{
+ return PIC_PCIE_LINK_0_IRQ + link;
}
int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
- return get_irq_vector(dev);
+ struct pci_dev *lnkdev;
+ int lnkslot, lnkfunc;
+
+ /*
+ * For XLP PCIe, there is an IRQ per Link, find out which
+ * link the device is on to assign interrupts
+ */
+ lnkdev = xlp_get_pcie_link(dev);
+ if (lnkdev == NULL)
+ return 0;
+ lnkfunc = PCI_FUNC(lnkdev->devfn);
+ lnkslot = PCI_SLOT(lnkdev->devfn);
+ return nlm_irq_to_xirq(lnkslot / 8, nlm_pci_link_to_irq(lnkfunc));
}
/* Do platform specific device initialization at pci_enable_device() time */
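Taken together, the replacement maps a device to the IRQ of the PCIe link it sits behind: xlp_get_pcie_link() walks up the bus hierarchy to the bridge on bus 0, and that bridge's function number selects the link IRQ while its slot number selects the node. A worked example under that reading (the device placement is hypothetical, and it assumes the link IRQs are consecutive as nlm_pci_link_to_irq() relies on):

	/* device behind the link bridge at bus 0, slot 1, function 2 */
	lnkslot = 1;	/* PCI_SLOT(lnkdev->devfn) */
	lnkfunc = 2;	/* PCI_FUNC(lnkdev->devfn) */
	irq = nlm_irq_to_xirq(lnkslot / 8,			/* node 0 */
			      nlm_pci_link_to_irq(lnkfunc));	/* PIC_PCIE_LINK_0_IRQ + 2 */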
@@ -191,51 +202,76 @@ int pcibios_plat_dev_init(struct pci_dev *dev)
return 0;
}
-static int xlp_enable_pci_bswap(void)
+/*
+ * If big-endian, enable hardware byteswap on the PCIe bridges.
+ * This will make both the SoC and PCIe devices behave consistently with
+ * readl/writel.
+ */
+#ifdef __BIG_ENDIAN
+static void xlp_config_pci_bswap(int node, int link)
{
- uint64_t pciebase, sysbase;
- int node, i;
+ uint64_t nbubase, lnkbase;
u32 reg;
- /* Chip-0 so node set to 0 */
- node = 0;
- sysbase = nlm_get_bridge_regbase(node);
+ nbubase = nlm_get_bridge_regbase(node);
+ lnkbase = nlm_get_pcie_base(node, link);
+
/*
* Enable byte swap in hardware. Program each link's PCIe SWAP regions
* from the link's address ranges.
*/
- for (i = 0; i < 4; i++) {
- pciebase = nlm_pcicfg_base(XLP_IO_PCIE_OFFSET(node, i));
- if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
- continue;
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_BASE, reg);
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_BASE0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_BASE, reg);
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEMEM_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_MEM_LIM, reg | 0xfff);
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEMEM_LIMIT0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_MEM_LIM,
- reg | 0xfff);
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_BASE0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_BASE, reg);
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_BASE0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_BASE, reg);
-
- reg = nlm_read_bridge_reg(sysbase, BRIDGE_PCIEIO_LIMIT0 + i);
- nlm_write_pci_reg(pciebase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
- }
- return 0;
+ reg = nlm_read_bridge_reg(nbubase, BRIDGE_PCIEIO_LIMIT0 + link);
+ nlm_write_pci_reg(lnkbase, PCIE_BYTE_SWAP_IO_LIM, reg | 0xfff);
}
+#else
+/* Swap configuration not needed in little-endian mode */
+static inline void xlp_config_pci_bswap(int node, int link) {}
+#endif /* __BIG_ENDIAN */
static int __init pcibios_init(void)
{
+ struct nlm_soc_info *nodep;
+ uint64_t pciebase;
+ int link, n;
+ u32 reg;
+
/* Firmware assigns PCI resources */
pci_set_flags(PCI_PROBE_ONLY);
pci_config_base = ioremap(XLP_DEFAULT_PCI_ECFG_BASE, 64 << 20);
/* Extend IO port for memory mapped io */
- ioport_resource.start = 0;
+ ioport_resource.start = 0;
ioport_resource.end = ~0;
- xlp_enable_pci_bswap();
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ nodep = nlm_get_node(n);
+ if (!nodep->coremask)
+ continue; /* node does not exist */
+
+ for (link = 0; link < 4; link++) {
+ pciebase = nlm_get_pcie_base(n, link);
+ if (nlm_read_pci_reg(pciebase, 0) == 0xffffffff)
+ continue;
+ xlp_config_pci_bswap(n, link);
+
+ /* put in intpin and irq - u-boot does not */
+ reg = nlm_read_pci_reg(pciebase, 0xf);
+ reg &= ~0x1fu;
+ reg |= (1 << 8) | nlm_pci_link_to_irq(link);
+ nlm_write_pci_reg(pciebase, 0xf, reg);
+ pr_info("XLP PCIe: Link %d-%d initialized.\n", n, link);
+ }
+ }
+
set_io_port_base(CKSEG1);
nlm_pci_controller.io_map_base = CKSEG1;
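For each link that answers config reads, the new loop does two things: it mirrors the bridge's PCIe mem/IO base and limit registers into the link's byte-swap windows (limits rounded up with | 0xfff), and it fills in the interrupt pin and line that the comment notes u-boot leaves unset. Assuming reg 0xf is the 32-bit register at config offset 0x3c (PCI_INTERRUPT_LINE in bits 7:0, PCI_INTERRUPT_PIN in bits 15:8), the write amounts to:

	reg &= ~0x1fu;				/* drop the stale line number  */
	reg |= (1 << 8)				/* Interrupt Pin  = 1 (INTA)   */
	     | nlm_pci_link_to_irq(link);	/* Interrupt Line = link's IRQ */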
diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c
index 0c18ccc7962..4427abbd48b 100644
--- a/arch/mips/pci/pci-xlr.c
+++ b/arch/mips/pci/pci-xlr.c
@@ -56,7 +56,7 @@
static void *pci_config_base;
-#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
+#define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off))
/* PCI ops */
static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn,
@@ -136,26 +136,26 @@ struct pci_ops nlm_pci_ops = {
};
static struct resource nlm_pci_mem_resource = {
- .name = "XLR PCI MEM",
- .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
- .end = 0xdfffffffUL,
- .flags = IORESOURCE_MEM,
+ .name = "XLR PCI MEM",
+ .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */
+ .end = 0xdfffffffUL,
+ .flags = IORESOURCE_MEM,
};
static struct resource nlm_pci_io_resource = {
- .name = "XLR IO MEM",
- .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
- .end = 0x100fffffUL,
- .flags = IORESOURCE_IO,
+ .name = "XLR IO MEM",
+ .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */
+ .end = 0x100fffffUL,
+ .flags = IORESOURCE_IO,
};
struct pci_controller nlm_pci_controller = {
- .index = 0,
- .pci_ops = &nlm_pci_ops,
- .mem_resource = &nlm_pci_mem_resource,
- .mem_offset = 0x00000000UL,
- .io_resource = &nlm_pci_io_resource,
- .io_offset = 0x00000000UL,
+ .index = 0,
+ .pci_ops = &nlm_pci_ops,
+ .mem_resource = &nlm_pci_mem_resource,
+ .mem_offset = 0x00000000UL,
+ .io_resource = &nlm_pci_io_resource,
+ .io_offset = 0x00000000UL,
};
/*
@@ -259,7 +259,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
MSI_ADDR_REDIRECTION_CPU;
msg.data = MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
+ MSI_DATA_LEVEL_ASSERT |
MSI_DATA_DELIVERY_FIXED;
ret = irq_set_msi_desc(irq, desc);
@@ -344,7 +344,7 @@ static int __init pcibios_init(void)
pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20);
/* Extend IO port for memory mapped io */
- ioport_resource.start = 0;
+ ioport_resource.start = 0;
ioport_resource.end = ~0;
set_io_port_base(CKSEG1);
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index a1843448fad..0872f12f268 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -1,6 +1,6 @@
/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
@@ -175,9 +175,20 @@ static DEFINE_MUTEX(pci_scan_mutex);
void register_pci_controller(struct pci_controller *hose)
{
- if (request_resource(&iomem_resource, hose->mem_resource) < 0)
+ struct resource *parent;
+
+ parent = hose->mem_resource->parent;
+ if (!parent)
+ parent = &iomem_resource;
+
+ if (request_resource(parent, hose->mem_resource) < 0)
goto out;
- if (request_resource(&ioport_resource, hose->io_resource) < 0) {
+
+ parent = hose->io_resource->parent;
+ if (!parent)
+ parent = &ioport_resource;
+
+ if (request_resource(parent, hose->io_resource) < 0) {
release_resource(hose->mem_resource);
goto out;
}
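One plausible reading of this change (not stated in the hunk): a platform whose PCI windows nest inside a larger SoC resource can pre-set hose->mem_resource->parent (and io_resource->parent) to that enclosing resource, and register_pci_controller() will then request the window from it rather than from the global iomem_resource/ioport_resource. A minimal sketch of that usage, with a hypothetical parent window:

	static struct resource soc_pci_window = {
		.name	= "SoC PCI window",	/* hypothetical */
		.start	= 0x10000000,
		.end	= 0x1fffffff,
		.flags	= IORESOURCE_MEM,
	};

	hose->mem_resource->parent = &soc_pci_window;
	register_pci_controller(hose);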
diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
index fdb4d558c0c..5e36c33e554 100644
--- a/arch/mips/pci/pcie-octeon.c
+++ b/arch/mips/pci/pcie-octeon.c
@@ -43,7 +43,7 @@ union cvmx_pcie_address {
uint64_t upper:2; /* Normally 2 for XKPHYS */
uint64_t reserved_49_61:13; /* Must be zero */
uint64_t io:1; /* 1 for IO space access */
- uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t did:5; /* PCIe DID = 3 */
uint64_t subdid:3; /* PCIe SubDID = 1 */
uint64_t reserved_36_39:4; /* Must be zero */
uint64_t es:2; /* Endian swap = 1 */
@@ -74,7 +74,7 @@ union cvmx_pcie_address {
uint64_t upper:2; /* Normally 2 for XKPHYS */
uint64_t reserved_49_61:13; /* Must be zero */
uint64_t io:1; /* 1 for IO space access */
- uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t did:5; /* PCIe DID = 3 */
uint64_t subdid:3; /* PCIe SubDID = 2 */
uint64_t reserved_36_39:4; /* Must be zero */
uint64_t es:2; /* Endian swap = 1 */
@@ -85,7 +85,7 @@ union cvmx_pcie_address {
uint64_t upper:2; /* Normally 2 for XKPHYS */
uint64_t reserved_49_61:13; /* Must be zero */
uint64_t io:1; /* 1 for IO space access */
- uint64_t did:5; /* PCIe DID = 3 */
+ uint64_t did:5; /* PCIe DID = 3 */
uint64_t subdid:3; /* PCIe SubDID = 3-6 */
uint64_t reserved_36_39:4; /* Must be zero */
uint64_t address:36; /* PCIe Mem address */
@@ -166,7 +166,7 @@ static inline uint64_t cvmx_pcie_get_mem_size(int pcie_port)
* Read a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
- * @pcie_port: PCIe port to read from
+ * @pcie_port: PCIe port to read from
* @cfg_offset: Address to read
*
* Returns Value read
@@ -194,9 +194,9 @@ static uint32_t cvmx_pcie_cfgx_read(int pcie_port, uint32_t cfg_offset)
* Write a PCIe config space register indirectly. This is used for
* registers of the form PCIEEP_CFG??? and PCIERC?_CFG???.
*
- * @pcie_port: PCIe port to write to
+ * @pcie_port: PCIe port to write to
* @cfg_offset: Address to write
- * @val: Value to write
+ * @val: Value to write
*/
static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
uint32_t val)
@@ -222,7 +222,7 @@ static void cvmx_pcie_cfgx_write(int pcie_port, uint32_t cfg_offset,
* @pcie_port: PCIe port to access
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns 64bit Octeon IO address
@@ -259,7 +259,7 @@ static inline uint64_t __cvmx_pcie_build_config_addr(int pcie_port, int bus,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns Result of the read
@@ -281,7 +281,7 @@ static uint8_t cvmx_pcie_config_read8(int pcie_port, int bus, int dev,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns Result of the read
@@ -303,7 +303,7 @@ static uint16_t cvmx_pcie_config_read16(int pcie_port, int bus, int dev,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
*
* Returns Result of the read
@@ -325,7 +325,7 @@ static uint32_t cvmx_pcie_config_read32(int pcie_port, int bus, int dev,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
* @val: Value to write
*/
@@ -344,7 +344,7 @@ static void cvmx_pcie_config_write8(int pcie_port, int bus, int dev, int fn,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
* @val: Value to write
*/
@@ -363,7 +363,7 @@ static void cvmx_pcie_config_write16(int pcie_port, int bus, int dev, int fn,
* @pcie_port: PCIe port the device is on
* @bus: Sub bus
* @dev: Device ID
- * @fn: Device sub function
+ * @fn: Device sub function
* @reg: Register to access
* @val: Value to write
*/
@@ -883,14 +883,14 @@ retry:
/* Store merge control (NPEI_MEM_ACCESS_CTL[TIMER,MAX_WORD]) */
npei_mem_access_ctl.u64 = cvmx_read_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL);
- npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
- npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
+ npei_mem_access_ctl.s.max_word = 0; /* Allow 16 words to combine */
+ npei_mem_access_ctl.s.timer = 127; /* Wait up to 127 cycles for more data */
cvmx_write_csr(CVMX_PEXP_NPEI_MEM_ACCESS_CTL, npei_mem_access_ctl.u64);
/* Setup Mem access SubDIDs */
mem_access_subid.u64 = 0;
mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
- mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
+ mem_access_subid.s.nmerge = 1; /* Due to an errata on pass 1 chips, no merging is allowed. */
mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
mem_access_subid.s.nsr = 0; /* Enable Snooping for Reads. Octeon doesn't care, but devices might want this more conservative setting */
@@ -926,7 +926,7 @@ retry:
bar1_index.u32 = 0;
bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
- bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.ca = 1; /* Not Cached */
bar1_index.s.end_swp = 1; /* Endian Swap mode */
bar1_index.s.addr_v = 1; /* Valid entry */
@@ -1342,11 +1342,11 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
/* Setup Mem access SubDIDs */
mem_access_subid.u64 = 0;
mem_access_subid.s.port = pcie_port; /* Port the request is sent to. */
- mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
- mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
- mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
- mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
- mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.nmerge = 0; /* Allow merging as it works on CN6XXX. */
+ mem_access_subid.s.esr = 1; /* Endian-swap for Reads. */
+ mem_access_subid.s.esw = 1; /* Endian-swap for Writes. */
+ mem_access_subid.s.wtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
+ mem_access_subid.s.rtype = 0; /* "No snoop" and "Relaxed ordering" are not set */
/* PCIe Address Bits <63:34>. */
if (OCTEON_IS_MODEL(OCTEON_CN68XX))
mem_access_subid.cn68xx.ba = 0;
@@ -1409,7 +1409,7 @@ static int __cvmx_pcie_rc_initialize_gen2(int pcie_port)
bar1_index.u64 = 0;
bar1_index.s.addr_idx = (CVMX_PCIE_BAR1_PHYS_BASE >> 22);
- bar1_index.s.ca = 1; /* Not Cached */
+ bar1_index.s.ca = 1; /* Not Cached */
bar1_index.s.end_swp = 1; /* Endian Swap mode */
bar1_index.s.addr_v = 1; /* Valid entry */
@@ -1458,10 +1458,10 @@ static int cvmx_pcie_rc_initialize(int pcie_port)
*
* @dev: The Linux PCI device structure for the device to map
* @slot: The slot number for this device on __BUS 0__. Linux
- * enumerates through all the bridges and figures out the
- * slot on Bus 0 where this device eventually hooks to.
+ * enumerates through all the bridges and figures out the
+ * slot on Bus 0 where this device eventually hooks to.
* @pin: The PCI interrupt pin read from the device, then swizzled
- * as it goes through each bridge.
+ * as it goes through each bridge.
* Returns Interrupt number for the device
*/
int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
@@ -1503,7 +1503,7 @@ int __init octeon_pcie_pcibios_map_irq(const struct pci_dev *dev,
return pin - 1 + OCTEON_IRQ_PCI_INT0;
}
-static void set_cfg_read_retry(u32 retry_cnt)
+static void set_cfg_read_retry(u32 retry_cnt)
{
union cvmx_pemx_ctl_status pemx_ctl;
pemx_ctl.u64 = cvmx_read_csr(CVMX_PEMX_CTL_STATUS(1));
@@ -1931,7 +1931,7 @@ static int __init octeon_pcie_setup(void)
OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(0));
if (sriox_status_reg.s.srio) {
- srio_war15205 += 1; /* Port is SRIO */
+ srio_war15205 += 1; /* Port is SRIO */
port = 0;
}
}
@@ -2004,7 +2004,7 @@ static int __init octeon_pcie_setup(void)
OCTEON_IS_MODEL(OCTEON_CN63XX_PASS2_0)) {
sriox_status_reg.u64 = cvmx_read_csr(CVMX_SRIOX_STATUS_REG(1));
if (sriox_status_reg.s.srio) {
- srio_war15205 += 1; /* Port is SRIO */
+ srio_war15205 += 1; /* Port is SRIO */
port = 1;
}
}
diff --git a/arch/mips/pmc-sierra/Platform b/arch/mips/pmc-sierra/Platform
deleted file mode 100644
index 387fda6c28c..00000000000
--- a/arch/mips/pmc-sierra/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# PMC-Sierra MSP SOCs
-#
-platform-$(CONFIG_PMC_MSP) += pmc-sierra/msp71xx/
-cflags-$(CONFIG_PMC_MSP) += -I$(srctree)/arch/mips/include/asm/pmc-sierra/msp71xx \
- -mno-branch-likely
-load-$(CONFIG_PMC_MSP) += 0xffffffff80100000
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig
index 3482b8c8640..3482b8c8640 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmcs-msp71xx/Kconfig
diff --git a/arch/mips/pmc-sierra/msp71xx/Makefile b/arch/mips/pmcs-msp71xx/Makefile
index cefba7733b7..cefba7733b7 100644
--- a/arch/mips/pmc-sierra/msp71xx/Makefile
+++ b/arch/mips/pmcs-msp71xx/Makefile
diff --git a/arch/mips/pmcs-msp71xx/Platform b/arch/mips/pmcs-msp71xx/Platform
new file mode 100644
index 00000000000..7af0734a500
--- /dev/null
+++ b/arch/mips/pmcs-msp71xx/Platform
@@ -0,0 +1,7 @@
+#
+# PMC-Sierra MSP SOCs
+#
+platform-$(CONFIG_PMC_MSP) += pmcs-msp71xx/
+cflags-$(CONFIG_PMC_MSP) += -I$(srctree)/arch/mips/include/asm/mach-pmcs-msp71xx \
+ -mno-branch-likely
+load-$(CONFIG_PMC_MSP) += 0xffffffff80100000
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio.c b/arch/mips/pmcs-msp71xx/gpio.c
index aaccbe52438..aaccbe52438 100644
--- a/arch/mips/pmc-sierra/msp71xx/gpio.c
+++ b/arch/mips/pmcs-msp71xx/gpio.c
diff --git a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c b/arch/mips/pmcs-msp71xx/gpio_extended.c
index 2a99f360fae..2a99f360fae 100644
--- a/arch/mips/pmc-sierra/msp71xx/gpio_extended.c
+++ b/arch/mips/pmcs-msp71xx/gpio_extended.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_elb.c b/arch/mips/pmcs-msp71xx/msp_elb.c
index 3e964100721..3e964100721 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_elb.c
+++ b/arch/mips/pmcs-msp71xx/msp_elb.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_eth.c b/arch/mips/pmcs-msp71xx/msp_eth.c
index c584df393de..c584df393de 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_eth.c
+++ b/arch/mips/pmcs-msp71xx/msp_eth.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c b/arch/mips/pmcs-msp71xx/msp_hwbutton.c
index bb57ed9ea2b..bb57ed9ea2b 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_hwbutton.c
+++ b/arch/mips/pmcs-msp71xx/msp_hwbutton.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c
index d3c3d81757a..9da5619c00a 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq.c
@@ -41,9 +41,9 @@ static inline void sec_int_dispatch(void) { do_IRQ(MSP_INT_SEC); }
/*
* The PMC-Sierra MSP interrupts are arranged in a 3 level cascaded
- * hierarchical system. The first level are the direct MIPS interrupts
+ * hierarchical system. The first level are the direct MIPS interrupts
* and are assigned the interrupt range 0-7. The second level is the SLM
- * interrupt controller and is assigned the range 8-39. The third level
+ * interrupt controller and is assigned the range 8-39. The third level
* comprises the Peripheral block, the PCI block, the PCI MSI block and
* the SLP. The PCI interrupts and the SLP errors are handled by the
* relevant subsystems so the core interrupt code needs only concern
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index 2e6f7cab24c..e49b499f66d 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -3,8 +3,8 @@
*
* This file defines the irq handler for MSP CIC subsystem interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -171,7 +171,7 @@ void __init msp_cic_irq_init(void)
/* Mask/clear interrupts. */
*CIC_VPE0_MSK_REG = 0x00000000;
*CIC_VPE1_MSK_REG = 0x00000000;
- *CIC_STS_REG = 0xFFFFFFFF;
+ *CIC_STS_REG = 0xFFFFFFFF;
/*
* The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
* These inputs map to EXT_INT_POL[6:4] inside the CIC.
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/arch/mips/pmcs-msp71xx/msp_irq_per.c
index 598b6a66b97..d1fd530479d 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_per.c
@@ -3,8 +3,8 @@
*
* This file defines the irq handler for MSP PER subsystem interrupts.
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c b/arch/mips/pmcs-msp71xx/msp_irq_slp.c
index 83a1c5eae3f..5f66a76311c 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_irq_slp.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_slp.c
@@ -4,8 +4,8 @@
* Copyright 2005-2006 PMC-Sierra, Inc, derived from irq_cpu.c
* Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_pci.c b/arch/mips/pmcs-msp71xx/msp_pci.c
index f764fe7748d..428dea23c35 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_pci.c
+++ b/arch/mips/pmcs-msp71xx/msp_pci.c
@@ -36,7 +36,7 @@ static int __init msp_pci_setup(void)
#if 0 /* Linux 2.6 initialization code to be completed */
if (getdeviceid() & DEV_ID_SINGLE_PC) {
/* If single card mode */
- slmRegs *sreg = (slmRegs *) SREG_BASE;
+ slmRegs *sreg = (slmRegs *) SREG_BASE;
sreg->single_pc_enable = SINGLE_PCCARD;
}
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index db00deb59b9..0edb89a6351 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -99,7 +99,7 @@ static inline int str2eaddr(unsigned char *ea, unsigned char *str)
}
}
- if (index == 5) {
+ if (index == 5) {
ea[index++] = num;
return 0;
} else
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(identify_revision);
char *prom_getenv(char *env_name)
{
/*
- * Return a pointer to the given environment variable. prom_envp
+ * Return a pointer to the given environment variable. prom_envp
* points to a null terminated array of pointers to variables.
* Environment variables are stored in the form of "memsize=64"
*/
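The comment describes the usual PROM environment layout: a NULL-terminated array of "name=value" strings. A minimal lookup under that assumption (illustrative sketch, not the file's actual implementation):

	static char *env_lookup_example(char **envp, const char *name)
	{
		size_t n = strlen(name);

		for (; *envp != NULL; envp++)
			if (strncmp(*envp, name, n) == 0 && (*envp)[n] == '=')
				return *envp + n + 1;	/* points at the value */
		return NULL;
	}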
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_serial.c b/arch/mips/pmcs-msp71xx/msp_serial.c
index a1c7c7da233..d304be22b96 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_serial.c
+++ b/arch/mips/pmcs-msp71xx/msp_serial.c
@@ -90,8 +90,8 @@ static int msp_serial_handle_irq(struct uart_port *p)
void __init msp_serial_setup(void)
{
- char *s;
- char *endp;
+ char *s;
+ char *endp;
struct uart_port up;
unsigned int uartclk;
@@ -104,19 +104,19 @@ void __init msp_serial_setup(void)
ppfinit("UART clock set to %d\n", uartclk);
/* Initialize first serial port */
- up.mapbase = MSP_UART0_BASE;
- up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
- up.irq = MSP_INT_UART0;
- up.uartclk = uartclk;
- up.regshift = 2;
- up.iotype = UPIO_MEM;
- up.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
- up.type = PORT_16550A;
- up.line = 0;
+ up.mapbase = MSP_UART0_BASE;
+ up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+ up.irq = MSP_INT_UART0;
+ up.uartclk = uartclk;
+ up.regshift = 2;
+ up.iotype = UPIO_MEM;
+ up.flags = ASYNC_BOOT_AUTOCONF | ASYNC_SKIP_TEST;
+ up.type = PORT_16550A;
+ up.line = 0;
up.serial_out = msp_serial_out;
up.serial_in = msp_serial_in;
up.handle_irq = msp_serial_handle_irq;
- up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
+ up.private_data = kzalloc(sizeof(struct msp_uart_data), GFP_KERNEL);
if (!up.private_data) {
pr_err("failed to allocate uart private data\n");
return;
@@ -142,10 +142,10 @@ void __init msp_serial_setup(void)
return; /* No second serial port, good-bye. */
}
- up.mapbase = MSP_UART1_BASE;
- up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
- up.irq = MSP_INT_UART1;
- up.line = 1;
+ up.mapbase = MSP_UART1_BASE;
+ up.membase = ioremap_nocache(up.mapbase, MSP_UART_REG_LEN);
+ up.irq = MSP_INT_UART1;
+ up.line = 1;
up.private_data = (void*)UART1_STATUS_REG;
if (early_serial_setup(&up)) {
kfree(up.private_data);
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 7a834b2f8a5..1651cfdbfe7 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -4,8 +4,8 @@
* Copyright 2005-2007 PMC-Sierra, Inc,
* Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -168,7 +168,7 @@ void __init prom_init(void)
family = identify_family();
revision = identify_revision();
- switch (family) {
+ switch (family) {
case FAMILY_FPGA:
if (FPGA_IS_MSP4200(revision)) {
/* Old-style revision ID */
@@ -219,7 +219,7 @@ void __init prom_init(void)
/*
* Sub-system setup follows.
- * Setup functions can either be called here or using the
+ * Setup functions can either be called here or using the
* subsys_initcall mechanism (i.e. see msp_pci_setup). The
* order in which they are called can be changed by using the
* link order in arch/mips/pmc-sierra/msp71xx/Makefile.
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2d..10170580a2d 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_smtc.c b/arch/mips/pmcs-msp71xx/msp_smtc.c
index c8dcc1c01e1..c8dcc1c01e1 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_smtc.c
+++ b/arch/mips/pmcs-msp71xx/msp_smtc.c
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmcs-msp71xx/msp_time.c
index 8b42f307a7a..8f12ecc55ac 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmcs-msp71xx/msp_time.c
@@ -45,7 +45,7 @@ static int tim_installed;
void __init plat_time_init(void)
{
- char *endp, *s;
+ char *endp, *s;
unsigned long cpu_rate = 0;
if (cpu_rate == 0) {
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_usb.c b/arch/mips/pmcs-msp71xx/msp_usb.c
index 9a1aef89bd4..4dab915696e 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_usb.c
+++ b/arch/mips/pmcs-msp71xx/msp_usb.c
@@ -40,14 +40,14 @@
#if defined(CONFIG_USB_EHCI_HCD)
static struct resource msp_usbhost0_resources[] = {
[0] = { /* EHCI-HS operational and capabilities registers */
- .start = MSP_USB0_HS_START,
- .end = MSP_USB0_HS_END,
- .flags = IORESOURCE_MEM,
+ .start = MSP_USB0_HS_START,
+ .end = MSP_USB0_HS_END,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = MSP_INT_USB,
- .end = MSP_INT_USB,
- .flags = IORESOURCE_IRQ,
+ .start = MSP_INT_USB,
+ .end = MSP_INT_USB,
+ .flags = IORESOURCE_IRQ,
},
[2] = { /* MSBus-to-AMBA bridge register space */
.start = MSP_USB0_MAB_START,
@@ -71,8 +71,8 @@ static struct mspusb_device msp_usbhost0_device = {
.dma_mask = &msp_usbhost0_dma_mask,
.coherent_dma_mask = 0xffffffffUL,
},
- .num_resources = ARRAY_SIZE(msp_usbhost0_resources),
- .resource = msp_usbhost0_resources,
+ .num_resources = ARRAY_SIZE(msp_usbhost0_resources),
+ .resource = msp_usbhost0_resources,
},
};
@@ -121,14 +121,14 @@ static struct mspusb_device msp_usbhost1_device = {
#if defined(CONFIG_USB_GADGET)
static struct resource msp_usbdev0_resources[] = {
[0] = { /* EHCI-HS operational and capabilities registers */
- .start = MSP_USB0_HS_START,
- .end = MSP_USB0_HS_END,
- .flags = IORESOURCE_MEM,
+ .start = MSP_USB0_HS_START,
+ .end = MSP_USB0_HS_END,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = MSP_INT_USB,
- .end = MSP_INT_USB,
- .flags = IORESOURCE_IRQ,
+ .start = MSP_INT_USB,
+ .end = MSP_INT_USB,
+ .flags = IORESOURCE_IRQ,
},
[2] = { /* MSBus-to-AMBA bridge register space */
.start = MSP_USB0_MAB_START,
@@ -153,22 +153,22 @@ static struct mspusb_device msp_usbdev0_device = {
.dma_mask = &msp_usbdev_dma_mask,
.coherent_dma_mask = 0xffffffffUL,
},
- .num_resources = ARRAY_SIZE(msp_usbdev0_resources),
- .resource = msp_usbdev0_resources,
+ .num_resources = ARRAY_SIZE(msp_usbdev0_resources),
+ .resource = msp_usbdev0_resources,
},
};
#ifdef CONFIG_MSP_HAS_DUAL_USB
static struct resource msp_usbdev1_resources[] = {
[0] = { /* EHCI-HS operational and capabilities registers */
- .start = MSP_USB1_HS_START,
- .end = MSP_USB1_HS_END,
- .flags = IORESOURCE_MEM,
+ .start = MSP_USB1_HS_START,
+ .end = MSP_USB1_HS_END,
+ .flags = IORESOURCE_MEM,
},
[1] = {
- .start = MSP_INT_USB,
- .end = MSP_INT_USB,
- .flags = IORESOURCE_IRQ,
+ .start = MSP_INT_USB,
+ .end = MSP_INT_USB,
+ .flags = IORESOURCE_IRQ,
},
[2] = { /* MSBus-to-AMBA bridge register space */
.start = MSP_USB1_MAB_START,
@@ -191,8 +191,8 @@ static struct mspusb_device msp_usbdev1_device = {
.dma_mask = &msp_usbdev_dma_mask,
.coherent_dma_mask = 0xffffffffUL,
},
- .num_resources = ARRAY_SIZE(msp_usbdev1_resources),
- .resource = msp_usbdev1_resources,
+ .num_resources = ARRAY_SIZE(msp_usbdev1_resources),
+ .resource = msp_usbdev1_resources,
},
};
@@ -211,7 +211,7 @@ static int __init msp_usb_setup(void)
/*
* Could this perhaps be integrated into the "features" env var?
* Use the features key "U", and follow with "H" for host-mode,
- * "D" for device-mode. If it works for Ethernet, why not USB...
+ * "D" for device-mode. If it works for Ethernet, why not USB...
* -- hammtrev, 2007/03/22
*/
snprintf((char *)&envstr[0], sizeof(envstr), "usbmode");
@@ -237,7 +237,7 @@ static int __init msp_usb_setup(void)
#endif
#else
ppfinit("%s: echi_hcd not supported\n", __FILE__);
-#endif /* CONFIG_USB_EHCI_HCD */
+#endif /* CONFIG_USB_EHCI_HCD */
} else {
#if defined(CONFIG_USB_GADGET)
/* get device mode structure */
@@ -251,7 +251,7 @@ static int __init msp_usb_setup(void)
#endif
#else
ppfinit("%s: usb_gadget not supported\n", __FILE__);
-#endif /* CONFIG_USB_GADGET */
+#endif /* CONFIG_USB_GADGET */
}
/* add device */
platform_add_devices(msp_devs, ARRAY_SIZE(msp_devs));
diff --git a/arch/mips/pnx833x/Platform b/arch/mips/pnx833x/Platform
index 7e6ec4dbc8d..794526caab1 100644
--- a/arch/mips/pnx833x/Platform
+++ b/arch/mips/pnx833x/Platform
@@ -1,5 +1,5 @@
# NXP STB225
platform-$(CONFIG_SOC_PNX833X) += pnx833x/
-cflags-$(CONFIG_SOC_PNX833X) += -Iarch/mips/include/asm/mach-pnx833x
+cflags-$(CONFIG_SOC_PNX833X) += -Iarch/mips/include/asm/mach-pnx833x
load-$(CONFIG_NXP_STB220) += 0xffffffff80001000
load-$(CONFIG_NXP_STB225) += 0xffffffff80001000
diff --git a/arch/mips/pnx833x/common/interrupts.c b/arch/mips/pnx833x/common/interrupts.c
index a86d5d5fceb..a4a90596c0a 100644
--- a/arch/mips/pnx833x/common/interrupts.c
+++ b/arch/mips/pnx833x/common/interrupts.c
@@ -35,64 +35,64 @@ static int mips_cpu_timer_irq;
static const unsigned int irq_prio[PNX833X_PIC_NUM_IRQ] =
{
0, /* unused */
- 4, /* PNX833X_PIC_I2C0_INT 1 */
- 4, /* PNX833X_PIC_I2C1_INT 2 */
- 1, /* PNX833X_PIC_UART0_INT 3 */
- 1, /* PNX833X_PIC_UART1_INT 4 */
- 6, /* PNX833X_PIC_TS_IN0_DV_INT 5 */
- 6, /* PNX833X_PIC_TS_IN0_DMA_INT 6 */
- 7, /* PNX833X_PIC_GPIO_INT 7 */
- 4, /* PNX833X_PIC_AUDIO_DEC_INT 8 */
- 5, /* PNX833X_PIC_VIDEO_DEC_INT 9 */
- 4, /* PNX833X_PIC_CONFIG_INT 10 */
- 4, /* PNX833X_PIC_AOI_INT 11 */
- 9, /* PNX833X_PIC_SYNC_INT 12 */
- 9, /* PNX8335_PIC_SATA_INT 13 */
- 4, /* PNX833X_PIC_OSD_INT 14 */
- 9, /* PNX833X_PIC_DISP1_INT 15 */
- 4, /* PNX833X_PIC_DEINTERLACER_INT 16 */
- 9, /* PNX833X_PIC_DISPLAY2_INT 17 */
- 4, /* PNX833X_PIC_VC_INT 18 */
- 4, /* PNX833X_PIC_SC_INT 19 */
- 9, /* PNX833X_PIC_IDE_INT 20 */
- 9, /* PNX833X_PIC_IDE_DMA_INT 21 */
- 6, /* PNX833X_PIC_TS_IN1_DV_INT 22 */
- 6, /* PNX833X_PIC_TS_IN1_DMA_INT 23 */
- 4, /* PNX833X_PIC_SGDX_DMA_INT 24 */
- 4, /* PNX833X_PIC_TS_OUT_INT 25 */
- 4, /* PNX833X_PIC_IR_INT 26 */
- 3, /* PNX833X_PIC_VMSP1_INT 27 */
- 3, /* PNX833X_PIC_VMSP2_INT 28 */
- 4, /* PNX833X_PIC_PIBC_INT 29 */
- 4, /* PNX833X_PIC_TS_IN0_TRD_INT 30 */
- 4, /* PNX833X_PIC_SGDX_TPD_INT 31 */
- 5, /* PNX833X_PIC_USB_INT 32 */
- 4, /* PNX833X_PIC_TS_IN1_TRD_INT 33 */
- 4, /* PNX833X_PIC_CLOCK_INT 34 */
- 4, /* PNX833X_PIC_SGDX_PARSER_INT 35 */
- 4, /* PNX833X_PIC_VMSP_DMA_INT 36 */
+ 4, /* PNX833X_PIC_I2C0_INT 1 */
+ 4, /* PNX833X_PIC_I2C1_INT 2 */
+ 1, /* PNX833X_PIC_UART0_INT 3 */
+ 1, /* PNX833X_PIC_UART1_INT 4 */
+ 6, /* PNX833X_PIC_TS_IN0_DV_INT 5 */
+ 6, /* PNX833X_PIC_TS_IN0_DMA_INT 6 */
+ 7, /* PNX833X_PIC_GPIO_INT 7 */
+ 4, /* PNX833X_PIC_AUDIO_DEC_INT 8 */
+ 5, /* PNX833X_PIC_VIDEO_DEC_INT 9 */
+ 4, /* PNX833X_PIC_CONFIG_INT 10 */
+ 4, /* PNX833X_PIC_AOI_INT 11 */
+ 9, /* PNX833X_PIC_SYNC_INT 12 */
+ 9, /* PNX8335_PIC_SATA_INT 13 */
+ 4, /* PNX833X_PIC_OSD_INT 14 */
+ 9, /* PNX833X_PIC_DISP1_INT 15 */
+ 4, /* PNX833X_PIC_DEINTERLACER_INT 16 */
+ 9, /* PNX833X_PIC_DISPLAY2_INT 17 */
+ 4, /* PNX833X_PIC_VC_INT 18 */
+ 4, /* PNX833X_PIC_SC_INT 19 */
+ 9, /* PNX833X_PIC_IDE_INT 20 */
+ 9, /* PNX833X_PIC_IDE_DMA_INT 21 */
+ 6, /* PNX833X_PIC_TS_IN1_DV_INT 22 */
+ 6, /* PNX833X_PIC_TS_IN1_DMA_INT 23 */
+ 4, /* PNX833X_PIC_SGDX_DMA_INT 24 */
+ 4, /* PNX833X_PIC_TS_OUT_INT 25 */
+ 4, /* PNX833X_PIC_IR_INT 26 */
+ 3, /* PNX833X_PIC_VMSP1_INT 27 */
+ 3, /* PNX833X_PIC_VMSP2_INT 28 */
+ 4, /* PNX833X_PIC_PIBC_INT 29 */
+ 4, /* PNX833X_PIC_TS_IN0_TRD_INT 30 */
+ 4, /* PNX833X_PIC_SGDX_TPD_INT 31 */
+ 5, /* PNX833X_PIC_USB_INT 32 */
+ 4, /* PNX833X_PIC_TS_IN1_TRD_INT 33 */
+ 4, /* PNX833X_PIC_CLOCK_INT 34 */
+ 4, /* PNX833X_PIC_SGDX_PARSER_INT 35 */
+ 4, /* PNX833X_PIC_VMSP_DMA_INT 36 */
#if defined(CONFIG_SOC_PNX8335)
- 4, /* PNX8335_PIC_MIU_INT 37 */
- 4, /* PNX8335_PIC_AVCHIP_IRQ_INT 38 */
- 9, /* PNX8335_PIC_SYNC_HD_INT 39 */
- 9, /* PNX8335_PIC_DISP_HD_INT 40 */
- 9, /* PNX8335_PIC_DISP_SCALER_INT 41 */
- 4, /* PNX8335_PIC_OSD_HD1_INT 42 */
- 4, /* PNX8335_PIC_DTL_WRITER_Y_INT 43 */
- 4, /* PNX8335_PIC_DTL_WRITER_C_INT 44 */
+ 4, /* PNX8335_PIC_MIU_INT 37 */
+ 4, /* PNX8335_PIC_AVCHIP_IRQ_INT 38 */
+ 9, /* PNX8335_PIC_SYNC_HD_INT 39 */
+ 9, /* PNX8335_PIC_DISP_HD_INT 40 */
+ 9, /* PNX8335_PIC_DISP_SCALER_INT 41 */
+ 4, /* PNX8335_PIC_OSD_HD1_INT 42 */
+ 4, /* PNX8335_PIC_DTL_WRITER_Y_INT 43 */
+ 4, /* PNX8335_PIC_DTL_WRITER_C_INT 44 */
4, /* PNX8335_PIC_DTL_EMULATOR_Y_IR_INT 45 */
4, /* PNX8335_PIC_DTL_EMULATOR_C_IR_INT 46 */
- 4, /* PNX8335_PIC_DENC_TTX_INT 47 */
- 4, /* PNX8335_PIC_MMI_SIF0_INT 48 */
- 4, /* PNX8335_PIC_MMI_SIF1_INT 49 */
- 4, /* PNX8335_PIC_MMI_CDMMU_INT 50 */
- 4, /* PNX8335_PIC_PIBCS_INT 51 */
- 12, /* PNX8335_PIC_ETHERNET_INT 52 */
- 3, /* PNX8335_PIC_VMSP1_0_INT 53 */
- 3, /* PNX8335_PIC_VMSP1_1_INT 54 */
- 4, /* PNX8335_PIC_VMSP1_DMA_INT 55 */
- 4, /* PNX8335_PIC_TDGR_DE_INT 56 */
- 4, /* PNX8335_PIC_IR1_IRQ_INT 57 */
+ 4, /* PNX8335_PIC_DENC_TTX_INT 47 */
+ 4, /* PNX8335_PIC_MMI_SIF0_INT 48 */
+ 4, /* PNX8335_PIC_MMI_SIF1_INT 49 */
+ 4, /* PNX8335_PIC_MMI_CDMMU_INT 50 */
+ 4, /* PNX8335_PIC_PIBCS_INT 51 */
+ 12, /* PNX8335_PIC_ETHERNET_INT 52 */
+ 3, /* PNX8335_PIC_VMSP1_0_INT 53 */
+ 3, /* PNX8335_PIC_VMSP1_1_INT 54 */
+ 4, /* PNX8335_PIC_VMSP1_DMA_INT 55 */
+ 4, /* PNX8335_PIC_TDGR_DE_INT 56 */
+ 4, /* PNX8335_PIC_IR1_IRQ_INT 57 */
#endif
};
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 05a1d922cd6..d22dc0d6f28 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@
#include <irq-mapping.h>
#include <pnx833x.h>
-static u64 uart_dmamask = DMA_BIT_MASK(32);
+static u64 uart_dmamask = DMA_BIT_MASK(32);
static struct resource pnx833x_uart_resources[] = {
[0] = {
@@ -69,7 +69,7 @@ static struct resource pnx833x_uart_resources[] = {
struct pnx8xxx_port pnx8xxx_ports[] = {
[0] = {
- .port = {
+ .port = {
.type = PORT_PNX8XXX,
.iotype = UPIO_MEM,
.membase = (void __iomem *)PNX833X_UART0_PORTS_START,
@@ -82,7 +82,7 @@ struct pnx8xxx_port pnx8xxx_ports[] = {
},
},
[1] = {
- .port = {
+ .port = {
.type = PORT_PNX8XXX,
.iotype = UPIO_MEM,
.membase = (void __iomem *)PNX833X_UART1_PORTS_START,
@@ -108,7 +108,7 @@ static struct platform_device pnx833x_uart_device = {
.resource = pnx833x_uart_resources,
};
-static u64 ehci_dmamask = DMA_BIT_MASK(32);
+static u64 ehci_dmamask = DMA_BIT_MASK(32);
static struct resource pnx833x_usb_ehci_resources[] = {
[0] = {
@@ -183,7 +183,7 @@ static struct platform_device pnx833x_i2c0_device = {
.dev = {
.platform_data = &pnx833x_i2c_dev[0],
},
- .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources),
+ .num_resources = ARRAY_SIZE(pnx833x_i2c0_resources),
.resource = pnx833x_i2c0_resources,
};
@@ -193,7 +193,7 @@ static struct platform_device pnx833x_i2c1_device = {
.dev = {
.platform_data = &pnx833x_i2c_dev[1],
},
- .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources),
+ .num_resources = ARRAY_SIZE(pnx833x_i2c1_resources),
.resource = pnx833x_i2c1_resources,
};
#endif
@@ -217,7 +217,7 @@ static struct platform_device pnx833x_ethernet_device = {
.name = "ip3902-eth",
.id = -1,
.dev = {
- .dma_mask = &ethernet_dmamask,
+ .dma_mask = &ethernet_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(pnx833x_ethernet_resources),
@@ -238,8 +238,8 @@ static struct resource pnx833x_sata_resources[] = {
};
static struct platform_device pnx833x_sata_device = {
- .name = "pnx833x-sata",
- .id = -1,
+ .name = "pnx833x-sata",
+ .id = -1,
.num_resources = ARRAY_SIZE(pnx833x_sata_resources),
.resource = pnx833x_sata_resources,
};
@@ -265,7 +265,7 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
.chip_delay = 25,
},
.ctrl = {
- .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl
+ .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl
}
};
@@ -274,17 +274,17 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
* 12 bytes more seems to be the standard that allows for NAND access.
*/
static struct resource pnx833x_flash_nand_resource = {
- .start = PNX8335_NAND_BASE,
- .end = PNX8335_NAND_BASE + 12,
- .flags = IORESOURCE_MEM,
+ .start = PNX8335_NAND_BASE,
+ .end = PNX8335_NAND_BASE + 12,
+ .flags = IORESOURCE_MEM,
};
static struct platform_device pnx833x_flash_nand = {
- .name = "gen_nand",
- .id = -1,
+ .name = "gen_nand",
+ .id = -1,
.num_resources = 1,
.resource = &pnx833x_flash_nand_resource,
- .dev = {
+ .dev = {
.platform_data = &pnx833x_flash_nand_data,
},
};
diff --git a/arch/mips/pnx833x/common/prom.c b/arch/mips/pnx833x/common/prom.c
index 29969f90a6b..dfafdd732ca 100644
--- a/arch/mips/pnx833x/common/prom.c
+++ b/arch/mips/pnx833x/common/prom.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx833x/common/reset.c b/arch/mips/pnx833x/common/reset.c
index e0ea96d29fd..5cc9a9b3601 100644
--- a/arch/mips/pnx833x/common/reset.c
+++ b/arch/mips/pnx833x/common/reset.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx833x/common/setup.c b/arch/mips/pnx833x/common/setup.c
index e51fbc4b644..99b4d94236c 100644
--- a/arch/mips/pnx833x/common/setup.c
+++ b/arch/mips/pnx833x/common/setup.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx833x/stb22x/board.c b/arch/mips/pnx833x/stb22x/board.c
index 4b328ac4305..2ac5203438d 100644
--- a/arch/mips/pnx833x/stb22x/board.c
+++ b/arch/mips/pnx833x/stb22x/board.c
@@ -6,7 +6,7 @@
* Daniel Laird <daniel.j.laird@nxp.com>
*
* Based on software written by:
- * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
+ * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/pnx8550/Makefile b/arch/mips/pnx8550/Makefile
deleted file mode 100644
index 3f7e8561437..00000000000
--- a/arch/mips/pnx8550/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_SOC_PNX8550) += common/
-obj-$(CONFIG_PNX8550_JBS) += jbs/
-obj-$(CONFIG_PNX8550_STB810) += stb810/
diff --git a/arch/mips/pnx8550/Platform b/arch/mips/pnx8550/Platform
deleted file mode 100644
index 0e7fbde768d..00000000000
--- a/arch/mips/pnx8550/Platform
+++ /dev/null
@@ -1,7 +0,0 @@
-platform-$(CONFIG_SOC_PNX8550) += pnx8550/
-
-cflags-$(CONFIG_SOC_PNX8550) += \
- -I$(srctree)/arch/mips/include/asm/mach-pnx8550
-
-load-$(CONFIG_PNX8550_JBS) += 0xffffffff80060000
-load-$(CONFIG_PNX8550_STB810) += 0xffffffff80060000
diff --git a/arch/mips/pnx8550/common/Makefile b/arch/mips/pnx8550/common/Makefile
deleted file mode 100644
index f8ce695dc54..00000000000
--- a/arch/mips/pnx8550/common/Makefile
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Per Hallsmark, per.hallsmark@mvista.com
-#
-# ########################################################################
-#
-# This program is free software; you can distribute it and/or modify it
-# under the terms of the GNU General Public License (Version 2) as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# for more details.
-#
-# You should have received a copy of the GNU General Public License along
-# with this program; if not, write to the Free Software Foundation, Inc.,
-# 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
-#
-# #######################################################################
-#
-# Makefile for the PNX8550 specific kernel interface routines
-# under Linux.
-#
-
-obj-y := setup.o prom.o int.o reset.o time.o proc.o platform.o
-obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c
deleted file mode 100644
index ec684b8c3f7..00000000000
--- a/arch/mips/pnx8550/common/int.c
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- *
- * Copyright (C) 2005 Embedded Alley Solutions, Inc
- * Ported to 2.6.
- *
- * Per Hallsmark, per.hallsmark@mvista.com
- * Copyright (C) 2000, 2001 MIPS Technologies, Inc.
- * Copyright (C) 2001 Ralf Baechle
- *
- * Cleaned up and bug fixing: Pete Popov, ppopov@embeddedalley.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-#include <linux/compiler.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/random.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <int.h>
-#include <uart.h>
-
-/* default prio for interrupts */
-/* first one is a no-no so therefore always prio 0 (disabled) */
-static char gic_prio[PNX8550_INT_GIC_TOTINT] = {
- 0, 1, 1, 1, 1, 15, 1, 1, 1, 1, // 0 - 9
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 10 - 19
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 20 - 29
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 30 - 39
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 40 - 49
- 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, // 50 - 59
- 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // 60 - 69
- 1 // 70
-};
-
-static void hw0_irqdispatch(int irq)
-{
- /* find out which interrupt */
- irq = PNX8550_GIC_VECTOR_0 >> 3;
-
- if (irq == 0) {
- printk("hw0_irqdispatch: irq 0, spurious interrupt?\n");
- return;
- }
- do_IRQ(PNX8550_INT_GIC_MIN + irq);
-}
-
-
-static void timer_irqdispatch(int irq)
-{
- irq = (0x01c0 & read_c0_config7()) >> 6;
-
- if (unlikely(irq == 0)) {
- printk("timer_irqdispatch: irq 0, spurious interrupt?\n");
- return;
- }
-
- if (irq & 0x1)
- do_IRQ(PNX8550_INT_TIMER1);
- if (irq & 0x2)
- do_IRQ(PNX8550_INT_TIMER2);
- if (irq & 0x4)
- do_IRQ(PNX8550_INT_TIMER3);
-}
-
-asmlinkage void plat_irq_dispatch(void)
-{
- unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-
- if (pending & STATUSF_IP2)
- hw0_irqdispatch(2);
- else if (pending & STATUSF_IP7) {
- if (read_c0_config7() & 0x01c0)
- timer_irqdispatch(7);
- } else
- spurious_interrupt();
-}
-
-static inline void modify_cp0_intmask(unsigned clr_mask, unsigned set_mask)
-{
- unsigned long status = read_c0_status();
-
- status &= ~((clr_mask & 0xFF) << 8);
- status |= (set_mask & 0xFF) << 8;
-
- write_c0_status(status);
-}
-
-static inline void mask_gic_int(unsigned int irq_nr)
-{
- /* interrupt disabled, bit 26(WE_ENABLE)=1 and bit 16(enable)=0 */
- PNX8550_GIC_REQ(irq_nr) = 1<<28; /* set priority to 0 */
-}
-
-static inline void unmask_gic_int(unsigned int irq_nr)
-{
- /* set prio mask to lower four bits and enable interrupt */
- PNX8550_GIC_REQ(irq_nr) = (1<<26 | 1<<16) | (1<<28) | gic_prio[irq_nr];
-}
-
-static inline void mask_irq(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq;
-
- if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
- modify_cp0_intmask(1 << irq_nr, 0);
- } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_GIC_MAX)) {
- mask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
- } else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_TIMER_MAX)) {
- modify_cp0_intmask(1 << 7, 0);
- } else {
- printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
- }
-}
-
-static inline void unmask_irq(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq;
-
- if ((PNX8550_INT_CP0_MIN <= irq_nr) && (irq_nr <= PNX8550_INT_CP0_MAX)) {
- modify_cp0_intmask(0, 1 << irq_nr);
- } else if ((PNX8550_INT_GIC_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_GIC_MAX)) {
- unmask_gic_int(irq_nr - PNX8550_INT_GIC_MIN);
- } else if ((PNX8550_INT_TIMER_MIN <= irq_nr) &&
- (irq_nr <= PNX8550_INT_TIMER_MAX)) {
- modify_cp0_intmask(0, 1 << 7);
- } else {
- printk("mask_irq: irq %d doesn't exist!\n", irq_nr);
- }
-}
-
-int pnx8550_set_gic_priority(int irq, int priority)
-{
- int gic_irq = irq-PNX8550_INT_GIC_MIN;
- int prev_priority = PNX8550_GIC_REQ(gic_irq) & 0xf;
-
- gic_prio[gic_irq] = priority;
- PNX8550_GIC_REQ(gic_irq) |= (0x10000000 | gic_prio[gic_irq]);
-
- return prev_priority;
-}
-
-static struct irq_chip level_irq_type = {
- .name = "PNX Level IRQ",
- .irq_mask = mask_irq,
- .irq_unmask = unmask_irq,
-};
-
-static struct irqaction gic_action = {
- .handler = no_action,
- .flags = IRQF_NO_THREAD,
- .name = "GIC",
-};
-
-static struct irqaction timer_action = {
- .handler = no_action,
- .flags = IRQF_TIMER,
- .name = "Timer",
-};
-
-void __init arch_init_irq(void)
-{
- int i;
- int configPR;
-
- for (i = 0; i < PNX8550_INT_CP0_TOTINT; i++)
- irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-
- /* init of GIC/IPC interrupts */
- /* should be done before cp0 since cp0 init enables the GIC int */
- for (i = PNX8550_INT_GIC_MIN; i <= PNX8550_INT_GIC_MAX; i++) {
- int gic_int_line = i - PNX8550_INT_GIC_MIN;
- if (gic_int_line == 0 )
- continue; // don't fiddle with int 0
- /*
- * enable change of TARGET, ENABLE and ACTIVE_LOW bits
- * set TARGET 0 to route through hw0 interrupt
- * set ACTIVE_LOW 0 active high (correct?)
- *
- * We really should setup an interrupt description table
- * to do this nicely.
- * Note, PCI INTA is active low on the bus, but inverted
- * in the GIC, so to us it's active high.
- */
- PNX8550_GIC_REQ(i - PNX8550_INT_GIC_MIN) = 0x1E000000;
-
- /* mask/priority is still 0 so we will not get any
- * interrupts until it is unmasked */
-
- irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
- }
-
- /* Priority level 0 */
- PNX8550_GIC_PRIMASK_0 = PNX8550_GIC_PRIMASK_1 = 0;
-
- /* Set int vector table address */
- PNX8550_GIC_VECTOR_0 = PNX8550_GIC_VECTOR_1 = 0;
-
- irq_set_chip_and_handler(MIPS_CPU_GIC_IRQ, &level_irq_type,
- handle_level_irq);
- setup_irq(MIPS_CPU_GIC_IRQ, &gic_action);
-
- /* init of Timer interrupts */
- for (i = PNX8550_INT_TIMER_MIN; i <= PNX8550_INT_TIMER_MAX; i++)
- irq_set_chip_and_handler(i, &level_irq_type, handle_level_irq);
-
- /* Stop Timer 1-3 */
- configPR = read_c0_config7();
- configPR |= 0x00000038;
- write_c0_config7(configPR);
-
- irq_set_chip_and_handler(MIPS_CPU_TIMER_IRQ, &level_irq_type,
- handle_level_irq);
- setup_irq(MIPS_CPU_TIMER_IRQ, &timer_action);
-}
-
-EXPORT_SYMBOL(pnx8550_set_gic_priority);
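For reference, unmask_gic_int() above packs the write-enable bits and a 4-bit priority into a single PNX8550_GIC_REQ write. A minimal sketch of that composition, with illustrative macro names (the original uses raw shifts; bit 28 is assumed here to be the write-enable for the priority field):

	#define GIC_WE_PRIO	(1u << 28)	/* assumed name: write-enable for the priority field */
	#define GIC_WE_ENABLE	(1u << 26)	/* write-enable for the enable bit */
	#define GIC_ENABLE	(1u << 16)	/* interrupt enable */

	static inline unsigned int gic_req_unmask_value(unsigned int prio)
	{
		/* same value unmask_gic_int() writes: enable the line and set its priority */
		return GIC_WE_PRIO | GIC_WE_ENABLE | GIC_ENABLE | (prio & 0xf);
	}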
diff --git a/arch/mips/pnx8550/common/pci.c b/arch/mips/pnx8550/common/pci.c
deleted file mode 100644
index 98e86ddb86c..00000000000
--- a/arch/mips/pnx8550/common/pci.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- *
- * Author: source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <pci.h>
-#include <glb.h>
-#include <nand.h>
-
-static struct resource pci_io_resource = {
- .start = PNX8550_PCIIO + 0x1000, /* reserve legacy I/O space */
- .end = PNX8550_PCIIO + PNX8550_PCIIO_SIZE,
- .name = "pci IO space",
- .flags = IORESOURCE_IO
-};
-
-static struct resource pci_mem_resource = {
- .start = PNX8550_PCIMEM,
- .end = PNX8550_PCIMEM + PNX8550_PCIMEM_SIZE - 1,
- .name = "pci memory space",
- .flags = IORESOURCE_MEM
-};
-
-extern struct pci_ops pnx8550_pci_ops;
-
-static struct pci_controller pnx8550_controller = {
- .pci_ops = &pnx8550_pci_ops,
- .io_map_base = PNX8550_PORT_BASE,
- .io_resource = &pci_io_resource,
- .mem_resource = &pci_mem_resource,
-};
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-static inline unsigned long get_system_mem_size(void)
-{
- /* Read IP2031_RANK0_ADDR_LO */
- unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
- /* Read IP2031_RANK1_ADDR_HI */
- unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
- return dram_r1_hi - dram_r0_lo + 1;
-}
-
-static int __init pnx8550_pci_setup(void)
-{
- int pci_mem_code;
- int mem_size = get_system_mem_size() >> 20;
-
- /* Clear the Global 2 Register, PCI Inta Output Enable Registers
- Bit 1: Enable DAC powerdown
- -> 0 = DACs are enabled and working normally
- 1 = DACs are powered down
- Bit 0: Enable PCI INTA output
- -> 0 = Disable PCI INTA output
- 1 = Enable PCI INTA output
- */
- PNX8550_GLB2_ENAB_INTA_O = 0;
-
- /* Calc the PCI mem size code */
- if (mem_size >= 128)
- pci_mem_code = SIZE_128M;
- else if (mem_size >= 64)
- pci_mem_code = SIZE_64M;
- else if (mem_size >= 32)
- pci_mem_code = SIZE_32M;
- else
- pci_mem_code = SIZE_16M;
-
- /* Set PCI_XIO registers */
- outl(pci_mem_resource.start, PCI_BASE | PCI_BASE1_LO);
- outl(pci_mem_resource.end + 1, PCI_BASE | PCI_BASE1_HI);
- outl(pci_io_resource.start, PCI_BASE | PCI_BASE2_LO);
- outl(pci_io_resource.end, PCI_BASE | PCI_BASE2_HI);
-
- /* Send memory transaction via PCI_BASE2 */
- outl(0x00000001, PCI_BASE | PCI_IO);
-
- /* Unlock the setup register */
- outl(0xca, PCI_BASE | PCI_UNLOCKREG);
-
- /*
- * BAR0 of PNX8550 (pci base 10) must be zero in order for ide
- * to work, and in order for bus_to_baddr to work without any
- * hacks.
- */
- outl(0x00000000, PCI_BASE | PCI_BASE10);
-
- /*
- *These two bars are set by default or the boot code.
- * However, it's safer to set them here so we're not boot
- * code dependent.
- */
- outl(0x1be00000, PCI_BASE | PCI_BASE14); /* PNX MMIO */
- outl(PNX8550_NAND_BASE_ADDR, PCI_BASE | PCI_BASE18); /* XIO */
-
- outl(PCI_EN_TA |
- PCI_EN_PCI2MMI |
- PCI_EN_XIO |
- PCI_SETUP_BASE18_SIZE(SIZE_32M) |
- PCI_SETUP_BASE18_EN |
- PCI_SETUP_BASE14_EN |
- PCI_SETUP_BASE10_PREF |
- PCI_SETUP_BASE10_SIZE(pci_mem_code) |
- PCI_SETUP_CFGMANAGE_EN |
- PCI_SETUP_PCIARB_EN,
- PCI_BASE |
- PCI_SETUP); /* PCI_SETUP */
- outl(0x00000000, PCI_BASE | PCI_CTRL); /* PCI_CONTROL */
-
- register_pci_controller(&pnx8550_controller);
-
- return 0;
-}
-
-arch_initcall(pnx8550_pci_setup);
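A worked example of the BAR sizing done in pnx8550_pci_setup(), using hypothetical rank-register values (the real ones are read from the IP2031 rank address registers in get_system_mem_size()):

	/* Hypothetical register contents, for illustration only:
	 *   RANK0_ADDR_LO = 0x00000000, RANK1_ADDR_HI = 0x07FFFFFF
	 *   total DRAM    = 0x07FFFFFF - 0x00000000 + 1 = 0x08000000 bytes
	 *   mem_size      = 0x08000000 >> 20            = 128 MiB
	 *   pci_mem_code  = SIZE_128M, so BASE10 is set up as a 128 MiB prefetchable window
	 */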
diff --git a/arch/mips/pnx8550/common/platform.c b/arch/mips/pnx8550/common/platform.c
deleted file mode 100644
index 0a8faeaa7b7..00000000000
--- a/arch/mips/pnx8550/common/platform.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Platform device support for NXP PNX8550 SoCs
- *
- * Copyright 2005, Embedded Alley Solutions, Inc
- *
- * Based on arch/mips/au1000/common/platform.c
- * Platform device support for Au1x00 SoCs.
- *
- * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/resource.h>
-#include <linux/serial.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/platform_device.h>
-#include <linux/usb/ohci_pdriver.h>
-
-#include <int.h>
-#include <usb.h>
-#include <uart.h>
-
-static struct resource pnx8550_usb_ohci_resources[] = {
- [0] = {
- .start = PNX8550_USB_OHCI_OP_BASE,
- .end = PNX8550_USB_OHCI_OP_BASE +
- PNX8550_USB_OHCI_OP_LEN,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PNX8550_INT_USB,
- .end = PNX8550_INT_USB,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct resource pnx8550_uart_resources[] = {
- [0] = {
- .start = PNX8550_UART_PORT0,
- .end = PNX8550_UART_PORT0 + 0xfff,
- .flags = IORESOURCE_MEM,
- },
- [1] = {
- .start = PNX8550_UART_INT(0),
- .end = PNX8550_UART_INT(0),
- .flags = IORESOURCE_IRQ,
- },
- [2] = {
- .start = PNX8550_UART_PORT1,
- .end = PNX8550_UART_PORT1 + 0xfff,
- .flags = IORESOURCE_MEM,
- },
- [3] = {
- .start = PNX8550_UART_INT(1),
- .end = PNX8550_UART_INT(1),
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct pnx8xxx_port pnx8xxx_ports[] = {
- [0] = {
- .port = {
- .type = PORT_PNX8XXX,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)PNX8550_UART_PORT0,
- .mapbase = PNX8550_UART_PORT0,
- .irq = PNX8550_UART_INT(0),
- .uartclk = 3692300,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 0,
- },
- },
- [1] = {
- .port = {
- .type = PORT_PNX8XXX,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)PNX8550_UART_PORT1,
- .mapbase = PNX8550_UART_PORT1,
- .irq = PNX8550_UART_INT(1),
- .uartclk = 3692300,
- .fifosize = 16,
- .flags = UPF_BOOT_AUTOCONF,
- .line = 1,
- },
- },
-};
-
-/* The dmamask must be set for OHCI to work */
-static u64 ohci_dmamask = DMA_BIT_MASK(32);
-
-static u64 uart_dmamask = DMA_BIT_MASK(32);
-
-static int pnx8550_usb_ohci_power_on(struct platform_device *pdev)
-{
- /*
- * Set register CLK48CTL to enable and 48MHz
- */
- outl(0x00000003, PCI_BASE | 0x0004770c);
-
- /*
- * Set register CLK12CTL to enable and 48MHz
- */
- outl(0x00000003, PCI_BASE | 0x00047710);
-
- udelay(100);
-
- return 0;
-}
-
-static void pnx8550_usb_ohci_power_off(struct platform_device *pdev)
-{
- udelay(10);
-}
-
-static struct usb_ohci_pdata pnx8550_usb_ohci_pdata = {
- .power_on = pnx8550_usb_ohci_power_on,
- .power_off = pnx8550_usb_ohci_power_off,
-};
-
-static struct platform_device pnx8550_usb_ohci_device = {
- .name = "ohci-platform",
- .id = -1,
- .dev = {
- .dma_mask = &ohci_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = &pnx8550_usb_ohci_pdata,
- },
- .num_resources = ARRAY_SIZE(pnx8550_usb_ohci_resources),
- .resource = pnx8550_usb_ohci_resources,
-};
-
-static struct platform_device pnx8550_uart_device = {
- .name = "pnx8xxx-uart",
- .id = -1,
- .dev = {
- .dma_mask = &uart_dmamask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- .platform_data = pnx8xxx_ports,
- },
- .num_resources = ARRAY_SIZE(pnx8550_uart_resources),
- .resource = pnx8550_uart_resources,
-};
-
-static struct platform_device *pnx8550_platform_devices[] __initdata = {
- &pnx8550_usb_ohci_device,
- &pnx8550_uart_device,
-};
-
-static int __init pnx8550_platform_init(void)
-{
- return platform_add_devices(pnx8550_platform_devices,
- ARRAY_SIZE(pnx8550_platform_devices));
-}
-
-arch_initcall(pnx8550_platform_init);
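The devices registered above are matched by name against platform drivers elsewhere in the tree; a minimal sketch of the driver-side counterpart for the UART entry (illustrative only, not part of this patch):

	#include <linux/module.h>
	#include <linux/platform_device.h>

	static int pnx8xxx_uart_probe(struct platform_device *pdev)
	{
		struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		int irq = platform_get_irq(pdev, 0);

		if (!mem || irq < 0)
			return -ENODEV;
		/* ioremap mem->start, request the irq, register the port with the serial core */
		return 0;
	}

	static struct platform_driver pnx8xxx_uart_driver = {
		.probe	= pnx8xxx_uart_probe,
		.driver	= { .name = "pnx8xxx-uart" },	/* matches pnx8550_uart_device.name */
	};
	module_platform_driver(pnx8xxx_uart_driver);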
diff --git a/arch/mips/pnx8550/common/proc.c b/arch/mips/pnx8550/common/proc.c
deleted file mode 100644
index 3bba5ec828e..00000000000
--- a/arch/mips/pnx8550/common/proc.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/irq.h>
-#include <linux/sched.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-#include <linux/random.h>
-
-#include <asm/io.h>
-#include <int.h>
-#include <uart.h>
-
-
-static int pnx8550_timers_read(char* page, char** start, off_t offset, int count, int* eof, void* data)
-{
- int len = 0;
- int configPR = read_c0_config7();
-
- if (offset==0) {
- len += sprintf(&page[len], "Timer: count, compare, tc, status\n");
- len += sprintf(&page[len], " 1: %11i, %8i, %1i, %s\n",
- read_c0_count(), read_c0_compare(),
- (configPR>>6)&0x1, ((configPR>>3)&0x1)? "off":"on");
- len += sprintf(&page[len], " 2: %11i, %8i, %1i, %s\n",
- read_c0_count2(), read_c0_compare2(),
- (configPR>>7)&0x1, ((configPR>>4)&0x1)? "off":"on");
- len += sprintf(&page[len], " 3: %11i, %8i, %1i, %s\n",
- read_c0_count3(), read_c0_compare3(),
- (configPR>>8)&0x1, ((configPR>>5)&0x1)? "off":"on");
- }
-
- return len;
-}
-
-static int pnx8550_registers_read(char* page, char** start, off_t offset, int count, int* eof, void* data)
-{
- int len = 0;
-
- if (offset==0) {
- len += sprintf(&page[len], "config1: %#10.8x\n", read_c0_config1());
- len += sprintf(&page[len], "config2: %#10.8x\n", read_c0_config2());
- len += sprintf(&page[len], "config3: %#10.8x\n", read_c0_config3());
- len += sprintf(&page[len], "configPR: %#10.8x\n", read_c0_config7());
- len += sprintf(&page[len], "status: %#10.8x\n", read_c0_status());
- len += sprintf(&page[len], "cause: %#10.8x\n", read_c0_cause());
- len += sprintf(&page[len], "count: %#10.8x\n", read_c0_count());
- len += sprintf(&page[len], "count_2: %#10.8x\n", read_c0_count2());
- len += sprintf(&page[len], "count_3: %#10.8x\n", read_c0_count3());
- len += sprintf(&page[len], "compare: %#10.8x\n", read_c0_compare());
- len += sprintf(&page[len], "compare_2: %#10.8x\n", read_c0_compare2());
- len += sprintf(&page[len], "compare_3: %#10.8x\n", read_c0_compare3());
- }
-
- return len;
-}
-
-static struct proc_dir_entry* pnx8550_dir;
-static struct proc_dir_entry* pnx8550_timers;
-static struct proc_dir_entry* pnx8550_registers;
-
-static int pnx8550_proc_init( void )
-{
-
- // Create /proc/pnx8550
- pnx8550_dir = proc_mkdir("pnx8550", NULL);
- if (!pnx8550_dir) {
- printk(KERN_ERR "Can't create pnx8550 proc dir\n");
- return -1;
- }
-
- // Create /proc/pnx8550/timers
- pnx8550_timers = create_proc_read_entry(
- "timers",
- 0,
- pnx8550_dir,
- pnx8550_timers_read,
- NULL);
-
- if (!pnx8550_timers)
- printk(KERN_ERR "Can't create pnx8550 timers proc file\n");
-
- // Create /proc/pnx8550/registers
- pnx8550_registers = create_proc_read_entry(
- "registers",
- 0,
- pnx8550_dir,
- pnx8550_registers_read,
- NULL);
-
- if (!pnx8550_registers)
- printk(KERN_ERR "Can't create pnx8550 registers proc file\n");
-
- return 0;
-}
-
-__initcall(pnx8550_proc_init);
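With hypothetical counter values, the read handler above makes /proc/pnx8550/timers look roughly like this (one line per hardware timer: count, compare, the tc bit from configPR, and whether the timer is running):

	Timer: count, compare, tc, status
	 1:    12345678,  1080000, 0, on
	 2:    23456789,       -1, 0, on
	 3:           0,        0, 1, off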
diff --git a/arch/mips/pnx8550/common/prom.c b/arch/mips/pnx8550/common/prom.c
deleted file mode 100644
index 49639e8120d..00000000000
--- a/arch/mips/pnx8550/common/prom.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- *
- * Per Hallsmark, per.hallsmark@mvista.com
- *
- * Based on jmr3927/common/prom.c
- *
- * 2004 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/serial_pnx8xxx.h>
-
-#include <asm/bootinfo.h>
-#include <uart.h>
-
-/* #define DEBUG_CMDLINE */
-
-extern int prom_argc;
-extern char **prom_argv, **prom_envp;
-
-typedef struct
-{
- char *name;
-/* char *val; */
-}t_env_var;
-
-
-char * __init prom_getcmdline(void)
-{
- return &(arcs_cmdline[0]);
-}
-
-void __init prom_init_cmdline(void)
-{
- int i;
-
- arcs_cmdline[0] = '\0';
- for (i = 0; i < prom_argc; i++) {
- strcat(arcs_cmdline, prom_argv[i]);
- strcat(arcs_cmdline, " ");
- }
-}
-
-char *prom_getenv(char *envname)
-{
- /*
- * Return a pointer to the given environment variable.
- * Environment variables are stored in the form of "memsize=64".
- */
-
- t_env_var *env = (t_env_var *)prom_envp;
- int i;
-
- i = strlen(envname);
-
- while(env->name) {
- if(strncmp(envname, env->name, i) == 0) {
- return(env->name + strlen(envname) + 1);
- }
- env++;
- }
- return(NULL);
-}
-
-inline unsigned char str2hexnum(unsigned char c)
-{
- if(c >= '0' && c <= '9')
- return c - '0';
- if(c >= 'a' && c <= 'f')
- return c - 'a' + 10;
- if(c >= 'A' && c <= 'F')
- return c - 'A' + 10;
- return 0; /* foo */
-}
-
-inline void str2eaddr(unsigned char *ea, unsigned char *str)
-{
- int i;
-
- for(i = 0; i < 6; i++) {
- unsigned char num;
-
- if((*str == '.') || (*str == ':'))
- str++;
- num = str2hexnum(*str++) << 4;
- num |= (str2hexnum(*str++));
- ea[i] = num;
- }
-}
-
-int get_ethernet_addr(char *ethernet_addr)
-{
- char *ethaddr_str;
-
- ethaddr_str = prom_getenv("ethaddr");
- if (!ethaddr_str) {
- printk("ethaddr not set in boot prom\n");
- return -1;
- }
- str2eaddr(ethernet_addr, ethaddr_str);
- return 0;
-}
-
-void __init prom_free_prom_memory(void)
-{
-}
-
-extern int pnx8550_console_port;
-
-/* used by early printk */
-void prom_putchar(char c)
-{
- if (pnx8550_console_port != -1) {
- /* Wait until FIFO not full */
- while( ((ip3106_fifo(UART_BASE, pnx8550_console_port) & PNX8XXX_UART_FIFO_TXFIFO) >> 16) >= 16)
- ;
- /* Send one char */
- ip3106_fifo(UART_BASE, pnx8550_console_port) = c;
- }
-}
-
-EXPORT_SYMBOL(get_ethernet_addr);
-EXPORT_SYMBOL(str2eaddr);
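A short usage example for the helpers above, with a made-up address: get_ethernet_addr() fetches the "ethaddr" environment string and str2eaddr() converts it to bytes.

	unsigned char mac[6];

	str2eaddr(mac, "00:0e:a5:01:02:03");
	/* mac[] now holds { 0x00, 0x0e, 0xa5, 0x01, 0x02, 0x03 } */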
diff --git a/arch/mips/pnx8550/common/reset.c b/arch/mips/pnx8550/common/reset.c
deleted file mode 100644
index e7a12ff304b..00000000000
--- a/arch/mips/pnx8550/common/reset.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * Reset the PNX8550 board.
- *
- */
-#include <linux/kernel.h>
-
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <glb.h>
-
-void pnx8550_machine_restart(char *command)
-{
- PNX8550_RST_CTL = PNX8550_RST_DO_SW_RST;
-}
-
-void pnx8550_machine_halt(void)
-{
- while (1) {
- if (cpu_wait)
- cpu_wait();
- }
-}
diff --git a/arch/mips/pnx8550/common/setup.c b/arch/mips/pnx8550/common/setup.c
deleted file mode 100644
index fccd6b0c6d3..00000000000
--- a/arch/mips/pnx8550/common/setup.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- *
- * 2.6 port, Embedded Alley Solutions, Inc
- *
- * Based on Per Hallsmark, per.hallsmark@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/irq.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/serial_pnx8xxx.h>
-#include <linux/pm.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-#include <asm/time.h>
-
-#include <glb.h>
-#include <int.h>
-#include <pci.h>
-#include <uart.h>
-#include <nand.h>
-
-extern void __init board_setup(void);
-extern void pnx8550_machine_restart(char *);
-extern void pnx8550_machine_halt(void);
-extern struct resource ioport_resource;
-extern struct resource iomem_resource;
-extern char *prom_getcmdline(void);
-
-struct resource standard_io_resources[] = {
- {
- .start = 0x00,
- .end = 0x1f,
- .name = "dma1",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0x40,
- .end = 0x5f,
- .name = "timer",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0x80,
- .end = 0x8f,
- .name = "dma page reg",
- .flags = IORESOURCE_BUSY
- }, {
- .start = 0xc0,
- .end = 0xdf,
- .name = "dma2",
- .flags = IORESOURCE_BUSY
- },
-};
-
-#define STANDARD_IO_RESOURCES ARRAY_SIZE(standard_io_resources)
-
-extern struct resource pci_io_resource;
-extern struct resource pci_mem_resource;
-
-/* Return the total size of DRAM-memory, (RANK0 + RANK1) */
-unsigned long get_system_mem_size(void)
-{
- /* Read IP2031_RANK0_ADDR_LO */
- unsigned long dram_r0_lo = inl(PCI_BASE | 0x65010);
- /* Read IP2031_RANK1_ADDR_HI */
- unsigned long dram_r1_hi = inl(PCI_BASE | 0x65018);
-
- return dram_r1_hi - dram_r0_lo + 1;
-}
-
-int pnx8550_console_port = -1;
-
-void __init plat_mem_setup(void)
-{
- int i;
- char* argptr;
-
- board_setup(); /* board specific setup */
-
- _machine_restart = pnx8550_machine_restart;
- _machine_halt = pnx8550_machine_halt;
- pm_power_off = pnx8550_machine_halt;
-
- /* Clear the Global 2 Register, PCI Inta Output Enable Registers
- Bit 1: Enable DAC powerdown
- -> 0 = DACs are enabled and working normally
- 1 = DACs are powered down
- Bit 0: Enable PCI INTA output
- -> 0 = Disable PCI INTA output
- 1 = Enable PCI INTA output
- */
- PNX8550_GLB2_ENAB_INTA_O = 0;
-
- /* IO/MEM resources. */
- set_io_port_base(PNX8550_PORT_BASE);
- ioport_resource.start = 0;
- ioport_resource.end = ~0;
- iomem_resource.start = 0;
- iomem_resource.end = ~0;
-
- /* Request I/O space for devices on this board */
- for (i = 0; i < STANDARD_IO_RESOURCES; i++)
- request_resource(&ioport_resource, standard_io_resources + i);
-
- /* Place the Mode Control bit for GPIO pin 16 in primary function */
- /* Pin 16 is used by UART1, UA1_TX */
- outl((PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_16_BIT) |
- (PNX8550_GPIO_MODE_PRIMOP << PNX8550_GPIO_MC_17_BIT),
- PNX8550_GPIO_MC1);
-
- argptr = prom_getcmdline();
- if ((argptr = strstr(argptr, "console=ttyS")) != NULL) {
- argptr += strlen("console=ttyS");
- pnx8550_console_port = *argptr == '0' ? 0 : 1;
-
- /* We must initialize the UART (console) before early printk */
- /* Set LCR to 8-bit and BAUD to 38400 (no 5) */
- ip3106_lcr(UART_BASE, pnx8550_console_port) =
- PNX8XXX_UART_LCR_8BIT;
- ip3106_baud(UART_BASE, pnx8550_console_port) = 5;
- }
-}
diff --git a/arch/mips/pnx8550/common/time.c b/arch/mips/pnx8550/common/time.c
deleted file mode 100644
index 831d6b369e9..00000000000
--- a/arch/mips/pnx8550/common/time.c
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright 2001, 2002, 2003 MontaVista Software Inc.
- * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
- * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
- *
- * Common time service routines for MIPS machines. See
- * Documents/MIPS/README.txt.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/param.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/smp.h>
-#include <linux/kernel_stat.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-#include <asm/time.h>
-#include <asm/hardirq.h>
-#include <asm/div64.h>
-#include <asm/debug.h>
-
-#include <int.h>
-#include <cm.h>
-
-static unsigned long cpj;
-
-static cycle_t hpt_read(struct clocksource *cs)
-{
- return read_c0_count2();
-}
-
-static struct clocksource pnx_clocksource = {
- .name = "pnx8xxx",
- .rating = 200,
- .read = hpt_read,
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-static irqreturn_t pnx8xxx_timer_interrupt(int irq, void *dev_id)
-{
- struct clock_event_device *c = dev_id;
-
- /* clear MATCH, signal the event */
- c->event_handler(c);
-
- return IRQ_HANDLED;
-}
-
-static struct irqaction pnx8xxx_timer_irq = {
- .handler = pnx8xxx_timer_interrupt,
- .flags = IRQF_PERCPU | IRQF_TIMER,
- .name = "pnx8xxx_timer",
-};
-
-static irqreturn_t monotonic_interrupt(int irq, void *dev_id)
-{
- /* Timer 2 clear interrupt */
- write_c0_compare2(-1);
- return IRQ_HANDLED;
-}
-
-static struct irqaction monotonic_irqaction = {
- .handler = monotonic_interrupt,
- .flags = IRQF_TIMER,
- .name = "Monotonic timer",
-};
-
-static int pnx8xxx_set_next_event(unsigned long delta,
- struct clock_event_device *evt)
-{
- write_c0_compare(delta);
- return 0;
-}
-
-static struct clock_event_device pnx8xxx_clockevent = {
- .name = "pnx8xxx_clockevent",
- .features = CLOCK_EVT_FEAT_ONESHOT,
- .set_next_event = pnx8xxx_set_next_event,
-};
-
-static inline void timer_ack(void)
-{
- write_c0_compare(cpj);
-}
-
-__init void plat_time_init(void)
-{
- unsigned int configPR;
- unsigned int n;
- unsigned int m;
- unsigned int p;
- unsigned int pow2p;
-
- pnx8xxx_clockevent.cpumask = cpu_none_mask;
- clockevents_register_device(&pnx8xxx_clockevent);
- clocksource_register(&pnx_clocksource);
-
- /* Timer 1 start */
- configPR = read_c0_config7();
- configPR &= ~0x00000008;
- write_c0_config7(configPR);
-
- /* Timer 2 start */
- configPR = read_c0_config7();
- configPR &= ~0x00000010;
- write_c0_config7(configPR);
-
- /* Timer 3 stop */
- configPR = read_c0_config7();
- configPR |= 0x00000020;
- write_c0_config7(configPR);
-
-
- /* PLL0 sets MIPS clock (PLL1 <=> TM1, PLL6 <=> TM2, PLL5 <=> mem) */
- /* (but only if CLK_MIPS_CTL select value [bits 3:1] is 1: FIXME) */
-
- n = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_N_MASK) >> 16;
- m = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_M_MASK) >> 8;
- p = (PNX8550_CM_PLL0_CTL & PNX8550_CM_PLL_P_MASK) >> 2;
- pow2p = (1 << p);
-
- db_assert(m != 0 && pow2p != 0);
-
- /*
- * Compute the frequency as in the PNX8550 User Manual 1.0, p.186
- * (a.k.a. 8-10). Divide by HZ for a timer offset that results in
- * HZ timer interrupts per second.
- */
- mips_hpt_frequency = 27UL * ((1000000UL * n)/(m * pow2p));
- cpj = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
- write_c0_count(0);
- timer_ack();
-
- /* Setup Timer 2 */
- write_c0_count2(0);
- write_c0_compare2(0xffffffff);
-
- setup_irq(PNX8550_INT_TIMER1, &pnx8xxx_timer_irq);
- setup_irq(PNX8550_INT_TIMER2, &monotonic_irqaction);
-}
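A worked example of the frequency computation in plat_time_init(), using hypothetical PLL0 settings (the real N/M/P values come out of PNX8550_CM_PLL0_CTL):

	/* Hypothetical: N = 96, M = 6, P = 2, so pow2p = 4
	 * mips_hpt_frequency = 27 * ((1000000 * 96) / (6 * 4))
	 *                    = 27 * 4000000
	 *                    = 108000000   (108 MHz)
	 * cpj = DIV_ROUND_CLOSEST(108000000, HZ); with HZ = 100 that is
	 *       1080000 counts between timer interrupts.
	 */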
diff --git a/arch/mips/pnx8550/jbs/Makefile b/arch/mips/pnx8550/jbs/Makefile
deleted file mode 100644
index c4dc3d53eb5..00000000000
--- a/arch/mips/pnx8550/jbs/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Makefile for the NXP JBS Board.
-
-obj-y := init.o board_setup.o irqmap.o
diff --git a/arch/mips/pnx8550/jbs/board_setup.c b/arch/mips/pnx8550/jbs/board_setup.c
deleted file mode 100644
index 57dd903ca40..00000000000
--- a/arch/mips/pnx8550/jbs/board_setup.c
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * JBS Specific board startup routines.
- *
- * Copyright 2005, Embedded Alley Solutions, Inc
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-
-#include <glb.h>
-
-/* CP0 hazard avoidance. */
-#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
- "nop; nop; nop; nop; nop; nop;\n\t" \
- ".set reorder\n\t")
-
-void __init board_setup(void)
-{
- unsigned long configpr;
-
- configpr = read_c0_config7();
- configpr |= (1<<19); /* enable tlb */
- write_c0_config7(configpr);
- BARRIER;
-}
diff --git a/arch/mips/pnx8550/jbs/init.c b/arch/mips/pnx8550/jbs/init.c
deleted file mode 100644
index d59b4a4e5e8..00000000000
--- a/arch/mips/pnx8550/jbs/init.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- *
- * Copyright 2005 Embedded Alley Solutions, Inc
- * source@embeddedalley.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-int prom_argc;
-char **prom_argv, **prom_envp;
-extern void __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-
-const char *get_system_type(void)
-{
- return "NXP PNX8550/JBS";
-}
-
-void __init prom_init(void)
-{
- unsigned long memsize;
-
- //memsize = 0x02800000; /* Trimedia uses memory above */
- memsize = 0x08000000; /* Trimedia uses memory above */
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/pnx8550/jbs/irqmap.c b/arch/mips/pnx8550/jbs/irqmap.c
deleted file mode 100644
index 7fc89842002..00000000000
--- a/arch/mips/pnx8550/jbs/irqmap.c
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * NXP JBS board irqmap.
- *
- * Copyright 2005 Embedded Alley Solutions, Inc
- * source@embeddedalley.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/init.h>
-#include <int.h>
-
-char pnx8550_irq_tab[][5] __initdata = {
- [8] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [9] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [17] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-};
diff --git a/arch/mips/pnx8550/stb810/Makefile b/arch/mips/pnx8550/stb810/Makefile
deleted file mode 100644
index cb4ff022f1f..00000000000
--- a/arch/mips/pnx8550/stb810/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-
-# Makefile for the NXP STB810 Board.
-
-obj-y := prom_init.o board_setup.o irqmap.o
diff --git a/arch/mips/pnx8550/stb810/board_setup.c b/arch/mips/pnx8550/stb810/board_setup.c
deleted file mode 100644
index af2a55e0b4e..00000000000
--- a/arch/mips/pnx8550/stb810/board_setup.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * STB810 specific board startup routines.
- *
- * Based on the arch/mips/nxp/pnx8550/jbs/board_setup.c
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * Copyright 2005 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
-
-#include <glb.h>
-
-void __init board_setup(void)
-{
- unsigned long configpr;
-
- configpr = read_c0_config7();
- configpr |= (1<<19); /* enable tlb */
- write_c0_config7(configpr);
-}
diff --git a/arch/mips/pnx8550/stb810/irqmap.c b/arch/mips/pnx8550/stb810/irqmap.c
deleted file mode 100644
index 8c034963ddc..00000000000
--- a/arch/mips/pnx8550/stb810/irqmap.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * NXP STB810 board irqmap.
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * Copyright 2005 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <int.h>
-
-char pnx8550_irq_tab[][5] __initdata = {
- [8] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [9] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
- [10] = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
-};
diff --git a/arch/mips/pnx8550/stb810/prom_init.c b/arch/mips/pnx8550/stb810/prom_init.c
deleted file mode 100644
index ca7f4ada064..00000000000
--- a/arch/mips/pnx8550/stb810/prom_init.c
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * STB810 specific prom routines
- *
- * Author: MontaVista Software, Inc.
- * source@mvista.com
- *
- * Copyright 2005 MontaVista Software Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-
-int prom_argc;
-char **prom_argv, **prom_envp;
-extern void __init prom_init_cmdline(void);
-extern char *prom_getenv(char *envname);
-
-const char *get_system_type(void)
-{
- return "NXP PNX8950/STB810";
-}
-
-void __init prom_init(void)
-{
- unsigned long memsize;
-
- prom_argc = (int) fw_arg0;
- prom_argv = (char **) fw_arg1;
- prom_envp = (char **) fw_arg2;
-
- prom_init_cmdline();
-
- memsize = 0x08000000; /* Trimedia uses memory above */
- add_memory_region(0, memsize, BOOT_MEM_RAM);
-}
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
index 26a6ef19d71..521e5963df0 100644
--- a/arch/mips/power/cpu.c
+++ b/arch/mips/power/cpu.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009 Lemote Inc.
* Author: Hu Hongbing <huhb@lemote.com>
- * Wu Zhangjin <wuzhangjin@gmail.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/suspend.h>
#include <asm/fpu.h>
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 61e2558a2dc..7e0277a1048 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009 Lemote Inc.
* Author: Hu Hongbing <huhb@lemote.com>
- * Wu Zhangjin <wuzhangjin@gmail.com>
+ * Wu Zhangjin <wuzhangjin@gmail.com>
*/
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
diff --git a/arch/mips/powertv/asic/asic-calliope.c b/arch/mips/powertv/asic/asic-calliope.c
index 7773f3d956b..2f539b43f56 100644
--- a/arch/mips/powertv/asic/asic-calliope.c
+++ b/arch/mips/powertv/asic/asic-calliope.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*/
#include <linux/init.h>
@@ -90,12 +90,12 @@ const struct register_map calliope_register_map __initconst = {
.usb2_stbus_mess_size = {.phys = CALLIOPE_ADDR(0x9BFF04)},
.usb2_stbus_chunk_size = {.phys = CALLIOPE_ADDR(0x9BFF08)},
- .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */
+ .pcie_regs = {.phys = 0x000000}, /* -doesn't exist- */
.tim_ch = {.phys = CALLIOPE_ADDR(0xA02C10)},
.tim_cl = {.phys = CALLIOPE_ADDR(0xA02C14)},
.gpio_dout = {.phys = CALLIOPE_ADDR(0xA02c20)},
.gpio_din = {.phys = CALLIOPE_ADDR(0xA02c24)},
.gpio_dir = {.phys = CALLIOPE_ADDR(0xA02c2C)},
.watchdog = {.phys = CALLIOPE_ADDR(0xA02c30)},
- .front_panel = {.phys = 0x000000}, /* -not used- */
+ .front_panel = {.phys = 0x000000}, /* -not used- */
};
diff --git a/arch/mips/powertv/asic/asic-cronus.c b/arch/mips/powertv/asic/asic-cronus.c
index da076db7b7e..7f8f3429b35 100644
--- a/arch/mips/powertv/asic/asic-cronus.c
+++ b/arch/mips/powertv/asic/asic-cronus.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*/
#include <linux/init.h>
diff --git a/arch/mips/powertv/asic/asic-gaia.c b/arch/mips/powertv/asic/asic-gaia.c
index 47683b370e7..1265b49012e 100644
--- a/arch/mips/powertv/asic/asic-gaia.c
+++ b/arch/mips/powertv/asic/asic-gaia.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: David VomLehn
+ * Author: David VomLehn
*/
#include <linux/init.h>
diff --git a/arch/mips/powertv/asic/asic-zeus.c b/arch/mips/powertv/asic/asic-zeus.c
index 6ff4b10f09d..14e7de137e0 100644
--- a/arch/mips/powertv/asic/asic-zeus.c
+++ b/arch/mips/powertv/asic/asic-zeus.c
@@ -17,10 +17,10 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*/
#include <linux/init.h>
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index bce1872249b..d38b095fd0d 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -1,6 +1,6 @@
/*
*
- * Description: Defines the platform resources for Gaia-based settops.
+ * Description: Defines the platform resources for Gaia-based settops.
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
*
@@ -90,12 +90,12 @@ struct resource asic_resource = {
/*
* Allow override of bootloader-specified model
- * Returns zero on success, a negative errno value on failure. This parameter
+ * Returns zero on success, a negative errno value on failure. This parameter
* allows overriding of the bootloader-specified model.
*/
static char __initdata cmdline[COMMAND_LINE_SIZE];
-#define FORCEFAMILY_PARAM "forcefamily"
+#define FORCEFAMILY_PARAM "forcefamily"
/*
* check_forcefamily - check for, and parse, forcefamily command line parameter
@@ -486,7 +486,7 @@ static void __init pmem_setup_resource(void)
resource->start = phys_to_dma(pmemaddr - 0x80000000);
resource->end = resource->start + pmemlen - 1;
- pr_info("persistent memory: start=0x%x end=0x%x\n",
+ pr_info("persistent memory: start=0x%x end=0x%x\n",
resource->start, resource->end);
}
}
diff --git a/arch/mips/powertv/asic/asic_int.c b/arch/mips/powertv/asic/asic_int.c
index 99d82e10000..f44cd9295ca 100644
--- a/arch/mips/powertv/asic/asic_int.c
+++ b/arch/mips/powertv/asic/asic_int.c
@@ -2,7 +2,7 @@
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2001 Ralf Baechle
- * Portions copyright (C) 2009 Cisco Systems, Inc.
+ * Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
@@ -64,7 +64,7 @@ static void asic_irqdispatch(void)
irq = get_int();
if (irq < 0)
- return; /* interrupt has already been cleared */
+ return; /* interrupt has already been cleared */
do_IRQ(irq);
}
diff --git a/arch/mips/powertv/asic/irq_asic.c b/arch/mips/powertv/asic/irq_asic.c
index fa9ae958471..9344902dc58 100644
--- a/arch/mips/powertv/asic/irq_asic.c
+++ b/arch/mips/powertv/asic/irq_asic.c
@@ -5,8 +5,8 @@
* Modified from arch/mips/kernel/irq-rm7000.c:
* Copyright (C) 2003 Ralf Baechle
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/powertv/asic/prealloc-calliope.c b/arch/mips/powertv/asic/prealloc-calliope.c
index 3fc5d46687a..98dc5165057 100644
--- a/arch/mips/powertv/asic/prealloc-calliope.c
+++ b/arch/mips/powertv/asic/prealloc-calliope.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -153,7 +153,7 @@ struct resource non_dvr_calliope_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -260,7 +260,7 @@ struct resource non_dvr_vze_calliope_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -380,6 +380,6 @@ struct resource non_dvr_vzf_calliope_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/asic/prealloc-cronus.c b/arch/mips/powertv/asic/prealloc-cronus.c
index c532b50521e..7c6ce759693 100644
--- a/arch/mips/powertv/asic/prealloc-cronus.c
+++ b/arch/mips/powertv/asic/prealloc-cronus.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -78,7 +78,7 @@ struct resource dvr_cronus_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -185,7 +185,7 @@ struct resource dvr_cronus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -241,7 +241,7 @@ struct resource non_dvr_cronus_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -335,6 +335,6 @@ struct resource non_dvr_cronus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/asic/prealloc-cronuslite.c b/arch/mips/powertv/asic/prealloc-cronuslite.c
index b5537e49e7f..a7937ba7b4c 100644
--- a/arch/mips/powertv/asic/prealloc-cronuslite.c
+++ b/arch/mips/powertv/asic/prealloc-cronuslite.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -65,7 +65,7 @@ struct resource non_dvr_cronuslite_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -169,6 +169,6 @@ struct resource non_dvr_cronuslite_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/asic/prealloc-gaia.c b/arch/mips/powertv/asic/prealloc-gaia.c
index 8ac8c7aeb98..2303bbfe6b8 100644
--- a/arch/mips/powertv/asic/prealloc-gaia.c
+++ b/arch/mips/powertv/asic/prealloc-gaia.c
@@ -17,7 +17,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: David VomLehn
+ * Author: David VomLehn
*/
#include <linux/init.h>
@@ -33,22 +33,22 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x241FFFFF, /* 2MiB */
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24201FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -56,22 +56,22 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
},
{
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
},
{
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
+ .name = "MediaMemory2",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -87,28 +87,28 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -119,16 +119,16 @@ struct resource dvr_gaia_resources[] __initdata = {
* Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*
*/
{
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x64180000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -141,10 +141,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -157,10 +157,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -173,10 +173,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -189,10 +189,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x00280000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x00280000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -205,10 +205,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -221,10 +221,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
+ .name = "DisplayBins1",
+ .start = 0x64AD4000,
+ .end = 0x64AD5000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -237,11 +237,11 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ITFS",
- .start = 0x64180000,
+ .name = "ITFS",
+ .start = 0x64180000,
/* 815,104 bytes each for 2 ITFS partitions. */
- .end = 0x6430DFFF,
- .flags = IORESOURCE_IO,
+ .end = 0x6430DFFF,
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -254,17 +254,17 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
+ .name = "AvfsDmaMem",
+ .start = 0x6430E000,
/* (945K * 8) = (128K *3) 5 playbacks / 3 server */
- .end = 0x64AD0000 - 1,
- .flags = IORESOURCE_IO,
+ .end = 0x64AD0000 - 1,
+ .flags = IORESOURCE_IO,
},
{
- .name = "AvfsFileSys",
- .start = 0x64AD0000,
- .end = 0x64AD1000 - 1, /* 4K */
- .flags = IORESOURCE_IO,
+ .name = "AvfsFileSys",
+ .start = 0x64AD0000,
+ .end = 0x64AD1000 - 1, /* 4K */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -277,10 +277,10 @@ struct resource dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
+ .name = "SmartCardInfo",
+ .start = 0x64AD1000,
+ .end = 0x64AD3800 - 1,
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -290,22 +290,22 @@ struct resource dvr_gaia_resources[] __initdata = {
* NP IPC - must be video bank 2
*/
{
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
},
/*
* Add other resources here
@@ -323,22 +323,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231aImage", /* Delta-Mu 1 image and ram */
- .start = 0x24000000,
- .end = 0x241FFFFF, /* 2MiB */
- .flags = IORESOURCE_MEM,
+ .name = "ST231aImage", /* Delta-Mu 1 image and ram */
+ .start = 0x24000000,
+ .end = 0x241FFFFF, /* 2MiB */
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
- .start = 0x24200000,
- .end = 0x24201FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ST231aMonitor", /* 8KiB block ST231a monitor */
+ .start = 0x24200000,
+ .end = 0x24201FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "MediaMemory1",
- .start = 0x24202000,
- .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_MEM,
+ .name = "MediaMemory1",
+ .start = 0x24202000,
+ .end = 0x25FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -346,22 +346,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "ST231bImage", /* Delta-Mu 2 image and ram */
- .start = 0x60000000,
- .end = 0x601FFFFF, /* 2MiB */
- .flags = IORESOURCE_IO,
+ .name = "ST231bImage", /* Delta-Mu 2 image and ram */
+ .start = 0x60000000,
+ .end = 0x601FFFFF, /* 2MiB */
+ .flags = IORESOURCE_IO,
},
{
- .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
- .start = 0x60200000,
- .end = 0x60201FFF,
- .flags = IORESOURCE_IO,
+ .name = "ST231bMonitor", /* 8KiB block ST231b monitor */
+ .start = 0x60200000,
+ .end = 0x60201FFF,
+ .flags = IORESOURCE_IO,
},
{
- .name = "MediaMemory2",
- .start = 0x60202000,
- .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
- .flags = IORESOURCE_IO,
+ .name = "MediaMemory2",
+ .start = 0x60202000,
+ .end = 0x61FFFFFF, /*~29.9MiB (32MiB - (2MiB + 8KiB)) */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -377,28 +377,28 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DSP_Image_Buff",
- .start = 0x00000000,
- .end = 0x000FFFFF,
- .flags = IORESOURCE_MEM,
+ .name = "DSP_Image_Buff",
+ .start = 0x00000000,
+ .end = 0x000FFFFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_CPU_PCM_Buff",
- .start = 0x00000000,
- .end = 0x00009FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_CPU_PCM_Buff",
+ .start = 0x00000000,
+ .end = 0x00009FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_AUX_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_AUX_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "ADSC_Main_Buff",
- .start = 0x00000000,
- .end = 0x00003FFF,
- .flags = IORESOURCE_MEM,
+ .name = "ADSC_Main_Buff",
+ .start = 0x00000000,
+ .end = 0x00003FFF,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -409,16 +409,16 @@ struct resource non_dvr_gaia_resources[] __initdata = {
* Arbitrary Based Buffers:
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*
*/
{
- .name = "AVMEMPartition0",
- .start = 0x63580000,
- .end = 0x64180000 - 1, /* 12 MB total */
- .flags = IORESOURCE_IO,
+ .name = "AVMEMPartition0",
+ .start = 0x63580000,
+ .end = 0x64180000 - 1, /* 12 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -431,10 +431,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "Docsis",
- .start = 0x62000000,
- .end = 0x62700000 - 1, /* 7 MB total */
- .flags = IORESOURCE_IO,
+ .name = "Docsis",
+ .start = 0x62000000,
+ .end = 0x62700000 - 1, /* 7 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -447,10 +447,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "GraphicsHeap",
- .start = 0x62700000,
- .end = 0x63500000 - 1, /* 14 MB total */
- .flags = IORESOURCE_IO,
+ .name = "GraphicsHeap",
+ .start = 0x62700000,
+ .end = 0x63500000 - 1, /* 14 MB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -463,10 +463,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "MulticomSHM",
- .start = 0x26000000,
- .end = 0x26020000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "MulticomSHM",
+ .start = 0x26000000,
+ .end = 0x26020000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -479,10 +479,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "BMM_Buffer",
- .start = 0x00000000,
- .end = 0x000AA000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "BMM_Buffer",
+ .start = 0x00000000,
+ .end = 0x000AA000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -495,10 +495,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins0",
- .start = 0x00000000,
- .end = 0x00000FFF, /* 4 KB total */
- .flags = IORESOURCE_MEM,
+ .name = "DisplayBins0",
+ .start = 0x00000000,
+ .end = 0x00000FFF, /* 4 KB total */
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -511,10 +511,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DisplayBins1",
- .start = 0x64AD4000,
- .end = 0x64AD5000 - 1, /* 4 KB total */
- .flags = IORESOURCE_IO,
+ .name = "DisplayBins1",
+ .start = 0x64AD4000,
+ .end = 0x64AD5000 - 1, /* 4 KB total */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -523,10 +523,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "AvfsDmaMem",
- .start = 0x6430E000,
- .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
- .flags = IORESOURCE_IO,
+ .name = "AvfsDmaMem",
+ .start = 0x6430E000,
+ .end = 0x645D2C00 - 1, /* 945K * 3 for playback */
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -539,10 +539,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "DiagPersistentMemory",
- .start = 0x00000000,
- .end = 0x10000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "DiagPersistentMemory",
+ .start = 0x00000000,
+ .end = 0x10000 - 1,
+ .flags = IORESOURCE_MEM,
},
/*
*
@@ -555,10 +555,10 @@ struct resource non_dvr_gaia_resources[] __initdata = {
*
*/
{
- .name = "SmartCardInfo",
- .start = 0x64AD1000,
- .end = 0x64AD3800 - 1,
- .flags = IORESOURCE_IO,
+ .name = "SmartCardInfo",
+ .start = 0x64AD1000,
+ .end = 0x64AD3800 - 1,
+ .flags = IORESOURCE_IO,
},
/*
*
@@ -568,22 +568,22 @@ struct resource non_dvr_gaia_resources[] __initdata = {
* NP IPC - must be video bank 2
*/
{
- .name = "NP_Reset_Vector",
- .start = 0x27c00000,
- .end = 0x27c01000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Reset_Vector",
+ .start = 0x27c00000,
+ .end = 0x27c01000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_Image",
- .start = 0x27020000,
- .end = 0x27060000 - 1,
- .flags = IORESOURCE_MEM,
+ .name = "NP_Image",
+ .start = 0x27020000,
+ .end = 0x27060000 - 1,
+ .flags = IORESOURCE_MEM,
},
{
- .name = "NP_IPC",
- .start = 0x63500000,
- .end = 0x63580000 - 1,
- .flags = IORESOURCE_IO,
+ .name = "NP_IPC",
+ .start = 0x63500000,
+ .end = 0x63580000 - 1,
+ .flags = IORESOURCE_IO,
},
{ },
};
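The two resource tables above are plain __initdata descriptions of fixed physical carve-outs; nothing in this hunk shows how they are actually consumed by the PowerTV ASIC code. As a rough, hypothetical illustration only (not part of the patch), one entry could be claimed against the global iomem tree so it shows up in /proc/iomem; the values are copied from the NP_Reset_Vector entry above.

/* Illustrative sketch only; the consuming PowerTV code is not in this hunk. */
static struct resource np_reset_vector_example = {
	.name	= "NP_Reset_Vector",
	.start	= 0x27c00000,
	.end	= 0x27c01000 - 1,
	.flags	= IORESOURCE_MEM,
};

static int __init claim_np_reset_vector(void)
{
	/* fails if something else already claimed this range */
	return request_resource(&iomem_resource, &np_reset_vector_example);
}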
diff --git a/arch/mips/powertv/asic/prealloc-zeus.c b/arch/mips/powertv/asic/prealloc-zeus.c
index 96480a2395c..6e76f09c68d 100644
--- a/arch/mips/powertv/asic/prealloc-zeus.c
+++ b/arch/mips/powertv/asic/prealloc-zeus.c
@@ -17,8 +17,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*/
#include <linux/init.h>
@@ -78,7 +78,7 @@ struct resource dvr_zeus_resources[] __initdata =
*
* This memory area is used for allocating buffers for Video decoding
* purposes. Allocation/De-allocation within this buffer is managed
- * by the STAVMEM driver of the STAPI. They could be Decimated
+ * by the STAVMEM driver of the STAPI. They could be Decimated
* Picture Buffers, Intermediate Buffers, as deemed necessary for
* video decoding purposes, for any video decoders on Zeus.
*/
@@ -175,7 +175,7 @@ struct resource dvr_zeus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
@@ -299,6 +299,6 @@ struct resource non_dvr_zeus_resources[] __initdata =
* End of Resource marker
*/
{
- .flags = 0,
+ .flags = 0,
},
};
diff --git a/arch/mips/powertv/init.c b/arch/mips/powertv/init.c
index c6979353980..5bd9d8f468c 100644
--- a/arch/mips/powertv/init.c
+++ b/arch/mips/powertv/init.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc.
* All rights reserved.
* Authors: Carsten Langgaard <carstenl@mips.com>
* Maciej W. Rozycki <macro@mips.com>
diff --git a/arch/mips/powertv/ioremap.c b/arch/mips/powertv/ioremap.c
index a77c6f62fe2..d060478aab0 100644
--- a/arch/mips/powertv/ioremap.c
+++ b/arch/mips/powertv/ioremap.c
@@ -19,9 +19,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: David VomLehn <dvomlehn@cisco.com>
+ * Author: David VomLehn <dvomlehn@cisco.com>
*
- * Description: Defines the platform resources for the SA settop.
+ * Description: Defines the platform resources for the SA settop.
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
diff --git a/arch/mips/powertv/memory.c b/arch/mips/powertv/memory.c
index fb3d29660c4..6e5f1bdc59b 100644
--- a/arch/mips/powertv/memory.c
+++ b/arch/mips/powertv/memory.c
@@ -60,7 +60,7 @@ unsigned long ptv_memsize;
* struct low_mem_reserved - Items in low memory that are reserved
* @start: Physical address of item
* @size: Size, in bytes, of this item
- * @is_aliased: True if this is RAM aliased from another location. If false,
+ * @is_aliased: True if this is RAM aliased from another location. If false,
* it is something other than aliased RAM and the RAM in the
* unaliased address is still visible outside of low memory.
*/
diff --git a/arch/mips/powertv/powertv-usb.c b/arch/mips/powertv/powertv-usb.c
index b0e2afa8939..d845eace58e 100644
--- a/arch/mips/powertv/powertv-usb.c
+++ b/arch/mips/powertv/powertv-usb.c
@@ -1,7 +1,7 @@
/*
* powertv-usb.c
*
- * Description: ASIC-specific USB device setup and shutdown
+ * Description: ASIC-specific USB device setup and shutdown
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
* Copyright (C) 2009 Cisco Systems, Inc.
@@ -20,8 +20,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
- * Author: Ken Eppinett
- * David Schleef <ds@schleef.org>
+ * Author: Ken Eppinett
+ * David Schleef <ds@schleef.org>
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
@@ -70,15 +70,15 @@
#define MCC2_GMII_RX2_CLOCK_SELECT (1 << 16)
#define ETHER_CLK_CONFIG (MCC2_GMII_GCLK_TO_PAD | \
- MCC2_ETHER125_0_CLOCK_SELECT | \
+ MCC2_ETHER125_0_CLOCK_SELECT | \
MCC2_RMII_0_CLOCK_SELECT | \
MCC2_GMII_TX0_CLOCK_SELECT | \
MCC2_GMII_RX0_CLOCK_SELECT | \
- MCC2_ETHER125_1_CLOCK_SELECT | \
+ MCC2_ETHER125_1_CLOCK_SELECT | \
MCC2_RMII_1_CLOCK_SELECT | \
MCC2_GMII_TX1_CLOCK_SELECT | \
MCC2_GMII_RX1_CLOCK_SELECT | \
- MCC2_ETHER125_2_CLOCK_SELECT | \
+ MCC2_ETHER125_2_CLOCK_SELECT | \
MCC2_RMII_2_CLOCK_SELECT | \
MCC2_GMII_TX2_CLOCK_SELECT | \
MCC2_GMII_RX2_CLOCK_SELECT)
@@ -98,9 +98,9 @@
#define QAM_FS_DISABLE_DIVIDE_BY_3 (1 << 5)
#define QAM_FS_ENABLE_PROGRAM (1 << 4)
-#define QAM_FS_ENABLE_OUTPUT (1 << 3)
-#define QAM_FS_SELECT_TEST_BYPASS (1 << 2)
-#define QAM_FS_DISABLE_DIGITAL_STANDBY (1 << 1)
+#define QAM_FS_ENABLE_OUTPUT (1 << 3)
+#define QAM_FS_SELECT_TEST_BYPASS (1 << 2)
+#define QAM_FS_DISABLE_DIGITAL_STANDBY (1 << 1)
#define QAM_FS_CHOOSE_FS (1 << 0)
/* Definitions for fs432x4a_ctl register */
@@ -142,14 +142,14 @@
static struct resource ehci_resources[] = {
{
.parent = &asic_resource,
- .start = 0,
- .end = 0xff,
- .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_MEM,
},
{
- .start = irq_usbehci,
- .end = irq_usbehci,
- .flags = IORESOURCE_IRQ,
+ .start = irq_usbehci,
+ .end = irq_usbehci,
+ .flags = IORESOURCE_IRQ,
},
};
@@ -169,14 +169,14 @@ static struct platform_device ehci_device = {
static struct resource ohci_resources[] = {
{
.parent = &asic_resource,
- .start = 0,
- .end = 0xff,
- .flags = IORESOURCE_MEM,
+ .start = 0,
+ .end = 0xff,
+ .flags = IORESOURCE_MEM,
},
{
- .start = irq_usbohci,
- .end = irq_usbohci,
- .flags = IORESOURCE_IRQ,
+ .start = irq_usbohci,
+ .end = irq_usbohci,
+ .flags = IORESOURCE_IRQ,
},
};
@@ -207,9 +207,9 @@ static DEFINE_SPINLOCK(usb_regs_lock);
*
* QAM frequency selection code, which affects the frequency at which USB
* runs. The frequency is calculated as:
- * 2^15 * ndiv * Fin
+ * 2^15 * ndiv * Fin
* Fout = ------------------------------------------------------------
- * (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
+ * (sdiv * (ipe * (1 + md/32) - (ipe - 2^15)*(1 + (md + 1)/32)))
* where:
* Fin 54 MHz
* ndiv QAM_FS_NSDIV_54MHZ ? 8 : 16
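The Fout expression quoted in the comment above is easier to sanity-check numerically outside the kernel. A minimal sketch (not part of the patch, and written in floating point purely for illustration; concrete sdiv/ipe/md values would be assumptions read from the fs432x4a registers):

/*
 * Evaluates Fout = (2^15 * ndiv * Fin) /
 *   (sdiv * (ipe * (1 + md/32) - (ipe - 2^15) * (1 + (md + 1)/32)))
 * exactly as written in the comment above.
 */
static double qam_fs_fout(double fin, unsigned int ndiv, unsigned int sdiv,
			  double ipe, double md)
{
	double num = 32768.0 * ndiv * fin;	/* 2^15 * ndiv * Fin */
	double den = sdiv * (ipe * (1.0 + md / 32.0) -
			     (ipe - 32768.0) * (1.0 + (md + 1.0) / 32.0));

	return num / den;
}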
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
new file mode 100644
index 00000000000..a0b0197cab0
--- /dev/null
+++ b/arch/mips/ralink/Kconfig
@@ -0,0 +1,32 @@
+if RALINK
+
+choice
+ prompt "Ralink SoC selection"
+ default SOC_RT305X
+ help
+ Select Ralink MIPS SoC type.
+
+ config SOC_RT305X
+ bool "RT305x"
+ select USB_ARCH_HAS_HCD
+ select USB_ARCH_HAS_OHCI
+ select USB_ARCH_HAS_EHCI
+
+endchoice
+
+choice
+ prompt "Devicetree selection"
+ default DTB_RT_NONE
+ help
+ Select the devicetree.
+
+ config DTB_RT_NONE
+ bool "None"
+
+ config DTB_RT305X_EVAL
+ bool "RT305x eval kit"
+ depends on SOC_RT305X
+
+endchoice
+
+endif
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
new file mode 100644
index 00000000000..939757f0e71
--- /dev/null
+++ b/arch/mips/ralink/Makefile
@@ -0,0 +1,15 @@
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 as published
+# by the Free Software Foundation.
+# Makefile for the Ralink common stuff
+#
+# Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+# Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+
+obj-y := prom.o of.o reset.o clk.o irq.o
+
+obj-$(CONFIG_SOC_RT305X) += rt305x.o
+
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+obj-y += dts/
diff --git a/arch/mips/ralink/Platform b/arch/mips/ralink/Platform
new file mode 100644
index 00000000000..6babd65765e
--- /dev/null
+++ b/arch/mips/ralink/Platform
@@ -0,0 +1,10 @@
+#
+# Ralink SoC common stuff
+#
+core-$(CONFIG_RALINK) += arch/mips/ralink/
+cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink
+
+#
+# Ralink RT305x
+#
+load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
new file mode 100644
index 00000000000..8dfa22ff300
--- /dev/null
+++ b/arch/mips/ralink/clk.c
@@ -0,0 +1,72 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clkdev.h>
+#include <linux/clk.h>
+
+#include <asm/time.h>
+
+#include "common.h"
+
+struct clk {
+ struct clk_lookup cl;
+ unsigned long rate;
+};
+
+void ralink_clk_add(const char *dev, unsigned long rate)
+{
+ struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
+
+ if (!clk)
+ panic("failed to add clock\n");
+
+ clk->cl.dev_id = dev;
+ clk->cl.clk = clk;
+
+ clk->rate = rate;
+
+ clkdev_add(&clk->cl);
+}
+
+/*
+ * Linux clock API
+ */
+int clk_enable(struct clk *clk)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(clk_enable);
+
+void clk_disable(struct clk *clk)
+{
+}
+EXPORT_SYMBOL_GPL(clk_disable);
+
+unsigned long clk_get_rate(struct clk *clk)
+{
+ return clk->rate;
+}
+EXPORT_SYMBOL_GPL(clk_get_rate);
+
+void __init plat_time_init(void)
+{
+ struct clk *clk;
+
+ ralink_of_remap();
+
+ ralink_clk_init();
+ clk = clk_get_sys("cpu", NULL);
+ if (IS_ERR(clk))
+ panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
+ pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
+ mips_hpt_frequency = clk_get_rate(clk) / 2;
+ clk_put(clk);
+}
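ralink_clk_add() above only records a (dev_id, rate) pair through clkdev; clk_enable()/clk_disable() are no-ops and clk_get_rate() simply returns the stored number. A hedged consumer sketch (not part of the patch; the probe function and its device are placeholders) showing how a platform driver would pick up one of these fixed rates. The lookup works because dev_name(&pdev->dev) matches the dev_id string registered later in this series, e.g. "10000c00.uartlite".

/* Hypothetical consumer of a rate registered via ralink_clk_add(). */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	dev_info(&pdev->dev, "bus clock: %lu Hz\n", clk_get_rate(clk));
	clk_put(clk);
	return 0;
}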
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
new file mode 100644
index 00000000000..300990313e1
--- /dev/null
+++ b/arch/mips/ralink/common.h
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#ifndef _RALINK_COMMON_H__
+#define _RALINK_COMMON_H__
+
+#define RAMIPS_SYS_TYPE_LEN 32
+
+struct ralink_pinmux_grp {
+ const char *name;
+ u32 mask;
+ int gpio_first;
+ int gpio_last;
+};
+
+struct ralink_pinmux {
+ struct ralink_pinmux_grp *mode;
+ struct ralink_pinmux_grp *uart;
+ int uart_shift;
+ void (*wdt_reset)(void);
+};
+extern struct ralink_pinmux gpio_pinmux;
+
+struct ralink_soc_info {
+ unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
+ unsigned char *compatible;
+};
+extern struct ralink_soc_info soc_info;
+
+extern void ralink_of_remap(void);
+
+extern void ralink_clk_init(void);
+extern void ralink_clk_add(const char *dev, unsigned long rate);
+
+extern void prom_soc_init(struct ralink_soc_info *soc_info);
+
+__iomem void *plat_of_remap_node(const char *node);
+
+#endif /* _RALINK_COMMON_H__ */
diff --git a/arch/mips/ralink/dts/Makefile b/arch/mips/ralink/dts/Makefile
new file mode 100644
index 00000000000..1a69fb30095
--- /dev/null
+++ b/arch/mips/ralink/dts/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
new file mode 100644
index 00000000000..069d0660e1d
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -0,0 +1,106 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3050-soc", "ralink,rt3052-soc";
+
+ cpus {
+ cpu@0 {
+ compatible = "mips,mips24KEc";
+ };
+ };
+
+ chosen {
+ bootargs = "console=ttyS0,57600 init=/init";
+ };
+
+ cpuintc: cpuintc@0 {
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "mti,cpu-interrupt-controller";
+ };
+
+ palmbus@10000000 {
+ compatible = "palmbus";
+ reg = <0x10000000 0x200000>;
+ ranges = <0x0 0x10000000 0x1FFFFF>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ sysc@0 {
+ compatible = "ralink,rt3052-sysc", "ralink,rt3050-sysc";
+ reg = <0x0 0x100>;
+ };
+
+ timer@100 {
+ compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt";
+ reg = <0x100 0x100>;
+ };
+
+ intc: intc@200 {
+ compatible = "ralink,rt3052-intc", "ralink,rt2880-intc";
+ reg = <0x200 0x100>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <2>;
+ };
+
+ memc@300 {
+ compatible = "ralink,rt3052-memc", "ralink,rt3050-memc";
+ reg = <0x300 0x100>;
+ };
+
+ gpio0: gpio@600 {
+ compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+ reg = <0x600 0x34>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ralink,ngpio = <24>;
+ ralink,regs = [ 00 04 08 0c
+ 20 24 28 2c
+ 30 34 ];
+ };
+
+ gpio1: gpio@638 {
+ compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+ reg = <0x638 0x24>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ralink,ngpio = <16>;
+ ralink,regs = [ 00 04 08 0c
+ 10 14 18 1c
+ 20 24 ];
+ };
+
+ gpio2: gpio@660 {
+ compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio";
+ reg = <0x660 0x24>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ ralink,ngpio = <12>;
+ ralink,regs = [ 00 04 08 0c
+ 10 14 18 1c
+ 20 24 ];
+ };
+
+ uartlite@c00 {
+ compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a";
+ reg = <0xc00 0x100>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <12>;
+
+ reg-shift = <2>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
new file mode 100644
index 00000000000..148a590bc41
--- /dev/null
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -0,0 +1,52 @@
+/dts-v1/;
+
+/include/ "rt3050.dtsi"
+
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc";
+ model = "Ralink RT3052 evaluation board";
+
+ memory@0 {
+ reg = <0x0 0x2000000>;
+ };
+
+ palmbus@10000000 {
+ sysc@0 {
+ ralink,pinmmux = "uartlite", "spi";

+ ralink,uartmux = "gpio";
+ ralink,wdtmux = <0>;
+ };
+ };
+
+ cfi@1f000000 {
+ compatible = "cfi-flash";
+ reg = <0x1f000000 0x800000>;
+
+ bank-width = <2>;
+ device-width = <2>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ partition@0 {
+ label = "uboot";
+ reg = <0x0 0x30000>;
+ read-only;
+ };
+ partition@30000 {
+ label = "uboot-env";
+ reg = <0x30000 0x10000>;
+ read-only;
+ };
+ partition@40000 {
+ label = "calibration";
+ reg = <0x40000 0x10000>;
+ read-only;
+ };
+ partition@50000 {
+ label = "linux";
+ reg = <0x50000 0x7b0000>;
+ };
+ };
+};
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
new file mode 100644
index 00000000000..c4ae47eb24a
--- /dev/null
+++ b/arch/mips/ralink/early_printk.c
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2011-2012 Gabor Juhos <juhosg@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/serial_reg.h>
+
+#include <asm/addrspace.h>
+
+#define EARLY_UART_BASE 0x10000c00
+
+#define UART_REG_RX 0x00
+#define UART_REG_TX 0x04
+#define UART_REG_IER 0x08
+#define UART_REG_IIR 0x0c
+#define UART_REG_FCR 0x10
+#define UART_REG_LCR 0x14
+#define UART_REG_MCR 0x18
+#define UART_REG_LSR 0x1c
+
+static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
+
+static inline void uart_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, uart_membase + reg);
+}
+
+static inline u32 uart_r32(unsigned reg)
+{
+ return __raw_readl(uart_membase + reg);
+}
+
+void prom_putchar(unsigned char ch)
+{
+ while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+ ;
+ uart_w32(ch, UART_REG_TX);
+ while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
+ ;
+}
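prom_putchar() above is the only primitive the early console needs; the rest of the early_printk plumbing is generic MIPS code. A trivial, hypothetical helper (not part of the patch) built on top of it:

/*
 * Emit a NUL-terminated string through the polled UART, e.g. for
 * debugging before the real console is registered.
 */
static void __init early_puts(const char *s)
{
	while (*s) {
		if (*s == '\n')
			prom_putchar('\r');
		prom_putchar(*s++);
	}
}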
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
new file mode 100644
index 00000000000..6d054c5ec9a
--- /dev/null
+++ b/arch/mips/ralink/irq.c
@@ -0,0 +1,180 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/interrupt.h>
+
+#include <asm/irq_cpu.h>
+#include <asm/mipsregs.h>
+
+#include "common.h"
+
+/* INTC register offsets */
+#define INTC_REG_STATUS0 0x00
+#define INTC_REG_STATUS1 0x04
+#define INTC_REG_TYPE 0x20
+#define INTC_REG_RAW_STATUS 0x30
+#define INTC_REG_ENABLE 0x34
+#define INTC_REG_DISABLE 0x38
+
+#define INTC_INT_GLOBAL BIT(31)
+
+#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
+#define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5)
+#define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6)
+#define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7)
+
+/* we have a cascade of 8 irqs */
+#define RALINK_INTC_IRQ_BASE 8
+
+/* we have 32 SoC irqs */
+#define RALINK_INTC_IRQ_COUNT 32
+
+#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9)
+
+static void __iomem *rt_intc_membase;
+
+static inline void rt_intc_w32(u32 val, unsigned reg)
+{
+ __raw_writel(val, rt_intc_membase + reg);
+}
+
+static inline u32 rt_intc_r32(unsigned reg)
+{
+ return __raw_readl(rt_intc_membase + reg);
+}
+
+static void ralink_intc_irq_unmask(struct irq_data *d)
+{
+ rt_intc_w32(BIT(d->hwirq), INTC_REG_ENABLE);
+}
+
+static void ralink_intc_irq_mask(struct irq_data *d)
+{
+ rt_intc_w32(BIT(d->hwirq), INTC_REG_DISABLE);
+}
+
+static struct irq_chip ralink_intc_irq_chip = {
+ .name = "INTC",
+ .irq_unmask = ralink_intc_irq_unmask,
+ .irq_mask = ralink_intc_irq_mask,
+ .irq_mask_ack = ralink_intc_irq_mask,
+};
+
+unsigned int __cpuinit get_c0_compare_int(void)
+{
+ return CP0_LEGACY_COMPARE_IRQ;
+}
+
+static void ralink_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+ u32 pending = rt_intc_r32(INTC_REG_STATUS0);
+
+ if (pending) {
+ struct irq_domain *domain = irq_get_handler_data(irq);
+ generic_handle_irq(irq_find_mapping(domain, __ffs(pending)));
+ } else {
+ spurious_interrupt();
+ }
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned long pending;
+
+ pending = read_c0_status() & read_c0_cause() & ST0_IM;
+
+ if (pending & STATUSF_IP7)
+ do_IRQ(RALINK_CPU_IRQ_COUNTER);
+
+ else if (pending & STATUSF_IP5)
+ do_IRQ(RALINK_CPU_IRQ_FE);
+
+ else if (pending & STATUSF_IP6)
+ do_IRQ(RALINK_CPU_IRQ_WIFI);
+
+ else if (pending & STATUSF_IP2)
+ do_IRQ(RALINK_CPU_IRQ_INTC);
+
+ else
+ spurious_interrupt();
+}
+
+static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
+{
+ irq_set_chip_and_handler(irq, &ralink_intc_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_domain_ops = {
+ .xlate = irq_domain_xlate_onecell,
+ .map = intc_map,
+};
+
+static int __init intc_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ struct irq_domain *domain;
+ int irq;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (!irq)
+ panic("Failed to get INTC IRQ");
+
+ if (of_address_to_resource(node, 0, &res))
+ panic("Failed to get intc memory range");
+
+ if (!request_mem_region(res.start, resource_size(&res),
+ res.name))
+ pr_err("Failed to request intc memory");
+
+ rt_intc_membase = ioremap_nocache(res.start,
+ resource_size(&res));
+ if (!rt_intc_membase)
+ panic("Failed to remap intc memory");
+
+ /* disable all interrupts */
+ rt_intc_w32(~0, INTC_REG_DISABLE);
+
+ /* route all INTC interrupts to MIPS HW0 interrupt */
+ rt_intc_w32(0, INTC_REG_TYPE);
+
+ domain = irq_domain_add_legacy(node, RALINK_INTC_IRQ_COUNT,
+ RALINK_INTC_IRQ_BASE, 0, &irq_domain_ops, NULL);
+ if (!domain)
+ panic("Failed to add irqdomain");
+
+ rt_intc_w32(INTC_INT_GLOBAL, INTC_REG_ENABLE);
+
+ irq_set_chained_handler(irq, ralink_intc_irq_handler);
+ irq_set_handler_data(irq, domain);
+
+ cp0_perfcount_irq = irq_create_mapping(domain, 9);
+
+ return 0;
+}
+
+static struct of_device_id __initdata of_irq_ids[] = {
+ { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init },
+ { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
+ {},
+};
+
+void __init arch_init_irq(void)
+{
+ of_irq_init(of_irq_ids);
+}
+
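Because the domain above is created with irq_domain_add_legacy(..., RALINK_INTC_IRQ_COUNT, RALINK_INTC_IRQ_BASE, 0, ...), SoC hwirq n lands on Linux irq 8 + n. A hedged sketch (not part of the patch; the handler and cookie are placeholders) of how a non-DT user could request one of the cascaded lines, using the uartlite hwirq 12 from the rt3050.dtsi added earlier in this series:

/* Hypothetical: hwirq 12 (uartlite) maps to Linux irq 8 + 12 = 20. */
static int example_request_uartlite_irq(void *cookie,
					irqreturn_t (*handler)(int, void *))
{
	int virq = RALINK_INTC_IRQ_BASE + 12;

	return request_irq(virq, handler, 0, "uartlite-example", cookie);
}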
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
new file mode 100644
index 00000000000..4165e70775b
--- /dev/null
+++ b/arch/mips/ralink/of.c
@@ -0,0 +1,107 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <linux/of_fdt.h>
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+
+#include <asm/reboot.h>
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+__iomem void *rt_sysc_membase;
+__iomem void *rt_memc_membase;
+
+extern struct boot_param_header __dtb_start;
+
+__iomem void *plat_of_remap_node(const char *node)
+{
+ struct resource res;
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, node);
+ if (!np)
+ panic("Failed to find %s node", node);
+
+ if (of_address_to_resource(np, 0, &res))
+ panic("Failed to get resource for %s", node);
+
+ if (!request_mem_region(res.start,
+ resource_size(&res),
+ res.name))
+ panic("Failed to request resources for %s", node);
+
+ return ioremap_nocache(res.start, resource_size(&res));
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+ void *fdt_copy;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+ /* Before we do anything, lets reserve the dt blob */
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+ /* The strings in the flattened tree are referenced directly by the
+ * device tree, so copy the flattened device tree from init memory
+ * to regular memory.
+ */
+ fdt_copy = alloc_bootmem(size);
+ memcpy(fdt_copy, initial_boot_params, size);
+ initial_boot_params = fdt_copy;
+
+ unflatten_device_tree();
+
+ /* free the space reserved for the dt blob */
+ free_bootmem(base, size);
+}
+
+void __init plat_mem_setup(void)
+{
+ set_io_port_base(KSEG1);
+
+ /*
+ * Load the builtin devicetree. This causes the chosen node to be
+ * parsed resulting in our memory appearing
+ */
+ __dt_setup_arch(&__dtb_start);
+}
+
+static int __init plat_of_setup(void)
+{
+ static struct of_device_id of_ids[3];
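+ /* of_ids[2] is intentionally left zeroed: it terminates the match
+ * table handed to of_platform_populate() below. */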
+ int len = sizeof(of_ids[0].compatible);
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strncpy(of_ids[0].compatible, soc_info.compatible, len);
+ strncpy(of_ids[1].compatible, "palmbus", len);
+
+ if (of_platform_populate(NULL, of_ids, NULL, NULL))
+ panic("failed to populate DT\n");
+
+ return 0;
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
new file mode 100644
index 00000000000..9c64f029d04
--- /dev/null
+++ b/arch/mips/ralink/prom.c
@@ -0,0 +1,69 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/string.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/bootinfo.h>
+#include <asm/addrspace.h>
+
+#include "common.h"
+
+struct ralink_soc_info soc_info;
+
+const char *get_system_type(void)
+{
+ return soc_info.sys_type;
+}
+
+static __init void prom_init_cmdline(int argc, char **argv)
+{
+ int i;
+
+ pr_debug("prom: fw_arg0=%08x fw_arg1=%08x fw_arg2=%08x fw_arg3=%08x\n",
+ (unsigned int)fw_arg0, (unsigned int)fw_arg1,
+ (unsigned int)fw_arg2, (unsigned int)fw_arg3);
+
+ argc = fw_arg0;
+ argv = (char **) KSEG1ADDR(fw_arg1);
+
+ if (!argv) {
+ pr_debug("argv=%p is invalid, skipping\n",
+ argv);
+ return;
+ }
+
+ for (i = 0; i < argc; i++) {
+ char *p = (char *) KSEG1ADDR(argv[i]);
+
+ if (CPHYSADDR(p) && *p) {
+ pr_debug("argv[%d]: %s\n", i, p);
+ strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
+ strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
+ }
+ }
+}
+
+void __init prom_init(void)
+{
+ int argc = fw_arg0;
+ char **argv = (char **) KSEG1ADDR(fw_arg1);
+
+ prom_soc_init(&soc_info);
+
+ pr_info("SoC Type: %s\n", get_system_type());
+
+ prom_init_cmdline(argc, argv);
+}
+
+void __init prom_free_prom_memory(void)
+{
+}
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
new file mode 100644
index 00000000000..22120e512e7
--- /dev/null
+++ b/arch/mips/ralink/reset.c
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/pm.h>
+#include <linux/io.h>
+
+#include <asm/reboot.h>
+
+#include <asm/mach-ralink/ralink_regs.h>
+
+/* Reset Control */
+#define SYSC_REG_RESET_CTRL 0x034
+#define RSTCTL_RESET_SYSTEM BIT(0)
+
+static void ralink_restart(char *command)
+{
+ local_irq_disable();
+ rt_sysc_w32(RSTCTL_RESET_SYSTEM, SYSC_REG_RESET_CTRL);
+ unreachable();
+}
+
+static void ralink_halt(void)
+{
+ local_irq_disable();
+ unreachable();
+}
+
+static int __init mips_reboot_setup(void)
+{
+ _machine_restart = ralink_restart;
+ _machine_halt = ralink_halt;
+ pm_power_off = ralink_halt;
+
+ return 0;
+}
+
+arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
new file mode 100644
index 00000000000..0a4bbdcf59d
--- /dev/null
+++ b/arch/mips/ralink/rt305x.c
@@ -0,0 +1,242 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Parts of this file are based on Ralink's 2.6.21 BSP
+ *
+ * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
+ * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mach-ralink/ralink_regs.h>
+#include <asm/mach-ralink/rt305x.h>
+
+#include "common.h"
+
+enum rt305x_soc_type rt305x_soc;
+
+struct ralink_pinmux_grp mode_mux[] = {
+ {
+ .name = "i2c",
+ .mask = RT305X_GPIO_MODE_I2C,
+ .gpio_first = RT305X_GPIO_I2C_SD,
+ .gpio_last = RT305X_GPIO_I2C_SCLK,
+ }, {
+ .name = "spi",
+ .mask = RT305X_GPIO_MODE_SPI,
+ .gpio_first = RT305X_GPIO_SPI_EN,
+ .gpio_last = RT305X_GPIO_SPI_CLK,
+ }, {
+ .name = "uartlite",
+ .mask = RT305X_GPIO_MODE_UART1,
+ .gpio_first = RT305X_GPIO_UART1_TXD,
+ .gpio_last = RT305X_GPIO_UART1_RXD,
+ }, {
+ .name = "jtag",
+ .mask = RT305X_GPIO_MODE_JTAG,
+ .gpio_first = RT305X_GPIO_JTAG_TDO,
+ .gpio_last = RT305X_GPIO_JTAG_TDI,
+ }, {
+ .name = "mdio",
+ .mask = RT305X_GPIO_MODE_MDIO,
+ .gpio_first = RT305X_GPIO_MDIO_MDC,
+ .gpio_last = RT305X_GPIO_MDIO_MDIO,
+ }, {
+ .name = "sdram",
+ .mask = RT305X_GPIO_MODE_SDRAM,
+ .gpio_first = RT305X_GPIO_SDRAM_MD16,
+ .gpio_last = RT305X_GPIO_SDRAM_MD31,
+ }, {
+ .name = "rgmii",
+ .mask = RT305X_GPIO_MODE_RGMII,
+ .gpio_first = RT305X_GPIO_GE0_TXD0,
+ .gpio_last = RT305X_GPIO_GE0_RXCLK,
+ }, {0}
+};
+
+struct ralink_pinmux_grp uart_mux[] = {
+ {
+ .name = "uartf",
+ .mask = RT305X_GPIO_MODE_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "pcm uartf",
+ .mask = RT305X_GPIO_MODE_PCM_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "pcm i2s",
+ .mask = RT305X_GPIO_MODE_PCM_I2S,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "i2s uartf",
+ .mask = RT305X_GPIO_MODE_I2S_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "pcm gpio",
+ .mask = RT305X_GPIO_MODE_PCM_GPIO,
+ .gpio_first = RT305X_GPIO_10,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "gpio uartf",
+ .mask = RT305X_GPIO_MODE_GPIO_UARTF,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "gpio i2s",
+ .mask = RT305X_GPIO_MODE_GPIO_I2S,
+ .gpio_first = RT305X_GPIO_7,
+ .gpio_last = RT305X_GPIO_14,
+ }, {
+ .name = "gpio",
+ .mask = RT305X_GPIO_MODE_GPIO,
+ }, {0}
+};
+
+void rt305x_wdt_reset(void)
+{
+ u32 t;
+
+ /* enable WDT reset output on pin SRAM_CS_N */
+ t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+ t |= RT305X_SYSCFG_SRAM_CS0_MODE_WDT <<
+ RT305X_SYSCFG_SRAM_CS0_MODE_SHIFT;
+ rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
+}
+
+struct ralink_pinmux gpio_pinmux = {
+ .mode = mode_mux,
+ .uart = uart_mux,
+ .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
+ .wdt_reset = rt305x_wdt_reset,
+};
+
+void __init ralink_clk_init(void)
+{
+ unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate;
+ u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
+
+ if (soc_is_rt305x() || soc_is_rt3350()) {
+ t = (t >> RT305X_SYSCFG_CPUCLK_SHIFT) &
+ RT305X_SYSCFG_CPUCLK_MASK;
+ switch (t) {
+ case RT305X_SYSCFG_CPUCLK_LOW:
+ cpu_rate = 320000000;
+ break;
+ case RT305X_SYSCFG_CPUCLK_HIGH:
+ cpu_rate = 384000000;
+ break;
+ }
+ sys_rate = uart_rate = wdt_rate = cpu_rate / 3;
+ } else if (soc_is_rt3352()) {
+ t = (t >> RT3352_SYSCFG0_CPUCLK_SHIFT) &
+ RT3352_SYSCFG0_CPUCLK_MASK;
+ switch (t) {
+ case RT3352_SYSCFG0_CPUCLK_LOW:
+ cpu_rate = 384000000;
+ break;
+ case RT3352_SYSCFG0_CPUCLK_HIGH:
+ cpu_rate = 400000000;
+ break;
+ }
+ sys_rate = wdt_rate = cpu_rate / 3;
+ uart_rate = 40000000;
+ } else if (soc_is_rt5350()) {
+ t = (t >> RT5350_SYSCFG0_CPUCLK_SHIFT) &
+ RT5350_SYSCFG0_CPUCLK_MASK;
+ switch (t) {
+ case RT5350_SYSCFG0_CPUCLK_360:
+ cpu_rate = 360000000;
+ sys_rate = cpu_rate / 3;
+ break;
+ case RT5350_SYSCFG0_CPUCLK_320:
+ cpu_rate = 320000000;
+ sys_rate = cpu_rate / 4;
+ break;
+ case RT5350_SYSCFG0_CPUCLK_300:
+ cpu_rate = 300000000;
+ sys_rate = cpu_rate / 3;
+ break;
+ default:
+ BUG();
+ }
+ uart_rate = 40000000;
+ wdt_rate = sys_rate;
+ } else {
+ BUG();
+ }
+
+ ralink_clk_add("cpu", cpu_rate);
+ ralink_clk_add("10000b00.spi", sys_rate);
+ ralink_clk_add("10000100.timer", wdt_rate);
+ ralink_clk_add("10000500.uart", uart_rate);
+ ralink_clk_add("10000c00.uartlite", uart_rate);
+}
+
+void __init ralink_of_remap(void)
+{
+ rt_sysc_membase = plat_of_remap_node("ralink,rt3050-sysc");
+ rt_memc_membase = plat_of_remap_node("ralink,rt3050-memc");
+
+ if (!rt_sysc_membase || !rt_memc_membase)
+ panic("Failed to remap core resources");
+}
+
+void prom_soc_init(struct ralink_soc_info *soc_info)
+{
+ void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
+ unsigned char *name;
+ u32 n0;
+ u32 n1;
+ u32 id;
+
+ n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
+ n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
+
+ if (n0 == RT3052_CHIP_NAME0 && n1 == RT3052_CHIP_NAME1) {
+ unsigned long icache_sets;
+
+ icache_sets = (read_c0_config1() >> 22) & 7;
+ if (icache_sets == 1) {
+ rt305x_soc = RT305X_SOC_RT3050;
+ name = "RT3050";
+ soc_info->compatible = "ralink,rt3050-soc";
+ } else {
+ rt305x_soc = RT305X_SOC_RT3052;
+ name = "RT3052";
+ soc_info->compatible = "ralink,rt3052-soc";
+ }
+ } else if (n0 == RT3350_CHIP_NAME0 && n1 == RT3350_CHIP_NAME1) {
+ rt305x_soc = RT305X_SOC_RT3350;
+ name = "RT3350";
+ soc_info->compatible = "ralink,rt3350-soc";
+ } else if (n0 == RT3352_CHIP_NAME0 && n1 == RT3352_CHIP_NAME1) {
+ rt305x_soc = RT305X_SOC_RT3352;
+ name = "RT3352";
+ soc_info->compatible = "ralink,rt3352-soc";
+ } else if (n0 == RT5350_CHIP_NAME0 && n1 == RT5350_CHIP_NAME1) {
+ rt305x_soc = RT305X_SOC_RT5350;
+ name = "RT5350";
+ soc_info->compatible = "ralink,rt5350-soc";
+ } else {
+ panic("rt305x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
+ }
+
+ id = __raw_readl(sysc + SYSC_REG_CHIP_ID);
+
+ snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
+ "Ralink %s id:%u rev:%u",
+ name,
+ (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK,
+ (id & CHIP_ID_REV_MASK));
+}
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index 716e9a12f0e..3af00b2a26e 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -215,9 +215,9 @@ static struct resource rb532_wdt_res[] = {
};
static struct platform_device rb532_wdt = {
- .name = "rc32434_wdt",
- .id = -1,
- .resource = rb532_wdt_res,
+ .name = "rc32434_wdt",
+ .id = -1,
+ .resource = rb532_wdt_res,
.num_resources = ARRAY_SIZE(rb532_wdt_res),
};
@@ -235,8 +235,8 @@ static struct plat_serial8250_port rb532_uart_res[] = {
};
static struct platform_device rb532_uart = {
- .name = "serial8250",
- .id = PLAT8250_DEV_PLATFORM,
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
.dev.platform_data = &rb532_uart_res,
};
@@ -273,7 +273,7 @@ static void __init parse_mac_addr(char *macstr)
/* NAND definitions */
-#define NAND_CHIP_DELAY 25
+#define NAND_CHIP_DELAY 25
static void __init rb532_nand_setup(void)
{
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index 6ec41df3cb9..a18007613c3 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -44,10 +44,10 @@ struct rb532_gpio_chip {
static struct resource rb532_gpio_reg0_res[] = {
{
- .name = "gpio_reg0",
- .start = REGBASE + GPIOBASE,
- .end = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
- .flags = IORESOURCE_MEM,
+ .name = "gpio_reg0",
+ .start = REGBASE + GPIOBASE,
+ .end = REGBASE + GPIOBASE + sizeof(struct rb532_gpio_reg) - 1,
+ .flags = IORESOURCE_MEM,
}
};
diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c
index f298430cff0..3a431e802bb 100644
--- a/arch/mips/rb532/irq.c
+++ b/arch/mips/rb532/irq.c
@@ -21,7 +21,7 @@
*
* Copyright 2002 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * stevel@mvista.com or source@mvista.com
+ * stevel@mvista.com or source@mvista.com
*/
#include <linux/bitops.h>
@@ -51,7 +51,7 @@ struct intr_group {
volatile u32 *base_addr;
};
-#define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32)
+#define RC32434_NR_IRQS (GROUP4_IRQ_BASE + 32)
#if (NR_IRQS < RC32434_NR_IRQS)
#error Too little irqs defined. Did you override <asm/irq.h> ?
diff --git a/arch/mips/sgi-ip22/ip22-eisa.c b/arch/mips/sgi-ip22/ip22-eisa.c
index 4a6057b35b9..a0a79222ce0 100644
--- a/arch/mips/sgi-ip22/ip22-eisa.c
+++ b/arch/mips/sgi-ip22/ip22-eisa.c
@@ -2,7 +2,7 @@
* Basic EISA bus support for the SGI Indigo-2.
*
* (C) 2002 Pascal Dameme <netinet@freesurf.fr>
- * and Marc Zyngier <mzyngier@freesurf.fr>
+ * and Marc Zyngier <mzyngier@freesurf.fr>
*
* This code is released under both the GPL version 2 and BSD
* licenses. Either license may be used.
@@ -40,13 +40,13 @@
/* I2 has four EISA slots. */
#define IP22_EISA_MAX_SLOTS 4
-#define EISA_MAX_IRQ 16
+#define EISA_MAX_IRQ 16
-#define EIU_MODE_REG 0x0001ffc0
-#define EIU_STAT_REG 0x0001ffc4
-#define EIU_PREMPT_REG 0x0001ffc8
-#define EIU_QUIET_REG 0x0001ffcc
-#define EIU_INTRPT_ACK 0x00010004
+#define EIU_MODE_REG 0x0001ffc0
+#define EIU_STAT_REG 0x0001ffc4
+#define EIU_PREMPT_REG 0x0001ffc8
+#define EIU_QUIET_REG 0x0001ffcc
+#define EIU_INTRPT_ACK 0x00010004
static char __init *decode_eisa_sig(unsigned long addr)
{
diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c
index f5ebc092aed..ab0e379dc7e 100644
--- a/arch/mips/sgi-ip22/ip22-gio.c
+++ b/arch/mips/sgi-ip22/ip22-gio.c
@@ -15,7 +15,7 @@ static struct bus_type gio_bus_type;
static struct {
const char *name;
- __u8 id;
+ __u8 id;
} gio_name_table[] = {
{ .name = "SGI Impact", .id = 0x10 },
{ .name = "Phobos G160", .id = 0x35 },
@@ -376,15 +376,15 @@ static void ip22_check_gio(int slotno, unsigned long addr)
}
static struct bus_type gio_bus_type = {
- .name = "gio",
+ .name = "gio",
.dev_attrs = gio_dev_attrs,
- .match = gio_bus_match,
- .probe = gio_device_probe,
- .remove = gio_device_remove,
+ .match = gio_bus_match,
+ .probe = gio_device_probe,
+ .remove = gio_device_remove,
.suspend = gio_device_suspend,
- .resume = gio_device_resume,
+ .resume = gio_device_resume,
.shutdown = gio_device_shutdown,
- .uevent = gio_device_uevent,
+ .uevent = gio_device_uevent,
};
static struct resource gio_bus_resource = {
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 3f2b7633f94..3db64d51798 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -1,12 +1,12 @@
/*
* ip22-int.c: Routines for generic manipulation of the INT[23] ASIC
- * found on INDY and Indigo2 workstations.
+ * found on INDY and Indigo2 workstations.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1998 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999 Andrew R. Baker (andrewb@uab.edu)
- * - Indigo2 changes
- * - Interrupt handling fixes
+ * - Indigo2 changes
+ * - Interrupt handling fixes
* Copyright (C) 2001, 2003 Ladislav Michl (ladis@linux-mips.org)
*/
#include <linux/types.h>
@@ -195,24 +195,24 @@ extern void indy_8254timer_irq(void);
* at all) like:
*
* MIPS IRQ Source
- * -------- ------
- * 0 Software (ignored)
- * 1 Software (ignored)
- * 2 Local IRQ level zero
- * 3 Local IRQ level one
- * 4 8254 Timer zero
- * 5 8254 Timer one
- * 6 Bus Error
- * 7 R4k timer (what we use)
+ * -------- ------
+ * 0 Software (ignored)
+ * 1 Software (ignored)
+ * 2 Local IRQ level zero
+ * 3 Local IRQ level one
+ * 4 8254 Timer zero
+ * 5 8254 Timer one
+ * 6 Bus Error
+ * 7 R4k timer (what we use)
*
* We handle the IRQ according to _our_ priority which is:
*
- * Highest ---- R4k Timer
- * Local IRQ zero
- * Local IRQ one
- * Bus Error
- * 8254 Timer zero
- * Lowest ---- 8254 Timer one
+ * Highest ---- R4k Timer
+ * Local IRQ zero
+ * Local IRQ one
+ * Bus Error
+ * 8254 Timer zero
+ * Lowest ---- 8254 Timer one
*
* then we just return, if multiple IRQs are pending then we will just take
* another exception, big deal.
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index 75ada8a9713..7cec0a4e527 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -121,22 +121,22 @@ void __init sgimc_init(void)
*/
/* Step 0: Make sure we turn off the watchdog in case it's
- * still running (which might be the case after a
- * soft reboot).
+ * still running (which might be the case after a
+ * soft reboot).
*/
tmp = sgimc->cpuctrl0;
tmp &= ~SGIMC_CCTRL0_WDOG;
sgimc->cpuctrl0 = tmp;
/* Step 1: The CPU/GIO error status registers will not latch
- * up a new error status until the register has been
- * cleared by the cpu. These status registers are
- * cleared by writing any value to them.
+ * up a new error status until the register has been
+ * cleared by the cpu. These status registers are
+ * cleared by writing any value to them.
*/
sgimc->cstat = sgimc->gstat = 0;
/* Step 2: Enable all parity checking in cpu control register
- * zero.
+ * zero.
*/
/* don't touch parity settings for IP28 */
tmp = sgimc->cpuctrl0;
@@ -147,7 +147,7 @@ void __init sgimc_init(void)
sgimc->cpuctrl0 = tmp;
/* Step 3: Setup the MC write buffer depth, this is controlled
- * in cpu control register 1 in the lower 4 bits.
+ * in cpu control register 1 in the lower 4 bits.
*/
tmp = sgimc->cpuctrl1;
tmp &= ~0xf;
@@ -155,26 +155,26 @@ void __init sgimc_init(void)
sgimc->cpuctrl1 = tmp;
/* Step 4: Initialize the RPSS divider register to run as fast
- * as it can correctly operate. The register is laid
- * out as follows:
+ * as it can correctly operate. The register is laid
+ * out as follows:
*
- * ----------------------------------------
- * | RESERVED | INCREMENT | DIVIDER |
- * ----------------------------------------
- * 31 16 15 8 7 0
+ * ----------------------------------------
+ * | RESERVED | INCREMENT | DIVIDER |
+ * ----------------------------------------
+ * 31 16 15 8 7 0
*
- * DIVIDER determines how often a 'tick' happens,
- * INCREMENT determines by how the RPSS increment
- * registers value increases at each 'tick'. Thus,
- * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
+ * DIVIDER determines how often a 'tick' happens,
+ * INCREMENT determines by how the RPSS increment
+ * registers value increases at each 'tick'. Thus,
+ * for IP22 we get INCREMENT=1, DIVIDER=1 == 0x101
*/
sgimc->divider = 0x101;
/* Step 5: Initialize GIO64 arbitrator configuration register.
*
* NOTE: HPC init code in sgihpc_init() must run before us because
- * we need to know Guiness vs. FullHouse and the board
- * revision on this machine. You have been warned.
+ * we need to know Guiness vs. FullHouse and the board
+ * revision on this machine. You have been warned.
*/
/* First the basic invariants across all GIO64 implementations. */
@@ -187,18 +187,18 @@ void __init sgimc_init(void)
if (SGIOC_SYSID_BOARDREV(sgioc->sysid) < 2) {
tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC at 64bits */
tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp0 pipelines */
- tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
+ tmp |= SGIMC_GIOPAR_MASTEREXP1; /* exp1 masters */
tmp |= SGIMC_GIOPAR_RTIMEEXP0; /* exp0 is realtime */
} else {
tmp |= SGIMC_GIOPAR_HPC264; /* 2nd HPC 64bits */
tmp |= SGIMC_GIOPAR_PLINEEXP0; /* exp[01] pipelined */
tmp |= SGIMC_GIOPAR_PLINEEXP1;
- tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
+ tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA masters */
}
} else {
/* Guiness specific settings. */
tmp |= SGIMC_GIOPAR_EISA64; /* MC talks to EISA at 64bits */
- tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
+ tmp |= SGIMC_GIOPAR_MASTEREISA; /* EISA bus can act as master */
}
sgimc->giopar = tmp; /* poof */
diff --git a/arch/mips/sgi-ip22/ip22-nvram.c b/arch/mips/sgi-ip22/ip22-nvram.c
index 0177566475d..e077036a676 100644
--- a/arch/mips/sgi-ip22/ip22-nvram.c
+++ b/arch/mips/sgi-ip22/ip22-nvram.c
@@ -14,11 +14,11 @@
#define EEPROM_WRITE 0xa000 /* serial memory write */
#define EEPROM_WRALL 0x8800 /* write all registers */
#define EEPROM_WDS 0x8000 /* disable all programming */
-#define EEPROM_PRREAD 0xc000 /* read protect register */
-#define EEPROM_PREN 0x9800 /* enable protect register mode */
-#define EEPROM_PRCLEAR 0xffff /* clear protect register */
-#define EEPROM_PRWRITE 0xa000 /* write protect register */
-#define EEPROM_PRDS 0x8000 /* disable protect register, forever */
+#define EEPROM_PRREAD 0xc000 /* read protect register */
+#define EEPROM_PREN 0x9800 /* enable protect register mode */
+#define EEPROM_PRCLEAR 0xffff /* clear protect register */
+#define EEPROM_PRWRITE 0xa000 /* write protect register */
+#define EEPROM_PRDS 0x8000 /* disable protect register, forever */
#define EEPROM_EPROT 0x01 /* Protect register enable */
#define EEPROM_CSEL 0x02 /* Chip select */
@@ -27,7 +27,7 @@
#define EEPROM_DATI 0x10 /* Data in */
/* We need to use these functions early... */
-#define delay() ({ \
+#define delay() ({ \
int x; \
for (x=0; x<100000; x++) __asm__ __volatile__(""); })
@@ -35,7 +35,7 @@
__raw_writel(__raw_readl(ptr) & ~EEPROM_DATO, ptr); \
__raw_writel(__raw_readl(ptr) & ~EEPROM_ECLK, ptr); \
__raw_writel(__raw_readl(ptr) & ~EEPROM_EPROT, ptr); \
- delay(); \
+ delay(); \
__raw_writel(__raw_readl(ptr) | EEPROM_CSEL, ptr); \
__raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
@@ -46,7 +46,7 @@
__raw_writel(__raw_readl(ptr) | EEPROM_EPROT, ptr); \
__raw_writel(__raw_readl(ptr) | EEPROM_ECLK, ptr); })
-#define BITS_IN_COMMAND 11
+#define BITS_IN_COMMAND 11
/*
* clock in the nvram command and the register number. For the
* national semiconductor nv ram chip the op code is 3 bits and
diff --git a/arch/mips/sgi-ip22/ip22-platform.c b/arch/mips/sgi-ip22/ip22-platform.c
index 698904daf90..a14fd32b76b 100644
--- a/arch/mips/sgi-ip22/ip22-platform.c
+++ b/arch/mips/sgi-ip22/ip22-platform.c
@@ -137,7 +137,7 @@ static int __init sgiseeq_devinit(void)
eth0_pd.hpc = hpc3c0;
eth0_pd.irq = SGI_ENET_IRQ;
-#define EADDR_NVOFS 250
+#define EADDR_NVOFS 250
for (i = 0; i < 3; i++) {
unsigned short tmp = ip22_nvram_read(EADDR_NVOFS / 2 + i);
@@ -155,17 +155,17 @@ static int __init sgiseeq_devinit(void)
return 0;
sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 |
- SGIMC_GIOPAR_HPC264;
+ SGIMC_GIOPAR_HPC264;
hpc3c1->pbus_piocfg[0][0] = 0x3ffff;
/* interrupt/config register on Challenge S Mezz board */
hpc3c1->pbus_extregs[0][0] = 0x30;
eth1_pd.hpc = hpc3c1;
eth1_pd.irq = SGI_GIO_0_IRQ;
-#define EADDR_NVOFS 250
+#define EADDR_NVOFS 250
for (i = 0; i < 3; i++) {
unsigned short tmp = ip22_eeprom_read(&hpc3c1->eeprom,
- EADDR_NVOFS / 2 + i);
+ EADDR_NVOFS / 2 + i);
eth1_pd.mac[2 * i] = tmp >> 8;
eth1_pd.mac[2 * i + 1] = tmp & 0xff;
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 20363d29cb5..063c2dd31e7 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -101,7 +101,7 @@ static void debounce(unsigned long data)
del_timer(&debounce_timer);
if (sgint->istat1 & SGINT_ISTAT1_PWR) {
/* Interrupt still being sent. */
- debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
+ debounce_timer.expires = jiffies + (HZ / 20); /* 0.05s */
add_timer(&debounce_timer);
sgioc->panel = SGIOC_PANEL_POWERON | SGIOC_PANEL_POWERINTR |
@@ -166,7 +166,7 @@ static irqreturn_t panel_int(int irq, void *dev_id)
}
static int panic_event(struct notifier_block *this, unsigned long event,
- void *ptr)
+ void *ptr)
{
if (machine_state & MACHINE_PANICED)
return NOTIFY_DONE;
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 0626555fd1a..3f47346608d 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -136,14 +136,14 @@ static void save_and_clear_buserr(void)
hpc3.scsi[1].cbp = hpc3c0->scsi_chan1.cbptr;
hpc3.scsi[1].ndptr = hpc3c0->scsi_chan1.ndptr;
- hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
- hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
- hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
+ hpc3.ethrx.addr = (unsigned long)&hpc3c0->ethregs.rx_cbptr;
+ hpc3.ethrx.ctrl = hpc3c0->ethregs.rx_ctrl; /* HPC3_ERXCTRL_ACTIVE ? */
+ hpc3.ethrx.cbp = hpc3c0->ethregs.rx_cbptr;
hpc3.ethrx.ndptr = hpc3c0->ethregs.rx_ndptr;
- hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
- hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
- hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
+ hpc3.ethtx.addr = (unsigned long)&hpc3c0->ethregs.tx_cbptr;
+ hpc3.ethtx.ctrl = hpc3c0->ethregs.tx_ctrl; /* HPC3_ETXCTRL_ACTIVE ? */
+ hpc3.ethtx.cbp = hpc3c0->ethregs.tx_cbptr;
hpc3.ethtx.ndptr = hpc3c0->ethregs.tx_ndptr;
for (i = 0; i < 8; ++i) {
@@ -196,11 +196,11 @@ static void print_cache_tags(void)
scb | (1 << 12)*i);
}
i = read_c0_config();
- scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
+ scb = i & (1 << 13) ? 7:6; /* scblksize = 2^[7..6] */
scw = ((i >> 16) & 7) + 19 - 1; /* scwaysize = 2^[24..19] / 2 */
i = ((1 << scw) - 1) & ~((1 << scb) - 1);
- printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
+ printk(KERN_ERR "S: 0: %08x %08x, 1: %08x %08x (PA[%u:%u] %05x)\n",
cache_tags.tags[0][0].hi, cache_tags.tags[0][0].lo,
cache_tags.tags[0][1].hi, cache_tags.tags[0][1].lo,
scw-1, scb, i & (unsigned)cache_tags.err_addr);
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 04cebadc2b3..692778da9e7 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -39,7 +39,7 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
printk("Hub has valid error information:\n");
if (errst0 & PI_ERR_ST0_OVERRUN_MASK)
- printk("Overrun is set. Error stack may contain additional "
+ printk("Overrun is set. Error stack may contain additional "
"information.\n");
printk("Hub error address is %08lx\n",
(errst0 & PI_ERR_ST0_ADDR_MASK) >> (PI_ERR_ST0_ADDR_SHFT - 3));
@@ -85,7 +85,7 @@ void __init ip27_be_init(void)
board_be_handler = ip27_be_handler;
LOCAL_HUB_S(PI_ERR_INT_PEND,
- cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
+ cpu ? PI_ERR_CLEAR_ALL_B : PI_ERR_CLEAR_ALL_A);
LOCAL_HUB_S(PI_ERR_INT_MASK_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_ADDR_A + cpuoff, 0);
LOCAL_HUB_S(PI_ERR_STACK_SIZE, 0); /* Disable error stack */
diff --git a/arch/mips/sgi-ip27/ip27-console.c b/arch/mips/sgi-ip27/ip27-console.c
index 984e561f0f7..b952d5b1af8 100644
--- a/arch/mips/sgi-ip27/ip27-console.c
+++ b/arch/mips/sgi-ip27/ip27-console.c
@@ -31,7 +31,7 @@ static inline struct ioc3_uartregs *console_uart(void)
return &ioc3->sregs.uarta;
}
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
struct ioc3_uartregs *uart = console_uart();
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index cd0d5b06cd8..328ceb3c86e 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -17,11 +17,11 @@
static int force_fire_and_forget = 1;
/**
- * hub_pio_map - establish a HUB PIO mapping
+ * hub_pio_map - establish a HUB PIO mapping
*
* @hub: hub to perform PIO mapping on
* @widget: widget ID to perform PIO mapping for
- * @xtalk_addr: xtalk_address that needs to be mapped
+ * @xtalk_addr: xtalk_address that needs to be mapped
* @size: size of the PIO mapping
*
**/
@@ -78,8 +78,8 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget,
/*
* hub_setup_prb(nasid, prbnum, credits, conveyor)
*
- * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
- * put it into conveyor belt mode with the specified number of credits.
+ * Put a PRB into fire-and-forget mode if conveyor isn't set. Otherwise,
+ * put it into conveyor belt mode with the specified number of credits.
*/
static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
{
@@ -125,12 +125,12 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
* so we turn off access to all widgets for the duration of the function.
*
* XXX - This code should really check what kind of widget we're talking
- * to. Bridges can only handle three requests, but XG will do more.
+ * to. Bridges can only handle three requests, but XG will do more.
* How many can crossbow handle to widget 0? We're assuming 1.
*
* XXX - There is a bug in the crossbow that link reset PIOs do not
* return write responses. The easiest solution to this problem is to
- * leave widget 0 (xbow) in fire-and-forget mode at all times. This
+ * leave widget 0 (xbow) in fire-and-forget mode at all times. This
* only affects pio's to xbow registers, which should be rare.
**/
static void hub_set_piomode(nasid_t nasid)
@@ -167,7 +167,7 @@ static void hub_set_piomode(nasid_t nasid)
}
/*
- * hub_pio_init - PIO-related hub initialization
+ * hub_pio_init - PIO-related hub initialization
*
* @hub: hubinfo structure for our hub
*/
diff --git a/arch/mips/sgi-ip27/ip27-init.c b/arch/mips/sgi-ip27/ip27-init.c
index 923c080f77b..d41b1c6fb03 100644
--- a/arch/mips/sgi-ip27/ip27-init.c
+++ b/arch/mips/sgi-ip27/ip27-init.c
@@ -151,7 +151,7 @@ nasid_t
get_nasid(void)
{
return (nasid_t)((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_NODEID_MASK)
- >> NSRI_NODEID_SHFT);
+ >> NSRI_NODEID_SHFT);
}
/*
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 69a939ae65e..2315cfeb268 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -62,7 +62,7 @@ extern int irq_to_slot[];
* from the irq value
*/
#define IRQ_TO_BRIDGE(i) irq_to_bridge[(i)]
-#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
+#define SLOT_FROM_PCI_IRQ(i) irq_to_slot[i]
static inline int alloc_level(int cpu, int irq)
{
@@ -281,11 +281,11 @@ static unsigned int startup_bridge_irq(struct irq_data *d)
device |= (pin << (pin*3));
bridge->b_int_device = device;
- bridge->b_wid_tflush;
+ bridge->b_wid_tflush;
intr_connect_level(cpu, swlevel);
- return 0; /* Never anything pending. */
+ return 0; /* Never anything pending. */
}
/* Shutdown one of the (PCI ...) IRQs routes over a bridge. */
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index cd8fcab6b05..3505d08ff2f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -31,8 +31,8 @@
#include <asm/sn/sn_private.h>
-#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
-#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
+#define SLOT_PFNSHIFT (SLOT_SHIFT - PAGE_SHIFT)
+#define PFN_NASIDSHFT (NASID_SHFT - PAGE_SHIFT)
struct node_data *__node_data[MAX_COMPACT_NODES];
@@ -43,7 +43,7 @@ static int fine_mode;
static int is_fine_dirmode(void)
{
return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
- >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
+ >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
}
static hubreg_t get_region(cnodeid_t cnode)
@@ -66,7 +66,7 @@ static void gen_region_mask(hubreg_t *region_mask)
}
}
-#define rou_rflag rou_flags
+#define rou_rflag rou_flags
static int router_distance;
@@ -412,7 +412,7 @@ static void __init node_mem_init(cnodeid_t node)
slot_freepfn += PFN_UP(sizeof(struct pglist_data) +
sizeof(struct hub_data));
- bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
+ bootmap_size = init_bootmem_node(NODE_DATA(node), slot_freepfn,
start_pfn, end_pfn);
free_bootmem_with_active_regions(node, end_pfn);
reserve_bootmem_node(NODE_DATA(node), slot_firstpfn << PAGE_SHIFT,
@@ -422,7 +422,7 @@ static void __init node_mem_init(cnodeid_t node)
}
/*
- * A node with nothing. We use it to avoid any special casing in
+ * A node with nothing. We use it to avoid any special casing in
* cpumask_of_node
*/
static struct node_data null_node = {
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index 005c29ed419..a2358b44420 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -54,7 +54,7 @@ void install_cpu_nmi_handler(int slice)
void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
struct reg_struct *nr;
- int i;
+ int i;
/* Get the pointer to the current cpu's register set. */
nr = (struct reg_struct *)
@@ -86,12 +86,12 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
printk("%s\n", print_tainted());
printk("ErrEPC: %016lx %pS\n", nr->error_epc, (void *) nr->error_epc);
printk("ra : %016lx %pS\n", nr->gpr[31], (void *) nr->gpr[31]);
- printk("Status: %08lx ", nr->sr);
+ printk("Status: %08lx ", nr->sr);
if (nr->sr & ST0_KX)
printk("KX ");
if (nr->sr & ST0_SX)
- printk("SX ");
+ printk("SX ");
if (nr->sr & ST0_UX)
printk("UX ");
diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c
index f347bc6b795..ac37e54b3d5 100644
--- a/arch/mips/sgi-ip27/ip27-reset.c
+++ b/arch/mips/sgi-ip27/ip27-reset.c
@@ -29,7 +29,7 @@ void machine_restart(char *command) __attribute__((noreturn));
void machine_halt(void) __attribute__((noreturn));
void machine_power_off(void) __attribute__((noreturn));
-#define noreturn while(1); /* Silence gcc. */
+#define noreturn while(1); /* Silence gcc. */
/* XXX How to pass the reboot command to the firmware??? */
static void ip27_machine_restart(char *command)
diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c
index 735b43bf8f8..f94638141b2 100644
--- a/arch/mips/sgi-ip27/ip27-smp.c
+++ b/arch/mips/sgi-ip27/ip27-smp.c
@@ -191,7 +191,7 @@ static void __init ip27_cpus_done(void)
}
/*
- * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
+ * Launch a slave into smp_bootstrap(). It doesn't take an argument, and we
* set sp to the kernel stack of the newly created idle process, gp to the proc
* struct so that current_thread_info() will work.
*/
@@ -219,7 +219,7 @@ static void __init ip27_smp_setup(void)
/*
* Assumption to be fixed: we're always booted on logical / physical
- * processor 0. While we're always running on logical processor 0
+ * processor 0. While we're always running on logical processor 0
* this still means this is physical processor zero; it might for
* example be disabled in the firmware.
*/
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 13cfeab5052..fff58ac176f 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -117,8 +117,8 @@ void __cpuinit hub_rt_clock_event_init(void)
cd->name = name;
cd->features = CLOCK_EVT_FEAT_ONESHOT;
clockevent_set_clock(cd, CYCLES_PER_SEC);
- cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->max_delta_ns = clockevent_delta2ns(0xfffffffffffff, cd);
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 200;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
@@ -153,7 +153,7 @@ static cycle_t hub_rt_read(struct clocksource *cs)
struct clocksource hub_rt_clocksource = {
.name = "HUB-RT",
- .rating = 200,
+ .rating = 200,
.read = hub_rt_read,
.mask = CLOCKSOURCE_MASK(52),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 5e871e75a8d..a4df7d0f6f1 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -17,15 +17,15 @@
#include <asm/xtalk/xtalk.h>
-#define XBOW_WIDGET_PART_NUM 0x0
-#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
-#define BASE_XBOW_PORT 8 /* Lowest external port */
+#define XBOW_WIDGET_PART_NUM 0x0
+#define XXBOW_WIDGET_PART_NUM 0xd000 /* Xbow in Xbridge */
+#define BASE_XBOW_PORT 8 /* Lowest external port */
extern int bridge_probe(nasid_t nasid, int widget, int masterwid);
static int __cpuinit probe_one_port(nasid_t nasid, int widget, int masterwid)
{
- widgetreg_t widget_id;
+ widgetreg_t widget_id;
xwidget_part_num_t partnum;
widget_id = *(volatile widgetreg_t *)
@@ -102,10 +102,10 @@ static int __cpuinit xbow_probe(nasid_t nasid)
void __cpuinit xtalk_probe_node(cnodeid_t nid)
{
- volatile u64 hubreg;
- nasid_t nasid;
+ volatile u64 hubreg;
+ nasid_t nasid;
xwidget_part_num_t partnum;
- widgetreg_t widget_id;
+ widgetreg_t widget_id;
nasid = COMPACT_TO_NASID_NODEID(nid);
hubreg = REMOTE_HUB_L(nasid, IIO_LLP_CSR);
@@ -115,7 +115,7 @@ void __cpuinit xtalk_probe_node(cnodeid_t nid)
return;
widget_id = *(volatile widgetreg_t *)
- (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
+ (RAW_NODE_SWIN_BASE(nasid, 0x0) + WIDGET_ID);
partnum = XWIDGET_PART_NUM(widget_id);
printk(KERN_INFO "Cpu %d, Nasid 0x%x: partnum 0x%x is ",
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index e7d5054de8c..e0c7d9e142f 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -173,7 +173,7 @@ static struct irq_chip crime_edge_interrupt = {
/*
* This is for MACE PCI interrupts. We can decrease bus traffic by masking
- * as close to the source as possible. This also means we can take the
+ * as close to the source as possible. This also means we can take the
* next chunk of the CRIME register in one piece.
*/
@@ -271,11 +271,11 @@ static void disable_maceisa_irq(struct irq_data *d)
unsigned int crime_int = 0;
maceisa_mask &= ~(1 << (d->irq - MACEISA_AUDIO_SW_IRQ));
- if (!(maceisa_mask & MACEISA_AUDIO_INT))
+ if (!(maceisa_mask & MACEISA_AUDIO_INT))
crime_int |= MACE_AUDIO_INT;
- if (!(maceisa_mask & MACEISA_MISC_INT))
+ if (!(maceisa_mask & MACEISA_MISC_INT))
crime_int |= MACE_MISC_INT;
- if (!(maceisa_mask & MACEISA_SUPERIO_INT))
+ if (!(maceisa_mask & MACEISA_SUPERIO_INT))
crime_int |= MACE_SUPERIO_INT;
crime_mask &= ~crime_int;
crime->imask = crime_mask;
diff --git a/arch/mips/sibyte/Platform b/arch/mips/sibyte/Platform
index 911dfe39c63..d03a07516f8 100644
--- a/arch/mips/sibyte/Platform
+++ b/arch/mips/sibyte/Platform
@@ -9,7 +9,7 @@ platform-$(CONFIG_SIBYTE_BCM1x80) += sibyte/
#
# Sibyte SB1250 / BCM1480 family of SOCs
#
-cflags-$(CONFIG_SIBYTE_BCM112X) += \
+cflags-$(CONFIG_SIBYTE_BCM112X) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
@@ -18,11 +18,11 @@ cflags-$(CONFIG_SIBYTE_SB1250) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1250_112x_ALL
-cflags-$(CONFIG_SIBYTE_BCM1x55) += \
+cflags-$(CONFIG_SIBYTE_BCM1x55) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
-cflags-$(CONFIG_SIBYTE_BCM1x80) += \
+cflags-$(CONFIG_SIBYTE_BCM1x80) += \
-I$(srctree)/arch/mips/include/asm/mach-sibyte \
-DSIBYTE_HDR_FEATURES=SIBYTE_HDR_FMASK_1480_ALL
diff --git a/arch/mips/sibyte/bcm1480/irq.c b/arch/mips/sibyte/bcm1480/irq.c
index 215713e1f3c..09d6e16a70f 100644
--- a/arch/mips/sibyte/bcm1480/irq.c
+++ b/arch/mips/sibyte/bcm1480/irq.c
@@ -283,10 +283,10 @@ void __init arch_init_irq(void)
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(IMR_IP3_VAL, IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) +
(K_BCM1480_INT_MBOX_0_0 << 3)));
- }
+ }
- /* Clear the mailboxes. The firmware may leave them dirty */
+ /* Clear the mailboxes. The firmware may leave them dirty */
for (cpu = 0; cpu < 4; cpu++) {
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_BCM1480_IMR_REGISTER(cpu, R_BCM1480_IMR_MAILBOX_0_CLR_CPU)));
@@ -307,7 +307,7 @@ void __init arch_init_irq(void)
/*
* Note that the timer interrupts are also mapped, but this is
- * done in bcm1480_time_init(). Also, the profiling driver
+ * done in bcm1480_time_init(). Also, the profiling driver
* does its own management of IP7.
*/
@@ -325,7 +325,7 @@ static inline void dispatch_ip2(void)
/*
* Default...we've hit an IP[2] interrupt, which means we've got to
- * check the 1480 interrupt registers to figure out what to do. Need
+ * check the 1480 interrupt registers to figure out what to do. Need
* to detect which CPU we're on, now that smp_affinity is supported.
*/
base = A_BCM1480_IMR_MAPPER(cpu);
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 6343011e990..588e1806a1a 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -127,8 +127,8 @@ static __init void prom_meminit(void)
if ((initrd_pstart > addr) &&
(initrd_pstart < (addr + size))) {
add_memory_region(addr,
- initrd_pstart - addr,
- BOOT_MEM_RAM);
+ initrd_pstart - addr,
+ BOOT_MEM_RAM);
rd_flag = 1;
}
if ((initrd_pend > addr) &&
@@ -195,7 +195,7 @@ static int __init initrd_setup(char *str)
/*
*Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>"
- * e.g. initrd=3abfd@80010000. This is set up by the loader.
+ * e.g. initrd=3abfd@80010000. This is set up by the loader.
*/
for (tmp = str; *tmp != '@'; tmp++) {
if (!*tmp) {
@@ -244,7 +244,7 @@ void __init prom_init(void)
int *prom_vec = (int *) fw_arg3;
_machine_restart = cfe_linux_restart;
- _machine_halt = cfe_linux_halt;
+ _machine_halt = cfe_linux_halt;
pm_power_off = cfe_linux_halt;
/*
@@ -299,7 +299,7 @@ void __init prom_init(void)
#ifdef CONFIG_BLK_DEV_INITRD
{
char *ptr;
- /* Need to find out early whether we've got an initrd. So scan
+ /* Need to find out early whether we've got an initrd. So scan
the list looking now */
for (ptr = arcs_cmdline; *ptr; ptr++) {
while (*ptr == ' ') {
diff --git a/arch/mips/sibyte/common/sb_tbprof.c b/arch/mips/sibyte/common/sb_tbprof.c
index e8c4538c5f6..2188b39a125 100644
--- a/arch/mips/sibyte/common/sb_tbprof.c
+++ b/arch/mips/sibyte/common/sb_tbprof.c
@@ -152,7 +152,7 @@ static u64 tb_period;
static void arm_tb(void)
{
- u64 scdperfcnt;
+ u64 scdperfcnt;
u64 next = (1ULL << 40) - tb_period;
u64 tb_options = M_SCD_TRACE_CFG_FREEZE_FULL;
@@ -257,8 +257,8 @@ static irqreturn_t sbprof_pc_intr(int irq, void *dev_id)
/*
* Requires: Already called zclk_timer_init with a value that won't
- * saturate 40 bits. No subsequent use of SCD performance counters
- * or trace buffer.
+ * saturate 40 bits. No subsequent use of SCD performance counters
+ * or trace buffer.
*/
static int sbprof_zbprof_start(struct file *filp)
@@ -288,8 +288,8 @@ static int sbprof_zbprof_start(struct file *filp)
/*
* We grab this interrupt to prevent others from trying to use
- * it, even though we don't want to service the interrupts
- * (they only feed into the trace-on-interrupt mechanism)
+ * it, even though we don't want to service the interrupts
+ * (they only feed into the trace-on-interrupt mechanism)
*/
if (request_irq(K_INT_PERF_CNT, sbprof_pc_intr, 0, DEVNAME " scd perfcnt", &sbp)) {
free_irq(K_INT_TRACE_FREEZE, &sbp);
@@ -298,7 +298,7 @@ static int sbprof_zbprof_start(struct file *filp)
/*
* I need the core to mask these, but the interrupt mapper to
- * pass them through. I am exploiting my knowledge that
+ * pass them through. I am exploiting my knowledge that
* cp0_status masks out IP[5]. krw
*/
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
@@ -328,7 +328,7 @@ static int sbprof_zbprof_start(struct file *filp)
__raw_writeq(0, IOADDR(A_ADDR_TRAP_CFG_3));
/* Initialize Trace Event 0-7 */
- /* when interrupt */
+ /* when interrupt */
__raw_writeq(M_SCD_TREVT_INTERRUPT, IOADDR(A_SCD_TRACE_EVENT_0));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_1));
__raw_writeq(0, IOADDR(A_SCD_TRACE_EVENT_2));
@@ -479,7 +479,7 @@ static ssize_t sbprof_tb_read(struct file *filp, char *buf,
return err;
}
pr_debug(DEVNAME ": read from sample %d, %d bytes\n",
- cur_sample, cur_count);
+ cur_sample, cur_count);
size -= cur_count;
sample_left -= cur_count;
if (!sample_left) {
@@ -540,7 +540,7 @@ static const struct file_operations sbprof_tb_fops = {
.open = sbprof_tb_open,
.release = sbprof_tb_release,
.read = sbprof_tb_read,
- .unlocked_ioctl = sbprof_tb_ioctl,
+ .unlocked_ioctl = sbprof_tb_ioctl,
.compat_ioctl = sbprof_tb_ioctl,
.mmap = NULL,
.llseek = default_llseek,
diff --git a/arch/mips/sibyte/sb1250/bus_watcher.c b/arch/mips/sibyte/sb1250/bus_watcher.c
index 86e6e54dd15..e651105b3f0 100644
--- a/arch/mips/sibyte/sb1250/bus_watcher.c
+++ b/arch/mips/sibyte/sb1250/bus_watcher.c
@@ -71,7 +71,7 @@ static void print_summary(uint32_t status, uint32_t l2_err,
* already been destructively read out of the registers.
*
* notes: this is currently used by the cache error handler
- * should provide locking against the interrupt handler
+ * should provide locking against the interrupt handler
*/
void check_bus_watcher(void)
{
@@ -119,7 +119,7 @@ static int bw_print_buffer(char *page, struct bw_stats_struct *stats)
(int)G_SCD_BERR_RID(stats->status),
(int)G_SCD_BERR_DCODE(stats->status));
/* XXXKW indicate multiple errors between printings, or stats
- collection (or both)? */
+ collection (or both)? */
if (stats->status & M_SCD_BERR_MULTERRS)
len += sprintf(page+len, "Multiple errors observed since last check.\n");
if (stats->status_printed) {
@@ -168,7 +168,7 @@ static void create_proc_decoder(struct bw_stats_struct *stats)
* sibyte_bw_int - handle bus watcher interrupts and accumulate counts
*
* notes: possible re-entry due to multiple sources
- * should check/indicate saturation
+ * should check/indicate saturation
*/
static irqreturn_t sibyte_bw_int(int irq, void *data)
{
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 340aaf62665..fca0cdb9950 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -264,7 +264,7 @@ void __init arch_init_irq(void)
IOADDR(A_IMR_REGISTER(1, R_IMR_INTERRUPT_MAP_BASE) +
(K_INT_MBOX_0 << 3)));
- /* Clear the mailboxes. The firmware may leave them dirty */
+ /* Clear the mailboxes. The firmware may leave them dirty */
__raw_writeq(0xffffffffffffffffULL,
IOADDR(A_IMR_REGISTER(0, R_IMR_MAILBOX_CLR_CPU)));
__raw_writeq(0xffffffffffffffffULL,
@@ -277,7 +277,7 @@ void __init arch_init_irq(void)
/*
* Note that the timer interrupts are also mapped, but this is
- * done in sb1250_time_init(). Also, the profiling driver
+ * done in sb1250_time_init(). Also, the profiling driver
* does its own management of IP7.
*/
@@ -294,7 +294,7 @@ static inline void dispatch_ip2(void)
/*
* Default...we've hit an IP[2] interrupt, which means we've got to
- * check the 1250 interrupt registers to figure out what to do. Need
+ * check the 1250 interrupt registers to figure out what to do. Need
* to detect which CPU we're on, now that smp_affinity is supported.
*/
mask = __raw_readq(IOADDR(A_IMR_REGISTER(cpu,
@@ -323,7 +323,7 @@ asmlinkage void plat_irq_dispatch(void)
if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & CAUSEF_IP4)
- do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
+ do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
#ifdef CONFIG_SMP
else if (pending & CAUSEF_IP3)
diff --git a/arch/mips/sibyte/sb1250/setup.c b/arch/mips/sibyte/sb1250/setup.c
index 92da3155ce0..a14bd4cb0bc 100644
--- a/arch/mips/sibyte/sb1250/setup.c
+++ b/arch/mips/sibyte/sb1250/setup.c
@@ -203,8 +203,8 @@ void __init sb1250_setup(void)
case K_SYS_REVISION_BCM1250_PASS1:
#ifndef CONFIG_SB1_PASS_1_WORKAROUNDS
printk("@@@@ This is a BCM1250 A0-A2 (Pass 1) board, "
- "and the kernel doesn't have the proper "
- "workarounds compiled in. @@@@\n");
+ "and the kernel doesn't have the proper "
+ "workarounds compiled in. @@@@\n");
bad_config = 1;
#endif
break;
@@ -213,28 +213,28 @@ void __init sb1250_setup(void)
#if !defined(CONFIG_SB1_PASS_2_WORKAROUNDS) || \
!defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS)
printk("@@@@ This is a BCM1250 A3-A10 board, and the "
- "kernel doesn't have the proper workarounds "
- "compiled in. @@@@\n");
+ "kernel doesn't have the proper workarounds "
+ "compiled in. @@@@\n");
bad_config = 1;
#endif
#ifdef CONFIG_CPU_HAS_PREFETCH
printk("@@@@ Prefetches may be enabled in this kernel, "
- "but are buggy on this board. @@@@\n");
+ "but are buggy on this board. @@@@\n");
bad_config = 1;
#endif
break;
case K_SYS_REVISION_BCM1250_PASS2_2:
#ifndef CONFIG_SB1_PASS_2_WORKAROUNDS
printk("@@@@ This is a BCM1250 B1/B2. board, and the "
- "kernel doesn't have the proper workarounds "
- "compiled in. @@@@\n");
+ "kernel doesn't have the proper workarounds "
+ "compiled in. @@@@\n");
bad_config = 1;
#endif
#if defined(CONFIG_SB1_PASS_2_1_WORKAROUNDS) || \
!defined(CONFIG_CPU_HAS_PREFETCH)
printk("@@@@ This is a BCM1250 B1/B2, but the kernel is "
- "conservatively configured for an 'A' stepping. "
- "@@@@\n");
+ "conservatively configured for an 'A' stepping. "
+ "@@@@\n");
#endif
break;
default:
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 097335262fb..9480c14ec66 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -13,7 +13,7 @@
#define DRV_NAME "pata-swarm"
-#define SWARM_IDE_SHIFT 5
+#define SWARM_IDE_SHIFT 5
#define SWARM_IDE_BASE 0x1f0
#define SWARM_IDE_CTRL 0x3f6
@@ -123,7 +123,7 @@ static int __init sb1250_device_init(void)
case K_SYS_SOC_TYPE_BCM1120:
case K_SYS_SOC_TYPE_BCM1125:
case K_SYS_SOC_TYPE_BCM1125H:
- case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
+ case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
ret = platform_add_devices(sb1250_devs, 2);
break;
case K_SYS_SOC_TYPE_BCM1x55:
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
index 4438b2195c4..178a824b28d 100644
--- a/arch/mips/sibyte/swarm/rtc_xicor1241.c
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -4,8 +4,8 @@
* Copyright (C) 2002 MontaVista Software Inc.
* Author: jsun@mvista.com or jsun@junsun.net
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
@@ -28,15 +28,15 @@
* Register bits
*/
-#define X1241REG_SR_BAT 0x80 /* currently on battery power */
+#define X1241REG_SR_BAT 0x80 /* currently on battery power */
#define X1241REG_SR_RWEL 0x04 /* r/w latch is enabled, can write RTC */
#define X1241REG_SR_WEL 0x02 /* r/w latch is unlocked, can enable r/w now */
#define X1241REG_SR_RTCF 0x01 /* clock failed */
#define X1241REG_BL_BP2 0x80 /* block protect 2 */
#define X1241REG_BL_BP1 0x40 /* block protect 1 */
#define X1241REG_BL_BP0 0x20 /* block protect 0 */
-#define X1241REG_BL_WD1 0x10
-#define X1241REG_BL_WD0 0x08
+#define X1241REG_BL_WD1 0x10
+#define X1241REG_BL_WD0 0x08
#define X1241REG_HR_MIL 0x80 /* military time format */
/*
@@ -61,50 +61,50 @@
static int xicor_read(uint8_t addr)
{
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
__raw_writeq((addr >> 8) & 0x7, SMB_CSR(R_SMB_CMD));
__raw_writeq(addr & 0xff, SMB_CSR(R_SMB_DATA));
__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR2BYTE,
SMB_CSR(R_SMB_START));
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_RD1BYTE,
SMB_CSR(R_SMB_START));
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
- if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
- /* Clear error bit by writing a 1 */
- __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
- return -1;
- }
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ }
return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff);
}
static int xicor_write(uint8_t addr, int b)
{
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
__raw_writeq(addr, SMB_CSR(R_SMB_CMD));
__raw_writeq((addr & 0xff) | ((b & 0xff) << 8), SMB_CSR(R_SMB_DATA));
__raw_writeq(V_SMB_ADDR(X1241_CCR_ADDRESS) | V_SMB_TT_WR3BYTE,
SMB_CSR(R_SMB_START));
- while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
- ;
+ while (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_BUSY)
+ ;
- if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
- /* Clear error bit by writing a 1 */
- __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
- return -1;
- } else {
+ if (__raw_readq(SMB_CSR(R_SMB_STATUS)) & M_SMB_ERROR) {
+ /* Clear error bit by writing a 1 */
+ __raw_writeq(M_SMB_ERROR, SMB_CSR(R_SMB_STATUS));
+ return -1;
+ } else {
return 0;
}
}
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index e05ad4da44d..dd0ab982d77 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -41,17 +41,17 @@ static struct platform_device a20r_serial8250_device = {
};
static struct resource a20r_ds1216_rsrc[] = {
- {
- .start = 0x1c081ffc,
- .end = 0x1c081fff,
- .flags = IORESOURCE_MEM
- }
+ {
+ .start = 0x1c081ffc,
+ .end = 0x1c081fff,
+ .flags = IORESOURCE_MEM
+ }
};
static struct platform_device a20r_ds1216_device = {
- .name = "rtc-ds1216",
- .num_resources = ARRAY_SIZE(a20r_ds1216_rsrc),
- .resource = a20r_ds1216_rsrc
+ .name = "rtc-ds1216",
+ .num_resources = ARRAY_SIZE(a20r_ds1216_rsrc),
+ .resource = a20r_ds1216_rsrc
};
static struct resource snirm_82596_rsrc[] = {
@@ -76,14 +76,14 @@ static struct resource snirm_82596_rsrc[] = {
.flags = IORESOURCE_IRQ
},
{
- .flags = 0x01 /* 16bit mpu port access */
+ .flags = 0x01 /* 16bit mpu port access */
}
};
static struct platform_device snirm_82596_pdev = {
- .name = "snirm_82596",
- .num_resources = ARRAY_SIZE(snirm_82596_rsrc),
- .resource = snirm_82596_rsrc
+ .name = "snirm_82596",
+ .num_resources = ARRAY_SIZE(snirm_82596_rsrc),
+ .resource = snirm_82596_rsrc
};
static struct resource snirm_53c710_rsrc[] = {
@@ -100,9 +100,9 @@ static struct resource snirm_53c710_rsrc[] = {
};
static struct platform_device snirm_53c710_pdev = {
- .name = "snirm_53c710",
- .num_resources = ARRAY_SIZE(snirm_53c710_rsrc),
- .resource = snirm_53c710_rsrc
+ .name = "snirm_53c710",
+ .num_resources = ARRAY_SIZE(snirm_53c710_rsrc),
+ .resource = snirm_53c710_rsrc
};
static struct resource sc26xx_rsrc[] = {
@@ -171,7 +171,7 @@ static u32 a20r_ack_hwint(void)
" addiu %1, -1 \n"
" sw $1, 0(%0) \n"
" sync \n"
- ".set pop \n"
+ ".set pop \n"
:
: "Jr" (PCIMT_UCONF), "Jr" (0xbc000000));
write_c0_status(status);
@@ -236,13 +236,13 @@ static int __init snirm_a20r_setup_devinit(void)
switch (sni_brd_type) {
case SNI_BRD_TOWER_OASIC:
case SNI_BRD_MINITOWER:
- platform_device_register(&snirm_82596_pdev);
- platform_device_register(&snirm_53c710_pdev);
- platform_device_register(&sc26xx_pdev);
- platform_device_register(&a20r_serial8250_device);
- platform_device_register(&a20r_ds1216_device);
+ platform_device_register(&snirm_82596_pdev);
+ platform_device_register(&snirm_53c710_pdev);
+ platform_device_register(&sc26xx_pdev);
+ platform_device_register(&a20r_serial8250_device);
+ platform_device_register(&a20r_ds1216_device);
sni_eisa_root_init();
- break;
+ break;
}
return 0;
}
diff --git a/arch/mips/sni/eisa.c b/arch/mips/sni/eisa.c
index 6827feb4de9..179b5d556ad 100644
--- a/arch/mips/sni/eisa.c
+++ b/arch/mips/sni/eisa.c
@@ -22,7 +22,7 @@ static struct platform_device eisa_root_dev = {
};
static struct eisa_root_device eisa_bus_root = {
- .dev = &eisa_root_dev.dev,
+ .dev = &eisa_root_dev.dev,
.bus_base_addr = 0,
.res = &ioport_resource,
.slots = EISA_MAX_SLOTS,
diff --git a/arch/mips/sni/irq.c b/arch/mips/sni/irq.c
index 5a4ec75382e..ac61b90bcc6 100644
--- a/arch/mips/sni/irq.c
+++ b/arch/mips/sni/irq.c
@@ -58,25 +58,25 @@ void __init arch_init_irq(void)
case SNI_BRD_10NEW:
case SNI_BRD_TOWER_OASIC:
case SNI_BRD_MINITOWER:
- sni_a20r_irq_init();
- break;
+ sni_a20r_irq_init();
+ break;
case SNI_BRD_PCI_TOWER:
- sni_pcit_irq_init();
- break;
+ sni_pcit_irq_init();
+ break;
case SNI_BRD_PCI_TOWER_CPLUS:
- sni_pcit_cplus_irq_init();
- break;
+ sni_pcit_cplus_irq_init();
+ break;
case SNI_BRD_RM200:
- sni_rm200_irq_init();
- break;
+ sni_rm200_irq_init();
+ break;
case SNI_BRD_PCI_MTOWER:
case SNI_BRD_PCI_DESKTOP:
case SNI_BRD_PCI_MTOWER_CPLUS:
- sni_pcimt_irq_init();
- break;
+ sni_pcimt_irq_init();
+ break;
}
}
diff --git a/arch/mips/sni/pcimt.c b/arch/mips/sni/pcimt.c
index cdb1417fba5..cec4b8ca143 100644
--- a/arch/mips/sni/pcimt.c
+++ b/arch/mips/sni/pcimt.c
@@ -60,7 +60,7 @@ static inline void sni_pcimt_detect(void)
p += sprintf(p, "%s PCI", (csmsr & 0x80) ? "RM200" : "RM300");
if ((csmsr & 0x80) == 0)
p += sprintf(p, ", board revision %s",
- (csmsr & 0x20) ? "D" : "C");
+ (csmsr & 0x20) ? "D" : "C");
asic = csmsr & 0x80;
asic = (csmsr & 0x08) ? asic : !asic;
p += sprintf(p, ", ASIC PCI Rev %s", asic ? "1.0" : "1.1");
@@ -91,22 +91,22 @@ static struct platform_device pcimt_serial8250_device = {
};
static struct resource pcimt_cmos_rsrc[] = {
- {
- .start = 0x70,
- .end = 0x71,
- .flags = IORESOURCE_IO
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_IRQ
- }
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
};
static struct platform_device pcimt_cmos_device = {
- .name = "rtc_cmos",
- .num_resources = ARRAY_SIZE(pcimt_cmos_rsrc),
- .resource = pcimt_cmos_rsrc
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(pcimt_cmos_rsrc),
+ .resource = pcimt_cmos_rsrc
};
@@ -191,7 +191,7 @@ static struct pci_controller sni_controller = {
.mem_offset = 0x00000000UL,
.io_resource = &sni_io_resource,
.io_offset = 0x00000000UL,
- .io_map_base = SNI_PORT_BASE
+ .io_map_base = SNI_PORT_BASE
};
static void enable_pcimt_irq(struct irq_data *d)
@@ -319,9 +319,9 @@ static int __init snirm_pcimt_setup_devinit(void)
case SNI_BRD_PCI_MTOWER:
case SNI_BRD_PCI_DESKTOP:
case SNI_BRD_PCI_MTOWER_CPLUS:
- platform_device_register(&pcimt_serial8250_device);
- platform_device_register(&pcimt_cmos_device);
- break;
+ platform_device_register(&pcimt_serial8250_device);
+ platform_device_register(&pcimt_cmos_device);
+ break;
}
return 0;
diff --git a/arch/mips/sni/pcit.c b/arch/mips/sni/pcit.c
index b5246373d16..7cddd03d1fe 100644
--- a/arch/mips/sni/pcit.c
+++ b/arch/mips/sni/pcit.c
@@ -59,22 +59,22 @@ static struct platform_device pcit_cplus_serial8250_device = {
};
static struct resource pcit_cmos_rsrc[] = {
- {
- .start = 0x70,
- .end = 0x71,
- .flags = IORESOURCE_IO
- },
- {
- .start = 8,
- .end = 8,
- .flags = IORESOURCE_IRQ
- }
+ {
+ .start = 0x70,
+ .end = 0x71,
+ .flags = IORESOURCE_IO
+ },
+ {
+ .start = 8,
+ .end = 8,
+ .flags = IORESOURCE_IRQ
+ }
};
static struct platform_device pcit_cmos_device = {
- .name = "rtc_cmos",
- .num_resources = ARRAY_SIZE(pcit_cmos_rsrc),
- .resource = pcit_cmos_rsrc
+ .name = "rtc_cmos",
+ .num_resources = ARRAY_SIZE(pcit_cmos_rsrc),
+ .resource = pcit_cmos_rsrc
};
static struct platform_device pcit_pcspeaker_pdev = {
@@ -153,7 +153,7 @@ static struct pci_controller sni_pcit_controller = {
.mem_offset = 0x00000000UL,
.io_resource = &sni_io_resource,
.io_offset = 0x00000000UL,
- .io_map_base = SNI_PORT_BASE
+ .io_map_base = SNI_PORT_BASE
};
static void enable_pcit_irq(struct irq_data *d)
@@ -272,16 +272,16 @@ static int __init snirm_pcit_setup_devinit(void)
{
switch (sni_brd_type) {
case SNI_BRD_PCI_TOWER:
- platform_device_register(&pcit_serial8250_device);
- platform_device_register(&pcit_cmos_device);
+ platform_device_register(&pcit_serial8250_device);
+ platform_device_register(&pcit_cmos_device);
platform_device_register(&pcit_pcspeaker_pdev);
- break;
+ break;
case SNI_BRD_PCI_TOWER_CPLUS:
- platform_device_register(&pcit_cplus_serial8250_device);
- platform_device_register(&pcit_cmos_device);
+ platform_device_register(&pcit_cplus_serial8250_device);
+ platform_device_register(&pcit_cmos_device);
platform_device_register(&pcit_pcspeaker_pdev);
- break;
+ break;
}
return 0;
}
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index 3ab5b5d25b0..a046b302623 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -48,17 +48,17 @@ static struct platform_device rm200_serial8250_device = {
};
static struct resource rm200_ds1216_rsrc[] = {
- {
- .start = 0x1cd41ffc,
- .end = 0x1cd41fff,
- .flags = IORESOURCE_MEM
- }
+ {
+ .start = 0x1cd41ffc,
+ .end = 0x1cd41fff,
+ .flags = IORESOURCE_MEM
+ }
};
static struct platform_device rm200_ds1216_device = {
- .name = "rtc-ds1216",
- .num_resources = ARRAY_SIZE(rm200_ds1216_rsrc),
- .resource = rm200_ds1216_rsrc
+ .name = "rtc-ds1216",
+ .num_resources = ARRAY_SIZE(rm200_ds1216_rsrc),
+ .resource = rm200_ds1216_rsrc
};
static struct resource snirm_82596_rm200_rsrc[] = {
@@ -88,9 +88,9 @@ static struct resource snirm_82596_rm200_rsrc[] = {
};
static struct platform_device snirm_82596_rm200_pdev = {
- .name = "snirm_82596",
- .num_resources = ARRAY_SIZE(snirm_82596_rm200_rsrc),
- .resource = snirm_82596_rm200_rsrc
+ .name = "snirm_82596",
+ .num_resources = ARRAY_SIZE(snirm_82596_rm200_rsrc),
+ .resource = snirm_82596_rm200_rsrc
};
static struct resource snirm_53c710_rm200_rsrc[] = {
@@ -107,9 +107,9 @@ static struct resource snirm_53c710_rm200_rsrc[] = {
};
static struct platform_device snirm_53c710_rm200_pdev = {
- .name = "snirm_53c710",
- .num_resources = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
- .resource = snirm_53c710_rm200_rsrc
+ .name = "snirm_53c710",
+ .num_resources = ARRAY_SIZE(snirm_53c710_rm200_rsrc),
+ .resource = snirm_53c710_rm200_rsrc
};
static int __init snirm_setup_devinit(void)
@@ -134,9 +134,9 @@ device_initcall(snirm_setup_devinit);
*/
static DEFINE_RAW_SPINLOCK(sni_rm200_i8259A_lock);
-#define PIC_CMD 0x00
-#define PIC_IMR 0x01
-#define PIC_ISR PIC_CMD
+#define PIC_CMD 0x00
+#define PIC_IMR 0x01
+#define PIC_ISR PIC_CMD
#define PIC_POLL PIC_ISR
#define PIC_OCW3 PIC_ISR
@@ -421,8 +421,8 @@ void __init sni_rm200_i8259_irqs(void)
}
-#define SNI_RM200_INT_STAT_REG CKSEG1ADDR(0xbc000000)
-#define SNI_RM200_INT_ENA_REG CKSEG1ADDR(0xbc080000)
+#define SNI_RM200_INT_STAT_REG CKSEG1ADDR(0xbc000000)
+#define SNI_RM200_INT_ENA_REG CKSEG1ADDR(0xbc080000)
#define SNI_RM200_INT_START 24
#define SNI_RM200_INT_END 28
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index 2e9c283b8e6..5b09b3544ed 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -204,23 +204,23 @@ void __init plat_mem_setup(void)
case SNI_BRD_10NEW:
case SNI_BRD_TOWER_OASIC:
case SNI_BRD_MINITOWER:
- sni_a20r_init();
- break;
+ sni_a20r_init();
+ break;
case SNI_BRD_PCI_TOWER:
case SNI_BRD_PCI_TOWER_CPLUS:
- sni_pcit_init();
+ sni_pcit_init();
break;
case SNI_BRD_RM200:
- sni_rm200_init();
- break;
+ sni_rm200_init();
+ break;
case SNI_BRD_PCI_MTOWER:
case SNI_BRD_PCI_DESKTOP:
case SNI_BRD_PCI_MTOWER_CPLUS:
- sni_pcimt_init();
- break;
+ sni_pcimt_init();
+ break;
}
_machine_restart = sni_machine_restart;
@@ -247,16 +247,16 @@ static void quirk_cirrus_ram_size(struct pci_dev *dev)
*/
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if ((cmd & (PCI_COMMAND_IO|PCI_COMMAND_MEMORY))
- == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
- vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
+ == (PCI_COMMAND_IO|PCI_COMMAND_MEMORY)) {
+ vga_wseq(NULL, CL_SEQR6, 0x12); /* unlock all extension registers */
vga_wseq(NULL, CL_SEQRF, 0x18);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5434_8,
- quirk_cirrus_ram_size);
+ quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5436,
- quirk_cirrus_ram_size);
+ quirk_cirrus_ram_size);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
- quirk_cirrus_ram_size);
+ quirk_cirrus_ram_size);
#endif
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index 494c9e7847a..cf8ec568b9d 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -10,12 +10,12 @@
#include <asm/time.h>
#include <asm-generic/rtc.h>
-#define SNI_CLOCK_TICK_RATE 3686400
-#define SNI_COUNTER2_DIV 64
-#define SNI_COUNTER0_DIV ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
+#define SNI_CLOCK_TICK_RATE 3686400
+#define SNI_COUNTER2_DIV 64
+#define SNI_COUNTER0_DIV ((SNI_CLOCK_TICK_RATE / SNI_COUNTER2_DIV) / HZ)
static void a20r_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
@@ -33,14 +33,14 @@ static void a20r_set_mode(enum clock_event_mode mode,
*(volatile u8 *)(A20R_PT_CLOCK_BASE + 8) = SNI_COUNTER2_DIV >> 8;
wmb();
- break;
- case CLOCK_EVT_MODE_ONESHOT:
- case CLOCK_EVT_MODE_UNUSED:
- case CLOCK_EVT_MODE_SHUTDOWN:
- break;
- case CLOCK_EVT_MODE_RESUME:
- break;
- }
+ break;
+ case CLOCK_EVT_MODE_ONESHOT:
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ break;
+ case CLOCK_EVT_MODE_RESUME:
+ break;
+ }
}
static struct clock_event_device a20r_clockevent_device = {
@@ -82,15 +82,15 @@ static void __init sni_a20r_timer_setup(void)
struct irqaction *action = &a20r_irqaction;
unsigned int cpu = smp_processor_id();
- cd->cpumask = cpumask_of(cpu);
+ cd->cpumask = cpumask_of(cpu);
clockevents_register_device(cd);
action->dev_id = cd;
setup_irq(SNI_A20R_IRQ_TIMER, &a20r_irqaction);
}
-#define SNI_8254_TICK_RATE 1193182UL
+#define SNI_8254_TICK_RATE 1193182UL
-#define SNI_8254_TCSAMP_COUNTER ((SNI_8254_TICK_RATE / HZ) + 255)
+#define SNI_8254_TCSAMP_COUNTER ((SNI_8254_TICK_RATE / HZ) + 255)
static __init unsigned long dosample(void)
{
diff --git a/arch/mips/txx9/Platform b/arch/mips/txx9/Platform
index a801abbe138..a176d1fd579 100644
--- a/arch/mips/txx9/Platform
+++ b/arch/mips/txx9/Platform
@@ -6,5 +6,5 @@ cflags-$(CONFIG_MACH_TX39XX) += \
cflags-$(CONFIG_MACH_TX49XX) += \
-I$(srctree)/arch/mips/include/asm/mach-tx49xx
-load-$(CONFIG_MACH_TX39XX) += 0xffffffff80050000
-load-$(CONFIG_MACH_TX49XX) += 0xffffffff80100000
+load-$(CONFIG_MACH_TX39XX) += 0xffffffff80050000
+load-$(CONFIG_MACH_TX49XX) += 0xffffffff80100000
diff --git a/arch/mips/txx9/generic/irq_tx4927.c b/arch/mips/txx9/generic/irq_tx4927.c
index 7e3ac5782da..ed8e702d448 100644
--- a/arch/mips/txx9/generic/irq_tx4927.c
+++ b/arch/mips/txx9/generic/irq_tx4927.c
@@ -2,7 +2,7 @@
* Common tx4927 irq handler
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
diff --git a/arch/mips/txx9/generic/irq_tx4939.c b/arch/mips/txx9/generic/irq_tx4939.c
index 6b067dbd2ae..0d7267e81a8 100644
--- a/arch/mips/txx9/generic/irq_tx4939.c
+++ b/arch/mips/txx9/generic/irq_tx4939.c
@@ -5,8 +5,8 @@
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- * source@mvista.com
+ * ahennessy@mvista.com
+ * source@mvista.com
* Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
*
* This file is subject to the terms and conditions of the GNU General Public
diff --git a/arch/mips/txx9/generic/mem_tx4927.c b/arch/mips/txx9/generic/mem_tx4927.c
index 70f9626f822..deea2ceae8a 100644
--- a/arch/mips/txx9/generic/mem_tx4927.c
+++ b/arch/mips/txx9/generic/mem_tx4927.c
@@ -2,7 +2,7 @@
* common tx4927 memory interface
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index ce8f8b9b930..28713274e0c 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -2,7 +2,7 @@
* linux/arch/mips/txx9/pci.c
*
* Based on linux/arch/mips/txx9/rbtx4927/setup.c,
- * linux/arch/mips/txx9/rbtx4938/setup.c,
+ * linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001-2005 MontaVista Software Inc.
@@ -107,7 +107,7 @@ int txx9_pci_mem_high __initdata;
/*
* allocate pci_controller and resources.
- * mem_base, io_base: physical address. 0 for auto assignment.
+ * mem_base, io_base: physical address. 0 for auto assignment.
* mem_size and io_size means max size on auto assignment.
* pcic must be &txx9_primary_pcic or NULL.
*/
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 560fe899175..5524f2c7b05 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -513,19 +513,19 @@ void __init txx9_sio_init(unsigned long baseaddr, int irq,
}
#ifdef CONFIG_EARLY_PRINTK
-static void __init null_prom_putchar(char c)
+static void null_prom_putchar(char c)
{
}
-void (*txx9_prom_putchar)(char c) __initdata = null_prom_putchar;
+void (*txx9_prom_putchar)(char c) = null_prom_putchar;
-void __init prom_putchar(char c)
+void prom_putchar(char c)
{
txx9_prom_putchar(c);
}
static void __iomem *early_txx9_sio_port;
-static void __init early_txx9_sio_putchar(char c)
+static void early_txx9_sio_putchar(char c)
{
#define TXX9_SICISR 0x0c
#define TXX9_SITFIFO 0x1c
diff --git a/arch/mips/txx9/generic/setup_tx3927.c b/arch/mips/txx9/generic/setup_tx3927.c
index 9505d58454c..110e05c3eb8 100644
--- a/arch/mips/txx9/generic/setup_tx3927.c
+++ b/arch/mips/txx9/generic/setup_tx3927.c
@@ -132,6 +132,6 @@ void __init tx3927_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(tx3927_romcptr->cr[ch] & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index 3418b2a90f7..e714d6ce9a8 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -250,7 +250,7 @@ void __init tx4927_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(TX4927_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index eb208011023..0a3bf2dfaba 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -329,7 +329,7 @@ void __init tx4938_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(TX4938_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index 5ff7a9584da..729a5099178 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -301,7 +301,7 @@ void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
unsigned int ch_mask = 0;
__u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
- cts_mask |= ~1; /* only SIO0 have RTS/CTS */
+ cts_mask |= ~1; /* only SIO0 have RTS/CTS */
if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO0)
cts_mask |= 1 << 0; /* disable SIO0 RTS/CTS by PCFG setting */
if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2)
@@ -378,7 +378,7 @@ void __init tx4939_mtd_init(int ch)
unsigned long size = txx9_ce_res[ch].end - start + 1;
if (!(TX4939_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
+ return; /* disabled */
txx9_physmap_flash_init(ch, start, size, &pdata);
}
diff --git a/arch/mips/txx9/generic/smsc_fdc37m81x.c b/arch/mips/txx9/generic/smsc_fdc37m81x.c
index 8ebc3848f3a..f98baa6263d 100644
--- a/arch/mips/txx9/generic/smsc_fdc37m81x.c
+++ b/arch/mips/txx9/generic/smsc_fdc37m81x.c
@@ -18,40 +18,40 @@
/* Common Registers */
#define SMSC_FDC37M81X_CONFIG_INDEX 0x00
#define SMSC_FDC37M81X_CONFIG_DATA 0x01
-#define SMSC_FDC37M81X_CONF 0x02
-#define SMSC_FDC37M81X_INDEX 0x03
-#define SMSC_FDC37M81X_DNUM 0x07
-#define SMSC_FDC37M81X_DID 0x20
-#define SMSC_FDC37M81X_DREV 0x21
-#define SMSC_FDC37M81X_PCNT 0x22
-#define SMSC_FDC37M81X_PMGT 0x23
-#define SMSC_FDC37M81X_OSC 0x24
-#define SMSC_FDC37M81X_CONFPA0 0x26
-#define SMSC_FDC37M81X_CONFPA1 0x27
-#define SMSC_FDC37M81X_TEST4 0x2B
-#define SMSC_FDC37M81X_TEST5 0x2C
-#define SMSC_FDC37M81X_TEST1 0x2D
-#define SMSC_FDC37M81X_TEST2 0x2E
-#define SMSC_FDC37M81X_TEST3 0x2F
+#define SMSC_FDC37M81X_CONF 0x02
+#define SMSC_FDC37M81X_INDEX 0x03
+#define SMSC_FDC37M81X_DNUM 0x07
+#define SMSC_FDC37M81X_DID 0x20
+#define SMSC_FDC37M81X_DREV 0x21
+#define SMSC_FDC37M81X_PCNT 0x22
+#define SMSC_FDC37M81X_PMGT 0x23
+#define SMSC_FDC37M81X_OSC 0x24
+#define SMSC_FDC37M81X_CONFPA0 0x26
+#define SMSC_FDC37M81X_CONFPA1 0x27
+#define SMSC_FDC37M81X_TEST4 0x2B
+#define SMSC_FDC37M81X_TEST5 0x2C
+#define SMSC_FDC37M81X_TEST1 0x2D
+#define SMSC_FDC37M81X_TEST2 0x2E
+#define SMSC_FDC37M81X_TEST3 0x2F
/* Logical device numbers */
-#define SMSC_FDC37M81X_FDD 0x00
-#define SMSC_FDC37M81X_SERIAL1 0x04
-#define SMSC_FDC37M81X_SERIAL2 0x05
-#define SMSC_FDC37M81X_KBD 0x07
+#define SMSC_FDC37M81X_FDD 0x00
+#define SMSC_FDC37M81X_SERIAL1 0x04
+#define SMSC_FDC37M81X_SERIAL2 0x05
+#define SMSC_FDC37M81X_KBD 0x07
/* Logical device Config Registers */
-#define SMSC_FDC37M81X_ACTIVE 0x30
+#define SMSC_FDC37M81X_ACTIVE 0x30
#define SMSC_FDC37M81X_BASEADDR0 0x60
#define SMSC_FDC37M81X_BASEADDR1 0x61
-#define SMSC_FDC37M81X_INT 0x70
-#define SMSC_FDC37M81X_INT2 0x72
-#define SMSC_FDC37M81X_MODE 0xF0
+#define SMSC_FDC37M81X_INT 0x70
+#define SMSC_FDC37M81X_INT2 0x72
+#define SMSC_FDC37M81X_MODE 0xF0
/* Chip Config Values */
#define SMSC_FDC37M81X_CONFIG_ENTER 0x55
#define SMSC_FDC37M81X_CONFIG_EXIT 0xaa
-#define SMSC_FDC37M81X_CHIP_ID 0x4d
+#define SMSC_FDC37M81X_CHIP_ID 0x4d
static unsigned long g_smsc_fdc37m81x_base;
diff --git a/arch/mips/txx9/rbtx4927/irq.c b/arch/mips/txx9/rbtx4927/irq.c
index 6c22c496090..3f48292c9c6 100644
--- a/arch/mips/txx9/rbtx4927/irq.c
+++ b/arch/mips/txx9/rbtx4927/irq.c
@@ -2,7 +2,7 @@
* Toshiba RBTX4927 specific interrupt handlers
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/rbtx4927/prom.c b/arch/mips/txx9/rbtx4927/prom.c
index cc97c6a6011..fe6d0b54763 100644
--- a/arch/mips/txx9/rbtx4927/prom.c
+++ b/arch/mips/txx9/rbtx4927/prom.c
@@ -2,7 +2,7 @@
* rbtx4927 specific prom routines
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/rbtx4927/setup.c b/arch/mips/txx9/rbtx4927/setup.c
index b15adfc2d72..3c516ef625e 100644
--- a/arch/mips/txx9/rbtx4927/setup.c
+++ b/arch/mips/txx9/rbtx4927/setup.c
@@ -2,7 +2,7 @@
* Toshiba rbtx4927 specific setup
*
* Author: MontaVista Software, Inc.
- * source@mvista.com
+ * source@mvista.com
*
* Copyright 2001-2002 MontaVista Software Inc.
*
diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c
index d6e70dab3bd..c9afd05020e 100644
--- a/arch/mips/txx9/rbtx4938/setup.c
+++ b/arch/mips/txx9/rbtx4938/setup.c
@@ -107,10 +107,10 @@ static void __init rbtx4938_pci_setup(void)
/* SPI support */
/* chip select for SPI devices */
-#define SEEPROM1_CS 7 /* PIO7 */
-#define SEEPROM2_CS 0 /* IOC */
-#define SEEPROM3_CS 1 /* IOC */
-#define SRTC_CS 2 /* IOC */
+#define SEEPROM1_CS 7 /* PIO7 */
+#define SEEPROM2_CS 0 /* IOC */
+#define SEEPROM3_CS 1 /* IOC */
+#define SRTC_CS 2 /* IOC */
#define SPI_BUSNO 0
static int __init rbtx4938_ethaddr_init(void)
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index e15641d9309..2da5f25f98b 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -243,7 +243,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
}
static struct platform_driver rbtx4939_led_driver = {
- .driver = {
+ .driver = {
.name = "rbtx4939-led",
.owner = THIS_MODULE,
},
@@ -337,7 +337,7 @@ static void rbtx4939_flash_copy_from(struct map_info *map, void *to,
shift = bdipsw & 3;
while (len) {
curlen = min_t(unsigned long, len,
- 0x400000 - (from & (0x400000 - 1)));
+ 0x400000 - (from & (0x400000 - 1)));
memcpy(to,
(void *)((from & ~0xc00000) |
((((from >> 22) + shift) & 3) << 22)),
diff --git a/arch/mips/vr41xx/common/bcu.c b/arch/mips/vr41xx/common/bcu.c
index 6346c59c9f9..ff7d1c66cf8 100644
--- a/arch/mips/vr41xx/common/bcu.c
+++ b/arch/mips/vr41xx/common/bcu.c
@@ -1,7 +1,7 @@
/*
* bcu.c, Bus Control Unit routines for the NEC VR4100 series.
*
- * Copyright (C) 2002 MontaVista Software Inc.
+ * Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
@@ -176,7 +176,7 @@ static inline unsigned long calculate_vtclock(uint16_t clkspeed, unsigned long p
}
static inline unsigned long calculate_tclock(uint16_t clkspeed, unsigned long pclock,
- unsigned long vtclock)
+ unsigned long vtclock)
{
unsigned long tclock = 0;
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 8ba7d04a5ec..05302bfdd11 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -217,24 +217,24 @@ static int __init vr41xx_cmu_init(void)
unsigned long start, size;
switch (current_cpu_type()) {
- case CPU_VR4111:
- case CPU_VR4121:
+ case CPU_VR4111:
+ case CPU_VR4121:
start = CMU_TYPE1_BASE;
size = CMU_TYPE1_SIZE;
- break;
- case CPU_VR4122:
- case CPU_VR4131:
+ break;
+ case CPU_VR4122:
+ case CPU_VR4131:
start = CMU_TYPE2_BASE;
size = CMU_TYPE2_SIZE;
break;
- case CPU_VR4133:
+ case CPU_VR4133:
start = CMU_TYPE3_BASE;
size = CMU_TYPE3_SIZE;
- break;
+ break;
default:
panic("Unexpected CPU of NEC VR4100 series");
break;
- }
+ }
if (request_mem_region(start, size, "CMU") == NULL)
return -EBUSY;
diff --git a/arch/mips/vr41xx/common/giu.c b/arch/mips/vr41xx/common/giu.c
index b32b3bc6044..32cc8d66b34 100644
--- a/arch/mips/vr41xx/common/giu.c
+++ b/arch/mips/vr41xx/common/giu.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series GIU platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c
index a39ef3207d7..41e873bc847 100644
--- a/arch/mips/vr41xx/common/icu.c
+++ b/arch/mips/vr41xx/common/icu.c
@@ -49,11 +49,11 @@ static unsigned char sysint1_assign[16] = {
static unsigned char sysint2_assign[16] = {
2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
-#define ICU1_TYPE1_BASE 0x0b000080UL
-#define ICU2_TYPE1_BASE 0x0b000200UL
+#define ICU1_TYPE1_BASE 0x0b000080UL
+#define ICU2_TYPE1_BASE 0x0b000200UL
-#define ICU1_TYPE2_BASE 0x0f000080UL
-#define ICU2_TYPE2_BASE 0x0f0000a0UL
+#define ICU1_TYPE2_BASE 0x0f000080UL
+#define ICU2_TYPE2_BASE 0x0f0000a0UL
#define ICU1_SIZE 0x20
#define ICU2_SIZE 0x1c
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 9fbf5f0d1fa..70a3f90131d 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -74,7 +74,7 @@ static inline void software_reset(void)
change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
flush_cache_all();
write_c0_wired(0);
- __asm__("jr %0"::"r"(0xbfc00000));
+ __asm__("jr %0"::"r"(0xbfc00000));
break;
}
}
diff --git a/arch/mips/vr41xx/common/rtc.c b/arch/mips/vr41xx/common/rtc.c
index 76e3e8af7c9..c1e3d200920 100644
--- a/arch/mips/vr41xx/common/rtc.c
+++ b/arch/mips/vr41xx/common/rtc.c
@@ -1,7 +1,7 @@
/*
* NEC VR4100 series RTC platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/type.c b/arch/mips/vr41xx/common/type.c
index ff841422b63..45836a92b7a 100644
--- a/arch/mips/vr41xx/common/type.c
+++ b/arch/mips/vr41xx/common/type.c
@@ -1,7 +1,7 @@
/*
* type.c, System type for NEC VR4100 series.
*
- * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/wrppmc/Platform b/arch/mips/wrppmc/Platform
index e758645e968..dc78b25b95f 100644
--- a/arch/mips/wrppmc/Platform
+++ b/arch/mips/wrppmc/Platform
@@ -2,6 +2,6 @@
# Wind River PPMC Board (4KC + GT64120)
#
platform-$(CONFIG_WR_PPMC) += wrppmc/
-cflags-$(CONFIG_WR_PPMC) += \
+cflags-$(CONFIG_WR_PPMC) += \
-I$(srctree)/arch/mips/include/asm/mach-wrppmc
load-$(CONFIG_WR_PPMC) += 0xffffffff80100000
diff --git a/arch/mips/wrppmc/irq.c b/arch/mips/wrppmc/irq.c
index c6e706274db..f237bf4d5c3 100644
--- a/arch/mips/wrppmc/irq.c
+++ b/arch/mips/wrppmc/irq.c
@@ -4,8 +4,8 @@
* Copyright (C) 2006, Wind River System Inc.
* Author: Rongkai.Zhan, <rongkai.zhan@windriver.com>
*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
diff --git a/arch/mips/wrppmc/serial.c b/arch/mips/wrppmc/serial.c
index 6f9d0858f59..83f0f7d0518 100644
--- a/arch/mips/wrppmc/serial.c
+++ b/arch/mips/wrppmc/serial.c
@@ -1,7 +1,7 @@
/*
* Registration of WRPPMC UART platform device.
*
- * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 4d5ea764857..a2a47d9d6a2 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
select ARCH_HAVE_NMI_SAFE_CMPXCHG
select GENERIC_SMP_IDLE_THREAD
select GENERIC_STRNCPY_FROM_USER
+ select SYSCTL_ARCH_UNALIGN_ALLOW
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_VIRT_TO_BUS
select MODULES_USE_ELF_RELA
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index eca69bb8ef5..051c8b90231 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -79,16 +79,6 @@ asmlinkage long sys32_sendfile64(u32 out_fd, u32 in_fd,
(loff_t __user *)offset, count);
}
-
-/* lseek() needs a wrapper because 'offset' can be negative, but the top
- * half of the argument has been zeroed by syscall.S.
- */
-
-asmlinkage int sys32_lseek(unsigned int fd, int offset, unsigned int origin)
-{
- return sys_lseek(fd, offset, origin);
-}
-
asmlinkage long sys32_semctl(int semid, int semnum, int cmd, union semun arg)
{
union semun u;
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index fc9cab1cc2d..884b91b028f 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -76,7 +76,7 @@
ENTRY_SAME(socket)
/* struct stat is MAYBE identical wide and narrow ?? */
ENTRY_COMP(newstat)
- ENTRY_DIFF(lseek)
+ ENTRY_COMP(lseek)
ENTRY_SAME(getpid) /* 20 */
/* the 'void * data' parameter may need re-packing in wide */
ENTRY_COMP(mount)
@@ -165,8 +165,8 @@
ENTRY_SAME(mmap2)
ENTRY_SAME(mmap) /* 90 */
ENTRY_SAME(munmap)
- ENTRY_SAME(truncate)
- ENTRY_SAME(ftruncate)
+ ENTRY_COMP(truncate)
+ ENTRY_COMP(ftruncate)
ENTRY_SAME(fchmod)
ENTRY_SAME(fchown) /* 95 */
ENTRY_SAME(getpriority)
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index d906f33441c..535b6d8a41c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -22,7 +22,7 @@ SYSCALL_SPU(chmod)
SYSCALL_SPU(lchown)
SYSCALL(ni_syscall)
OLDSYS(stat)
-SYSX_SPU(sys_lseek,ppc32_lseek,sys_lseek)
+COMPAT_SYS_SPU(lseek)
SYSCALL_SPU(getpid)
COMPAT_SYS(mount)
SYSX(sys_ni_syscall,sys_oldumount,sys_oldumount)
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index dbc44ba5b07..d0bafc0cdf0 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -146,24 +146,6 @@ asmlinkage long compat_sys_sendfile64_wrapper(u32 out_fd, u32 in_fd,
(off_t __user *)offset, count);
}
-off_t ppc32_lseek(unsigned int fd, u32 offset, unsigned int origin)
-{
- /* sign extend n */
- return sys_lseek(fd, (int)offset, origin);
-}
-
-long compat_sys_truncate(const char __user * path, u32 length)
-{
- /* sign extend length */
- return sys_truncate(path, (int)length);
-}
-
-long compat_sys_ftruncate(int fd, u32 length)
-{
- /* sign extend length */
- return sys_ftruncate(fd, (int)length);
-}
-
unsigned long compat_sys_mmap2(unsigned long addr, size_t len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
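
Aside, for context (illustrative, not part of the patch): the per-arch wrappers removed above existed only to sign-extend a 32-bit offset or length before handing it to the native 64-bit syscall, and the switch to the generic compat_sys_lseek()/compat_sys_truncate()/compat_sys_ftruncate() entries appears to make them redundant because those take signed 32-bit arguments already. A minimal sketch of the widening such a wrapper performed, with hypothetical names:

	/*
	 * Illustrative only: a compat syscall sees the low 32 bits of a
	 * register, so a negative off_t such as -4 arrives as 0xfffffffc
	 * and must be widened as a signed value, exactly as the removed
	 * ppc32_lseek() did with its (int)offset cast.
	 */
	#include <stdint.h>

	static inline int64_t widen_offset(uint32_t reg_low32)
	{
		/* convert to int32_t first; the widening then sign-extends */
		return (int64_t)(int32_t)reg_low32;
	}

With widen_offset(0xfffffffcu) evaluating to -4, the 64-bit sys_lseek() receives the offset the 32-bit caller intended.
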
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index c14faf39ae3..3c98c4dc5ac 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -67,12 +67,6 @@ ENTRY(sys32_lchown16_wrapper)
llgfr %r4,%r4 # __kernel_old_uid_emu31_t
jg sys32_lchown16 # branch to system call
-ENTRY(sys32_lseek_wrapper)
- llgfr %r2,%r2 # unsigned int
- lgfr %r3,%r3 # off_t
- llgfr %r4,%r4 # unsigned int
- jg sys_lseek # branch to system call
-
#sys32_getpid_wrapper # void
ENTRY(sys32_mount_wrapper)
@@ -331,16 +325,6 @@ ENTRY(sys32_munmap_wrapper)
llgfr %r3,%r3 # size_t
jg sys_munmap # branch to system call
-ENTRY(sys32_truncate_wrapper)
- llgtr %r2,%r2 # const char *
- lgfr %r3,%r3 # long
- jg sys_truncate # branch to system call
-
-ENTRY(sys32_ftruncate_wrapper)
- llgfr %r2,%r2 # unsigned int
- llgfr %r3,%r3 # unsigned long
- jg sys_ftruncate # branch to system call
-
ENTRY(sys32_fchmod_wrapper)
llgfr %r2,%r2 # unsigned int
llgfr %r3,%r3 # mode_t
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index aaac708aa11..630b935d128 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -27,7 +27,7 @@ SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */
SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/
NI_SYSCALL /* old break syscall holder */
NI_SYSCALL /* old stat syscall holder */
-SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
+SYSCALL(sys_lseek,sys_lseek,compat_sys_lseek)
SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
@@ -100,8 +100,8 @@ SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
SYSCALL(sys_old_mmap,sys_old_mmap,old32_mmap_wrapper) /* 90 */
SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
-SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
-SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
+SYSCALL(sys_truncate,sys_truncate,compat_sys_truncate)
+SYSCALL(sys_ftruncate,sys_ftruncate,compat_sys_ftruncate)
SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/
SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index cd5dc4d411d..b524f91dd0e 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -726,7 +726,7 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
-void do_signal32(sigset_t *oldset, struct pt_regs * regs)
+void do_signal32(struct pt_regs * regs)
{
struct ksignal ksig;
unsigned long orig_i0 = 0;
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 260ddcd412b..088134834da 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -21,7 +21,7 @@ sys_call_table32:
/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write
/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link
/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod
-/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys_lseek
+/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek
/*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16
/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause
/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice
@@ -43,8 +43,8 @@ sys_call_table32:
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
.word sys_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
- .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
-/*130*/ .word sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
+ .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
+/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
.word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
/*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
.word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
index 592f5a9a9c0..ad7a20cbc69 100644
--- a/arch/x86/ia32/sys_ia32.c
+++ b/arch/x86/ia32/sys_ia32.c
@@ -218,11 +218,6 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
* Some system calls that need sign extended arguments. This could be
* done by a generic wrapper.
*/
-long sys32_lseek(unsigned int fd, int offset, unsigned int whence)
-{
- return sys_lseek(fd, offset, whence);
-}
-
long sys32_kill(int pid, int sig)
{
return sys_kill(pid, sig);
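
The per-arch stubs removed above existed only to sign-extend a 32-bit offset or length before calling the native syscall. As a rough sketch (illustrative, not part of this patch), the generic compat handler that replaces them can rely on the signed 32-bit compat types to do that sign extension implicitly; the real definition is not shown in this diff.

	/*
	 * Sketch only.  compat_off_t is a signed 32-bit type, so widening it
	 * to the native parameter type sign-extends the offset, which is all
	 * the removed ppc32_lseek()/sys32_lseek() stubs did.
	 */
	COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset,
			       unsigned int, whence)
	{
		return sys_lseek(fd, offset, whence);
	}
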
diff --git a/arch/x86/include/asm/sys_ia32.h b/arch/x86/include/asm/sys_ia32.h
index 0218d917f50..8459efc3968 100644
--- a/arch/x86/include/asm/sys_ia32.h
+++ b/arch/x86/include/asm/sys_ia32.h
@@ -43,7 +43,6 @@ asmlinkage long sys32_pwrite(unsigned int, const char __user *, u32, u32, u32);
asmlinkage long sys32_personality(unsigned long);
asmlinkage long sys32_sendfile(int, int, compat_off_t __user *, s32);
-long sys32_lseek(unsigned int, int, unsigned int);
long sys32_kill(int, int);
long sys32_fadvise64_64(int, __u32, __u32, __u32, __u32, int);
long sys32_vm86_warning(void);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e89acdf6b77..84d32855f65 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1056,15 +1056,6 @@ void __init setup_arch(char **cmdline_p)
setup_bios_corruption_check();
#endif
- /*
- * In the memory hotplug case, the kernel needs info from SRAT to
- * determine which memory is hotpluggable before allocating memory
- * using memblock.
- */
- acpi_boot_table_init();
- early_acpi_boot_init();
- early_parse_srat();
-
#ifdef CONFIG_X86_32
printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
(max_pfn_mapped<<PAGE_SHIFT) - 1);
@@ -1110,6 +1101,10 @@ void __init setup_arch(char **cmdline_p)
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
+ acpi_boot_table_init();
+
+ early_acpi_boot_init();
+
initmem_init();
memblock_find_dma_reserve();
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ff3633c794c..72fe01e9e41 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -212,9 +212,10 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
* Allocate node data. Try node-local memory and then any node.
* Never allocate in DMA zone.
*/
- nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
+ nd_pa = memblock_alloc_nid(nd_size, SMP_CACHE_BYTES, nid);
if (!nd_pa) {
- pr_err("Cannot find %zu bytes in any node\n", nd_size);
+ pr_err("Cannot find %zu bytes in node %d\n",
+ nd_size, nid);
return;
}
nd = __va(nd_pa);
@@ -559,12 +560,10 @@ static int __init numa_init(int (*init_func)(void))
for (i = 0; i < MAX_LOCAL_APIC; i++)
set_apicid_to_node(i, NUMA_NO_NODE);
- /*
- * Do not clear numa_nodes_parsed or zero numa_meminfo here, because
- * SRAT was parsed earlier in early_parse_srat().
- */
+ nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
numa_reset_distance();
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 79836d01f78..cdd0da9dd53 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -141,126 +141,11 @@ static inline int save_add_info(void) {return 1;}
static inline int save_add_info(void) {return 0;}
#endif
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-static void __init
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
- int overlap, i;
- unsigned long start_pfn, end_pfn;
-
- start_pfn = PFN_DOWN(start);
- end_pfn = PFN_UP(end);
-
- /*
- * For movablemem_map=acpi:
- *
- * SRAT: |_____| |_____| |_________| |_________| ......
- * node id: 0 1 1 2
- * hotpluggable: n y y n
- * movablemem_map: |_____| |_________|
- *
- * Using movablemem_map, we can prevent memblock from allocating memory
- * on ZONE_MOVABLE at boot time.
- *
- * Before parsing SRAT, memblock has already reserve some memory ranges
- * for other purposes, such as for kernel image. We cannot prevent
- * kernel from using these memory, so we need to exclude these memory
- * even if it is hotpluggable.
- * Furthermore, to ensure the kernel has enough memory to boot, we make
- * all the memory on the node which the kernel resides in
- * un-hotpluggable.
- */
- if (hotpluggable && movablemem_map.acpi) {
- /* Exclude ranges reserved by memblock. */
- struct memblock_type *rgn = &memblock.reserved;
-
- for (i = 0; i < rgn->cnt; i++) {
- if (end <= rgn->regions[i].base ||
- start >= rgn->regions[i].base +
- rgn->regions[i].size)
- continue;
-
- /*
- * If the memory range overlaps the memory reserved by
- * memblock, then the kernel resides in this node.
- */
- node_set(node, movablemem_map.numa_nodes_kernel);
-
- goto out;
- }
-
- /*
- * If the kernel resides in this node, then the whole node
- * should not be hotpluggable.
- */
- if (node_isset(node, movablemem_map.numa_nodes_kernel))
- goto out;
-
- insert_movablemem_map(start_pfn, end_pfn);
-
- /*
- * numa_nodes_hotplug nodemask represents which nodes are put
- * into movablemem_map.map[].
- */
- node_set(node, movablemem_map.numa_nodes_hotplug);
- goto out;
- }
-
- /*
- * For movablemem_map=nn[KMG]@ss[KMG]:
- *
- * SRAT: |_____| |_____| |_________| |_________| ......
- * node id: 0 1 1 2
- * user specified: |__| |___|
- * movablemem_map: |___| |_________| |______| ......
- *
- * Using movablemem_map, we can prevent memblock from allocating memory
- * on ZONE_MOVABLE at boot time.
- *
- * NOTE: In this case, SRAT info will be ingored.
- */
- overlap = movablemem_map_overlap(start_pfn, end_pfn);
- if (overlap >= 0) {
- /*
- * If part of this range is in movablemem_map, we need to
- * add the range after it to extend the range to the end
- * of the node, because from the min address specified to
- * the end of the node will be ZONE_MOVABLE.
- */
- start_pfn = max(start_pfn,
- movablemem_map.map[overlap].start_pfn);
- insert_movablemem_map(start_pfn, end_pfn);
-
- /*
- * Set the nodemask, so that if the address range on one node
- * is not continuse, we can add the subsequent ranges on the
- * same node into movablemem_map.
- */
- node_set(node, movablemem_map.numa_nodes_hotplug);
- } else {
- if (node_isset(node, movablemem_map.numa_nodes_hotplug))
- /*
- * Insert the range if we already have movable ranges
- * on the same node.
- */
- insert_movablemem_map(start_pfn, end_pfn);
- }
-out:
- return;
-}
-#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-static inline void
-handle_movablemem(int node, u64 start, u64 end, u32 hotpluggable)
-{
-}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
-
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
- u32 hotpluggable;
int node, pxm;
if (srat_disabled())
@@ -269,8 +154,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
goto out_err_bad_srat;
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
goto out_err;
- hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
- if (hotpluggable && !save_add_info())
+ if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
goto out_err;
start = ma->base_address;
@@ -290,12 +174,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
node_set(node, numa_nodes_parsed);
- printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx] %s\n",
+ printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
node, pxm,
- (unsigned long long) start, (unsigned long long) end - 1,
- hotpluggable ? "Hot Pluggable": "");
-
- handle_movablemem(node, start, end, hotpluggable);
+ (unsigned long long) start, (unsigned long long) end - 1);
return 0;
out_err_bad_srat:
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index f2fe78ff22c..e6d55f0064d 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -25,7 +25,7 @@
16 i386 lchown sys_lchown16
17 i386 break
18 i386 oldstat sys_stat
-19 i386 lseek sys_lseek sys32_lseek
+19 i386 lseek sys_lseek compat_sys_lseek
20 i386 getpid sys_getpid
21 i386 mount sys_mount compat_sys_mount
22 i386 umount sys_oldumount
@@ -98,8 +98,8 @@
89 i386 readdir sys_old_readdir compat_sys_old_readdir
90 i386 mmap sys_old_mmap sys32_mmap
91 i386 munmap sys_munmap
-92 i386 truncate sys_truncate
-93 i386 ftruncate sys_ftruncate
+92 i386 truncate sys_truncate compat_sys_truncate
+93 i386 ftruncate sys_ftruncate compat_sys_ftruncate
94 i386 fchmod sys_fchmod
95 i386 fchown sys_fchown16
96 i386 getpriority sys_getpriority
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 59844ee149b..33e609f6358 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -282,10 +282,10 @@ acpi_table_parse_srat(enum acpi_srat_type id,
handler, max_entries);
}
-static int srat_mem_cnt;
-
-void __init early_parse_srat(void)
+int __init acpi_numa_init(void)
{
+ int cnt = 0;
+
/*
* Should not limit number with cpu num that is from NR_CPUS or nr_cpus=
* SRAT cpu entries could have different order with that in MADT.
@@ -295,24 +295,21 @@ void __init early_parse_srat(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, 0);
+ acpi_parse_x2apic_affinity, 0);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, 0);
- srat_mem_cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
- acpi_parse_memory_affinity,
- NR_NODE_MEMBLKS);
+ acpi_parse_processor_affinity, 0);
+ cnt = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
+ acpi_parse_memory_affinity,
+ NR_NODE_MEMBLKS);
}
-}
-int __init acpi_numa_init(void)
-{
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
acpi_numa_arch_fixup();
- if (srat_mem_cnt < 0)
- return srat_mem_cnt;
+ if (cnt < 0)
+ return cnt;
else if (!parsed_numa_memblks)
return -ENOENT;
return 0;
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 51c3ea2ed41..c599558faed 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -20,6 +20,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_dma.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -171,7 +172,13 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
if (dwc->initialized == true)
return;
- if (dws) {
+ if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) {
+ /* autoconfigure based on request line from DT */
+ if (dwc->direction == DMA_MEM_TO_DEV)
+ cfghi = DWC_CFGH_DST_PER(dwc->request_line);
+ else if (dwc->direction == DMA_DEV_TO_MEM)
+ cfghi = DWC_CFGH_SRC_PER(dwc->request_line);
+ } else if (dws) {
/*
* We need controller-specific data to set up slave
* transfers.
@@ -1226,49 +1233,64 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
-bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
+struct dw_dma_filter_args {
+ struct dw_dma *dw;
+ unsigned int req;
+ unsigned int src;
+ unsigned int dst;
+};
+
+static bool dw_dma_generic_filter(struct dma_chan *chan, void *param)
{
+ struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
- static struct dw_dma *last_dw;
- static char *last_bus_id;
- int i = -1;
+ struct dw_dma_filter_args *fargs = param;
+ struct dw_dma_slave *dws = &dwc->slave;
- /*
- * dmaengine framework calls this routine for all channels of all dma
- * controller, until true is returned. If 'param' bus_id is not
- * registered with a dma controller (dw), then there is no need of
- * running below function for all channels of dw.
- *
- * This block of code does this by saving the parameters of last
- * failure. If dw and param are same, i.e. trying on same dw with
- * different channel, return false.
- */
- if ((last_dw == dw) && (last_bus_id == param))
- return false;
- /*
- * Return true:
- * - If dw_dma's platform data is not filled with slave info, then all
- * dma controllers are fine for transfer.
- * - Or if param is NULL
- */
- if (!dw->sd || !param)
- return true;
+ /* ensure the device matches our channel */
+ if (chan->device != &fargs->dw->dma)
+ return false;
- while (++i < dw->sd_count) {
- if (!strcmp(dw->sd[i].bus_id, param)) {
- chan->private = &dw->sd[i];
- last_dw = NULL;
- last_bus_id = NULL;
+ dws->dma_dev = dw->dma.dev;
+ dws->cfg_hi = ~0;
+ dws->cfg_lo = ~0;
+ dws->src_master = fargs->src;
+ dws->dst_master = fargs->dst;
- return true;
- }
- }
+ dwc->request_line = fargs->req;
- last_dw = dw;
- last_bus_id = param;
- return false;
+ chan->private = dws;
+
+ return true;
+}
+
+static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dw_dma *dw = ofdma->of_dma_data;
+ struct dw_dma_filter_args fargs = {
+ .dw = dw,
+ };
+ dma_cap_mask_t cap;
+
+ if (dma_spec->args_count != 3)
+ return NULL;
+
+ fargs.req = be32_to_cpup(dma_spec->args+0);
+ fargs.src = be32_to_cpup(dma_spec->args+1);
+ fargs.dst = be32_to_cpup(dma_spec->args+2);
+
+ if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
+ fargs.src >= dw->nr_masters ||
+ fargs.dst >= dw->nr_masters))
+ return NULL;
+
+ dma_cap_zero(cap);
+ dma_cap_set(DMA_SLAVE, cap);
+
+ /* TODO: there should be a simpler way to do this */
+ return dma_request_channel(cap, dw_dma_generic_filter, &fargs);
}
-EXPORT_SYMBOL(dw_dma_generic_filter);
/* --------------------- Cyclic DMA API extensions -------------------- */
@@ -1554,9 +1576,8 @@ static void dw_dma_off(struct dw_dma *dw)
static struct dw_dma_platform_data *
dw_dma_parse_dt(struct platform_device *pdev)
{
- struct device_node *sn, *cn, *np = pdev->dev.of_node;
+ struct device_node *np = pdev->dev.of_node;
struct dw_dma_platform_data *pdata;
- struct dw_dma_slave *sd;
u32 tmp, arr[4];
if (!np) {
@@ -1568,7 +1589,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (!pdata)
return NULL;
- if (of_property_read_u32(np, "nr_channels", &pdata->nr_channels))
+ if (of_property_read_u32(np, "dma-channels", &pdata->nr_channels))
return NULL;
if (of_property_read_bool(np, "is_private"))
@@ -1583,7 +1604,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
if (!of_property_read_u32(np, "block_size", &tmp))
pdata->block_size = tmp;
- if (!of_property_read_u32(np, "nr_masters", &tmp)) {
+ if (!of_property_read_u32(np, "dma-masters", &tmp)) {
if (tmp > 4)
return NULL;
@@ -1595,36 +1616,6 @@ dw_dma_parse_dt(struct platform_device *pdev)
for (tmp = 0; tmp < pdata->nr_masters; tmp++)
pdata->data_width[tmp] = arr[tmp];
- /* parse slave data */
- sn = of_find_node_by_name(np, "slave_info");
- if (!sn)
- return pdata;
-
- /* calculate number of slaves */
- tmp = of_get_child_count(sn);
- if (!tmp)
- return NULL;
-
- sd = devm_kzalloc(&pdev->dev, sizeof(*sd) * tmp, GFP_KERNEL);
- if (!sd)
- return NULL;
-
- pdata->sd = sd;
- pdata->sd_count = tmp;
-
- for_each_child_of_node(sn, cn) {
- sd->dma_dev = &pdev->dev;
- of_property_read_string(cn, "bus_id", &sd->bus_id);
- of_property_read_u32(cn, "cfg_hi", &sd->cfg_hi);
- of_property_read_u32(cn, "cfg_lo", &sd->cfg_lo);
- if (!of_property_read_u32(cn, "src_master", &tmp))
- sd->src_master = tmp;
-
- if (!of_property_read_u32(cn, "dst_master", &tmp))
- sd->dst_master = tmp;
- sd++;
- }
-
return pdata;
}
#else
@@ -1705,8 +1696,6 @@ static int dw_probe(struct platform_device *pdev)
clk_prepare_enable(dw->clk);
dw->regs = regs;
- dw->sd = pdata->sd;
- dw->sd_count = pdata->sd_count;
/* get hardware configuration parameters */
if (autocfg) {
@@ -1837,6 +1826,14 @@ static int dw_probe(struct platform_device *pdev)
dma_async_device_register(&dw->dma);
+ if (pdev->dev.of_node) {
+ err = of_dma_controller_register(pdev->dev.of_node,
+ dw_dma_xlate, dw);
+ if (err && err != -ENODEV)
+ dev_err(&pdev->dev,
+ "could not register of_dma_controller\n");
+ }
+
return 0;
}
@@ -1845,6 +1842,8 @@ static int dw_remove(struct platform_device *pdev)
struct dw_dma *dw = platform_get_drvdata(pdev);
struct dw_dma_chan *dwc, *_dwc;
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
dw_dma_off(dw);
dma_async_device_unregister(&dw->dma);
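
With of_dma_controller_register() wired up above, a client driver can obtain a channel from its device-tree description instead of the removed slave_info/bus_id matching. A minimal, hypothetical consumer sketch follows; the "rx" name and the surrounding driver are assumptions, not taken from this patch.

	#include <linux/dmaengine.h>

	/*
	 * Hypothetical consumer: resolve this device's "dmas"/"dma-names"
	 * properties; dw_dma_xlate() above decodes the three specifier cells
	 * (request line, src master, dst master) and configures the channel.
	 */
	static struct dma_chan *example_request_rx(struct device *dev)
	{
		return dma_request_slave_channel(dev, "rx"); /* NULL if unavailable */
	}
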
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index 88dd8eb3195..cf0ce5c77d6 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -13,6 +13,7 @@
#include <linux/dw_dmac.h>
#define DW_DMA_MAX_NR_CHANNELS 8
+#define DW_DMA_MAX_NR_REQUESTS 16
/* flow controller */
enum dw_dma_fc {
@@ -211,6 +212,8 @@ struct dw_dma_chan {
/* hardware configuration */
unsigned int block_size;
bool nollp;
+ unsigned int request_line;
+ struct dw_dma_slave slave;
/* configuration passed via DMA_SLAVE_CONFIG */
struct dma_slave_config dma_sconfig;
@@ -239,10 +242,6 @@ struct dw_dma {
struct tasklet_struct tasklet;
struct clk *clk;
- /* slave information */
- struct dw_dma_slave *sd;
- unsigned int sd_count;
-
u8 all_chan_mask;
/* hardware configuration */
diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c
index 2d58f939d27..833dd1afbf4 100644
--- a/drivers/hsi/hsi.c
+++ b/drivers/hsi/hsi.c
@@ -420,7 +420,7 @@ static int hsi_event_notifier_call(struct notifier_block *nb,
/**
* hsi_register_port_event - Register a client to receive port events
* @cl: HSI client that wants to receive port events
- * @cb: Event handler callback
+ * @handler: Event handler callback
*
* Clients should register a callback to be able to receive
* events from the ports. Registration should happen after
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 91a02eeeb31..e30b490055a 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -210,7 +210,7 @@ config DM_DEBUG
config DM_BUFIO
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
---help---
This interface allows you to do buffered I/O on a device and acts
as a cache, holding recently-read blocks in memory and performing
@@ -218,7 +218,7 @@ config DM_BUFIO
config DM_BIO_PRISON
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
---help---
Some bio locking schemes used by other device-mapper targets
including thin provisioning.
@@ -251,8 +251,8 @@ config DM_SNAPSHOT
Allow volume managers to take writable snapshots of a device.
config DM_THIN_PROVISIONING
- tristate "Thin provisioning target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Thin provisioning target"
+ depends on BLK_DEV_DM
select DM_PERSISTENT_DATA
select DM_BIO_PRISON
---help---
@@ -268,6 +268,37 @@ config DM_DEBUG_BLOCK_STACK_TRACING
If unsure, say N.
+config DM_CACHE
+ tristate "Cache target (EXPERIMENTAL)"
+ depends on BLK_DEV_DM
+ default n
+ select DM_PERSISTENT_DATA
+ select DM_BIO_PRISON
+ ---help---
+ dm-cache attempts to improve performance of a block device by
+ moving frequently used data to a smaller, higher performance
+ device. Different 'policy' plugins can be used to change the
+ algorithms used to select which blocks are promoted, demoted,
+ cleaned etc. It supports writeback and writethrough modes.
+
+config DM_CACHE_MQ
+ tristate "MQ Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A cache policy that uses a multiqueue ordered by recent hit
+ count to select which blocks should be promoted and demoted.
+ This is meant to be a general purpose policy. It prioritises
+ reads over writes.
+
+config DM_CACHE_CLEANER
+ tristate "Cleaner Cache Policy (EXPERIMENTAL)"
+ depends on DM_CACHE
+ default y
+ ---help---
+ A simple cache policy that writes back all data to the
+ origin. Used when decommissioning a dm-cache.
+
config DM_MIRROR
tristate "Mirror target"
depends on BLK_DEV_DM
@@ -302,8 +333,8 @@ config DM_RAID
in one of the available parity distribution methods.
config DM_LOG_USERSPACE
- tristate "Mirror userspace logging (EXPERIMENTAL)"
- depends on DM_MIRROR && EXPERIMENTAL && NET
+ tristate "Mirror userspace logging"
+ depends on DM_MIRROR && NET
select CONNECTOR
---help---
The userspace logging module provides a mechanism for
@@ -350,8 +381,8 @@ config DM_MULTIPATH_ST
If unsure, say N.
config DM_DELAY
- tristate "I/O delaying target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "I/O delaying target"
+ depends on BLK_DEV_DM
---help---
A target that delays reads and/or writes and can send
them to different devices. Useful for testing.
@@ -365,14 +396,14 @@ config DM_UEVENT
Generate udev events for DM events.
config DM_FLAKEY
- tristate "Flakey target (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Flakey target"
+ depends on BLK_DEV_DM
---help---
A target that intermittently fails I/O for debugging purposes.
config DM_VERITY
- tristate "Verity target support (EXPERIMENTAL)"
- depends on BLK_DEV_DM && EXPERIMENTAL
+ tristate "Verity target support"
+ depends on BLK_DEV_DM
select CRYPTO
select CRYPTO_HASH
select DM_BUFIO
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 94dce8b4932..7ceeaefc0e9 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -11,6 +11,9 @@ dm-mirror-y += dm-raid1.o
dm-log-userspace-y \
+= dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
+dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o
+dm-cache-mq-y += dm-cache-policy-mq.o
+dm-cache-cleaner-y += dm-cache-policy-cleaner.o
md-mod-y += md.o bitmap.o
raid456-y += raid5.o
@@ -44,6 +47,9 @@ obj-$(CONFIG_DM_ZERO) += dm-zero.o
obj-$(CONFIG_DM_RAID) += dm-raid.o
obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
+obj-$(CONFIG_DM_CACHE) += dm-cache.o
+obj-$(CONFIG_DM_CACHE_MQ) += dm-cache-mq.o
+obj-$(CONFIG_DM_CACHE_CLEANER) += dm-cache-cleaner.o
ifeq ($(CONFIG_DM_UEVENT),y)
dm-mod-objs += dm-uevent.o
diff --git a/drivers/md/dm-bio-prison.c b/drivers/md/dm-bio-prison.c
index d9d3f1c7b66..85f0b707425 100644
--- a/drivers/md/dm-bio-prison.c
+++ b/drivers/md/dm-bio-prison.c
@@ -14,14 +14,6 @@
/*----------------------------------------------------------------*/
-struct dm_bio_prison_cell {
- struct hlist_node list;
- struct dm_bio_prison *prison;
- struct dm_cell_key key;
- struct bio *holder;
- struct bio_list bios;
-};
-
struct dm_bio_prison {
spinlock_t lock;
mempool_t *cell_pool;
@@ -87,6 +79,19 @@ void dm_bio_prison_destroy(struct dm_bio_prison *prison)
}
EXPORT_SYMBOL_GPL(dm_bio_prison_destroy);
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison, gfp_t gfp)
+{
+ return mempool_alloc(prison->cell_pool, gfp);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_alloc_cell);
+
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
+{
+ mempool_free(cell, prison->cell_pool);
+}
+EXPORT_SYMBOL_GPL(dm_bio_prison_free_cell);
+
static uint32_t hash_key(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
const unsigned long BIG_PRIME = 4294967291UL;
@@ -114,91 +119,95 @@ static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
return NULL;
}
-/*
- * This may block if a new cell needs allocating. You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
- *
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
- */
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
- struct bio *inmate, struct dm_bio_prison_cell **ref)
+static void __setup_new_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *holder,
+ uint32_t hash,
+ struct dm_bio_prison_cell *cell)
{
- int r = 1;
- unsigned long flags;
- uint32_t hash = hash_key(prison, key);
- struct dm_bio_prison_cell *cell, *cell2;
-
- BUG_ON(hash > prison->nr_buckets);
-
- spin_lock_irqsave(&prison->lock, flags);
-
- cell = __search_bucket(prison->cells + hash, key);
- if (cell) {
- bio_list_add(&cell->bios, inmate);
- goto out;
- }
+ memcpy(&cell->key, key, sizeof(cell->key));
+ cell->holder = holder;
+ bio_list_init(&cell->bios);
+ hlist_add_head(&cell->list, prison->cells + hash);
+}
- /*
- * Allocate a new cell
- */
- spin_unlock_irqrestore(&prison->lock, flags);
- cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
- spin_lock_irqsave(&prison->lock, flags);
+static int __bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ uint32_t hash = hash_key(prison, key);
+ struct dm_bio_prison_cell *cell;
- /*
- * We've been unlocked, so we have to double check that
- * nobody else has inserted this cell in the meantime.
- */
cell = __search_bucket(prison->cells + hash, key);
if (cell) {
- mempool_free(cell2, prison->cell_pool);
- bio_list_add(&cell->bios, inmate);
- goto out;
+ if (inmate)
+ bio_list_add(&cell->bios, inmate);
+ *cell_result = cell;
+ return 1;
}
- /*
- * Use new cell.
- */
- cell = cell2;
-
- cell->prison = prison;
- memcpy(&cell->key, key, sizeof(cell->key));
- cell->holder = inmate;
- bio_list_init(&cell->bios);
- hlist_add_head(&cell->list, prison->cells + hash);
+ __setup_new_cell(prison, key, inmate, hash, cell_prealloc);
+ *cell_result = cell_prealloc;
+ return 0;
+}
- r = 0;
+static int bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ unsigned long flags;
-out:
+ spin_lock_irqsave(&prison->lock, flags);
+ r = __bio_detain(prison, key, inmate, cell_prealloc, cell_result);
spin_unlock_irqrestore(&prison->lock, flags);
- *ref = cell;
-
return r;
}
+
+int dm_bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ return bio_detain(prison, key, inmate, cell_prealloc, cell_result);
+}
EXPORT_SYMBOL_GPL(dm_bio_detain);
+int dm_get_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result)
+{
+ return bio_detain(prison, key, NULL, cell_prealloc, cell_result);
+}
+EXPORT_SYMBOL_GPL(dm_get_cell);
+
/*
* @inmates must have been initialised prior to this call
*/
-static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
- struct dm_bio_prison *prison = cell->prison;
-
hlist_del(&cell->list);
if (inmates) {
- bio_list_add(inmates, cell->holder);
+ if (cell->holder)
+ bio_list_add(inmates, cell->holder);
bio_list_merge(inmates, &cell->bios);
}
-
- mempool_free(cell, prison->cell_pool);
}
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
+void dm_cell_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
{
unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
__cell_release(cell, bios);
@@ -209,20 +218,18 @@ EXPORT_SYMBOL_GPL(dm_cell_release);
/*
* Sometimes we don't want the holder, just the additional bios.
*/
-static void __cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
- struct dm_bio_prison *prison = cell->prison;
-
hlist_del(&cell->list);
bio_list_merge(inmates, &cell->bios);
-
- mempool_free(cell, prison->cell_pool);
}
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates)
{
unsigned long flags;
- struct dm_bio_prison *prison = cell->prison;
spin_lock_irqsave(&prison->lock, flags);
__cell_release_no_holder(cell, inmates);
@@ -230,9 +237,9 @@ void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list
}
EXPORT_SYMBOL_GPL(dm_cell_release_no_holder);
-void dm_cell_error(struct dm_bio_prison_cell *cell)
+void dm_cell_error(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell)
{
- struct dm_bio_prison *prison = cell->prison;
struct bio_list bios;
struct bio *bio;
unsigned long flags;
diff --git a/drivers/md/dm-bio-prison.h b/drivers/md/dm-bio-prison.h
index 53d1a7a84e2..3f833190ead 100644
--- a/drivers/md/dm-bio-prison.h
+++ b/drivers/md/dm-bio-prison.h
@@ -22,7 +22,6 @@
* subsequently unlocked the bios become available.
*/
struct dm_bio_prison;
-struct dm_bio_prison_cell;
/* FIXME: this needs to be more abstract */
struct dm_cell_key {
@@ -31,21 +30,62 @@ struct dm_cell_key {
dm_block_t block;
};
+/*
+ * Treat this as opaque, only in header so callers can manage allocation
+ * themselves.
+ */
+struct dm_bio_prison_cell {
+ struct hlist_node list;
+ struct dm_cell_key key;
+ struct bio *holder;
+ struct bio_list bios;
+};
+
struct dm_bio_prison *dm_bio_prison_create(unsigned nr_cells);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);
/*
- * This may block if a new cell needs allocating. You must ensure that
- * cells will be unlocked even if the calling thread is blocked.
+ * These two functions just wrap a mempool. This is a transitory step:
+ * Eventually all bio prison clients should manage their own cell memory.
*
- * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
+ * in interrupt context or passed GFP_NOWAIT.
*/
-int dm_bio_detain(struct dm_bio_prison *prison, struct dm_cell_key *key,
- struct bio *inmate, struct dm_bio_prison_cell **ref);
+struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
+ gfp_t gfp);
+void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
-void dm_cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios);
-void dm_cell_release_no_holder(struct dm_bio_prison_cell *cell, struct bio_list *inmates);
-void dm_cell_error(struct dm_bio_prison_cell *cell);
+/*
+ * Creates, or retrieves a cell for the given key.
+ *
+ * Returns 1 if pre-existing cell returned, zero if new cell created using
+ * @cell_prealloc.
+ */
+int dm_get_cell(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result);
+
+/*
+ * An atomic op that combines retrieving a cell, and adding a bio to it.
+ *
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
+ */
+int dm_bio_detain(struct dm_bio_prison *prison,
+ struct dm_cell_key *key,
+ struct bio *inmate,
+ struct dm_bio_prison_cell *cell_prealloc,
+ struct dm_bio_prison_cell **cell_result);
+
+void dm_cell_release(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios);
+void dm_cell_release_no_holder(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *inmates);
+void dm_cell_error(struct dm_bio_prison *prison,
+ struct dm_bio_prison_cell *cell);
/*----------------------------------------------------------------*/
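
The comments above describe the new calling convention: cell memory is allocated by the caller and only consumed when a new cell is actually created. A minimal sketch of that pattern, assuming GFP_NOIO context (illustrative, not part of the patch):

	static int example_detain(struct dm_bio_prison *prison,
				  struct dm_cell_key *key, struct bio *bio,
				  struct dm_bio_prison_cell **out)
	{
		int r;
		struct dm_bio_prison_cell *prealloc;

		/* cannot fail with GFP_NOIO, per the comment above */
		prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

		r = dm_bio_detain(prison, key, bio, prealloc, out);
		if (r)
			/* cell already existed; our preallocation was unused */
			dm_bio_prison_free_cell(prison, prealloc);

		return r; /* 1: bio queued behind existing holder, 0: new holder */
	}
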
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 93205e32a00..3c955e10a61 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1192,7 +1192,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
struct dm_io_request io_req = {
- .bi_rw = REQ_FLUSH,
+ .bi_rw = WRITE_FLUSH,
.mem.type = DM_IO_KMEM,
.mem.ptr.addr = NULL,
.client = c->dm_io,
diff --git a/drivers/md/dm-cache-block-types.h b/drivers/md/dm-cache-block-types.h
new file mode 100644
index 00000000000..bed4ad4e1b7
--- /dev/null
+++ b/drivers/md/dm-cache-block-types.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_BLOCK_TYPES_H
+#define DM_CACHE_BLOCK_TYPES_H
+
+#include "persistent-data/dm-block-manager.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * It's helpful to get sparse to differentiate between indexes into the
+ * origin device, indexes into the cache device, and indexes into the
+ * discard bitset.
+ */
+
+typedef dm_block_t __bitwise__ dm_oblock_t;
+typedef uint32_t __bitwise__ dm_cblock_t;
+typedef dm_block_t __bitwise__ dm_dblock_t;
+
+static inline dm_oblock_t to_oblock(dm_block_t b)
+{
+ return (__force dm_oblock_t) b;
+}
+
+static inline dm_block_t from_oblock(dm_oblock_t b)
+{
+ return (__force dm_block_t) b;
+}
+
+static inline dm_cblock_t to_cblock(uint32_t b)
+{
+ return (__force dm_cblock_t) b;
+}
+
+static inline uint32_t from_cblock(dm_cblock_t b)
+{
+ return (__force uint32_t) b;
+}
+
+static inline dm_dblock_t to_dblock(dm_block_t b)
+{
+ return (__force dm_dblock_t) b;
+}
+
+static inline dm_block_t from_dblock(dm_dblock_t b)
+{
+ return (__force dm_block_t) b;
+}
+
+#endif /* DM_CACHE_BLOCK_TYPES_H */
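
The __bitwise__ typedefs above exist purely so that sparse can catch an index from one space being used in another. A tiny illustration of what that buys, assuming a sparse-enabled build (make C=1); the function and its name are hypothetical:

	/*
	 * Illustration only.  Mixing the index spaces directly is flagged by
	 * sparse; going through the from_/to_ helpers makes the cast explicit.
	 */
	static void example_index_types(dm_oblock_t ob)
	{
		dm_cblock_t cb;

		/* cb = ob; */	/* sparse: incompatible bitwise types */
		cb = to_cblock((uint32_t) from_oblock(ob)); /* explicit conversion */
		(void) cb;
	}
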
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
new file mode 100644
index 00000000000..fbd3625f274
--- /dev/null
+++ b/drivers/md/dm-cache-metadata.c
@@ -0,0 +1,1146 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-metadata.h"
+
+#include "persistent-data/dm-array.h"
+#include "persistent-data/dm-bitset.h"
+#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-disk.h"
+#include "persistent-data/dm-transaction-manager.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache metadata"
+
+#define CACHE_SUPERBLOCK_MAGIC 06142003
+#define CACHE_SUPERBLOCK_LOCATION 0
+#define CACHE_VERSION 1
+#define CACHE_METADATA_CACHE_SIZE 64
+
+/*
+ * 3 for btree insert +
+ * 2 for btree lookup used within space map
+ */
+#define CACHE_MAX_CONCURRENT_LOCKS 5
+#define SPACE_MAP_ROOT_SIZE 128
+
+enum superblock_flag_bits {
+ /* for spotting crashes that would invalidate the dirty bitset */
+ CLEAN_SHUTDOWN,
+};
+
+/*
+ * Each mapping from cache block -> origin block carries a set of flags.
+ */
+enum mapping_bits {
+ /*
+ * A valid mapping. Because we're using an array we clear this
+ * flag for an non existant mapping.
+ */
+ M_VALID = 1,
+
+ /*
+ * The data on the cache is different from that on the origin.
+ */
+ M_DIRTY = 2
+};
+
+struct cache_disk_superblock {
+ __le32 csum;
+ __le32 flags;
+ __le64 blocknr;
+
+ __u8 uuid[16];
+ __le64 magic;
+ __le32 version;
+
+ __u8 policy_name[CACHE_POLICY_NAME_SIZE];
+ __le32 policy_hint_size;
+
+ __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
+ __le64 mapping_root;
+ __le64 hint_root;
+
+ __le64 discard_root;
+ __le64 discard_block_size;
+ __le64 discard_nr_blocks;
+
+ __le32 data_block_size;
+ __le32 metadata_block_size;
+ __le32 cache_blocks;
+
+ __le32 compat_flags;
+ __le32 compat_ro_flags;
+ __le32 incompat_flags;
+
+ __le32 read_hits;
+ __le32 read_misses;
+ __le32 write_hits;
+ __le32 write_misses;
+} __packed;
+
+struct dm_cache_metadata {
+ struct block_device *bdev;
+ struct dm_block_manager *bm;
+ struct dm_space_map *metadata_sm;
+ struct dm_transaction_manager *tm;
+
+ struct dm_array_info info;
+ struct dm_array_info hint_info;
+ struct dm_disk_bitset discard_info;
+
+ struct rw_semaphore root_lock;
+ dm_block_t root;
+ dm_block_t hint_root;
+ dm_block_t discard_root;
+
+ sector_t discard_block_size;
+ dm_dblock_t discard_nr_blocks;
+
+ sector_t data_block_size;
+ dm_cblock_t cache_blocks;
+ bool changed:1;
+ bool clean_when_opened:1;
+
+ char policy_name[CACHE_POLICY_NAME_SIZE];
+ size_t policy_hint_size;
+ struct dm_cache_statistics stats;
+};
+
+/*-------------------------------------------------------------------
+ * superblock validator
+ *-----------------------------------------------------------------*/
+
+#define SUPERBLOCK_CSUM_XOR 9031977
+
+static void sb_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct cache_disk_superblock *disk_super = dm_block_data(b);
+
+ disk_super->blocknr = cpu_to_le64(dm_block_location(b));
+ disk_super->csum = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+}
+
+static int sb_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t sb_block_size)
+{
+ struct cache_disk_superblock *disk_super = dm_block_data(b);
+ __le32 csum_le;
+
+ if (dm_block_location(b) != le64_to_cpu(disk_super->blocknr)) {
+ DMERR("sb_check failed: blocknr %llu: wanted %llu",
+ le64_to_cpu(disk_super->blocknr),
+ (unsigned long long)dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ if (le64_to_cpu(disk_super->magic) != CACHE_SUPERBLOCK_MAGIC) {
+ DMERR("sb_check failed: magic %llu: wanted %llu",
+ le64_to_cpu(disk_super->magic),
+ (unsigned long long)CACHE_SUPERBLOCK_MAGIC);
+ return -EILSEQ;
+ }
+
+ csum_le = cpu_to_le32(dm_bm_checksum(&disk_super->flags,
+ sb_block_size - sizeof(__le32),
+ SUPERBLOCK_CSUM_XOR));
+ if (csum_le != disk_super->csum) {
+ DMERR("sb_check failed: csum %u: wanted %u",
+ le32_to_cpu(csum_le), le32_to_cpu(disk_super->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator sb_validator = {
+ .name = "superblock",
+ .prepare_for_write = sb_prepare_for_write,
+ .check = sb_check
+};
+
+/*----------------------------------------------------------------*/
+
+static int superblock_read_lock(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_read_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock_zero(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock_zero(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+static int superblock_lock(struct dm_cache_metadata *cmd,
+ struct dm_block **sblock)
+{
+ return dm_bm_write_lock(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &sb_validator, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+static int __superblock_all_zeroes(struct dm_block_manager *bm, int *result)
+{
+ int r;
+ unsigned i;
+ struct dm_block *b;
+ __le64 *data_le, zero = cpu_to_le64(0);
+ unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);
+
+ /*
+ * We can't use a validator here - it may be all zeroes.
+ */
+ r = dm_bm_read_lock(bm, CACHE_SUPERBLOCK_LOCATION, NULL, &b);
+ if (r)
+ return r;
+
+ data_le = dm_block_data(b);
+ *result = 1;
+ for (i = 0; i < sb_block_size; i++) {
+ if (data_le[i] != zero) {
+ *result = 0;
+ break;
+ }
+ }
+
+ return dm_bm_unlock(b);
+}
+
+static void __setup_mapping_info(struct dm_cache_metadata *cmd)
+{
+ struct dm_btree_value_type vt;
+
+ vt.context = NULL;
+ vt.size = sizeof(__le64);
+ vt.inc = NULL;
+ vt.dec = NULL;
+ vt.equal = NULL;
+ dm_array_info_init(&cmd->info, cmd->tm, &vt);
+
+ if (cmd->policy_hint_size) {
+ vt.size = sizeof(__le32);
+ dm_array_info_init(&cmd->hint_info, cmd->tm, &vt);
+ }
+}
+
+static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+
+ /* FIXME: see if we can lose the max sectors limit */
+ if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
+ bdev_size = DM_CACHE_METADATA_MAX_SECTORS;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock_zero(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = 0;
+ memset(disk_super->uuid, 0, sizeof(disk_super->uuid));
+ disk_super->magic = cpu_to_le64(CACHE_SUPERBLOCK_MAGIC);
+ disk_super->version = cpu_to_le32(CACHE_VERSION);
+ memset(disk_super->policy_name, 0, CACHE_POLICY_NAME_SIZE);
+ disk_super->policy_hint_size = 0;
+
+ r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0)
+ goto bad_locked;
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+ disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+ disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+ disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->metadata_block_size = cpu_to_le32(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->data_block_size = cpu_to_le32(cmd->data_block_size);
+ disk_super->cache_blocks = cpu_to_le32(0);
+ memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+
+ disk_super->read_hits = cpu_to_le32(0);
+ disk_super->read_misses = cpu_to_le32(0);
+ disk_super->write_hits = cpu_to_le32(0);
+ disk_super->write_misses = cpu_to_le32(0);
+
+ return dm_tm_commit(cmd->tm, sblock);
+
+bad_locked:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __format_metadata(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ r = dm_tm_create_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ &cmd->tm, &cmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+
+ __setup_mapping_info(cmd);
+
+ r = dm_array_empty(&cmd->info, &cmd->root);
+ if (r < 0)
+ goto bad;
+
+ dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+
+ r = dm_bitset_empty(&cmd->discard_info, &cmd->discard_root);
+ if (r < 0)
+ goto bad;
+
+ cmd->discard_block_size = 0;
+ cmd->discard_nr_blocks = 0;
+
+ r = __write_initial_superblock(cmd);
+ if (r)
+ goto bad;
+
+ cmd->clean_when_opened = true;
+ return 0;
+
+bad:
+ dm_tm_destroy(cmd->tm);
+ dm_sm_destroy(cmd->metadata_sm);
+
+ return r;
+}
+
+static int __check_incompat_features(struct cache_disk_superblock *disk_super,
+ struct dm_cache_metadata *cmd)
+{
+ uint32_t features;
+
+ features = le32_to_cpu(disk_super->incompat_flags) & ~DM_CACHE_FEATURE_INCOMPAT_SUPP;
+ if (features) {
+ DMERR("could not access metadata due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ /*
+ * Check for read-only metadata to skip the following RDWR checks.
+ */
+ if (get_disk_ro(cmd->bdev->bd_disk))
+ return 0;
+
+ features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP;
+ if (features) {
+ DMERR("could not access metadata RDWR due to unsupported optional features (%lx).",
+ (unsigned long)features);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __open_metadata(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct cache_disk_superblock *disk_super;
+ unsigned long sb_flags;
+
+ r = superblock_read_lock(cmd, &sblock);
+ if (r < 0) {
+ DMERR("couldn't read lock superblock");
+ return r;
+ }
+
+ disk_super = dm_block_data(sblock);
+
+ r = __check_incompat_features(disk_super, cmd);
+ if (r < 0)
+ goto bad;
+
+ r = dm_tm_open_with_sm(cmd->bm, CACHE_SUPERBLOCK_LOCATION,
+ disk_super->metadata_space_map_root,
+ sizeof(disk_super->metadata_space_map_root),
+ &cmd->tm, &cmd->metadata_sm);
+ if (r < 0) {
+ DMERR("tm_open_with_sm failed");
+ goto bad;
+ }
+
+ __setup_mapping_info(cmd);
+ dm_disk_bitset_init(cmd->tm, &cmd->discard_info);
+ sb_flags = le32_to_cpu(disk_super->flags);
+ cmd->clean_when_opened = test_bit(CLEAN_SHUTDOWN, &sb_flags);
+ return dm_bm_unlock(sblock);
+
+bad:
+ dm_bm_unlock(sblock);
+ return r;
+}
+
+static int __open_or_format_metadata(struct dm_cache_metadata *cmd,
+ bool format_device)
+{
+ int r, unformatted;
+
+ r = __superblock_all_zeroes(cmd->bm, &unformatted);
+ if (r)
+ return r;
+
+ if (unformatted)
+ return format_device ? __format_metadata(cmd) : -EPERM;
+
+ return __open_metadata(cmd);
+}
+
+static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
+ bool may_format_device)
+{
+ int r;
+ cmd->bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE,
+ CACHE_METADATA_CACHE_SIZE,
+ CACHE_MAX_CONCURRENT_LOCKS);
+ if (IS_ERR(cmd->bm)) {
+ DMERR("could not create block manager");
+ return PTR_ERR(cmd->bm);
+ }
+
+ r = __open_or_format_metadata(cmd, may_format_device);
+ if (r)
+ dm_block_manager_destroy(cmd->bm);
+
+ return r;
+}
+
+static void __destroy_persistent_data_objects(struct dm_cache_metadata *cmd)
+{
+ dm_sm_destroy(cmd->metadata_sm);
+ dm_tm_destroy(cmd->tm);
+ dm_block_manager_destroy(cmd->bm);
+}
+
+typedef unsigned long (*flags_mutator)(unsigned long);
+
+static void update_flags(struct cache_disk_superblock *disk_super,
+ flags_mutator mutator)
+{
+ uint32_t sb_flags = mutator(le32_to_cpu(disk_super->flags));
+ disk_super->flags = cpu_to_le32(sb_flags);
+}
+
+static unsigned long set_clean_shutdown(unsigned long flags)
+{
+ set_bit(CLEAN_SHUTDOWN, &flags);
+ return flags;
+}
+
+static unsigned long clear_clean_shutdown(unsigned long flags)
+{
+ clear_bit(CLEAN_SHUTDOWN, &flags);
+ return flags;
+}
+
+static void read_superblock_fields(struct dm_cache_metadata *cmd,
+ struct cache_disk_superblock *disk_super)
+{
+ cmd->root = le64_to_cpu(disk_super->mapping_root);
+ cmd->hint_root = le64_to_cpu(disk_super->hint_root);
+ cmd->discard_root = le64_to_cpu(disk_super->discard_root);
+ cmd->discard_block_size = le64_to_cpu(disk_super->discard_block_size);
+ cmd->discard_nr_blocks = to_dblock(le64_to_cpu(disk_super->discard_nr_blocks));
+ cmd->data_block_size = le32_to_cpu(disk_super->data_block_size);
+ cmd->cache_blocks = to_cblock(le32_to_cpu(disk_super->cache_blocks));
+ strncpy(cmd->policy_name, disk_super->policy_name, sizeof(cmd->policy_name));
+ cmd->policy_hint_size = le32_to_cpu(disk_super->policy_hint_size);
+
+ cmd->stats.read_hits = le32_to_cpu(disk_super->read_hits);
+ cmd->stats.read_misses = le32_to_cpu(disk_super->read_misses);
+ cmd->stats.write_hits = le32_to_cpu(disk_super->write_hits);
+ cmd->stats.write_misses = le32_to_cpu(disk_super->write_misses);
+
+ cmd->changed = false;
+}
+
+/*
+ * The mutator updates the superblock flags.
+ */
+static int __begin_transaction_flags(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+{
+ int r;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ r = superblock_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ update_flags(disk_super, mutator);
+ read_superblock_fields(cmd, disk_super);
+
+ return dm_bm_flush_and_unlock(cmd->bm, sblock);
+}
+
+static int __begin_transaction(struct dm_cache_metadata *cmd)
+{
+ int r;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * We re-read the superblock every time. Shouldn't need to do this
+ * really.
+ */
+ r = superblock_read_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+ read_superblock_fields(cmd, disk_super);
+ dm_bm_unlock(sblock);
+
+ return 0;
+}
+
+static int __commit_transaction(struct dm_cache_metadata *cmd,
+ flags_mutator mutator)
+{
+ int r;
+ size_t metadata_len;
+ struct cache_disk_superblock *disk_super;
+ struct dm_block *sblock;
+
+ /*
+ * We need to know if the cache_disk_superblock exceeds a 512-byte sector.
+ */
+ BUILD_BUG_ON(sizeof(struct cache_disk_superblock) > 512);
+
+ r = dm_bitset_flush(&cmd->discard_info, cmd->discard_root,
+ &cmd->discard_root);
+ if (r)
+ return r;
+
+ r = dm_tm_pre_commit(cmd->tm);
+ if (r < 0)
+ return r;
+
+ r = dm_sm_root_size(cmd->metadata_sm, &metadata_len);
+ if (r < 0)
+ return r;
+
+ r = superblock_lock(cmd, &sblock);
+ if (r)
+ return r;
+
+ disk_super = dm_block_data(sblock);
+
+ if (mutator)
+ update_flags(disk_super, mutator);
+
+ disk_super->mapping_root = cpu_to_le64(cmd->root);
+ disk_super->hint_root = cpu_to_le64(cmd->hint_root);
+ disk_super->discard_root = cpu_to_le64(cmd->discard_root);
+ disk_super->discard_block_size = cpu_to_le64(cmd->discard_block_size);
+ disk_super->discard_nr_blocks = cpu_to_le64(from_dblock(cmd->discard_nr_blocks));
+ disk_super->cache_blocks = cpu_to_le32(from_cblock(cmd->cache_blocks));
+ strncpy(disk_super->policy_name, cmd->policy_name, sizeof(disk_super->policy_name));
+
+ disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+ disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+ disk_super->write_hits = cpu_to_le32(cmd->stats.write_hits);
+ disk_super->write_misses = cpu_to_le32(cmd->stats.write_misses);
+
+ r = dm_sm_copy_root(cmd->metadata_sm, &disk_super->metadata_space_map_root,
+ metadata_len);
+ if (r < 0) {
+ dm_bm_unlock(sblock);
+ return r;
+ }
+
+ return dm_tm_commit(cmd->tm, sblock);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The mappings are held in a dm-array that has 64-bit values stored in
+ * little-endian format. The index is the cblock, the high 48 bits of the
+ * value are the oblock and the low 16 bits the flags.
+ */
+#define FLAGS_MASK ((1 << 16) - 1)
+
+static __le64 pack_value(dm_oblock_t block, unsigned flags)
+{
+ uint64_t value = from_oblock(block);
+ value <<= 16;
+ value = value | (flags & FLAGS_MASK);
+ return cpu_to_le64(value);
+}
+
+static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
+{
+ uint64_t value = le64_to_cpu(value_le);
+ uint64_t b = value >> 16;
+ *block = to_oblock(b);
+ *flags = value & FLAGS_MASK;
+}
+
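
A short worked round-trip of the packing described above (illustrative only): origin block 5 with M_VALID | M_DIRTY packs to 0x50003, and unpack_value() recovers both fields.

	/* Illustrative round-trip of the 48/16-bit packing described above. */
	static void example_pack_roundtrip(void)
	{
		dm_oblock_t ob;
		unsigned flags;
		__le64 v = pack_value(to_oblock(5), M_VALID | M_DIRTY);

		/* le64_to_cpu(v) == 0x50003: oblock in the high bits, flags low */
		unpack_value(v, &ob, &flags);
		BUG_ON(from_oblock(ob) != 5 || flags != (M_VALID | M_DIRTY));
	}
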
+/*----------------------------------------------------------------*/
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ int r;
+ struct dm_cache_metadata *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ DMERR("could not allocate metadata struct");
+ return NULL;
+ }
+
+ init_rwsem(&cmd->root_lock);
+ cmd->bdev = bdev;
+ cmd->data_block_size = data_block_size;
+ cmd->cache_blocks = 0;
+ cmd->policy_hint_size = policy_hint_size;
+ cmd->changed = true;
+
+ r = __create_persistent_data_objects(cmd, may_format_device);
+ if (r) {
+ kfree(cmd);
+ return ERR_PTR(r);
+ }
+
+ r = __begin_transaction_flags(cmd, clear_clean_shutdown);
+ if (r < 0) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(r);
+ }
+
+ return cmd;
+}
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
+{
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+}
+
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
+{
+ int r;
+ __le64 null_mapping = pack_value(0, 0);
+
+ down_write(&cmd->root_lock);
+ __dm_bless_for_disk(&null_mapping);
+ r = dm_array_resize(&cmd->info, cmd->root, from_cblock(cmd->cache_blocks),
+ from_cblock(new_cache_size),
+ &null_mapping, &cmd->root);
+ if (!r)
+ cmd->cache_blocks = new_cache_size;
+ cmd->changed = true;
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ sector_t discard_block_size,
+ dm_dblock_t new_nr_entries)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = dm_bitset_resize(&cmd->discard_info,
+ cmd->discard_root,
+ from_dblock(cmd->discard_nr_blocks),
+ from_dblock(new_nr_entries),
+ false, &cmd->discard_root);
+ if (!r) {
+ cmd->discard_block_size = discard_block_size;
+ cmd->discard_nr_blocks = new_nr_entries;
+ }
+
+ cmd->changed = true;
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __set_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+ return dm_bitset_set_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root);
+}
+
+static int __clear_discard(struct dm_cache_metadata *cmd, dm_dblock_t b)
+{
+ return dm_bitset_clear_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root);
+}
+
+static int __is_discarded(struct dm_cache_metadata *cmd, dm_dblock_t b,
+ bool *is_discarded)
+{
+ return dm_bitset_test_bit(&cmd->discard_info, cmd->discard_root,
+ from_dblock(b), &cmd->discard_root,
+ is_discarded);
+}
+
+static int __discard(struct dm_cache_metadata *cmd,
+ dm_dblock_t dblock, bool discard)
+{
+ int r;
+
+ r = (discard ? __set_discard : __clear_discard)(cmd, dblock);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd,
+ dm_dblock_t dblock, bool discard)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __discard(cmd, dblock, discard);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context)
+{
+ int r = 0;
+ dm_block_t b;
+ bool discard;
+
+ for (b = 0; b < from_dblock(cmd->discard_nr_blocks); b++) {
+ dm_dblock_t dblock = to_dblock(b);
+
+ if (cmd->clean_when_opened) {
+ r = __is_discarded(cmd, dblock, &discard);
+ if (r)
+ return r;
+ } else
+ discard = false;
+
+ r = fn(context, cmd->discard_block_size, dblock, discard);
+ if (r)
+ break;
+ }
+
+ return r;
+}
+
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = __load_discards(cmd, fn, context);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd)
+{
+ dm_cblock_t r;
+
+ down_read(&cmd->root_lock);
+ r = cmd->cache_blocks;
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __remove(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+ int r;
+ __le64 value = pack_value(0, 0);
+
+ __dm_bless_for_disk(&value);
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __remove(cmd, cblock);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int __insert(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ int r;
+ __le64 value = pack_value(oblock, M_VALID);
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+ return 0;
+}
+
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, dm_oblock_t oblock)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __insert(cmd, cblock, oblock);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+struct thunk {
+ load_mapping_fn fn;
+ void *context;
+
+ struct dm_cache_metadata *cmd;
+ bool respect_dirty_flags;
+ bool hints_valid;
+};
+
+static bool hints_array_initialized(struct dm_cache_metadata *cmd)
+{
+ return cmd->hint_root && cmd->policy_hint_size;
+}
+
+static bool hints_array_available(struct dm_cache_metadata *cmd,
+ const char *policy_name)
+{
+ bool policy_names_match = !strncmp(cmd->policy_name, policy_name,
+ sizeof(cmd->policy_name));
+
+ return cmd->clean_when_opened && policy_names_match &&
+ hints_array_initialized(cmd);
+}
+
+static int __load_mapping(void *context, uint64_t cblock, void *leaf)
+{
+ int r = 0;
+ bool dirty;
+ __le64 value;
+ __le32 hint_value = 0;
+ dm_oblock_t oblock;
+ unsigned flags;
+ struct thunk *thunk = context;
+ struct dm_cache_metadata *cmd = thunk->cmd;
+
+ memcpy(&value, leaf, sizeof(value));
+ unpack_value(value, &oblock, &flags);
+
+ if (flags & M_VALID) {
+ if (thunk->hints_valid) {
+ r = dm_array_get_value(&cmd->hint_info, cmd->hint_root,
+ cblock, &hint_value);
+ if (r && r != -ENODATA)
+ return r;
+ }
+
+ dirty = thunk->respect_dirty_flags ? (flags & M_DIRTY) : true;
+ r = thunk->fn(thunk->context, oblock, to_cblock(cblock),
+ dirty, le32_to_cpu(hint_value), thunk->hints_valid);
+ }
+
+ return r;
+}
+
+static int __load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+ load_mapping_fn fn, void *context)
+{
+ struct thunk thunk;
+
+ thunk.fn = fn;
+ thunk.context = context;
+
+ thunk.cmd = cmd;
+ thunk.respect_dirty_flags = cmd->clean_when_opened;
+ thunk.hints_valid = hints_array_available(cmd, policy_name);
+
+ return dm_array_walk(&cmd->info, cmd->root, __load_mapping, &thunk);
+}
+
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd, const char *policy_name,
+ load_mapping_fn fn, void *context)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = __load_mappings(cmd, policy_name, fn, context);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __dump_mapping(void *context, uint64_t cblock, void *leaf)
+{
+ int r = 0;
+ __le64 value;
+ dm_oblock_t oblock;
+ unsigned flags;
+
+ memcpy(&value, leaf, sizeof(value));
+ unpack_value(value, &oblock, &flags);
+
+ return r;
+}
+
+static int __dump_mappings(struct dm_cache_metadata *cmd)
+{
+ return dm_array_walk(&cmd->info, cmd->root, __dump_mapping, NULL);
+}
+
+void dm_cache_dump(struct dm_cache_metadata *cmd)
+{
+ down_read(&cmd->root_lock);
+ __dump_mappings(cmd);
+ up_read(&cmd->root_lock);
+}
+
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd)
+{
+ int r;
+
+ down_read(&cmd->root_lock);
+ r = cmd->changed;
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+static int __dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty)
+{
+ int r;
+ unsigned flags;
+ dm_oblock_t oblock;
+ __le64 value;
+
+ r = dm_array_get_value(&cmd->info, cmd->root, from_cblock(cblock), &value);
+ if (r)
+ return r;
+
+ unpack_value(value, &oblock, &flags);
+
+ if (((flags & M_DIRTY) && dirty) || (!(flags & M_DIRTY) && !dirty))
+ /* nothing to be done */
+ return 0;
+
+ value = pack_value(oblock, flags | (dirty ? M_DIRTY : 0));
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->info, cmd->root, from_cblock(cblock),
+ &value, &cmd->root);
+ if (r)
+ return r;
+
+ cmd->changed = true;
+	return 0;
+}
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, bool dirty)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = __dirty(cmd, cblock, dirty);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats)
+{
+ down_read(&cmd->root_lock);
+ memcpy(stats, &cmd->stats, sizeof(*stats));
+ up_read(&cmd->root_lock);
+}
+
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats)
+{
+ down_write(&cmd->root_lock);
+ memcpy(&cmd->stats, stats, sizeof(*stats));
+ up_write(&cmd->root_lock);
+}
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
+{
+ int r;
+ flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
+ clear_clean_shutdown);
+
+ down_write(&cmd->root_lock);
+ r = __commit_transaction(cmd, mutator);
+ if (r)
+ goto out;
+
+ r = __begin_transaction(cmd);
+
+out:
+ up_write(&cmd->root_lock);
+ return r;
+}
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_read(&cmd->root_lock);
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ dm_block_t *result)
+{
+ int r = -EINVAL;
+
+ down_read(&cmd->root_lock);
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ up_read(&cmd->root_lock);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
+ __le32 value;
+ size_t hint_size;
+ const char *policy_name = dm_cache_policy_get_name(policy);
+
+ if (!policy_name[0] ||
+ (strlen(policy_name) > sizeof(cmd->policy_name) - 1))
+ return -EINVAL;
+
+ if (strcmp(cmd->policy_name, policy_name)) {
+ strncpy(cmd->policy_name, policy_name, sizeof(cmd->policy_name));
+
+ hint_size = dm_cache_policy_get_hint_size(policy);
+ if (!hint_size)
+ return 0; /* short-circuit hints initialization */
+ cmd->policy_hint_size = hint_size;
+
+ if (cmd->hint_root) {
+ r = dm_array_del(&cmd->hint_info, cmd->hint_root);
+ if (r)
+ return r;
+ }
+
+ r = dm_array_empty(&cmd->hint_info, &cmd->hint_root);
+ if (r)
+ return r;
+
+ value = cpu_to_le32(0);
+ __dm_bless_for_disk(&value);
+ r = dm_array_resize(&cmd->hint_info, cmd->hint_root, 0,
+ from_cblock(cmd->cache_blocks),
+ &value, &cmd->hint_root);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy)
+{
+ int r;
+
+ down_write(&cmd->root_lock);
+ r = begin_hints(cmd, policy);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
+
+static int save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+ uint32_t hint)
+{
+ int r;
+ __le32 value = cpu_to_le32(hint);
+ __dm_bless_for_disk(&value);
+
+ r = dm_array_set_value(&cmd->hint_info, cmd->hint_root,
+ from_cblock(cblock), &value, &cmd->hint_root);
+ cmd->changed = true;
+
+ return r;
+}
+
+int dm_cache_save_hint(struct dm_cache_metadata *cmd, dm_cblock_t cblock,
+ uint32_t hint)
+{
+ int r;
+
+ if (!hints_array_initialized(cmd))
+ return 0;
+
+ down_write(&cmd->root_lock);
+ r = save_hint(cmd, cblock, hint);
+ up_write(&cmd->root_lock);
+
+ return r;
+}
diff --git a/drivers/md/dm-cache-metadata.h b/drivers/md/dm-cache-metadata.h
new file mode 100644
index 00000000000..135864ea0ee
--- /dev/null
+++ b/drivers/md/dm-cache-metadata.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_METADATA_H
+#define DM_CACHE_METADATA_H
+
+#include "dm-cache-block-types.h"
+#include "dm-cache-policy-internal.h"
+
+/*----------------------------------------------------------------*/
+
+#define DM_CACHE_METADATA_BLOCK_SIZE 4096
+
+/* FIXME: remove this restriction */
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about 16k metadata blocks.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS (255 * (1 << 14) * (DM_CACHE_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+
+/*
+ * A metadata device larger than 16GB triggers a warning.
+ */
+#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
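+/*
+ * Illustrative arithmetic for the limits above (assuming 512 byte sectors,
+ * i.e. SECTOR_SHIFT == 9): each 4k metadata block covers 8 sectors, so the
+ * maximum works out to 255 * 16384 * 8 = 33423360 sectors, just under the
+ * 16GB warning threshold of 33554432 sectors.
+ */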
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Ext[234]-style compat feature flags.
+ *
+ * A new feature which old metadata will still be compatible with should
+ * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
+ *
+ * A new feature that is not compatible with old code should define a
+ * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
+ * that flag.
+ *
+ * A new feature that is not compatible with old code accessing the
+ * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
+ * guard the relevant code with that flag.
+ *
+ * As these various flags are defined they should be added to the
+ * following masks.
+ */
+#define DM_CACHE_FEATURE_COMPAT_SUPP 0UL
+#define DM_CACHE_FEATURE_COMPAT_RO_SUPP 0UL
+#define DM_CACHE_FEATURE_INCOMPAT_SUPP 0UL
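+/*
+ * A hedged sketch of how such a flag would typically be honoured when
+ * validating the on-disk superblock (purely illustrative, since no flags are
+ * defined yet; "incompat_flags" stands for whatever field the metadata code
+ * reads from disk):
+ *
+ *	if (incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP)
+ *		return -EINVAL;
+ */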
+
+/*
+ * Reopens or creates a new, empty metadata volume.
+ * Returns an ERR_PTR on failure.
+ */
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size);
+
+void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
+
+/*
+ * The metadata needs to know how many cache blocks there are. We don't
+ * care about the origin, assuming the core target is giving us valid
+ * origin blocks to map to.
+ */
+int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
+dm_cblock_t dm_cache_size(struct dm_cache_metadata *cmd);
+
+int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
+ sector_t discard_block_size,
+ dm_dblock_t new_nr_entries);
+
+typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
+ dm_dblock_t dblock, bool discarded);
+int dm_cache_load_discards(struct dm_cache_metadata *cmd,
+ load_discard_fn fn, void *context);
+
+int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
+
+int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
+int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
+int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
+
+typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
+ dm_cblock_t cblock, bool dirty,
+ uint32_t hint, bool hint_valid);
+int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
+ const char *policy_name,
+ load_mapping_fn fn,
+ void *context);
+
+int dm_cache_set_dirty(struct dm_cache_metadata *cmd, dm_cblock_t cblock, bool dirty);
+
+struct dm_cache_statistics {
+ uint32_t read_hits;
+ uint32_t read_misses;
+ uint32_t write_hits;
+ uint32_t write_misses;
+};
+
+void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats);
+void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
+ struct dm_cache_statistics *stats);
+
+int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
+
+int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
+ dm_block_t *result);
+
+int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
+ dm_block_t *result);
+
+void dm_cache_dump(struct dm_cache_metadata *cmd);
+
+/*
+ * The policy is invited to save a 32bit hint value for every cblock (eg,
+ * for a hit count). These are stored against the policy name. If
+ * policies are changed, then hints will be lost. If the machine crashes,
+ * hints will be lost.
+ *
+ * The hints are indexed by cblock, but many policies will not necessarily
+ * have a fast way of accessing them efficiently via cblock. So rather than
+ * querying the policy for each cblock, we let it walk its data
+ * structures and fill in the hints in whatever order it wishes.
+ */
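+/*
+ * A minimal usage sketch (the callback name save_hint_cb is hypothetical; it
+ * would simply forward each cblock/hint pair to dm_cache_save_hint()):
+ *
+ *	r = dm_cache_begin_hints(cmd, policy);
+ *	if (!r)
+ *		r = policy_walk_mappings(policy, save_hint_cb, cmd);
+ */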
+
+int dm_cache_begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
+
+/*
+ * Requests hints for every cblock and stores them in the metadata device.
+ */
+int dm_cache_save_hint(struct dm_cache_metadata *cmd,
+ dm_cblock_t cblock, uint32_t hint);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_METADATA_H */
diff --git a/drivers/md/dm-cache-policy-cleaner.c b/drivers/md/dm-cache-policy-cleaner.c
new file mode 100644
index 00000000000..cc05d70b3cb
--- /dev/null
+++ b/drivers/md/dm-cache-policy-cleaner.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * writeback cache policy supporting flushing out dirty cache blocks.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache cleaner"
+#define CLEANER_VERSION "1.0.0"
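+/*
+ * Intended use (see Documentation/device-mapper/cache-policies.txt): switch a
+ * cache over to this policy so that every dirty block is written back, eg,
+ * before decommissioning the cache.
+ */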
+
+/* Cache entry struct. */
+struct wb_cache_entry {
+ struct list_head list;
+ struct hlist_node hlist;
+
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+ bool dirty:1;
+ bool pending:1;
+};
+
+struct hash {
+ struct hlist_head *table;
+ dm_block_t hash_bits;
+ unsigned nr_buckets;
+};
+
+struct policy {
+ struct dm_cache_policy policy;
+ spinlock_t lock;
+
+ struct list_head free;
+ struct list_head clean;
+ struct list_head clean_pending;
+ struct list_head dirty;
+
+ /*
+ * We know exactly how many cblocks will be needed,
+ * so we can allocate them up front.
+ */
+ dm_cblock_t cache_size, nr_cblocks_allocated;
+ struct wb_cache_entry *cblocks;
+ struct hash chash;
+};
+
+/*----------------------------------------------------------------------------*/
+
+/*
+ * Low-level functions.
+ */
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+static struct policy *to_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct policy, policy);
+}
+
+static struct list_head *list_pop(struct list_head *q)
+{
+ struct list_head *r = q->next;
+
+ list_del(r);
+
+ return r;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Allocate/free various resources. */
+static int alloc_hash(struct hash *hash, unsigned elts)
+{
+ hash->nr_buckets = next_power(elts >> 4, 16);
+ hash->hash_bits = ffs(hash->nr_buckets) - 1;
+ hash->table = vzalloc(sizeof(*hash->table) * hash->nr_buckets);
+
+ return hash->table ? 0 : -ENOMEM;
+}
+
+static void free_hash(struct hash *hash)
+{
+ vfree(hash->table);
+}
+
+static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size)
+{
+ int r = -ENOMEM;
+
+ p->cblocks = vzalloc(sizeof(*p->cblocks) * from_cblock(cache_size));
+ if (p->cblocks) {
+ unsigned u = from_cblock(cache_size);
+
+ while (u--)
+ list_add(&p->cblocks[u].list, &p->free);
+
+ p->nr_cblocks_allocated = 0;
+
+ /* Cache entries hash. */
+ r = alloc_hash(&p->chash, from_cblock(cache_size));
+ if (r)
+ vfree(p->cblocks);
+ }
+
+ return r;
+}
+
+static void free_cache_blocks_and_hash(struct policy *p)
+{
+ free_hash(&p->chash);
+ vfree(p->cblocks);
+}
+
+static struct wb_cache_entry *alloc_cache_entry(struct policy *p)
+{
+ struct wb_cache_entry *e;
+
+ BUG_ON(from_cblock(p->nr_cblocks_allocated) >= from_cblock(p->cache_size));
+
+ e = list_entry(list_pop(&p->free), struct wb_cache_entry, list);
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) + 1);
+
+ return e;
+}
+
+/*----------------------------------------------------------------------------*/
+
+/* Hash functions (lookup, insert, remove). */
+static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock)
+{
+ struct hash *hash = &p->chash;
+ unsigned h = hash_64(from_oblock(oblock), hash->hash_bits);
+ struct wb_cache_entry *cur;
+ struct hlist_head *bucket = &hash->table[h];
+
+ hlist_for_each_entry(cur, bucket, hlist) {
+ if (cur->oblock == oblock) {
+			/* Move to the front of the bucket for faster access. */
+ hlist_del(&cur->hlist);
+ hlist_add_head(&cur->hlist, bucket);
+ return cur;
+ }
+ }
+
+ return NULL;
+}
+
+static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), p->chash.hash_bits);
+
+ hlist_add_head(&e->hlist, &p->chash.table[h]);
+}
+
+static void remove_cache_hash_entry(struct wb_cache_entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+/* Public interface (see dm-cache-policy.h). */
+static int wb_map(struct dm_cache_policy *pe, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+		spin_lock_irqsave(&p->lock, flags);
+	else if (!spin_trylock_irqsave(&p->lock, flags))
+ return -EWOULDBLOCK;
+
+ e = lookup_cache_entry(p, oblock);
+ if (e) {
+ result->op = POLICY_HIT;
+ result->cblock = e->cblock;
+
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return 0;
+}
+
+static int wb_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&p->lock, flags))
+ return -EWOULDBLOCK;
+
+ e = lookup_cache_entry(p, oblock);
+ if (e) {
+ *cblock = e->cblock;
+ r = 0;
+
+ } else
+ r = -ENOENT;
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return r;
+}
+
+static void __set_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock, bool set)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+
+ e = lookup_cache_entry(p, oblock);
+ BUG_ON(!e);
+
+ if (set) {
+ if (!e->dirty) {
+ e->dirty = true;
+ list_move(&e->list, &p->dirty);
+ }
+
+ } else {
+ if (e->dirty) {
+ e->pending = false;
+ e->dirty = false;
+ list_move(&e->list, &p->clean);
+ }
+ }
+}
+
+static void wb_set_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ __set_clear_dirty(pe, oblock, true);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_clear_dirty(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ __set_clear_dirty(pe, oblock, false);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void add_cache_entry(struct policy *p, struct wb_cache_entry *e)
+{
+ insert_cache_hash_entry(p, e);
+ if (e->dirty)
+ list_add(&e->list, &p->dirty);
+ else
+ list_add(&e->list, &p->clean);
+}
+
+static int wb_load_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ int r;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e = alloc_cache_entry(p);
+
+ if (e) {
+ e->cblock = cblock;
+ e->oblock = oblock;
+ e->dirty = false; /* blocks default to clean */
+ add_cache_entry(p, e);
+ r = 0;
+
+ } else
+ r = -ENOMEM;
+
+ return r;
+}
+
+static void wb_destroy(struct dm_cache_policy *pe)
+{
+ struct policy *p = to_policy(pe);
+
+ free_cache_blocks_and_hash(p);
+ kfree(p);
+}
+
+static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock)
+{
+ struct wb_cache_entry *r = lookup_cache_entry(p, oblock);
+
+ BUG_ON(!r);
+
+ remove_cache_hash_entry(r);
+ list_del(&r->list);
+
+ return r;
+}
+
+static void wb_remove_mapping(struct dm_cache_policy *pe, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ e = __wb_force_remove_mapping(p, oblock);
+ list_add_tail(&e->list, &p->free);
+ BUG_ON(!from_cblock(p->nr_cblocks_allocated));
+ p->nr_cblocks_allocated = to_cblock(from_cblock(p->nr_cblocks_allocated) - 1);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static void wb_force_mapping(struct dm_cache_policy *pe,
+ dm_oblock_t current_oblock, dm_oblock_t oblock)
+{
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+ e = __wb_force_remove_mapping(p, current_oblock);
+ e->oblock = oblock;
+ add_cache_entry(p, e);
+ spin_unlock_irqrestore(&p->lock, flags);
+}
+
+static struct wb_cache_entry *get_next_dirty_entry(struct policy *p)
+{
+ struct list_head *l;
+ struct wb_cache_entry *r;
+
+ if (list_empty(&p->dirty))
+ return NULL;
+
+ l = list_pop(&p->dirty);
+ r = container_of(l, struct wb_cache_entry, list);
+ list_add(l, &p->clean_pending);
+
+ return r;
+}
+
+static int wb_writeback_work(struct dm_cache_policy *pe,
+ dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ int r = -ENOENT;
+ struct policy *p = to_policy(pe);
+ struct wb_cache_entry *e;
+ unsigned long flags;
+
+ spin_lock_irqsave(&p->lock, flags);
+
+ e = get_next_dirty_entry(p);
+ if (e) {
+ *oblock = e->oblock;
+ *cblock = e->cblock;
+ r = 0;
+ }
+
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ return r;
+}
+
+static dm_cblock_t wb_residency(struct dm_cache_policy *pe)
+{
+ return to_policy(pe)->nr_cblocks_allocated;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct policy *p)
+{
+ p->policy.destroy = wb_destroy;
+ p->policy.map = wb_map;
+ p->policy.lookup = wb_lookup;
+ p->policy.set_dirty = wb_set_dirty;
+ p->policy.clear_dirty = wb_clear_dirty;
+ p->policy.load_mapping = wb_load_mapping;
+ p->policy.walk_mappings = NULL;
+ p->policy.remove_mapping = wb_remove_mapping;
+ p->policy.writeback_work = wb_writeback_work;
+ p->policy.force_mapping = wb_force_mapping;
+ p->policy.residency = wb_residency;
+ p->policy.tick = NULL;
+}
+
+static struct dm_cache_policy *wb_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ int r;
+ struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL);
+
+ if (!p)
+ return NULL;
+
+ init_policy_functions(p);
+ INIT_LIST_HEAD(&p->free);
+ INIT_LIST_HEAD(&p->clean);
+ INIT_LIST_HEAD(&p->clean_pending);
+ INIT_LIST_HEAD(&p->dirty);
+
+ p->cache_size = cache_size;
+ spin_lock_init(&p->lock);
+
+ /* Allocate cache entry structs and add them to free list. */
+ r = alloc_cache_blocks_with_hash(p, cache_size);
+ if (!r)
+ return &p->policy;
+
+ kfree(p);
+
+ return NULL;
+}
+/*----------------------------------------------------------------------------*/
+
+static struct dm_cache_policy_type wb_policy_type = {
+ .name = "cleaner",
+ .hint_size = 0,
+ .owner = THIS_MODULE,
+ .create = wb_create
+};
+
+static int __init wb_init(void)
+{
+ int r = dm_cache_policy_register(&wb_policy_type);
+
+ if (r < 0)
+ DMERR("register failed %d", r);
+ else
+ DMINFO("version " CLEANER_VERSION " loaded");
+
+ return r;
+}
+
+static void __exit wb_exit(void)
+{
+ dm_cache_policy_unregister(&wb_policy_type);
+}
+
+module_init(wb_init);
+module_exit(wb_exit);
+
+MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("cleaner cache policy");
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
new file mode 100644
index 00000000000..52a75beeced
--- /dev/null
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_INTERNAL_H
+#define DM_CACHE_POLICY_INTERNAL_H
+
+#include "dm-cache-policy.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Little inline functions that simplify calling the policy methods.
+ */
+static inline int policy_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ return p->map(p, oblock, can_block, can_migrate, discarded_oblock, bio, result);
+}
+
+static inline int policy_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ BUG_ON(!p->lookup);
+ return p->lookup(p, oblock, cblock);
+}
+
+static inline void policy_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ if (p->set_dirty)
+ p->set_dirty(p, oblock);
+}
+
+static inline void policy_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ if (p->clear_dirty)
+ p->clear_dirty(p, oblock);
+}
+
+static inline int policy_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ return p->load_mapping(p, oblock, cblock, hint, hint_valid);
+}
+
+static inline int policy_walk_mappings(struct dm_cache_policy *p,
+ policy_walk_fn fn, void *context)
+{
+ return p->walk_mappings ? p->walk_mappings(p, fn, context) : 0;
+}
+
+static inline int policy_writeback_work(struct dm_cache_policy *p,
+ dm_oblock_t *oblock,
+ dm_cblock_t *cblock)
+{
+ return p->writeback_work ? p->writeback_work(p, oblock, cblock) : -ENOENT;
+}
+
+static inline void policy_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ return p->remove_mapping(p, oblock);
+}
+
+static inline void policy_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ return p->force_mapping(p, current_oblock, new_oblock);
+}
+
+static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
+{
+ return p->residency(p);
+}
+
+static inline void policy_tick(struct dm_cache_policy *p)
+{
+ if (p->tick)
+ return p->tick(p);
+}
+
+static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+ if (p->emit_config_values)
+ return p->emit_config_values(p, result, maxlen);
+
+ DMEMIT("0");
+ return 0;
+}
+
+static inline int policy_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ return p->set_config_value ? p->set_config_value(p, key, value) : -EINVAL;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Creates a new cache policy given a policy name, a cache size, an origin size and the block size.
+ */
+struct dm_cache_policy *dm_cache_policy_create(const char *name, dm_cblock_t cache_size,
+ sector_t origin_size, sector_t block_size);
+
+/*
+ * Destroys the policy. This drops references to the policy module as well
+ * as calling its destroy method. So always use this rather than calling
+ * the policy->destroy method directly.
+ */
+void dm_cache_policy_destroy(struct dm_cache_policy *p);
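+/*
+ * Illustrative pairing of the two calls above (the "mq" name and the 128
+ * sector, i.e. 64k, cache block size are only example values):
+ *
+ *	struct dm_cache_policy *p;
+ *
+ *	p = dm_cache_policy_create("mq", cache_size, origin_size, 128);
+ *	if (!p)
+ *		return -ENOMEM;
+ *	...
+ *	dm_cache_policy_destroy(p);
+ */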
+
+/*
+ * In case we've forgotten.
+ */
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_INTERNAL_H */
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
new file mode 100644
index 00000000000..96415325507
--- /dev/null
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -0,0 +1,1195 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy.h"
+#include "dm.h"
+
+#include <linux/hash.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache-policy-mq"
+#define MQ_VERSION "1.0.0"
+
+static struct kmem_cache *mq_entry_cache;
+
+/*----------------------------------------------------------------*/
+
+static unsigned next_power(unsigned n, unsigned min)
+{
+ return roundup_pow_of_two(max(n, min));
+}
+
+/*----------------------------------------------------------------*/
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+ return vzalloc(s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Large, sequential ios are probably better left on the origin device since
+ * spindles tend to have good bandwidth.
+ *
+ * The io_tracker tries to spot when the io is in one of these sequential
+ * modes.
+ *
+ * Two thresholds, which default as follows, control the switch between random
+ * and sequential io modes; they can be adjusted via the constructor and
+ * message interfaces.
+ */
+#define RANDOM_THRESHOLD_DEFAULT 4
+#define SEQUENTIAL_THRESHOLD_DEFAULT 512
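+/*
+ * For example (illustrative only), with the message interface wired up by the
+ * core target, the sequential threshold could be changed at runtime with
+ * something like:
+ *
+ *	dmsetup message <mapped device> 0 sequential_threshold 1024
+ */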
+
+enum io_pattern {
+ PATTERN_SEQUENTIAL,
+ PATTERN_RANDOM
+};
+
+struct io_tracker {
+ enum io_pattern pattern;
+
+ unsigned nr_seq_samples;
+ unsigned nr_rand_samples;
+ unsigned thresholds[2];
+
+ dm_oblock_t last_end_oblock;
+};
+
+static void iot_init(struct io_tracker *t,
+ int sequential_threshold, int random_threshold)
+{
+ t->pattern = PATTERN_RANDOM;
+ t->nr_seq_samples = 0;
+ t->nr_rand_samples = 0;
+ t->last_end_oblock = 0;
+ t->thresholds[PATTERN_RANDOM] = random_threshold;
+ t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
+}
+
+static enum io_pattern iot_pattern(struct io_tracker *t)
+{
+ return t->pattern;
+}
+
+static void iot_update_stats(struct io_tracker *t, struct bio *bio)
+{
+ if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+ t->nr_seq_samples++;
+ else {
+ /*
+ * Just one non-sequential IO is enough to reset the
+ * counters.
+ */
+ if (t->nr_seq_samples) {
+ t->nr_seq_samples = 0;
+ t->nr_rand_samples = 0;
+ }
+
+ t->nr_rand_samples++;
+ }
+
+ t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+}
+
+static void iot_check_for_pattern_switch(struct io_tracker *t)
+{
+ switch (t->pattern) {
+ case PATTERN_SEQUENTIAL:
+ if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
+ t->pattern = PATTERN_RANDOM;
+ t->nr_seq_samples = t->nr_rand_samples = 0;
+ }
+ break;
+
+ case PATTERN_RANDOM:
+ if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
+ t->pattern = PATTERN_SEQUENTIAL;
+ t->nr_seq_samples = t->nr_rand_samples = 0;
+ }
+ break;
+ }
+}
+
+static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
+{
+ iot_update_stats(t, bio);
+ iot_check_for_pattern_switch(t);
+}
+
+/*----------------------------------------------------------------*/
+
+
+/*
+ * This queue is divided up into different levels, allowing us to push
+ * entries to the back of any of the levels. Think of it as a partially
+ * sorted queue.
+ */
+#define NR_QUEUE_LEVELS 16u
+
+struct queue {
+ struct list_head qs[NR_QUEUE_LEVELS];
+};
+
+static void queue_init(struct queue *q)
+{
+ unsigned i;
+
+ for (i = 0; i < NR_QUEUE_LEVELS; i++)
+ INIT_LIST_HEAD(q->qs + i);
+}
+
+/*
+ * Insert an entry to the back of the given level.
+ */
+static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
+{
+ list_add_tail(elt, q->qs + level);
+}
+
+static void queue_remove(struct list_head *elt)
+{
+ list_del(elt);
+}
+
+/*
+ * Shifts all entries down one level. This has no effect on the order of
+ * the queue.
+ */
+static void queue_shift_down(struct queue *q)
+{
+ unsigned level;
+
+ for (level = 1; level < NR_QUEUE_LEVELS; level++)
+ list_splice_init(q->qs + level, q->qs + level - 1);
+}
+
+/*
+ * Gives us the oldest entry of the lowest populated level. If the first
+ * level is emptied then we shift down one level.
+ */
+static struct list_head *queue_pop(struct queue *q)
+{
+ unsigned level;
+ struct list_head *r;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ if (!list_empty(q->qs + level)) {
+ r = q->qs[level].next;
+ list_del(r);
+
+ /* have we just emptied the bottom level? */
+ if (level == 0 && list_empty(q->qs))
+ queue_shift_down(q);
+
+ return r;
+ }
+
+ return NULL;
+}
+
+static struct list_head *list_pop(struct list_head *lh)
+{
+ struct list_head *r = lh->next;
+
+ BUG_ON(!r);
+ list_del_init(r);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Describes a cache entry. Used in both the cache and the pre_cache.
+ */
+struct entry {
+ struct hlist_node hlist;
+ struct list_head list;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock; /* valid iff in_cache */
+
+ /*
+ * FIXME: pack these better
+ */
+ bool in_cache:1;
+ unsigned hit_count;
+ unsigned generation;
+ unsigned tick;
+};
+
+struct mq_policy {
+ struct dm_cache_policy policy;
+
+ /* protects everything */
+ struct mutex lock;
+ dm_cblock_t cache_size;
+ struct io_tracker tracker;
+
+ /*
+ * We maintain two queues of entries. The cache proper contains
+	 * the currently active mappings, whereas the pre_cache tracks
+	 * blocks that are being hit frequently and are potential candidates
+ * for promotion to the cache.
+ */
+ struct queue pre_cache;
+ struct queue cache;
+
+ /*
+ * Keeps track of time, incremented by the core. We use this to
+ * avoid attributing multiple hits within the same tick.
+ *
+ * Access to tick_protected should be done with the spin lock held.
+ * It's copied to tick at the start of the map function (within the
+ * mutex).
+ */
+ spinlock_t tick_lock;
+ unsigned tick_protected;
+ unsigned tick;
+
+ /*
+ * A count of the number of times the map function has been called
+ * and found an entry in the pre_cache or cache. Currently used to
+ * calculate the generation.
+ */
+ unsigned hit_count;
+
+ /*
+ * A generation is a longish period that is used to trigger some
+	 * bookkeeping effects, eg, decrementing hit counts on entries.
+ * This is needed to allow the cache to evolve as io patterns
+ * change.
+ */
+ unsigned generation;
+ unsigned generation_period; /* in lookups (will probably change) */
+
+ /*
+ * Entries in the pre_cache whose hit count passes the promotion
+ * threshold move to the cache proper. Working out the correct
+ * value for the promotion_threshold is crucial to this policy.
+ */
+ unsigned promote_threshold;
+
+ /*
+ * We need cache_size entries for the cache, and choose to have
+ * cache_size entries for the pre_cache too. One motivation for
+ * using the same size is to make the hit counts directly
+ * comparable between pre_cache and cache.
+ */
+ unsigned nr_entries;
+ unsigned nr_entries_allocated;
+ struct list_head free;
+
+ /*
+ * Cache blocks may be unallocated. We store this info in a
+ * bitset.
+ */
+ unsigned long *allocation_bitset;
+ unsigned nr_cblocks_allocated;
+ unsigned find_free_nr_words;
+ unsigned find_free_last_word;
+
+ /*
+ * The hash table allows us to quickly find an entry by origin
+ * block. Both pre_cache and cache entries are in here.
+ */
+ unsigned nr_buckets;
+ dm_block_t hash_bits;
+ struct hlist_head *table;
+};
+
+/*----------------------------------------------------------------*/
+/* Free/alloc mq cache entry structures. */
+static void takeout_queue(struct list_head *lh, struct queue *q)
+{
+ unsigned level;
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_splice(q->qs + level, lh);
+}
+
+static void free_entries(struct mq_policy *mq)
+{
+ struct entry *e, *tmp;
+
+ takeout_queue(&mq->free, &mq->pre_cache);
+ takeout_queue(&mq->free, &mq->cache);
+
+ list_for_each_entry_safe(e, tmp, &mq->free, list)
+ kmem_cache_free(mq_entry_cache, e);
+}
+
+static int alloc_entries(struct mq_policy *mq, unsigned elts)
+{
+ unsigned u = mq->nr_entries;
+
+ INIT_LIST_HEAD(&mq->free);
+ mq->nr_entries_allocated = 0;
+
+ while (u--) {
+ struct entry *e = kmem_cache_zalloc(mq_entry_cache, GFP_KERNEL);
+
+ if (!e) {
+ free_entries(mq);
+ return -ENOMEM;
+ }
+
+
+ list_add(&e->list, &mq->free);
+ }
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Simple hash table implementation. Should replace with the standard hash
+ * table that's making its way upstream.
+ */
+static void hash_insert(struct mq_policy *mq, struct entry *e)
+{
+ unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
+
+ hlist_add_head(&e->hlist, mq->table + h);
+}
+
+static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
+ struct hlist_head *bucket = mq->table + h;
+ struct entry *e;
+
+ hlist_for_each_entry(e, bucket, hlist)
+ if (e->oblock == oblock) {
+ hlist_del(&e->hlist);
+ hlist_add_head(&e->hlist, bucket);
+ return e;
+ }
+
+ return NULL;
+}
+
+static void hash_remove(struct entry *e)
+{
+ hlist_del(&e->hlist);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Allocates a new entry structure. The memory is allocated in one lump,
+ * so we just hand it out here. Returns NULL if all entries have
+ * already been allocated. Cannot fail otherwise.
+ */
+static struct entry *alloc_entry(struct mq_policy *mq)
+{
+ struct entry *e;
+
+ if (mq->nr_entries_allocated >= mq->nr_entries) {
+ BUG_ON(!list_empty(&mq->free));
+ return NULL;
+ }
+
+ e = list_entry(list_pop(&mq->free), struct entry, list);
+ INIT_LIST_HEAD(&e->list);
+ INIT_HLIST_NODE(&e->hlist);
+
+ mq->nr_entries_allocated++;
+ return e;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Mark cache blocks allocated or not in the bitset.
+ */
+static void alloc_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+ BUG_ON(test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+ set_bit(from_cblock(cblock), mq->allocation_bitset);
+ mq->nr_cblocks_allocated++;
+}
+
+static void free_cblock(struct mq_policy *mq, dm_cblock_t cblock)
+{
+ BUG_ON(from_cblock(cblock) > from_cblock(mq->cache_size));
+ BUG_ON(!test_bit(from_cblock(cblock), mq->allocation_bitset));
+
+ clear_bit(from_cblock(cblock), mq->allocation_bitset);
+ mq->nr_cblocks_allocated--;
+}
+
+static bool any_free_cblocks(struct mq_policy *mq)
+{
+ return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
+}
+
+/*
+ * Fills result out with a cache block that isn't in use, or returns
+ * -ENOSPC. This does _not_ mark the cblock as allocated; the caller is
+ * responsible for that.
+ */
+static int __find_free_cblock(struct mq_policy *mq, unsigned begin, unsigned end,
+ dm_cblock_t *result, unsigned *last_word)
+{
+ int r = -ENOSPC;
+ unsigned w;
+
+ for (w = begin; w < end; w++) {
+ /*
+ * ffz is undefined if no zero exists
+ */
+ if (mq->allocation_bitset[w] != ~0UL) {
+ *last_word = w;
+ *result = to_cblock((w * BITS_PER_LONG) + ffz(mq->allocation_bitset[w]));
+ if (from_cblock(*result) < from_cblock(mq->cache_size))
+ r = 0;
+
+ break;
+ }
+ }
+
+ return r;
+}
+
+static int find_free_cblock(struct mq_policy *mq, dm_cblock_t *result)
+{
+ int r;
+
+ if (!any_free_cblocks(mq))
+ return -ENOSPC;
+
+ r = __find_free_cblock(mq, mq->find_free_last_word, mq->find_free_nr_words, result, &mq->find_free_last_word);
+ if (r == -ENOSPC && mq->find_free_last_word)
+ r = __find_free_cblock(mq, 0, mq->find_free_last_word, result, &mq->find_free_last_word);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Now we get to the meat of the policy. This section deals with deciding
+ * when to add entries to the pre_cache and cache, and when to move between
+ * them.
+ */
+
+/*
+ * The queue level is based on the log2 of the hit count.
+ */
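+/*
+ * Worked example: hit counts of 1, 2-3, 4-7, ... map to levels 0, 1, 2, ...,
+ * capping at level 15 (NR_QUEUE_LEVELS - 1) once the count reaches 32768.
+ */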
+static unsigned queue_level(struct entry *e)
+{
+ return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
+}
+
+/*
+ * Inserts the entry into the pre_cache or the cache. Ensures the cache
+ * block is marked as allocated if necessary. Inserts into the hash table. Sets the
+ * tick which records when the entry was last moved about.
+ */
+static void push(struct mq_policy *mq, struct entry *e)
+{
+ e->tick = mq->tick;
+ hash_insert(mq, e);
+
+ if (e->in_cache) {
+ alloc_cblock(mq, e->cblock);
+ queue_push(&mq->cache, queue_level(e), &e->list);
+ } else
+ queue_push(&mq->pre_cache, queue_level(e), &e->list);
+}
+
+/*
+ * Removes an entry from pre_cache or cache. Removes from the hash table.
+ * Frees off the cache block if necessary.
+ */
+static void del(struct mq_policy *mq, struct entry *e)
+{
+ queue_remove(&e->list);
+ hash_remove(e);
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
+}
+
+/*
+ * Like del, except it removes the first entry in the queue (ie. the least
+ * recently used).
+ */
+static struct entry *pop(struct mq_policy *mq, struct queue *q)
+{
+ struct entry *e = container_of(queue_pop(q), struct entry, list);
+
+ if (e) {
+ hash_remove(e);
+
+ if (e->in_cache)
+ free_cblock(mq, e->cblock);
+ }
+
+ return e;
+}
+
+/*
+ * Has this entry already been updated?
+ */
+static bool updated_this_tick(struct mq_policy *mq, struct entry *e)
+{
+ return mq->tick == e->tick;
+}
+
+/*
+ * The promotion threshold is adjusted every generation, as are the hit
+ * counts of the entries.
+ *
+ * At the moment the threshold is taken by averaging the hit counts of some
+ * of the entries in the cache (the first 20 entries found, starting from the
+ * lowest level).
+ *
+ * We can be much cleverer than this though. For example, each promotion
+ * could bump up the threshold helping to prevent churn. Much more to do
+ * here.
+ */
+
+#define MAX_TO_AVERAGE 20
+
+static void check_generation(struct mq_policy *mq)
+{
+ unsigned total = 0, nr = 0, count = 0, level;
+ struct list_head *head;
+ struct entry *e;
+
+ if ((mq->hit_count >= mq->generation_period) &&
+ (mq->nr_cblocks_allocated == from_cblock(mq->cache_size))) {
+
+ mq->hit_count = 0;
+ mq->generation++;
+
+ for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
+ head = mq->cache.qs + level;
+ list_for_each_entry(e, head, list) {
+ nr++;
+ total += e->hit_count;
+
+ if (++count >= MAX_TO_AVERAGE)
+ break;
+ }
+ }
+
+ mq->promote_threshold = nr ? total / nr : 1;
+ if (mq->promote_threshold * nr < total)
+ mq->promote_threshold++;
+ }
+}
+
+/*
+ * Whenever we use an entry we bump up its hit counter, and push it to the
+ * back of its current level.
+ */
+static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e)
+{
+ if (updated_this_tick(mq, e))
+ return;
+
+ e->hit_count++;
+ mq->hit_count++;
+ check_generation(mq);
+
+ /* generation adjustment, to stop the counts increasing forever. */
+ /* FIXME: divide? */
+ /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */
+ e->generation = mq->generation;
+
+ del(mq, e);
+ push(mq, e);
+}
+
+/*
+ * Demote the least recently used entry from the cache to the pre_cache.
+ * Returns the new cache entry to use, and the old origin block it was
+ * mapped to.
+ *
+ * We drop the hit count on the demoted entry back to 1 to stop it bouncing
+ * straight back into the cache if it's subsequently hit. There are
+ * various options here, and more experimentation would be good:
+ *
+ * - just forget about the demoted entry completely (ie. don't insert it
+ *   into the pre_cache).
+ * - divide the hit count rather than setting it to some hard coded value.
+ * - set the hit count to a hard coded value other than 1, eg, is it better
+ * if it goes in at level 2?
+ */
+static dm_cblock_t demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
+{
+ dm_cblock_t result;
+ struct entry *demoted = pop(mq, &mq->cache);
+
+ BUG_ON(!demoted);
+ result = demoted->cblock;
+ *oblock = demoted->oblock;
+ demoted->in_cache = false;
+ demoted->hit_count = 1;
+ push(mq, demoted);
+
+ return result;
+}
+
+/*
+ * We modify the basic promotion_threshold depending on the specific io.
+ *
+ * If the origin block has been discarded then there's no cost to copy it
+ * to the cache.
+ *
+ * We bias towards reads, since they can be demoted at no cost if they
+ * haven't been dirtied.
+ */
+#define DISCARDED_PROMOTE_THRESHOLD 1
+#define READ_PROMOTE_THRESHOLD 4
+#define WRITE_PROMOTE_THRESHOLD 8
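+/*
+ * Worked example (illustrative of the mechanism, not tuning advice): with
+ * mq->promote_threshold at 4, a read of a non-discarded block promotes once
+ * its hit count reaches 8, a write once it reaches 12, while a write to a
+ * discarded block promotes after a single hit when there is free cache space.
+ */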
+
+static unsigned adjusted_promote_threshold(struct mq_policy *mq,
+ bool discarded_oblock, int data_dir)
+{
+ if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+ /*
+ * We don't need to do any copying at all, so give this a
+ * very low threshold. In practice this only triggers
+ * during initial population after a format.
+ */
+ return DISCARDED_PROMOTE_THRESHOLD;
+
+ return data_dir == READ ?
+ (mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
+ (mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+}
+
+static bool should_promote(struct mq_policy *mq, struct entry *e,
+ bool discarded_oblock, int data_dir)
+{
+ return e->hit_count >=
+ adjusted_promote_threshold(mq, discarded_oblock, data_dir);
+}
+
+static int cache_entry_found(struct mq_policy *mq,
+ struct entry *e,
+ struct policy_result *result)
+{
+ requeue_and_update_tick(mq, e);
+
+ if (e->in_cache) {
+ result->op = POLICY_HIT;
+ result->cblock = e->cblock;
+ }
+
+ return 0;
+}
+
+/*
+ * Moves an entry from the pre_cache to the cache. The main work is
+ * finding which cache block to use.
+ */
+static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
+ struct policy_result *result)
+{
+ dm_cblock_t cblock;
+
+ if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ result->op = POLICY_REPLACE;
+ cblock = demote_cblock(mq, &result->old_oblock);
+ } else
+ result->op = POLICY_NEW;
+
+ result->cblock = e->cblock = cblock;
+
+ del(mq, e);
+ e->in_cache = true;
+ push(mq, e);
+
+ return 0;
+}
+
+static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ int r = 0;
+ bool updated = updated_this_tick(mq, e);
+
+ requeue_and_update_tick(mq, e);
+
+ if ((!discarded_oblock && updated) ||
+ !should_promote(mq, e, discarded_oblock, data_dir))
+ result->op = POLICY_MISS;
+ else if (!can_migrate)
+ r = -EWOULDBLOCK;
+ else
+ r = pre_cache_to_cache(mq, e, result);
+
+ return r;
+}
+
+static void insert_in_pre_cache(struct mq_policy *mq,
+ dm_oblock_t oblock)
+{
+ struct entry *e = alloc_entry(mq);
+
+ if (!e)
+ /*
+ * There's no spare entry structure, so we grab the least
+ * used one from the pre_cache.
+ */
+ e = pop(mq, &mq->pre_cache);
+
+ if (unlikely(!e)) {
+ DMWARN("couldn't pop from pre cache");
+ return;
+ }
+
+ e->in_cache = false;
+ e->oblock = oblock;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+}
+
+static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
+ struct policy_result *result)
+{
+ struct entry *e;
+ dm_cblock_t cblock;
+
+ if (find_free_cblock(mq, &cblock) == -ENOSPC) {
+ result->op = POLICY_MISS;
+ insert_in_pre_cache(mq, oblock);
+ return;
+ }
+
+ e = alloc_entry(mq);
+ if (unlikely(!e)) {
+ result->op = POLICY_MISS;
+ return;
+ }
+
+ e->oblock = oblock;
+ e->cblock = cblock;
+ e->in_cache = true;
+ e->hit_count = 1;
+ e->generation = mq->generation;
+ push(mq, e);
+
+ result->op = POLICY_NEW;
+ result->cblock = e->cblock;
+}
+
+static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) {
+ if (can_migrate)
+ insert_in_cache(mq, oblock, result);
+ else
+ return -EWOULDBLOCK;
+ } else {
+ insert_in_pre_cache(mq, oblock);
+ result->op = POLICY_MISS;
+ }
+
+ return 0;
+}
+
+/*
+ * Looks the oblock up in the hash table, then decides whether to put it in
+ * the pre_cache, the cache, etc.
+ */
+static int map(struct mq_policy *mq, dm_oblock_t oblock,
+ bool can_migrate, bool discarded_oblock,
+ int data_dir, struct policy_result *result)
+{
+ int r = 0;
+ struct entry *e = hash_lookup(mq, oblock);
+
+ if (e && e->in_cache)
+ r = cache_entry_found(mq, e, result);
+ else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
+ result->op = POLICY_MISS;
+ else if (e)
+ r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
+ data_dir, result);
+ else
+ r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
+ data_dir, result);
+
+ if (r == -EWOULDBLOCK)
+ result->op = POLICY_MISS;
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Public interface, via the policy struct. See dm-cache-policy.h for a
+ * description of these.
+ */
+
+static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
+{
+ return container_of(p, struct mq_policy, policy);
+}
+
+static void mq_destroy(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ free_bitset(mq->allocation_bitset);
+ kfree(mq->table);
+ free_entries(mq);
+ kfree(mq);
+}
+
+static void copy_tick(struct mq_policy *mq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->tick_lock, flags);
+ mq->tick = mq->tick_protected;
+ spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ result->op = POLICY_MISS;
+
+ if (can_block)
+ mutex_lock(&mq->lock);
+ else if (!mutex_trylock(&mq->lock))
+ return -EWOULDBLOCK;
+
+ copy_tick(mq);
+
+ iot_examine_bio(&mq->tracker, bio);
+ r = map(mq, oblock, can_migrate, discarded_oblock,
+ bio_data_dir(bio), result);
+
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
+{
+ int r;
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ if (!mutex_trylock(&mq->lock))
+ return -EWOULDBLOCK;
+
+ e = hash_lookup(mq, oblock);
+ if (e && e->in_cache) {
+ *cblock = e->cblock;
+ r = 0;
+ } else
+ r = -ENOENT;
+
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static int mq_load_mapping(struct dm_cache_policy *p,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ uint32_t hint, bool hint_valid)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ e = alloc_entry(mq);
+ if (!e)
+ return -ENOMEM;
+
+ e->cblock = cblock;
+ e->oblock = oblock;
+ e->in_cache = true;
+ e->hit_count = hint_valid ? hint : 1;
+ e->generation = mq->generation;
+ push(mq, e);
+
+ return 0;
+}
+
+static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ int r = 0;
+ struct entry *e;
+ unsigned level;
+
+ mutex_lock(&mq->lock);
+
+ for (level = 0; level < NR_QUEUE_LEVELS; level++)
+ list_for_each_entry(e, &mq->cache.qs[level], list) {
+ r = fn(context, e->cblock, e->oblock, e->hit_count);
+ if (r)
+ goto out;
+ }
+
+out:
+ mutex_unlock(&mq->lock);
+
+ return r;
+}
+
+static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+{
+ struct entry *e = hash_lookup(mq, oblock);
+
+ BUG_ON(!e || !e->in_cache);
+
+ del(mq, e);
+ e->in_cache = false;
+ push(mq, e);
+}
+
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ remove_mapping(mq, oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static void force_mapping(struct mq_policy *mq,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct entry *e = hash_lookup(mq, current_oblock);
+
+ BUG_ON(!e || !e->in_cache);
+
+ del(mq, e);
+ e->oblock = new_oblock;
+ push(mq, e);
+}
+
+static void mq_force_mapping(struct dm_cache_policy *p,
+ dm_oblock_t current_oblock, dm_oblock_t new_oblock)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ mutex_lock(&mq->lock);
+ force_mapping(mq, current_oblock, new_oblock);
+ mutex_unlock(&mq->lock);
+}
+
+static dm_cblock_t mq_residency(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+
+ /* FIXME: lock mutex, not sure we can block here */
+ return to_cblock(mq->nr_cblocks_allocated);
+}
+
+static void mq_tick(struct dm_cache_policy *p)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mq->tick_lock, flags);
+ mq->tick_protected++;
+ spin_unlock_irqrestore(&mq->tick_lock, flags);
+}
+
+static int mq_set_config_value(struct dm_cache_policy *p,
+ const char *key, const char *value)
+{
+ struct mq_policy *mq = to_mq_policy(p);
+ enum io_pattern pattern;
+ unsigned long tmp;
+
+ if (!strcasecmp(key, "random_threshold"))
+ pattern = PATTERN_RANDOM;
+ else if (!strcasecmp(key, "sequential_threshold"))
+ pattern = PATTERN_SEQUENTIAL;
+ else
+ return -EINVAL;
+
+ if (kstrtoul(value, 10, &tmp))
+ return -EINVAL;
+
+ mq->tracker.thresholds[pattern] = tmp;
+
+ return 0;
+}
+
+static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
+{
+ ssize_t sz = 0;
+ struct mq_policy *mq = to_mq_policy(p);
+
+ DMEMIT("4 random_threshold %u sequential_threshold %u",
+ mq->tracker.thresholds[PATTERN_RANDOM],
+ mq->tracker.thresholds[PATTERN_SEQUENTIAL]);
+
+ return 0;
+}
+
+/* Init the policy plugin interface function pointers. */
+static void init_policy_functions(struct mq_policy *mq)
+{
+ mq->policy.destroy = mq_destroy;
+ mq->policy.map = mq_map;
+ mq->policy.lookup = mq_lookup;
+ mq->policy.load_mapping = mq_load_mapping;
+ mq->policy.walk_mappings = mq_walk_mappings;
+ mq->policy.remove_mapping = mq_remove_mapping;
+ mq->policy.writeback_work = NULL;
+ mq->policy.force_mapping = mq_force_mapping;
+ mq->policy.residency = mq_residency;
+ mq->policy.tick = mq_tick;
+ mq->policy.emit_config_values = mq_emit_config_values;
+ mq->policy.set_config_value = mq_set_config_value;
+}
+
+static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ int r;
+ struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+
+ if (!mq)
+ return NULL;
+
+ init_policy_functions(mq);
+ iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
+
+ mq->cache_size = cache_size;
+ mq->tick_protected = 0;
+ mq->tick = 0;
+ mq->hit_count = 0;
+ mq->generation = 0;
+ mq->promote_threshold = 0;
+ mutex_init(&mq->lock);
+ spin_lock_init(&mq->tick_lock);
+ mq->find_free_nr_words = dm_div_up(from_cblock(mq->cache_size), BITS_PER_LONG);
+ mq->find_free_last_word = 0;
+
+ queue_init(&mq->pre_cache);
+ queue_init(&mq->cache);
+ mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);
+
+ mq->nr_entries = 2 * from_cblock(cache_size);
+ r = alloc_entries(mq, mq->nr_entries);
+ if (r)
+ goto bad_cache_alloc;
+
+ mq->nr_entries_allocated = 0;
+ mq->nr_cblocks_allocated = 0;
+
+ mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
+ mq->hash_bits = ffs(mq->nr_buckets) - 1;
+ mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ if (!mq->table)
+ goto bad_alloc_table;
+
+ mq->allocation_bitset = alloc_bitset(from_cblock(cache_size));
+ if (!mq->allocation_bitset)
+ goto bad_alloc_bitset;
+
+ return &mq->policy;
+
+bad_alloc_bitset:
+ kfree(mq->table);
+bad_alloc_table:
+ free_entries(mq);
+bad_cache_alloc:
+ kfree(mq);
+
+ return NULL;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_cache_policy_type mq_policy_type = {
+ .name = "mq",
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create
+};
+
+static struct dm_cache_policy_type default_policy_type = {
+ .name = "default",
+ .hint_size = 4,
+ .owner = THIS_MODULE,
+ .create = mq_create
+};
+
+static int __init mq_init(void)
+{
+ int r;
+
+ mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
+ sizeof(struct entry),
+ __alignof__(struct entry),
+ 0, NULL);
+ if (!mq_entry_cache)
+ goto bad;
+
+ r = dm_cache_policy_register(&mq_policy_type);
+ if (r) {
+ DMERR("register failed %d", r);
+ goto bad_register_mq;
+ }
+
+ r = dm_cache_policy_register(&default_policy_type);
+ if (!r) {
+ DMINFO("version " MQ_VERSION " loaded");
+ return 0;
+ }
+
+ DMERR("register failed (as default) %d", r);
+
+ dm_cache_policy_unregister(&mq_policy_type);
+bad_register_mq:
+ kmem_cache_destroy(mq_entry_cache);
+bad:
+ return -ENOMEM;
+}
+
+static void __exit mq_exit(void)
+{
+ dm_cache_policy_unregister(&mq_policy_type);
+ dm_cache_policy_unregister(&default_policy_type);
+
+ kmem_cache_destroy(mq_entry_cache);
+}
+
+module_init(mq_init);
+module_exit(mq_exit);
+
+MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("mq cache policy");
+
+MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy.c b/drivers/md/dm-cache-policy.c
new file mode 100644
index 00000000000..2cbf5fdaac5
--- /dev/null
+++ b/drivers/md/dm-cache-policy.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-cache-policy-internal.h"
+#include "dm.h"
+
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/*----------------------------------------------------------------*/
+
+#define DM_MSG_PREFIX "cache-policy"
+
+static DEFINE_SPINLOCK(register_lock);
+static LIST_HEAD(register_list);
+
+static struct dm_cache_policy_type *__find_policy(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ list_for_each_entry(t, &register_list, list)
+ if (!strcmp(t->name, name))
+ return t;
+
+ return NULL;
+}
+
+static struct dm_cache_policy_type *__get_policy_once(const char *name)
+{
+ struct dm_cache_policy_type *t = __find_policy(name);
+
+ if (t && !try_module_get(t->owner)) {
+ DMWARN("couldn't get module %s", name);
+ t = ERR_PTR(-EINVAL);
+ }
+
+ return t;
+}
+
+static struct dm_cache_policy_type *get_policy_once(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ spin_lock(&register_lock);
+ t = __get_policy_once(name);
+ spin_unlock(&register_lock);
+
+ return t;
+}
+
+static struct dm_cache_policy_type *get_policy(const char *name)
+{
+ struct dm_cache_policy_type *t;
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
+ if (t)
+ return t;
+
+ request_module("dm-cache-%s", name);
+
+ t = get_policy_once(name);
+ if (IS_ERR(t))
+ return NULL;
+
+ return t;
+}
+
+static void put_policy(struct dm_cache_policy_type *t)
+{
+ module_put(t->owner);
+}
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type)
+{
+ int r;
+
+ /* One size fits all for now */
+ if (type->hint_size != 0 && type->hint_size != 4) {
+ DMWARN("hint size must be 0 or 4 but %llu supplied.", (unsigned long long) type->hint_size);
+ return -EINVAL;
+ }
+
+ spin_lock(&register_lock);
+ if (__find_policy(type->name)) {
+ DMWARN("attempt to register policy under duplicate name %s", type->name);
+ r = -EINVAL;
+ } else {
+ list_add(&type->list, &register_list);
+ r = 0;
+ }
+ spin_unlock(&register_lock);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_register);
+
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type)
+{
+ spin_lock(&register_lock);
+ list_del_init(&type->list);
+ spin_unlock(&register_lock);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_unregister);
+
+struct dm_cache_policy *dm_cache_policy_create(const char *name,
+ dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t cache_block_size)
+{
+ struct dm_cache_policy *p = NULL;
+ struct dm_cache_policy_type *type;
+
+ type = get_policy(name);
+ if (!type) {
+ DMWARN("unknown policy type");
+ return NULL;
+ }
+
+ p = type->create(cache_size, origin_size, cache_block_size);
+ if (!p) {
+ put_policy(type);
+ return NULL;
+ }
+ p->private = type;
+
+ return p;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_create);
+
+void dm_cache_policy_destroy(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ p->destroy(p);
+ put_policy(t);
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_destroy);
+
+const char *dm_cache_policy_get_name(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->name;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_name);
+
+size_t dm_cache_policy_get_hint_size(struct dm_cache_policy *p)
+{
+ struct dm_cache_policy_type *t = p->private;
+
+ return t->hint_size;
+}
+EXPORT_SYMBOL_GPL(dm_cache_policy_get_hint_size);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
new file mode 100644
index 00000000000..f0f51b26054
--- /dev/null
+++ b/drivers/md/dm-cache-policy.h
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#ifndef DM_CACHE_POLICY_H
+#define DM_CACHE_POLICY_H
+
+#include "dm-cache-block-types.h"
+
+#include <linux/device-mapper.h>
+
+/*----------------------------------------------------------------*/
+
+/*
+ * FIXME: make it clear which methods are optional. Get debug policy to
+ * double check this at start.
+ */
+
+/*
+ * The cache policy makes the important decisions about which blocks get to
+ * live on the faster cache device.
+ *
+ * When the core target has to remap a bio it calls the 'map' method of the
+ * policy. This returns an instruction telling the core target what to do.
+ *
+ * POLICY_HIT:
+ * That block is in the cache. Remap to the cache and carry on.
+ *
+ * POLICY_MISS:
+ * This block is on the origin device. Remap and carry on.
+ *
+ * POLICY_NEW:
+ * This block is currently on the origin device, but the policy wants to
+ * move it. The core should:
+ *
+ * - hold any further io to this origin block
+ * - copy the origin to the given cache block
+ * - release all the held blocks
+ * - remap the original block to the cache
+ *
+ * POLICY_REPLACE:
+ * This block is currently on the origin device. The policy wants to
+ * move it to the cache, with the added complication that the destination
+ * cache block needs a writeback first. The core should:
+ *
+ * - hold any further io to this origin block
+ * - hold any further io to the origin block that's being written back
+ * - writeback
+ * - copy new block to cache
+ * - release held blocks
+ * - remap bio to cache and reissue.
+ *
+ * Should the core run into trouble while processing a POLICY_NEW or
+ * POLICY_REPLACE instruction, it will roll back the policy's mapping using
+ * remove_mapping() or force_mapping(). These methods must not fail. This
+ * approach avoids having transactional semantics in the policy (ie, the
+ * core informing the policy when a migration is complete), and hence makes
+ * it easier to write new policies.
+ *
+ * In general policy methods should never block, except in the case of the
+ * map function when can_migrate is set. So be careful to implement using
+ * bounded, preallocated memory.
+ */
+enum policy_operation {
+ POLICY_HIT,
+ POLICY_MISS,
+ POLICY_NEW,
+ POLICY_REPLACE
+};
+
+/*
+ * This is the instruction passed back to the core target.
+ */
+struct policy_result {
+ enum policy_operation op;
+ dm_oblock_t old_oblock; /* POLICY_REPLACE */
+ dm_cblock_t cblock; /* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
+};
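For orientation, a minimal sketch (editorial, not part of the patch) of how a caller could act on the policy_result that map() fills in; the remap/copy steps are only named in comments, and the handling of -EWOULDBLOCK mirrors what process_bio() does later in this patch:

	struct policy_result res;
	int r = p->map(p, oblock, true, can_migrate, discarded, bio, &res);

	if (r == -EWOULDBLOCK)
		res.op = POLICY_MISS;	/* migration denied: treat as a plain miss */

	switch (res.op) {
	case POLICY_HIT:	/* remap bio to res.cblock on the cache device */
		break;
	case POLICY_MISS:	/* remap bio to the origin device */
		break;
	case POLICY_NEW:	/* copy oblock to res.cblock, then remap */
		break;
	case POLICY_REPLACE:	/* write back res.old_oblock first, then proceed as for POLICY_NEW */
		break;
	}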
+
+typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
+ dm_oblock_t oblock, uint32_t hint);
+
+/*
+ * The cache policy object. Just a bunch of methods. It is envisaged that
+ * this structure will be embedded in a bigger, policy specific structure
+ * (ie. use container_of()).
+ */
+struct dm_cache_policy {
+
+ /*
+ * FIXME: make it clear which methods are optional, and which may
+ * block.
+ */
+
+ /*
+ * Destroys this object.
+ */
+ void (*destroy)(struct dm_cache_policy *p);
+
+ /*
+ * See large comment above.
+ *
+ * oblock - the origin block we're interested in.
+ *
+ * can_block - indicates whether the current thread is allowed to
+ * block. -EWOULDBLOCK returned if it can't and would.
+ *
+ * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
+ * instructions. If denied and the policy would have
+ * returned one of these instructions it should
+ * return -EWOULDBLOCK.
+ *
+ * discarded_oblock - indicates whether the whole origin block is
+ * in a discarded state (FIXME: better to tell the
+ * policy about this sooner, so it can recycle that
+ * cache block if it wants.)
+ * bio - the bio that triggered this call.
+ * result - gets filled in with the instruction.
+ *
+ * May only return 0, or -EWOULDBLOCK (if !can_migrate)
+ */
+ int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ bool can_block, bool can_migrate, bool discarded_oblock,
+ struct bio *bio, struct policy_result *result);
+
+ /*
+ * Sometimes we want to see if a block is in the cache, without
+ * triggering any update of stats. (ie. it's not a real hit).
+ *
+ * Must not block.
+ *
+ * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK
+ * would be typical).
+ */
+ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);
+
+ /*
+ * oblock must be a mapped block. Must not block.
+ */
+ void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
+
+ /*
+ * Called when a cache target is first created. Used to load a
+ * mapping from the metadata device into the policy.
+ */
+ int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
+ dm_cblock_t cblock, uint32_t hint, bool hint_valid);
+
+ int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
+ void *context);
+
+ /*
+ * Override functions used on the error paths of the core target.
+ * They must succeed.
+ */
+ void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
+ void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
+ dm_oblock_t new_oblock);
+
+ int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);
+
+ /*
+ * How full is the cache?
+ */
+ dm_cblock_t (*residency)(struct dm_cache_policy *p);
+
+ /*
+ * Because of where we sit in the block layer, we can be asked to
+ * map a lot of little bios that are all in the same block (no
+ * queue merging has occurred). To stop the policy being fooled by
+ * these, the core target sends regular tick() calls to the policy.
+ * The policy should only count an entry as hit once per tick.
+ */
+ void (*tick)(struct dm_cache_policy *p);
+
+ /*
+ * Configuration.
+ */
+ int (*emit_config_values)(struct dm_cache_policy *p,
+ char *result, unsigned maxlen);
+ int (*set_config_value)(struct dm_cache_policy *p,
+ const char *key, const char *value);
+
+ /*
+ * Book keeping ptr for the policy register, not for general use.
+ */
+ void *private;
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * We maintain a little register of the different policy types.
+ */
+#define CACHE_POLICY_NAME_SIZE 16
+
+struct dm_cache_policy_type {
+ /* For use by the register code only. */
+ struct list_head list;
+
+ /*
+ * Policy writers should fill in these fields. The name field is
+ * what gets passed on the target line to select your policy.
+ */
+ char name[CACHE_POLICY_NAME_SIZE];
+
+ /*
+ * Policies may store a hint for each cache block.
+ * Currently the size of this hint must be 0 or 4 bytes but we
+ * expect to relax this in future.
+ */
+ size_t hint_size;
+
+ struct module *owner;
+ struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
+ sector_t origin_size,
+ sector_t block_size);
+};
+
+int dm_cache_policy_register(struct dm_cache_policy_type *type);
+void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
+
+/*----------------------------------------------------------------*/
+
+#endif /* DM_CACHE_POLICY_H */
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
new file mode 100644
index 00000000000..0f4e84b15c3
--- /dev/null
+++ b/drivers/md/dm-cache-target.c
@@ -0,0 +1,2584 @@
+/*
+ * Copyright (C) 2012 Red Hat. All rights reserved.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-bio-prison.h"
+#include "dm-cache-metadata.h"
+
+#include <linux/dm-io.h>
+#include <linux/dm-kcopyd.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#define DM_MSG_PREFIX "cache"
+
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
+ "A percentage of time allocated for copying to and/or from cache");
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Glossary:
+ *
+ * oblock: index of an origin block
+ * cblock: index of a cache block
+ * promotion: movement of a block from origin to cache
+ * demotion: movement of a block from cache to origin
+ * migration: movement of a block between the origin and cache device,
+ * either direction
+ */
+
+/*----------------------------------------------------------------*/
+
+static size_t bitset_size_in_bytes(unsigned nr_entries)
+{
+ return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
+}
+
+static unsigned long *alloc_bitset(unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ return vzalloc(s);
+}
+
+static void clear_bitset(void *bitset, unsigned nr_entries)
+{
+ size_t s = bitset_size_in_bytes(nr_entries);
+ memset(bitset, 0, s);
+}
+
+static void free_bitset(unsigned long *bits)
+{
+ vfree(bits);
+}
+
+/*----------------------------------------------------------------*/
+
+#define PRISON_CELLS 1024
+#define MIGRATION_POOL_SIZE 128
+#define COMMIT_PERIOD HZ
+#define MIGRATION_COUNT_WINDOW 10
+
+/*
+ * The block size of the device holding cache data must be >= 32KB
+ */
+#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
+
+/*
+ * FIXME: the cache is read/write for the time being.
+ */
+enum cache_mode {
+ CM_WRITE, /* metadata may be changed */
+ CM_READ_ONLY, /* metadata may not be changed */
+};
+
+struct cache_features {
+ enum cache_mode mode;
+ bool write_through:1;
+};
+
+struct cache_stats {
+ atomic_t read_hit;
+ atomic_t read_miss;
+ atomic_t write_hit;
+ atomic_t write_miss;
+ atomic_t demotion;
+ atomic_t promotion;
+ atomic_t copies_avoided;
+ atomic_t cache_cell_clash;
+ atomic_t commit_count;
+ atomic_t discard_count;
+};
+
+struct cache {
+ struct dm_target *ti;
+ struct dm_target_callbacks callbacks;
+
+ /*
+ * Metadata is written to this device.
+ */
+ struct dm_dev *metadata_dev;
+
+ /*
+ * The slower of the two data devices. Typically a spindle.
+ */
+ struct dm_dev *origin_dev;
+
+ /*
+ * The faster of the two data devices. Typically an SSD.
+ */
+ struct dm_dev *cache_dev;
+
+ /*
+ * Cache features such as write-through.
+ */
+ struct cache_features features;
+
+ /*
+ * Size of the origin device in _complete_ blocks and native sectors.
+ */
+ dm_oblock_t origin_blocks;
+ sector_t origin_sectors;
+
+ /*
+ * Size of the cache device in blocks.
+ */
+ dm_cblock_t cache_size;
+
+ /*
+ * Fields for converting from sectors to blocks.
+ */
+ uint32_t sectors_per_block;
+ int sectors_per_block_shift;
+
+ struct dm_cache_metadata *cmd;
+
+ spinlock_t lock;
+ struct bio_list deferred_bios;
+ struct bio_list deferred_flush_bios;
+ struct list_head quiesced_migrations;
+ struct list_head completed_migrations;
+ struct list_head need_commit_migrations;
+ sector_t migration_threshold;
+ atomic_t nr_migrations;
+ wait_queue_head_t migration_wait;
+
+ /*
+ * cache_size entries, dirty if set
+ */
+ dm_cblock_t nr_dirty;
+ unsigned long *dirty_bitset;
+
+ /*
+ * origin_blocks entries, discarded if set.
+ */
+ sector_t discard_block_size; /* a power of 2 times sectors per block */
+ dm_dblock_t discard_nr_blocks;
+ unsigned long *discard_bitset;
+
+ struct dm_kcopyd_client *copier;
+ struct workqueue_struct *wq;
+ struct work_struct worker;
+
+ struct delayed_work waker;
+ unsigned long last_commit_jiffies;
+
+ struct dm_bio_prison *prison;
+ struct dm_deferred_set *all_io_ds;
+
+ mempool_t *migration_pool;
+ struct dm_cache_migration *next_migration;
+
+ struct dm_cache_policy *policy;
+ unsigned policy_nr_args;
+
+ bool need_tick_bio:1;
+ bool sized:1;
+ bool quiescing:1;
+ bool commit_requested:1;
+ bool loaded_mappings:1;
+ bool loaded_discards:1;
+
+ struct cache_stats stats;
+
+ /*
+ * Rather than reconstructing the table line for the status we just
+ * save it and regurgitate.
+ */
+ unsigned nr_ctr_args;
+ const char **ctr_args;
+};
+
+struct per_bio_data {
+ bool tick:1;
+ unsigned req_nr:2;
+ struct dm_deferred_entry *all_io_entry;
+};
+
+struct dm_cache_migration {
+ struct list_head list;
+ struct cache *cache;
+
+ unsigned long start_jiffies;
+ dm_oblock_t old_oblock;
+ dm_oblock_t new_oblock;
+ dm_cblock_t cblock;
+
+ bool err:1;
+ bool writeback:1;
+ bool demote:1;
+ bool promote:1;
+
+ struct dm_bio_prison_cell *old_ocell;
+ struct dm_bio_prison_cell *new_ocell;
+};
+
+/*
+ * Processing a bio in the worker thread may require these memory
+ * allocations. We prealloc to avoid deadlocks (the same worker thread
+ * frees them back to the mempool).
+ */
+struct prealloc {
+ struct dm_cache_migration *mg;
+ struct dm_bio_prison_cell *cell1;
+ struct dm_bio_prison_cell *cell2;
+};
+
+static void wake_worker(struct cache *cache)
+{
+ queue_work(cache->wq, &cache->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
+{
+ /* FIXME: change to use a local slab. */
+ return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
+}
+
+static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
+{
+ dm_bio_prison_free_cell(cache->prison, cell);
+}
+
+static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
+{
+ if (!p->mg) {
+ p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (!p->mg)
+ return -ENOMEM;
+ }
+
+ if (!p->cell1) {
+ p->cell1 = alloc_prison_cell(cache);
+ if (!p->cell1)
+ return -ENOMEM;
+ }
+
+ if (!p->cell2) {
+ p->cell2 = alloc_prison_cell(cache);
+ if (!p->cell2)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
+{
+ if (p->cell2)
+ free_prison_cell(cache, p->cell2);
+
+ if (p->cell1)
+ free_prison_cell(cache, p->cell1);
+
+ if (p->mg)
+ mempool_free(p->mg, cache->migration_pool);
+}
+
+static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
+{
+ struct dm_cache_migration *mg = p->mg;
+
+ BUG_ON(!mg);
+ p->mg = NULL;
+
+ return mg;
+}
+
+/*
+ * You must have a cell within the prealloc struct to return. If not, this
+ * function will BUG() rather than returning NULL.
+ */
+static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
+{
+ struct dm_bio_prison_cell *r = NULL;
+
+ if (p->cell1) {
+ r = p->cell1;
+ p->cell1 = NULL;
+
+ } else if (p->cell2) {
+ r = p->cell2;
+ p->cell2 = NULL;
+ } else
+ BUG();
+
+ return r;
+}
+
+/*
+ * You can't have more than two cells in a prealloc struct. BUG() will be
+ * called if you try and overfill.
+ */
+static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
+{
+ if (!p->cell2)
+ p->cell2 = cell;
+
+ else if (!p->cell1)
+ p->cell1 = cell;
+
+ else
+ BUG();
+}
+
+/*----------------------------------------------------------------*/
+
+static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
+{
+ key->virtual = 0;
+ key->dev = 0;
+ key->block = from_oblock(oblock);
+}
+
+/*
+ * The caller hands in a preallocated cell, and a free function for it.
+ * The cell will be freed if there's an error, or if it wasn't used because
+ * a cell with that key already exists.
+ */
+typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);
+
+static int bio_detain(struct cache *cache, dm_oblock_t oblock,
+ struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
+ cell_free_fn free_fn, void *free_context,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_cell_key key;
+
+ build_key(oblock, &key);
+ r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
+ if (r)
+ free_fn(free_context, cell_prealloc);
+
+ return r;
+}
+
+static int get_cell(struct cache *cache,
+ dm_oblock_t oblock,
+ struct prealloc *structs,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_cell_key key;
+ struct dm_bio_prison_cell *cell_prealloc;
+
+ cell_prealloc = prealloc_get_cell(structs);
+
+ build_key(oblock, &key);
+ r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
+ if (r)
+ prealloc_put_cell(structs, cell_prealloc);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static bool is_dirty(struct cache *cache, dm_cblock_t b)
+{
+ return test_bit(from_cblock(b), cache->dirty_bitset);
+}
+
+static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+ policy_set_dirty(cache->policy, oblock);
+ }
+}
+
+static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
+ policy_clear_dirty(cache->policy, oblock);
+ cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
+ if (!from_cblock(cache->nr_dirty))
+ dm_table_event(cache->ti->table);
+ }
+}
+
+/*----------------------------------------------------------------*/
+
+static bool block_size_is_power_of_two(struct cache *cache)
+{
+ return cache->sectors_per_block_shift >= 0;
+}
+
+static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
+{
+ sector_t discard_blocks = cache->discard_block_size;
+ dm_block_t b = from_oblock(oblock);
+
+ if (!block_size_is_power_of_two(cache))
+ (void) sector_div(discard_blocks, cache->sectors_per_block);
+ else
+ discard_blocks >>= cache->sectors_per_block_shift;
+
+ (void) sector_div(b, discard_blocks);
+
+ return to_dblock(b);
+}
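A worked example of the conversion above, with hypothetical sizes: if sectors_per_block = 512 and discard_block_size = 4096 sectors, each discard block spans 4096 / 512 = 8 origin blocks, so origin block 100 lands in discard block 100 / 8 = 12.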
+
+static void set_discard(struct cache *cache, dm_dblock_t b)
+{
+ unsigned long flags;
+
+ atomic_inc(&cache->stats.discard_count);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ set_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void clear_discard(struct cache *cache, dm_dblock_t b)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ clear_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static bool is_discarded(struct cache *cache, dm_dblock_t b)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = test_bit(from_dblock(b), cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
+ cache->discard_bitset);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+/*----------------------------------------------------------------*/
+
+static void load_stats(struct cache *cache)
+{
+ struct dm_cache_statistics stats;
+
+ dm_cache_metadata_get_stats(cache->cmd, &stats);
+ atomic_set(&cache->stats.read_hit, stats.read_hits);
+ atomic_set(&cache->stats.read_miss, stats.read_misses);
+ atomic_set(&cache->stats.write_hit, stats.write_hits);
+ atomic_set(&cache->stats.write_miss, stats.write_misses);
+}
+
+static void save_stats(struct cache *cache)
+{
+ struct dm_cache_statistics stats;
+
+ stats.read_hits = atomic_read(&cache->stats.read_hit);
+ stats.read_misses = atomic_read(&cache->stats.read_miss);
+ stats.write_hits = atomic_read(&cache->stats.write_hit);
+ stats.write_misses = atomic_read(&cache->stats.write_miss);
+
+ dm_cache_metadata_set_stats(cache->cmd, &stats);
+}
+
+/*----------------------------------------------------------------
+ * Per bio data
+ *--------------------------------------------------------------*/
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
+{
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
+ BUG_ON(!pb);
+ return pb;
+}
+
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ pb->tick = false;
+ pb->req_nr = dm_bio_get_target_bio_nr(bio);
+ pb->all_io_entry = NULL;
+
+ return pb;
+}
+
+/*----------------------------------------------------------------
+ * Remapping
+ *--------------------------------------------------------------*/
+static void remap_to_origin(struct cache *cache, struct bio *bio)
+{
+ bio->bi_bdev = cache->origin_dev->bdev;
+}
+
+static void remap_to_cache(struct cache *cache, struct bio *bio,
+ dm_cblock_t cblock)
+{
+ sector_t bi_sector = bio->bi_sector;
+
+ bio->bi_bdev = cache->cache_dev->bdev;
+ if (!block_size_is_power_of_two(cache))
+ bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
+ sector_div(bi_sector, cache->sectors_per_block);
+ else
+ bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (bi_sector & (cache->sectors_per_block - 1));
+}
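A quick worked example of the power-of-two branch, with hypothetical sizes: with sectors_per_block = 8 (shift 3), a bio at origin sector 42 keeps its in-block offset 42 & 7 = 6, so a mapping to cblock 5 remaps it to cache sector (5 << 3) | 6 = 46. The non-power-of-two branch derives the same offset with sector_div() instead of the mask.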
+
+static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ if (cache->need_tick_bio &&
+ !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
+ pb->tick = true;
+ cache->need_tick_bio = false;
+ }
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock)
+{
+ check_if_tick_bio_needed(cache, bio);
+ remap_to_origin(cache, bio);
+ if (bio_data_dir(bio) == WRITE)
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+}
+
+static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
+ dm_oblock_t oblock, dm_cblock_t cblock)
+{
+ remap_to_cache(cache, bio, cblock);
+ if (bio_data_dir(bio) == WRITE) {
+ set_dirty(cache, oblock, cblock);
+ clear_discard(cache, oblock_to_dblock(cache, oblock));
+ }
+}
+
+static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
+{
+ sector_t block_nr = bio->bi_sector;
+
+ if (!block_size_is_power_of_two(cache))
+ (void) sector_div(block_nr, cache->sectors_per_block);
+ else
+ block_nr >>= cache->sectors_per_block_shift;
+
+ return to_oblock(block_nr);
+}
+
+static int bio_triggers_commit(struct cache *cache, struct bio *bio)
+{
+ return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
+}
+
+static void issue(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ if (!bio_triggers_commit(cache, bio)) {
+ generic_make_request(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in do_worker().
+ */
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->commit_requested = true;
+ bio_list_add(&cache->deferred_flush_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+/*----------------------------------------------------------------
+ * Migration processing
+ *
+ * Migration covers moving data from the origin device to the cache, or
+ * vice versa.
+ *--------------------------------------------------------------*/
+static void free_migration(struct dm_cache_migration *mg)
+{
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
+static void inc_nr_migrations(struct cache *cache)
+{
+ atomic_inc(&cache->nr_migrations);
+}
+
+static void dec_nr_migrations(struct cache *cache)
+{
+ atomic_dec(&cache->nr_migrations);
+
+ /*
+ * Wake the worker in case we're suspending the target.
+ */
+ wake_up(&cache->migration_wait);
+}
+
+static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ bool holder)
+{
+ (holder ? dm_cell_release : dm_cell_release_no_holder)
+ (cache->prison, cell, &cache->deferred_bios);
+ free_prison_cell(cache, cell);
+}
+
+static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
+ bool holder)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ __cell_defer(cache, cell, holder);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void cleanup_migration(struct dm_cache_migration *mg)
+{
+ dec_nr_migrations(mg->cache);
+ free_migration(mg);
+}
+
+static void migration_failure(struct dm_cache_migration *mg)
+{
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ DMWARN_LIMIT("writeback failed; couldn't copy block");
+ set_dirty(cache, mg->old_oblock, mg->cblock);
+ cell_defer(cache, mg->old_ocell, false);
+
+ } else if (mg->demote) {
+ DMWARN_LIMIT("demotion failed; couldn't copy block");
+ policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);
+
+ cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, 1);
+ } else {
+ DMWARN_LIMIT("promotion failed; couldn't copy block");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+ cell_defer(cache, mg->new_ocell, 1);
+ }
+
+ cleanup_migration(mg);
+}
+
+static void migration_success_pre_commit(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ cell_defer(cache, mg->old_ocell, false);
+ clear_dirty(cache, mg->old_oblock, mg->cblock);
+ cleanup_migration(mg);
+ return;
+
+ } else if (mg->demote) {
+ if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
+ DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
+ policy_force_mapping(cache->policy, mg->new_oblock,
+ mg->old_oblock);
+ if (mg->promote)
+ cell_defer(cache, mg->new_ocell, true);
+ cleanup_migration(mg);
+ return;
+ }
+ } else {
+ if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
+ DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
+ policy_remove_mapping(cache->policy, mg->new_oblock);
+ cleanup_migration(mg);
+ return;
+ }
+ }
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->need_commit_migrations);
+ cache->commit_requested = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void migration_success_post_commit(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback) {
+ DMWARN("writeback unexpectedly triggered commit");
+ return;
+
+ } else if (mg->demote) {
+ cell_defer(cache, mg->old_ocell, mg->promote ? 0 : 1);
+
+ if (mg->promote) {
+ mg->demote = false;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->quiesced_migrations);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ } else
+ cleanup_migration(mg);
+
+ } else {
+ cell_defer(cache, mg->new_ocell, true);
+ clear_dirty(cache, mg->new_oblock, mg->cblock);
+ cleanup_migration(mg);
+ }
+}
+
+static void copy_complete(int read_err, unsigned long write_err, void *context)
+{
+ unsigned long flags;
+ struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
+ struct cache *cache = mg->cache;
+
+ if (read_err || write_err)
+ mg->err = true;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_add_tail(&mg->list, &cache->completed_migrations);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void issue_copy_real(struct dm_cache_migration *mg)
+{
+ int r;
+ struct dm_io_region o_region, c_region;
+ struct cache *cache = mg->cache;
+
+ o_region.bdev = cache->origin_dev->bdev;
+ o_region.count = cache->sectors_per_block;
+
+ c_region.bdev = cache->cache_dev->bdev;
+ c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.count = cache->sectors_per_block;
+
+ if (mg->writeback || mg->demote) {
+ /* demote */
+ o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
+ r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
+ } else {
+ /* promote */
+ o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
+ r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
+ }
+
+ if (r < 0)
+ migration_failure(mg);
+}
+
+static void avoid_copy(struct dm_cache_migration *mg)
+{
+ atomic_inc(&mg->cache->stats.copies_avoided);
+ migration_success_pre_commit(mg);
+}
+
+static void issue_copy(struct dm_cache_migration *mg)
+{
+ bool avoid;
+ struct cache *cache = mg->cache;
+
+ if (mg->writeback || mg->demote)
+ avoid = !is_dirty(cache, mg->cblock) ||
+ is_discarded_oblock(cache, mg->old_oblock);
+ else
+ avoid = is_discarded_oblock(cache, mg->new_oblock);
+
+ avoid ? avoid_copy(mg) : issue_copy_real(mg);
+}
+
+static void complete_migration(struct dm_cache_migration *mg)
+{
+ if (mg->err)
+ migration_failure(mg);
+ else
+ migration_success_pre_commit(mg);
+}
+
+static void process_migrations(struct cache *cache, struct list_head *head,
+ void (*fn)(struct dm_cache_migration *))
+{
+ unsigned long flags;
+ struct list_head list;
+ struct dm_cache_migration *mg, *tmp;
+
+ INIT_LIST_HEAD(&list);
+ spin_lock_irqsave(&cache->lock, flags);
+ list_splice_init(head, &list);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ list_for_each_entry_safe(mg, tmp, &list, list)
+ fn(mg);
+}
+
+static void __queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+ list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
+}
+
+static void queue_quiesced_migration(struct dm_cache_migration *mg)
+{
+ unsigned long flags;
+ struct cache *cache = mg->cache;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ __queue_quiesced_migration(mg);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
+{
+ unsigned long flags;
+ struct dm_cache_migration *mg, *tmp;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ list_for_each_entry_safe(mg, tmp, work, list)
+ __queue_quiesced_migration(mg);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void check_for_quiesced_migrations(struct cache *cache,
+ struct per_bio_data *pb)
+{
+ struct list_head work;
+
+ if (!pb->all_io_entry)
+ return;
+
+ INIT_LIST_HEAD(&work);
+ if (pb->all_io_entry)
+ dm_deferred_entry_dec(pb->all_io_entry, &work);
+
+ if (!list_empty(&work))
+ queue_quiesced_migrations(cache, &work);
+}
+
+static void quiesce_migration(struct dm_cache_migration *mg)
+{
+ if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
+ queue_quiesced_migration(mg);
+}
+
+static void promote(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = false;
+ mg->promote = true;
+ mg->cache = cache;
+ mg->new_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = NULL;
+ mg->new_ocell = cell;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+static void writeback(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t oblock, dm_cblock_t cblock,
+ struct dm_bio_prison_cell *cell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = true;
+ mg->demote = false;
+ mg->promote = false;
+ mg->cache = cache;
+ mg->old_oblock = oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = cell;
+ mg->new_ocell = NULL;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+static void demote_then_promote(struct cache *cache, struct prealloc *structs,
+ dm_oblock_t old_oblock, dm_oblock_t new_oblock,
+ dm_cblock_t cblock,
+ struct dm_bio_prison_cell *old_ocell,
+ struct dm_bio_prison_cell *new_ocell)
+{
+ struct dm_cache_migration *mg = prealloc_get_migration(structs);
+
+ mg->err = false;
+ mg->writeback = false;
+ mg->demote = true;
+ mg->promote = true;
+ mg->cache = cache;
+ mg->old_oblock = old_oblock;
+ mg->new_oblock = new_oblock;
+ mg->cblock = cblock;
+ mg->old_ocell = old_ocell;
+ mg->new_ocell = new_ocell;
+ mg->start_jiffies = jiffies;
+
+ inc_nr_migrations(cache);
+ quiesce_migration(mg);
+}
+
+/*----------------------------------------------------------------
+ * bio processing
+ *--------------------------------------------------------------*/
+static void defer_bio(struct cache *cache, struct bio *bio)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_add(&cache->deferred_bios, bio);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ wake_worker(cache);
+}
+
+static void process_flush_bio(struct cache *cache, struct bio *bio)
+{
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ BUG_ON(bio->bi_size);
+ if (!pb->req_nr)
+ remap_to_origin(cache, bio);
+ else
+ remap_to_cache(cache, bio, 0);
+
+ issue(cache, bio);
+}
+
+/*
+ * People generally discard large parts of a device, e.g. the whole device
+ * when formatting. Splitting these large discards up into cache block
+ * sized ios and then quiescing (always necessary for discard) takes too
+ * long.
+ *
+ * We keep it simple, and allow any size of discard to come in, and just
+ * mark off blocks on the discard bitset. No passdown occurs!
+ *
+ * To implement passdown we need to change the bio_prison such that a cell
+ * can have a key that spans many blocks.
+ */
+static void process_discard_bio(struct cache *cache, struct bio *bio)
+{
+ dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+ cache->discard_block_size);
+ dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+ dm_block_t b;
+
+ (void) sector_div(end_block, cache->discard_block_size);
+
+ for (b = start_block; b < end_block; b++)
+ set_discard(cache, to_dblock(b));
+
+ bio_endio(bio, 0);
+}
+
+static bool spare_migration_bandwidth(struct cache *cache)
+{
+ sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ cache->sectors_per_block;
+ return current_volume < cache->migration_threshold;
+}
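For a sense of scale: with the default migration_threshold of 2048 * 100 = 204800 sectors (DEFAULT_MIGRATION_THRESHOLD, defined further down) and 512-sector cache blocks, spare_migration_bandwidth() permits up to 399 concurrent migrations; starting a 400th would put (399 + 1) * 512 = 204800 sectors in flight, which is no longer below the threshold, so background work such as writeback is held off.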
+
+static bool is_writethrough_io(struct cache *cache, struct bio *bio,
+ dm_cblock_t cblock)
+{
+ return bio_data_dir(bio) == WRITE &&
+ cache->features.write_through && !is_dirty(cache, cblock);
+}
+
+static void inc_hit_counter(struct cache *cache, struct bio *bio)
+{
+ atomic_inc(bio_data_dir(bio) == READ ?
+ &cache->stats.read_hit : &cache->stats.write_hit);
+}
+
+static void inc_miss_counter(struct cache *cache, struct bio *bio)
+{
+ atomic_inc(bio_data_dir(bio) == READ ?
+ &cache->stats.read_miss : &cache->stats.write_miss);
+}
+
+static void process_bio(struct cache *cache, struct prealloc *structs,
+ struct bio *bio)
+{
+ int r;
+ bool release_cell = true;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+ bool discarded_block = is_discarded_oblock(cache, block);
+ bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, block, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &new_ocell);
+ if (r > 0)
+ return;
+
+ r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
+ bio, &lookup_result);
+
+ if (r == -EWOULDBLOCK)
+ /* migration has been denied */
+ lookup_result.op = POLICY_MISS;
+
+ switch (lookup_result.op) {
+ case POLICY_HIT:
+ inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+ /*
+ * No need to mark anything dirty in write through mode.
+ */
+ pb->req_nr == 0 ?
+ remap_to_cache(cache, bio, lookup_result.cblock) :
+ remap_to_origin_clear_discard(cache, bio, block);
+ } else
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+
+ issue(cache, bio);
+ break;
+
+ case POLICY_MISS:
+ inc_miss_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (pb->req_nr != 0) {
+ /*
+ * This is a duplicate writethrough io that is no
+ * longer needed because the block has been demoted.
+ */
+ bio_endio(bio, 0);
+ } else {
+ remap_to_origin_clear_discard(cache, bio, block);
+ issue(cache, bio);
+ }
+ break;
+
+ case POLICY_NEW:
+ atomic_inc(&cache->stats.promotion);
+ promote(cache, structs, block, lookup_result.cblock, new_ocell);
+ release_cell = false;
+ break;
+
+ case POLICY_REPLACE:
+ cell_prealloc = prealloc_get_cell(structs);
+ r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
+ (cell_free_fn) prealloc_put_cell,
+ structs, &old_ocell);
+ if (r > 0) {
+ /*
+ * We have to be careful to avoid lock inversion of
+ * the cells. So we back off, and wait for the
+ * old_ocell to become free.
+ */
+ policy_force_mapping(cache->policy, block,
+ lookup_result.old_oblock);
+ atomic_inc(&cache->stats.cache_cell_clash);
+ break;
+ }
+ atomic_inc(&cache->stats.demotion);
+ atomic_inc(&cache->stats.promotion);
+
+ demote_then_promote(cache, structs, lookup_result.old_oblock,
+ block, lookup_result.cblock,
+ old_ocell, new_ocell);
+ release_cell = false;
+ break;
+
+ default:
+ DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
+ (unsigned) lookup_result.op);
+ bio_io_error(bio);
+ }
+
+ if (release_cell)
+ cell_defer(cache, new_ocell, false);
+}
+
+static int need_commit_due_to_time(struct cache *cache)
+{
+ return jiffies < cache->last_commit_jiffies ||
+ jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
+}
+
+static int commit_if_needed(struct cache *cache)
+{
+ if (dm_cache_changed_this_transaction(cache->cmd) &&
+ (cache->commit_requested || need_commit_due_to_time(cache))) {
+ atomic_inc(&cache->stats.commit_count);
+ cache->last_commit_jiffies = jiffies;
+ cache->commit_requested = false;
+ return dm_cache_commit(cache->cmd, false);
+ }
+
+ return 0;
+}
+
+static void process_deferred_bios(struct cache *cache)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+ struct prealloc structs;
+
+ memset(&structs, 0, sizeof(structs));
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_bios);
+ bio_list_init(&cache->deferred_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while (!bio_list_empty(&bios)) {
+ /*
+ * If we've got no free migration structs, and processing
+ * this bio might require one, we pause until there are some
+ * prepared mappings to process.
+ */
+ if (prealloc_data_structs(cache, &structs)) {
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&cache->deferred_bios, &bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+ break;
+ }
+
+ bio = bio_list_pop(&bios);
+
+ if (bio->bi_rw & REQ_FLUSH)
+ process_flush_bio(cache, bio);
+ else if (bio->bi_rw & REQ_DISCARD)
+ process_discard_bio(cache, bio);
+ else
+ process_bio(cache, &structs, bio);
+ }
+
+ prealloc_free_structs(cache, &structs);
+}
+
+static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
+{
+ unsigned long flags;
+ struct bio_list bios;
+ struct bio *bio;
+
+ bio_list_init(&bios);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ bio_list_merge(&bios, &cache->deferred_flush_bios);
+ bio_list_init(&cache->deferred_flush_bios);
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ submit_bios ? generic_make_request(bio) : bio_io_error(bio);
+}
+
+static void writeback_some_dirty_blocks(struct cache *cache)
+{
+ int r = 0;
+ dm_oblock_t oblock;
+ dm_cblock_t cblock;
+ struct prealloc structs;
+ struct dm_bio_prison_cell *old_ocell;
+
+ memset(&structs, 0, sizeof(structs));
+
+ while (spare_migration_bandwidth(cache)) {
+ if (prealloc_data_structs(cache, &structs))
+ break;
+
+ r = policy_writeback_work(cache->policy, &oblock, &cblock);
+ if (r)
+ break;
+
+ r = get_cell(cache, oblock, &structs, &old_ocell);
+ if (r) {
+ policy_set_dirty(cache->policy, oblock);
+ break;
+ }
+
+ writeback(cache, &structs, oblock, cblock, old_ocell);
+ }
+
+ prealloc_free_structs(cache, &structs);
+}
+
+/*----------------------------------------------------------------
+ * Main worker loop
+ *--------------------------------------------------------------*/
+static void start_quiescing(struct cache *cache)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->quiescing = 1;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static void stop_quiescing(struct cache *cache)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->quiescing = 0;
+ spin_unlock_irqrestore(&cache->lock, flags);
+}
+
+static bool is_quiescing(struct cache *cache)
+{
+ int r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cache->lock, flags);
+ r = cache->quiescing;
+ spin_unlock_irqrestore(&cache->lock, flags);
+
+ return r;
+}
+
+static void wait_for_migrations(struct cache *cache)
+{
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+}
+
+static void stop_worker(struct cache *cache)
+{
+ cancel_delayed_work(&cache->waker);
+ flush_workqueue(cache->wq);
+}
+
+static void requeue_deferred_io(struct cache *cache)
+{
+ struct bio *bio;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
+ bio_list_merge(&bios, &cache->deferred_bios);
+ bio_list_init(&cache->deferred_bios);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+}
+
+static int more_work(struct cache *cache)
+{
+ if (is_quiescing(cache))
+ return !list_empty(&cache->quiesced_migrations) ||
+ !list_empty(&cache->completed_migrations) ||
+ !list_empty(&cache->need_commit_migrations);
+ else
+ return !bio_list_empty(&cache->deferred_bios) ||
+ !bio_list_empty(&cache->deferred_flush_bios) ||
+ !list_empty(&cache->quiesced_migrations) ||
+ !list_empty(&cache->completed_migrations) ||
+ !list_empty(&cache->need_commit_migrations);
+}
+
+static void do_worker(struct work_struct *ws)
+{
+ struct cache *cache = container_of(ws, struct cache, worker);
+
+ do {
+ if (!is_quiescing(cache))
+ process_deferred_bios(cache);
+
+ process_migrations(cache, &cache->quiesced_migrations, issue_copy);
+ process_migrations(cache, &cache->completed_migrations, complete_migration);
+
+ writeback_some_dirty_blocks(cache);
+
+ if (commit_if_needed(cache)) {
+ process_deferred_flush_bios(cache, false);
+
+ /*
+ * FIXME: rollback metadata or just go into a
+ * failure mode and error everything
+ */
+ } else {
+ process_deferred_flush_bios(cache, true);
+ process_migrations(cache, &cache->need_commit_migrations,
+ migration_success_post_commit);
+ }
+ } while (more_work(cache));
+}
+
+/*
+ * We want to commit periodically so that not too much
+ * unwritten metadata builds up.
+ */
+static void do_waker(struct work_struct *ws)
+{
+ struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
+ wake_worker(cache);
+ queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
+}
+
+/*----------------------------------------------------------------*/
+
+static int is_congested(struct dm_dev *dev, int bdi_bits)
+{
+ struct request_queue *q = bdev_get_queue(dev->bdev);
+ return bdi_congested(&q->backing_dev_info, bdi_bits);
+}
+
+static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
+{
+ struct cache *cache = container_of(cb, struct cache, callbacks);
+
+ return is_congested(cache->origin_dev, bdi_bits) ||
+ is_congested(cache->cache_dev, bdi_bits);
+}
+
+/*----------------------------------------------------------------
+ * Target methods
+ *--------------------------------------------------------------*/
+
+/*
+ * This function gets called on the error paths of the constructor, so we
+ * have to cope with a partially initialised struct.
+ */
+static void destroy(struct cache *cache)
+{
+ unsigned i;
+
+ if (cache->next_migration)
+ mempool_free(cache->next_migration, cache->migration_pool);
+
+ if (cache->migration_pool)
+ mempool_destroy(cache->migration_pool);
+
+ if (cache->all_io_ds)
+ dm_deferred_set_destroy(cache->all_io_ds);
+
+ if (cache->prison)
+ dm_bio_prison_destroy(cache->prison);
+
+ if (cache->wq)
+ destroy_workqueue(cache->wq);
+
+ if (cache->dirty_bitset)
+ free_bitset(cache->dirty_bitset);
+
+ if (cache->discard_bitset)
+ free_bitset(cache->discard_bitset);
+
+ if (cache->copier)
+ dm_kcopyd_client_destroy(cache->copier);
+
+ if (cache->cmd)
+ dm_cache_metadata_close(cache->cmd);
+
+ if (cache->metadata_dev)
+ dm_put_device(cache->ti, cache->metadata_dev);
+
+ if (cache->origin_dev)
+ dm_put_device(cache->ti, cache->origin_dev);
+
+ if (cache->cache_dev)
+ dm_put_device(cache->ti, cache->cache_dev);
+
+ if (cache->policy)
+ dm_cache_policy_destroy(cache->policy);
+
+ for (i = 0; i < cache->nr_ctr_args ; i++)
+ kfree(cache->ctr_args[i]);
+ kfree(cache->ctr_args);
+
+ kfree(cache);
+}
+
+static void cache_dtr(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ destroy(cache);
+}
+
+static sector_t get_dev_size(struct dm_dev *dev)
+{
+ return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Construct a cache device mapping.
+ *
+ * cache <metadata dev> <cache dev> <origin dev> <block size>
+ * <#feature args> [<feature arg>]*
+ * <policy> <#policy args> [<policy arg>]*
+ *
+ * metadata dev : fast device holding the persistent metadata
+ * cache dev : fast device holding cached data blocks
+ * origin dev : slow device holding original data blocks
+ * block size : cache unit size in sectors
+ *
+ * #feature args : number of feature arguments passed
+ * feature args : writethrough. (The default is writeback.)
+ *
+ * policy : the replacement policy to use
+ * #policy args : an even number of policy arguments corresponding
+ * to key/value pairs passed to the policy
+ * policy args : key/value pairs passed to the policy
+ * E.g. 'sequential_threshold 1024'
+ * See cache-policies.txt for details.
+ *
+ * Optional feature arguments are:
+ * writethrough : write through caching that prohibits cache block
+ * content from being different from origin block content.
+ * Without this argument, the default behaviour is to write
+ * back cache block contents later for performance reasons,
+ * so they may differ from the corresponding origin blocks.
+ */
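As an illustration of the syntax documented above (the device paths and policy values here are hypothetical, not part of the patch), a writethrough cache with 512-sector (256 KiB) blocks using the mq policy could be expressed as:

    cache /dev/mapper/cmeta /dev/mapper/cssd /dev/sdb 512 1 writethrough mq 2 sequential_threshold 1024

i.e. one feature argument followed by the policy name and an even number of key/value policy arguments.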
+struct cache_args {
+ struct dm_target *ti;
+
+ struct dm_dev *metadata_dev;
+
+ struct dm_dev *cache_dev;
+ sector_t cache_sectors;
+
+ struct dm_dev *origin_dev;
+ sector_t origin_sectors;
+
+ uint32_t block_size;
+
+ const char *policy_name;
+ int policy_argc;
+ const char **policy_argv;
+
+ struct cache_features features;
+};
+
+static void destroy_cache_args(struct cache_args *ca)
+{
+ if (ca->metadata_dev)
+ dm_put_device(ca->ti, ca->metadata_dev);
+
+ if (ca->cache_dev)
+ dm_put_device(ca->ti, ca->cache_dev);
+
+ if (ca->origin_dev)
+ dm_put_device(ca->ti, ca->origin_dev);
+
+ kfree(ca);
+}
+
+static bool at_least_one_arg(struct dm_arg_set *as, char **error)
+{
+ if (!as->argc) {
+ *error = "Insufficient args";
+ return false;
+ }
+
+ return true;
+}
+
+static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+ sector_t metadata_dev_size;
+ char b[BDEVNAME_SIZE];
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->metadata_dev);
+ if (r) {
+ *error = "Error opening metadata device";
+ return r;
+ }
+
+ metadata_dev_size = get_dev_size(ca->metadata_dev);
+ if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
+ DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
+ bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS_WARNING);
+
+ return 0;
+}
+
+static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->cache_dev);
+ if (r) {
+ *error = "Error opening cache device";
+ return r;
+ }
+ ca->cache_sectors = get_dev_size(ca->cache_dev);
+
+ return 0;
+}
+
+static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
+ &ca->origin_dev);
+ if (r) {
+ *error = "Error opening origin device";
+ return r;
+ }
+
+ ca->origin_sectors = get_dev_size(ca->origin_dev);
+ if (ca->ti->len > ca->origin_sectors) {
+ *error = "Device size larger than cached device";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ unsigned long tmp;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ if (kstrtoul(dm_shift_arg(as), 10, &tmp) || !tmp ||
+ tmp < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
+ tmp & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
+ *error = "Invalid data block size";
+ return -EINVAL;
+ }
+
+ if (tmp > ca->cache_sectors) {
+ *error = "Data block size is larger than the cache device";
+ return -EINVAL;
+ }
+
+ ca->block_size = tmp;
+
+ return 0;
+}
+
+static void init_features(struct cache_features *cf)
+{
+ cf->mode = CM_WRITE;
+ cf->write_through = false;
+}
+
+static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ static struct dm_arg _args[] = {
+ {0, 1, "Invalid number of cache feature arguments"},
+ };
+
+ int r;
+ unsigned argc;
+ const char *arg;
+ struct cache_features *cf = &ca->features;
+
+ init_features(cf);
+
+ r = dm_read_arg_group(_args, as, &argc, error);
+ if (r)
+ return -EINVAL;
+
+ while (argc--) {
+ arg = dm_shift_arg(as);
+
+ if (!strcasecmp(arg, "writeback"))
+ cf->write_through = false;
+
+ else if (!strcasecmp(arg, "writethrough"))
+ cf->write_through = true;
+
+ else {
+ *error = "Unrecognised cache feature requested";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
+ char **error)
+{
+ static struct dm_arg _args[] = {
+ {0, 1024, "Invalid number of policy arguments"},
+ };
+
+ int r;
+
+ if (!at_least_one_arg(as, error))
+ return -EINVAL;
+
+ ca->policy_name = dm_shift_arg(as);
+
+ r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
+ if (r)
+ return -EINVAL;
+
+ ca->policy_argv = (const char **)as->argv;
+ dm_consume_args(as, ca->policy_argc);
+
+ return 0;
+}
+
+static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
+ char **error)
+{
+ int r;
+ struct dm_arg_set as;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ r = parse_metadata_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_cache_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_origin_dev(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_block_size(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_features(ca, &as, error);
+ if (r)
+ return r;
+
+ r = parse_policy(ca, &as, error);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
+static struct kmem_cache *migration_cache;
+
+static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv)
+{
+ int r = 0;
+
+ if (argc & 1) {
+ DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
+ return -EINVAL;
+ }
+
+ while (argc) {
+ r = policy_set_config_value(p, argv[0], argv[1]);
+ if (r) {
+ DMWARN("policy_set_config_value failed: key = '%s', value = '%s'",
+ argv[0], argv[1]);
+ return r;
+ }
+
+ argc -= 2;
+ argv += 2;
+ }
+
+ return r;
+}
+
+static int create_cache_policy(struct cache *cache, struct cache_args *ca,
+ char **error)
+{
+ int r;
+
+ cache->policy = dm_cache_policy_create(ca->policy_name,
+ cache->cache_size,
+ cache->origin_sectors,
+ cache->sectors_per_block);
+ if (!cache->policy) {
+ *error = "Error creating cache's policy";
+ return -ENOMEM;
+ }
+
+ r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv);
+ if (r)
+ dm_cache_policy_destroy(cache->policy);
+
+ return r;
+}
+
+/*
+ * We want the discard block size to be a power of two, at least the size
+ * of the cache block size, and have no more than 2^14 discard blocks
+ * across the origin.
+ */
+#define MAX_DISCARD_BLOCKS (1 << 14)
+
+static bool too_many_discard_blocks(sector_t discard_block_size,
+ sector_t origin_size)
+{
+ (void) sector_div(origin_size, discard_block_size);
+
+ return origin_size > MAX_DISCARD_BLOCKS;
+}
+
+static sector_t calculate_discard_block_size(sector_t cache_block_size,
+ sector_t origin_size)
+{
+ sector_t discard_block_size;
+
+ discard_block_size = roundup_pow_of_two(cache_block_size);
+
+ if (origin_size)
+ while (too_many_discard_blocks(discard_block_size, origin_size))
+ discard_block_size *= 2;
+
+ return discard_block_size;
+}
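A worked example with hypothetical sizes: for a 128-sector cache block and a 2^31-sector (1 TiB) origin, roundup_pow_of_two(128) = 128, but 2^31 / 128 discard blocks far exceeds MAX_DISCARD_BLOCKS (2^14), so the size keeps doubling until 2^31 / 2^17 = 2^14, giving a 131072-sector (64 MiB) discard block.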
+
+#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100)
+
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio);
+
+static int cache_create(struct cache_args *ca, struct cache **result)
+{
+ int r = 0;
+ char **error = &ca->ti->error;
+ struct cache *cache;
+ struct dm_target *ti = ca->ti;
+ dm_block_t origin_blocks;
+ struct dm_cache_metadata *cmd;
+ bool may_format = ca->features.mode == CM_WRITE;
+
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (!cache)
+ return -ENOMEM;
+
+ cache->ti = ca->ti;
+ ti->private = cache;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
+ ti->num_flush_bios = 2;
+ ti->flush_supported = true;
+
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ ti->discard_zeroes_data_unsupported = true;
+
+ memcpy(&cache->features, &ca->features, sizeof(cache->features));
+
+ if (cache->features.write_through)
+ ti->num_write_bios = cache_num_write_bios;
+
+ cache->callbacks.congested_fn = cache_is_congested;
+ dm_table_add_target_callbacks(ti->table, &cache->callbacks);
+
+ cache->metadata_dev = ca->metadata_dev;
+ cache->origin_dev = ca->origin_dev;
+ cache->cache_dev = ca->cache_dev;
+
+ ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
+
+ /* FIXME: factor out this whole section */
+ origin_blocks = cache->origin_sectors = ca->origin_sectors;
+ (void) sector_div(origin_blocks, ca->block_size);
+ cache->origin_blocks = to_oblock(origin_blocks);
+
+ cache->sectors_per_block = ca->block_size;
+ if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
+ r = -EINVAL;
+ goto bad;
+ }
+
+ if (ca->block_size & (ca->block_size - 1)) {
+ dm_block_t cache_size = ca->cache_sectors;
+
+ cache->sectors_per_block_shift = -1;
+ (void) sector_div(cache_size, ca->block_size);
+ cache->cache_size = to_cblock(cache_size);
+ } else {
+ cache->sectors_per_block_shift = __ffs(ca->block_size);
+ cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
+ }
+
+ r = create_cache_policy(cache, ca, error);
+ if (r)
+ goto bad;
+ cache->policy_nr_args = ca->policy_argc;
+
+ cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
+ ca->block_size, may_format,
+ dm_cache_policy_get_hint_size(cache->policy));
+ if (IS_ERR(cmd)) {
+ *error = "Error creating metadata object";
+ r = PTR_ERR(cmd);
+ goto bad;
+ }
+ cache->cmd = cmd;
+
+ spin_lock_init(&cache->lock);
+ bio_list_init(&cache->deferred_bios);
+ bio_list_init(&cache->deferred_flush_bios);
+ INIT_LIST_HEAD(&cache->quiesced_migrations);
+ INIT_LIST_HEAD(&cache->completed_migrations);
+ INIT_LIST_HEAD(&cache->need_commit_migrations);
+ cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
+ atomic_set(&cache->nr_migrations, 0);
+ init_waitqueue_head(&cache->migration_wait);
+
+ cache->nr_dirty = 0;
+ cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
+ if (!cache->dirty_bitset) {
+ *error = "could not allocate dirty bitset";
+ r = -ENOMEM;
+ goto bad;
+ }
+ clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
+
+ cache->discard_block_size =
+ calculate_discard_block_size(cache->sectors_per_block,
+ cache->origin_sectors);
+ cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
+ cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
+ if (!cache->discard_bitset) {
+ *error = "could not allocate discard bitset";
+ r = -ENOMEM;
+ goto bad;
+ }
+ clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
+
+ cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
+ if (IS_ERR(cache->copier)) {
+ *error = "could not create kcopyd client";
+ r = PTR_ERR(cache->copier);
+ goto bad;
+ }
+
+ cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!cache->wq) {
+ *error = "could not create workqueue for metadata object";
+ r = -ENOMEM;
+ goto bad;
+ }
+ INIT_WORK(&cache->worker, do_worker);
+ INIT_DELAYED_WORK(&cache->waker, do_waker);
+ cache->last_commit_jiffies = jiffies;
+
+ cache->prison = dm_bio_prison_create(PRISON_CELLS);
+ if (!cache->prison) {
+ *error = "could not create bio prison";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->all_io_ds = dm_deferred_set_create();
+ if (!cache->all_io_ds) {
+ *error = "could not create all_io deferred set";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
+ migration_cache);
+ if (!cache->migration_pool) {
+ *error = "Error creating cache's migration mempool";
+ r = -ENOMEM;
+ goto bad;
+ }
+
+ cache->next_migration = NULL;
+
+ cache->need_tick_bio = true;
+ cache->sized = false;
+ cache->quiescing = false;
+ cache->commit_requested = false;
+ cache->loaded_mappings = false;
+ cache->loaded_discards = false;
+
+ load_stats(cache);
+
+ atomic_set(&cache->stats.demotion, 0);
+ atomic_set(&cache->stats.promotion, 0);
+ atomic_set(&cache->stats.copies_avoided, 0);
+ atomic_set(&cache->stats.cache_cell_clash, 0);
+ atomic_set(&cache->stats.commit_count, 0);
+ atomic_set(&cache->stats.discard_count, 0);
+
+ *result = cache;
+ return 0;
+
+bad:
+ destroy(cache);
+ return r;
+}
+
+static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
+{
+ unsigned i;
+ const char **copy;
+
+ copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return -ENOMEM;
+ for (i = 0; i < argc; i++) {
+ copy[i] = kstrdup(argv[i], GFP_KERNEL);
+ if (!copy[i]) {
+ while (i--)
+ kfree(copy[i]);
+ kfree(copy);
+ return -ENOMEM;
+ }
+ }
+
+ cache->nr_ctr_args = argc;
+ cache->ctr_args = copy;
+
+ return 0;
+}
+
+static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r = -EINVAL;
+ struct cache_args *ca;
+ struct cache *cache = NULL;
+
+ ca = kzalloc(sizeof(*ca), GFP_KERNEL);
+ if (!ca) {
+ ti->error = "Error allocating memory for cache";
+ return -ENOMEM;
+ }
+ ca->ti = ti;
+
+ r = parse_cache_args(ca, argc, argv, &ti->error);
+ if (r)
+ goto out;
+
+ r = cache_create(ca, &cache);
+ if (r)
+ goto out;
+
+ r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
+ if (r) {
+ destroy(cache);
+ goto out;
+ }
+
+ ti->private = cache;
+
+out:
+ destroy_cache_args(ca);
+ return r;
+}
+
+static unsigned cache_num_write_bios(struct dm_target *ti, struct bio *bio)
+{
+ int r;
+ struct cache *cache = ti->private;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ dm_cblock_t cblock;
+
+ r = policy_lookup(cache->policy, block, &cblock);
+ if (r < 0)
+ return 2; /* assume the worst */
+
+ return (!r && !is_dirty(cache, cblock)) ? 2 : 1;
+}
+
+static int cache_map(struct dm_target *ti, struct bio *bio)
+{
+ struct cache *cache = ti->private;
+
+ int r;
+ dm_oblock_t block = get_bio_block(cache, bio);
+ bool can_migrate = false;
+ bool discarded_block;
+ struct dm_bio_prison_cell *cell;
+ struct policy_result lookup_result;
+ struct per_bio_data *pb;
+
+ if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ /*
+ * This can only occur if the io goes to a partial block at
+ * the end of the origin device. We don't cache these.
+ * Just remap to the origin and carry on.
+ */
+ remap_to_origin_clear_discard(cache, bio, block);
+ return DM_MAPIO_REMAPPED;
+ }
+
+ pb = init_per_bio_data(bio);
+
+ if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ /*
+ * Check to see if that block is currently migrating.
+ */
+ cell = alloc_prison_cell(cache);
+ if (!cell) {
+ defer_bio(cache, bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ r = bio_detain(cache, block, bio, cell,
+ (cell_free_fn) free_prison_cell,
+ cache, &cell);
+ if (r) {
+ if (r < 0)
+ defer_bio(cache, bio);
+
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ discarded_block = is_discarded_oblock(cache, block);
+
+ r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
+ bio, &lookup_result);
+ if (r == -EWOULDBLOCK) {
+ cell_defer(cache, cell, true);
+ return DM_MAPIO_SUBMITTED;
+
+ } else if (r) {
+ DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ switch (lookup_result.op) {
+ case POLICY_HIT:
+ inc_hit_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (is_writethrough_io(cache, bio, lookup_result.cblock)) {
+ /*
+ * No need to mark anything dirty in write through mode.
+ */
+ if (pb->req_nr == 0)
+ remap_to_cache(cache, bio, lookup_result.cblock);
+ else
+ remap_to_origin_clear_discard(cache, bio, block);
+ cell_defer(cache, cell, false);
+ } else {
+ remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
+ cell_defer(cache, cell, false);
+ }
+ break;
+
+ case POLICY_MISS:
+ inc_miss_counter(cache, bio);
+ pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
+
+ if (pb->req_nr != 0) {
+ /*
+ * This is a duplicate writethrough io that is no
+ * longer needed because the block has been demoted.
+ */
+ bio_endio(bio, 0);
+ cell_defer(cache, cell, false);
+ return DM_MAPIO_SUBMITTED;
+ } else {
+ remap_to_origin_clear_discard(cache, bio, block);
+ cell_defer(cache, cell, false);
+ }
+ break;
+
+ default:
+ DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
+ (unsigned) lookup_result.op);
+ bio_io_error(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
+{
+ struct cache *cache = ti->private;
+ unsigned long flags;
+ struct per_bio_data *pb = get_per_bio_data(bio);
+
+ if (pb->tick) {
+ policy_tick(cache->policy);
+
+ spin_lock_irqsave(&cache->lock, flags);
+ cache->need_tick_bio = true;
+ spin_unlock_irqrestore(&cache->lock, flags);
+ }
+
+ check_for_quiesced_migrations(cache, pb);
+
+ return 0;
+}
+
+static int write_dirty_bitset(struct cache *cache)
+{
+ unsigned i;
+ int r;
+
+ for (i = 0; i < from_cblock(cache->cache_size); i++) {
+ r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
+ is_dirty(cache, to_cblock(i)));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int write_discard_bitset(struct cache *cache)
+{
+ unsigned i;
+ int r;
+
+ r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
+ cache->discard_nr_blocks);
+ if (r) {
+ DMERR("could not resize on-disk discard bitset");
+ return r;
+ }
+
+ for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
+ r = dm_cache_set_discard(cache->cmd, to_dblock(i),
+ is_discarded(cache, to_dblock(i)));
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
+ uint32_t hint)
+{
+ struct cache *cache = context;
+ return dm_cache_save_hint(cache->cmd, cblock, hint);
+}
+
+static int write_hints(struct cache *cache)
+{
+ int r;
+
+ r = dm_cache_begin_hints(cache->cmd, cache->policy);
+ if (r) {
+ DMERR("dm_cache_begin_hints failed");
+ return r;
+ }
+
+ r = policy_walk_mappings(cache->policy, save_hint, cache);
+ if (r)
+ DMERR("policy_walk_mappings failed");
+
+ return r;
+}
+
+/*
+ * returns true on success
+ */
+static bool sync_metadata(struct cache *cache)
+{
+ int r1, r2, r3, r4;
+
+ r1 = write_dirty_bitset(cache);
+ if (r1)
+ DMERR("could not write dirty bitset");
+
+ r2 = write_discard_bitset(cache);
+ if (r2)
+ DMERR("could not write discard bitset");
+
+ save_stats(cache);
+
+ r3 = write_hints(cache);
+ if (r3)
+ DMERR("could not write hints");
+
+ /*
+ * If writing the above metadata failed, we still commit, but don't
+ * set the clean shutdown flag. This will effectively force every
+ * dirty bit to be set on reload.
+ */
+ r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
+ if (r4)
+ DMERR("could not write cache metadata. Data loss may occur.");
+
+ return !r1 && !r2 && !r3 && !r4;
+}
+
+static void cache_postsuspend(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ start_quiescing(cache);
+ wait_for_migrations(cache);
+ stop_worker(cache);
+ requeue_deferred_io(cache);
+ stop_quiescing(cache);
+
+ (void) sync_metadata(cache);
+}
+
+static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
+ bool dirty, uint32_t hint, bool hint_valid)
+{
+ int r;
+ struct cache *cache = context;
+
+ r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
+ if (r)
+ return r;
+
+ if (dirty)
+ set_dirty(cache, oblock, cblock);
+ else
+ clear_dirty(cache, oblock, cblock);
+
+ return 0;
+}
+
+static int load_discard(void *context, sector_t discard_block_size,
+ dm_dblock_t dblock, bool discard)
+{
+ struct cache *cache = context;
+
+ /* FIXME: handle mis-matched block size */
+
+ if (discard)
+ set_discard(cache, dblock);
+ else
+ clear_discard(cache, dblock);
+
+ return 0;
+}
+
+static int cache_preresume(struct dm_target *ti)
+{
+ int r = 0;
+ struct cache *cache = ti->private;
+ sector_t actual_cache_size = get_dev_size(cache->cache_dev);
+ (void) sector_div(actual_cache_size, cache->sectors_per_block);
+
+ /*
+ * Check to see if the cache has resized.
+ */
+ if (from_cblock(cache->cache_size) != actual_cache_size || !cache->sized) {
+ cache->cache_size = to_cblock(actual_cache_size);
+
+ r = dm_cache_resize(cache->cmd, cache->cache_size);
+ if (r) {
+ DMERR("could not resize cache metadata");
+ return r;
+ }
+
+ cache->sized = true;
+ }
+
+ if (!cache->loaded_mappings) {
+ r = dm_cache_load_mappings(cache->cmd,
+ dm_cache_policy_get_name(cache->policy),
+ load_mapping, cache);
+ if (r) {
+ DMERR("could not load cache mappings");
+ return r;
+ }
+
+ cache->loaded_mappings = true;
+ }
+
+ if (!cache->loaded_discards) {
+ r = dm_cache_load_discards(cache->cmd, load_discard, cache);
+ if (r) {
+ DMERR("could not load origin discards");
+ return r;
+ }
+
+ cache->loaded_discards = true;
+ }
+
+ return r;
+}
+
+static void cache_resume(struct dm_target *ti)
+{
+ struct cache *cache = ti->private;
+
+ cache->need_tick_bio = true;
+ do_waker(&cache->waker.work);
+}
+
+/*
+ * Status format:
+ *
+ * <#used metadata blocks>/<#total metadata blocks>
+ * <#read hits> <#read misses> <#write hits> <#write misses>
+ * <#demotions> <#promotions> <#blocks in cache> <#dirty>
+ * <#features> <features>*
+ * <#core args> <core args>
+ * <#policy args> <policy args>*
+ */
+static void cache_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
+{
+ int r = 0;
+ unsigned i;
+ ssize_t sz = 0;
+ dm_block_t nr_free_blocks_metadata = 0;
+ dm_block_t nr_blocks_metadata = 0;
+ char buf[BDEVNAME_SIZE];
+ struct cache *cache = ti->private;
+ dm_cblock_t residency;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ /* Commit to ensure statistics aren't out-of-date */
+ if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
+ r = dm_cache_commit(cache->cmd, false);
+ if (r)
+ DMERR("could not commit metadata for accurate status");
+ }
+
+ r = dm_cache_get_free_metadata_block_count(cache->cmd,
+ &nr_free_blocks_metadata);
+ if (r) {
+ DMERR("could not get metadata free block count");
+ goto err;
+ }
+
+ r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
+ if (r) {
+ DMERR("could not get metadata device size");
+ goto err;
+ }
+
+ residency = policy_residency(cache->policy);
+
+ DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
+ (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
+ (unsigned long long)nr_blocks_metadata,
+ (unsigned) atomic_read(&cache->stats.read_hit),
+ (unsigned) atomic_read(&cache->stats.read_miss),
+ (unsigned) atomic_read(&cache->stats.write_hit),
+ (unsigned) atomic_read(&cache->stats.write_miss),
+ (unsigned) atomic_read(&cache->stats.demotion),
+ (unsigned) atomic_read(&cache->stats.promotion),
+ (unsigned long long) from_cblock(residency),
+ cache->nr_dirty);
+
+ if (cache->features.write_through)
+ DMEMIT("1 writethrough ");
+ else
+ DMEMIT("0 ");
+
+ DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
+ if (sz < maxlen) {
+ r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
+ if (r)
+ DMERR("policy_emit_config_values returned %d", r);
+ }
+
+ break;
+
+ case STATUSTYPE_TABLE:
+ format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
+ DMEMIT("%s ", buf);
+ format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
+ DMEMIT("%s", buf);
+
+ for (i = 0; i < cache->nr_ctr_args - 1; i++)
+ DMEMIT(" %s", cache->ctr_args[i]);
+ if (cache->nr_ctr_args)
+ DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
+ }
+
+ return;
+
+err:
+ DMEMIT("Error");
+}
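For orientation, a hypothetical STATUSTYPE_INFO line assembled by the DMEMIT calls above could look like the following (all field values invented for illustration; the trailing policy fields come from policy_emit_config_values()):

    17/1024 2847 391 1203 77 12 45 900 13 1 writethrough 2 migration_threshold 204800 <policy args>

i.e. 17 of 1024 metadata blocks used, the four read/write hit and miss counters, demotions and promotions, 900 blocks resident in the cache with 13 of them dirty, the writethrough feature flag, and the core migration_threshold key/value pair.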
+
+#define NOT_CORE_OPTION 1
+
+static int process_config_option(struct cache *cache, char **argv)
+{
+ unsigned long tmp;
+
+ if (!strcasecmp(argv[0], "migration_threshold")) {
+ if (kstrtoul(argv[1], 10, &tmp))
+ return -EINVAL;
+
+ cache->migration_threshold = tmp;
+ return 0;
+ }
+
+ return NOT_CORE_OPTION;
+}
+
+/*
+ * Supports <key> <value>.
+ *
+ * The key migration_threshold is supported by the cache target core.
+ */
+static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
+{
+ int r;
+ struct cache *cache = ti->private;
+
+ if (argc != 2)
+ return -EINVAL;
+
+ r = process_config_option(cache, argv);
+ if (r == NOT_CORE_OPTION)
+ return policy_set_config_value(cache->policy, argv[0], argv[1]);
+
+ return r;
+}
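As a usage sketch (device name and value invented for illustration), the core key above would typically be set from user space through the standard device-mapper message ioctl, e.g. dmsetup message my_cache 0 migration_threshold 204800; any other key falls through to policy_set_config_value() for the loaded policy to interpret.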
+
+static int cache_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ int r = 0;
+ struct cache *cache = ti->private;
+
+ r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
+ if (!r)
+ r = fn(ti, cache->origin_dev, 0, ti->len, data);
+
+ return r;
+}
+
+/*
+ * We assume I/O is going to the origin (which is the volume
+ * more likely to have restrictions e.g. by being striped).
+ * (Looking up the exact location of the data would be expensive
+ * and could always be out of date by the time the bio is submitted.)
+ */
+static int cache_bvec_merge(struct dm_target *ti,
+ struct bvec_merge_data *bvm,
+ struct bio_vec *biovec, int max_size)
+{
+ struct cache *cache = ti->private;
+ struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
+
+ if (!q->merge_bvec_fn)
+ return max_size;
+
+ bvm->bi_bdev = cache->origin_dev->bdev;
+ return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
+}
+
+static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
+{
+ /*
+ * FIXME: these limits may be incompatible with the cache device
+ */
+ limits->max_discard_sectors = cache->discard_block_size * 1024;
+ limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
+}
+
+static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct cache *cache = ti->private;
+
+ blk_limits_io_min(limits, 0);
+ blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
+ set_discard_limits(cache, limits);
+}
+
+/*----------------------------------------------------------------*/
+
+static struct target_type cache_target = {
+ .name = "cache",
+ .version = {1, 0, 0},
+ .module = THIS_MODULE,
+ .ctr = cache_ctr,
+ .dtr = cache_dtr,
+ .map = cache_map,
+ .end_io = cache_end_io,
+ .postsuspend = cache_postsuspend,
+ .preresume = cache_preresume,
+ .resume = cache_resume,
+ .status = cache_status,
+ .message = cache_message,
+ .iterate_devices = cache_iterate_devices,
+ .merge = cache_bvec_merge,
+ .io_hints = cache_io_hints,
+};
+
+static int __init dm_cache_init(void)
+{
+ int r;
+
+ r = dm_register_target(&cache_target);
+ if (r) {
+ DMERR("cache target registration failed: %d", r);
+ return r;
+ }
+
+ migration_cache = KMEM_CACHE(dm_cache_migration, 0);
+ if (!migration_cache) {
+ dm_unregister_target(&cache_target);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void __exit dm_cache_exit(void)
+{
+ dm_unregister_target(&cache_target);
+ kmem_cache_destroy(migration_cache);
+}
+
+module_init(dm_cache_init);
+module_exit(dm_cache_exit);
+
+MODULE_DESCRIPTION(DM_NAME " cache target");
+MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f7369f9d859..13c15480d94 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1234,20 +1234,6 @@ static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
return 0;
}
-/*
- * Encode key into its hex representation
- */
-static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
-{
- unsigned int i;
-
- for (i = 0; i < size; i++) {
- sprintf(hex, "%02x", *key);
- hex += 2;
- key++;
- }
-}
-
static void crypt_free_tfms(struct crypt_config *cc)
{
unsigned i;
@@ -1651,7 +1637,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (opt_params == 1 && opt_string &&
!strcasecmp(opt_string, "allow_discards"))
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
else if (opt_params) {
ret = -EINVAL;
ti->error = "Invalid feature arguments";
@@ -1679,7 +1665,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
ti->discard_zeroes_data_unsupported = true;
return 0;
@@ -1717,11 +1703,11 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-static int crypt_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void crypt_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct crypt_config *cc = ti->private;
- unsigned int sz = 0;
+ unsigned i, sz = 0;
switch (type) {
case STATUSTYPE_INFO:
@@ -1731,27 +1717,20 @@ static int crypt_status(struct dm_target *ti, status_type_t type,
case STATUSTYPE_TABLE:
DMEMIT("%s ", cc->cipher_string);
- if (cc->key_size > 0) {
- if ((maxlen - sz) < ((cc->key_size << 1) + 1))
- return -ENOMEM;
-
- crypt_encode_key(result + sz, cc->key, cc->key_size);
- sz += cc->key_size << 1;
- } else {
- if (sz >= maxlen)
- return -ENOMEM;
- result[sz++] = '-';
- }
+ if (cc->key_size > 0)
+ for (i = 0; i < cc->key_size; i++)
+ DMEMIT("%02x", cc->key[i]);
+ else
+ DMEMIT("-");
DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
cc->dev->name, (unsigned long long)cc->start);
- if (ti->num_discard_requests)
+ if (ti->num_discard_bios)
DMEMIT(" 1 allow_discards");
break;
}
- return 0;
}
static void crypt_postsuspend(struct dm_target *ti)
@@ -1845,7 +1824,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 12, 0},
+ .version = {1, 12, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index cc1bd048acb..496d5f3646a 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -198,8 +198,8 @@ out:
mutex_init(&dc->timer_lock);
atomic_set(&dc->may_delay, 1);
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->private = dc;
return 0;
@@ -293,8 +293,8 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, dc->read_delay, bio);
}
-static int delay_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void delay_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct delay_c *dc = ti->private;
int sz = 0;
@@ -314,8 +314,6 @@ static int delay_status(struct dm_target *ti, status_type_t type,
dc->write_delay);
break;
}
-
- return 0;
}
static int delay_iterate_devices(struct dm_target *ti,
@@ -337,7 +335,7 @@ out:
static struct target_type delay_target = {
.name = "delay",
- .version = {1, 2, 0},
+ .version = {1, 2, 1},
.module = THIS_MODULE,
.ctr = delay_ctr,
.dtr = delay_dtr,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 9721f2ffb1a..7fcf21cb4ff 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -216,8 +216,8 @@ static int flakey_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->private = fc;
return 0;
@@ -337,8 +337,8 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error)
return error;
}
-static int flakey_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void flakey_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct flakey_c *fc = ti->private;
@@ -368,7 +368,6 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
break;
}
- return 0;
}
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
@@ -411,7 +410,7 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_
static struct target_type flakey_target = {
.name = "flakey",
- .version = {1, 3, 0},
+ .version = {1, 3, 1},
.module = THIS_MODULE,
.ctr = flakey_ctr,
.dtr = flakey_dtr,
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 0666b5d14b8..aa04f022464 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1067,6 +1067,7 @@ static void retrieve_status(struct dm_table *table,
num_targets = dm_table_get_num_targets(table);
for (i = 0; i < num_targets; i++) {
struct dm_target *ti = dm_table_get_target(table, i);
+ size_t l;
remaining = len - (outptr - outbuf);
if (remaining <= sizeof(struct dm_target_spec)) {
@@ -1093,14 +1094,17 @@ static void retrieve_status(struct dm_table *table,
if (ti->type->status) {
if (param->flags & DM_NOFLUSH_FLAG)
status_flags |= DM_STATUS_NOFLUSH_FLAG;
- if (ti->type->status(ti, type, status_flags, outptr, remaining)) {
- param->flags |= DM_BUFFER_FULL_FLAG;
- break;
- }
+ ti->type->status(ti, type, status_flags, outptr, remaining);
} else
outptr[0] = '\0';
- outptr += strlen(outptr) + 1;
+ l = strlen(outptr) + 1;
+ if (l == remaining) {
+ param->flags |= DM_BUFFER_FULL_FLAG;
+ break;
+ }
+
+ outptr += l;
used = param->data_start + (outptr - outbuf);
outptr = align_ptr(outptr);
@@ -1410,6 +1414,22 @@ static int table_status(struct dm_ioctl *param, size_t param_size)
return 0;
}
+static bool buffer_test_overflow(char *result, unsigned maxlen)
+{
+ return !maxlen || strlen(result) + 1 >= maxlen;
+}
+
+/*
+ * Process device-mapper dependent messages.
+ * Returns a number <= 1 if message was processed by device mapper.
+ * Returns 2 if message should be delivered to the target.
+ */
+static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
+ char *result, unsigned maxlen)
+{
+ return 2;
+}
+
/*
* Pass a message to the target that's at the supplied device offset.
*/
@@ -1421,6 +1441,8 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
struct dm_table *table;
struct dm_target *ti;
struct dm_target_msg *tmsg = (void *) param + param->data_start;
+ size_t maxlen;
+ char *result = get_result_buffer(param, param_size, &maxlen);
md = find_device(param);
if (!md)
@@ -1444,6 +1466,10 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
goto out_argv;
}
+ r = message_for_md(md, argc, argv, result, maxlen);
+ if (r <= 1)
+ goto out_argv;
+
table = dm_get_live_table(md);
if (!table)
goto out_argv;
@@ -1469,44 +1495,68 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
out_argv:
kfree(argv);
out:
- param->data_size = 0;
+ if (r >= 0)
+ __dev_status(md, param);
+
+ if (r == 1) {
+ param->flags |= DM_DATA_OUT_FLAG;
+ if (buffer_test_overflow(result, maxlen))
+ param->flags |= DM_BUFFER_FULL_FLAG;
+ else
+ param->data_size = param->data_start + strlen(result) + 1;
+ r = 0;
+ }
+
dm_put(md);
return r;
}
+/*
+ * The ioctl parameter block consists of two parts, a dm_ioctl struct
+ * followed by a data buffer. This flag is set if the second part,
+ * which has a variable size, is not used by the function processing
+ * the ioctl.
+ */
+#define IOCTL_FLAGS_NO_PARAMS 1
+
/*-----------------------------------------------------------------
* Implementation of open/close/ioctl on the special char
* device.
*---------------------------------------------------------------*/
-static ioctl_fn lookup_ioctl(unsigned int cmd)
+static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
{
static struct {
int cmd;
+ int flags;
ioctl_fn fn;
} _ioctls[] = {
- {DM_VERSION_CMD, NULL}, /* version is dealt with elsewhere */
- {DM_REMOVE_ALL_CMD, remove_all},
- {DM_LIST_DEVICES_CMD, list_devices},
-
- {DM_DEV_CREATE_CMD, dev_create},
- {DM_DEV_REMOVE_CMD, dev_remove},
- {DM_DEV_RENAME_CMD, dev_rename},
- {DM_DEV_SUSPEND_CMD, dev_suspend},
- {DM_DEV_STATUS_CMD, dev_status},
- {DM_DEV_WAIT_CMD, dev_wait},
-
- {DM_TABLE_LOAD_CMD, table_load},
- {DM_TABLE_CLEAR_CMD, table_clear},
- {DM_TABLE_DEPS_CMD, table_deps},
- {DM_TABLE_STATUS_CMD, table_status},
-
- {DM_LIST_VERSIONS_CMD, list_versions},
-
- {DM_TARGET_MSG_CMD, target_message},
- {DM_DEV_SET_GEOMETRY_CMD, dev_set_geometry}
+ {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
+ {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+ {DM_LIST_DEVICES_CMD, 0, list_devices},
+
+ {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
+ {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
+ {DM_DEV_RENAME_CMD, 0, dev_rename},
+ {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
+ {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
+ {DM_DEV_WAIT_CMD, 0, dev_wait},
+
+ {DM_TABLE_LOAD_CMD, 0, table_load},
+ {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
+ {DM_TABLE_DEPS_CMD, 0, table_deps},
+ {DM_TABLE_STATUS_CMD, 0, table_status},
+
+ {DM_LIST_VERSIONS_CMD, 0, list_versions},
+
+ {DM_TARGET_MSG_CMD, 0, target_message},
+ {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry}
};
- return (cmd >= ARRAY_SIZE(_ioctls)) ? NULL : _ioctls[cmd].fn;
+ if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
+ return NULL;
+
+ *ioctl_flags = _ioctls[cmd].flags;
+ return _ioctls[cmd].fn;
}
/*
@@ -1543,7 +1593,8 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
return r;
}
-#define DM_PARAMS_VMALLOC 0x0001 /* Params alloced with vmalloc not kmalloc */
+#define DM_PARAMS_KMALLOC 0x0001 /* Params alloced with kmalloc */
+#define DM_PARAMS_VMALLOC 0x0002 /* Params alloced with vmalloc */
#define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
@@ -1551,66 +1602,80 @@ static void free_params(struct dm_ioctl *param, size_t param_size, int param_fla
if (param_flags & DM_WIPE_BUFFER)
memset(param, 0, param_size);
+ if (param_flags & DM_PARAMS_KMALLOC)
+ kfree(param);
if (param_flags & DM_PARAMS_VMALLOC)
vfree(param);
- else
- kfree(param);
}
-static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl **param, int *param_flags)
+static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
+ int ioctl_flags,
+ struct dm_ioctl **param, int *param_flags)
{
- struct dm_ioctl tmp, *dmi;
+ struct dm_ioctl *dmi;
int secure_data;
+ const size_t minimum_data_size = sizeof(*param_kernel) - sizeof(param_kernel->data);
- if (copy_from_user(&tmp, user, sizeof(tmp) - sizeof(tmp.data)))
+ if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
- if (tmp.data_size < (sizeof(tmp) - sizeof(tmp.data)))
+ if (param_kernel->data_size < minimum_data_size)
return -EINVAL;
- secure_data = tmp.flags & DM_SECURE_DATA_FLAG;
+ secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
*param_flags = secure_data ? DM_WIPE_BUFFER : 0;
+ if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
+ dmi = param_kernel;
+ dmi->data_size = minimum_data_size;
+ goto data_copied;
+ }
+
/*
* Try to avoid low memory issues when a device is suspended.
* Use kmalloc() rather than vmalloc() when we can.
*/
dmi = NULL;
- if (tmp.data_size <= KMALLOC_MAX_SIZE)
- dmi = kmalloc(tmp.data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (param_kernel->data_size <= KMALLOC_MAX_SIZE) {
+ dmi = kmalloc(param_kernel->data_size, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (dmi)
+ *param_flags |= DM_PARAMS_KMALLOC;
+ }
if (!dmi) {
- dmi = __vmalloc(tmp.data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
- *param_flags |= DM_PARAMS_VMALLOC;
+ dmi = __vmalloc(param_kernel->data_size, GFP_NOIO | __GFP_REPEAT | __GFP_HIGH, PAGE_KERNEL);
+ if (dmi)
+ *param_flags |= DM_PARAMS_VMALLOC;
}
if (!dmi) {
- if (secure_data && clear_user(user, tmp.data_size))
+ if (secure_data && clear_user(user, param_kernel->data_size))
return -EFAULT;
return -ENOMEM;
}
- if (copy_from_user(dmi, user, tmp.data_size))
+ if (copy_from_user(dmi, user, param_kernel->data_size))
goto bad;
+data_copied:
/*
* Abort if something changed the ioctl data while it was being copied.
*/
- if (dmi->data_size != tmp.data_size) {
+ if (dmi->data_size != param_kernel->data_size) {
DMERR("rejecting ioctl: data size modified while processing parameters");
goto bad;
}
/* Wipe the user buffer so we do not return it to userspace */
- if (secure_data && clear_user(user, tmp.data_size))
+ if (secure_data && clear_user(user, param_kernel->data_size))
goto bad;
*param = dmi;
return 0;
bad:
- free_params(dmi, tmp.data_size, *param_flags);
+ free_params(dmi, param_kernel->data_size, *param_flags);
return -EFAULT;
}
@@ -1621,6 +1686,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
param->flags &= ~DM_BUFFER_FULL_FLAG;
param->flags &= ~DM_UEVENT_GENERATED_FLAG;
param->flags &= ~DM_SECURE_DATA_FLAG;
+ param->flags &= ~DM_DATA_OUT_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
@@ -1648,11 +1714,13 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
{
int r = 0;
+ int ioctl_flags;
int param_flags;
unsigned int cmd;
struct dm_ioctl *uninitialized_var(param);
ioctl_fn fn = NULL;
size_t input_param_size;
+ struct dm_ioctl param_kernel;
/* only root can play with this */
if (!capable(CAP_SYS_ADMIN))
@@ -1677,7 +1745,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
if (cmd == DM_VERSION_CMD)
return 0;
- fn = lookup_ioctl(cmd);
+ fn = lookup_ioctl(cmd, &ioctl_flags);
if (!fn) {
DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
return -ENOTTY;
@@ -1686,7 +1754,7 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
/*
* Copy the parameters into kernel space.
*/
- r = copy_params(user, &param, &param_flags);
+ r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
if (r)
return r;
@@ -1699,6 +1767,10 @@ static int ctl_ioctl(uint command, struct dm_ioctl __user *user)
param->data_size = sizeof(*param);
r = fn(param, input_param_size);
+ if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
+ unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
+ DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
+
/*
* Copy the results back to userland.
*/
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 68c02673263..d581fe5d2fa 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -22,6 +22,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/delay.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
@@ -51,6 +52,8 @@ struct dm_kcopyd_client {
struct workqueue_struct *kcopyd_wq;
struct work_struct kcopyd_work;
+ struct dm_kcopyd_throttle *throttle;
+
/*
* We maintain three lists of jobs:
*
@@ -68,6 +71,117 @@ struct dm_kcopyd_client {
static struct page_list zero_page_list;
+static DEFINE_SPINLOCK(throttle_spinlock);
+
+/*
+ * IO/IDLE accounting slowly decays after (1 << ACCOUNT_INTERVAL_SHIFT) period.
+ * When total_period >= (1 << ACCOUNT_INTERVAL_SHIFT) the counters are divided
+ * by 2.
+ */
+#define ACCOUNT_INTERVAL_SHIFT SHIFT_HZ
+
+/*
+ * Sleep this number of milliseconds.
+ *
+ * The value was decided experimentally.
+ * Smaller values seem to cause an increased copy rate above the limit.
+ * The reason for this is unknown but possibly due to jiffies rounding errors
+ * or read/write cache inside the disk.
+ */
+#define SLEEP_MSEC 100
+
+/*
+ * Maximum number of sleep events. There is a theoretical livelock if
+ * multiple kcopyd clients do work simultaneously; this limit avoids it.
+ */
+#define MAX_SLEEPS 10
+
+static void io_job_start(struct dm_kcopyd_throttle *t)
+{
+ unsigned throttle, now, difference;
+ int slept = 0, skew;
+
+ if (unlikely(!t))
+ return;
+
+try_again:
+ spin_lock_irq(&throttle_spinlock);
+
+ throttle = ACCESS_ONCE(t->throttle);
+
+ if (likely(throttle >= 100))
+ goto skip_limit;
+
+ now = jiffies;
+ difference = now - t->last_jiffies;
+ t->last_jiffies = now;
+ if (t->num_io_jobs)
+ t->io_period += difference;
+ t->total_period += difference;
+
+ /*
+ * Maintain sane values if we got a temporary overflow.
+ */
+ if (unlikely(t->io_period > t->total_period))
+ t->io_period = t->total_period;
+
+ if (unlikely(t->total_period >= (1 << ACCOUNT_INTERVAL_SHIFT))) {
+ int shift = fls(t->total_period >> ACCOUNT_INTERVAL_SHIFT);
+ t->total_period >>= shift;
+ t->io_period >>= shift;
+ }
+
+ skew = t->io_period - throttle * t->total_period / 100;
+
+ if (unlikely(skew > 0) && slept < MAX_SLEEPS) {
+ slept++;
+ spin_unlock_irq(&throttle_spinlock);
+ msleep(SLEEP_MSEC);
+ goto try_again;
+ }
+
+skip_limit:
+ t->num_io_jobs++;
+
+ spin_unlock_irq(&throttle_spinlock);
+}
+
+static void io_job_finish(struct dm_kcopyd_throttle *t)
+{
+ unsigned long flags;
+
+ if (unlikely(!t))
+ return;
+
+ spin_lock_irqsave(&throttle_spinlock, flags);
+
+ t->num_io_jobs--;
+
+ if (likely(ACCESS_ONCE(t->throttle) >= 100))
+ goto skip_limit;
+
+ if (!t->num_io_jobs) {
+ unsigned now, difference;
+
+ now = jiffies;
+ difference = now - t->last_jiffies;
+ t->last_jiffies = now;
+
+ t->io_period += difference;
+ t->total_period += difference;
+
+ /*
+ * Maintain sane values if we got a temporary overflow.
+ */
+ if (unlikely(t->io_period > t->total_period))
+ t->io_period = t->total_period;
+ }
+
+skip_limit:
+ spin_unlock_irqrestore(&throttle_spinlock, flags);
+}
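A hedged worked example of the skew test above (numbers invented): with throttle = 20 (percent), total_period = 1000 jiffies and io_period = 300, skew = 300 - 20 * 1000 / 100 = 100 > 0, so io_job_start() sleeps in SLEEP_MSEC steps (bounded by MAX_SLEEPS); while it sleeps total_period keeps growing, and once io_period is no more than roughly 20% of total_period the job is allowed to start.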
+
+
static void wake(struct dm_kcopyd_client *kc)
{
queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
@@ -348,6 +462,8 @@ static void complete_io(unsigned long error, void *context)
struct kcopyd_job *job = (struct kcopyd_job *) context;
struct dm_kcopyd_client *kc = job->kc;
+ io_job_finish(kc->throttle);
+
if (error) {
if (job->rw & WRITE)
job->write_err |= error;
@@ -389,6 +505,8 @@ static int run_io_job(struct kcopyd_job *job)
.client = job->kc->io_client,
};
+ io_job_start(job->kc->throttle);
+
if (job->rw == READ)
r = dm_io(&io_req, 1, &job->source, NULL);
else
@@ -695,7 +813,7 @@ int kcopyd_cancel(struct kcopyd_job *job, int block)
/*-----------------------------------------------------------------
* Client setup
*---------------------------------------------------------------*/
-struct dm_kcopyd_client *dm_kcopyd_client_create(void)
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle)
{
int r = -ENOMEM;
struct dm_kcopyd_client *kc;
@@ -708,6 +826,7 @@ struct dm_kcopyd_client *dm_kcopyd_client_create(void)
INIT_LIST_HEAD(&kc->complete_jobs);
INIT_LIST_HEAD(&kc->io_jobs);
INIT_LIST_HEAD(&kc->pages_jobs);
+ kc->throttle = throttle;
kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
if (!kc->job_pool)
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 328cad5617a..4f99d267340 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -53,9 +53,9 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
- ti->num_write_same_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->num_write_same_bios = 1;
ti->private = lc;
return 0;
@@ -95,8 +95,8 @@ static int linear_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-static int linear_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void linear_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct linear_c *lc = (struct linear_c *) ti->private;
@@ -110,7 +110,6 @@ static int linear_status(struct dm_target *ti, status_type_t type,
(unsigned long long)lc->start);
break;
}
- return 0;
}
static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
@@ -155,7 +154,7 @@ static int linear_iterate_devices(struct dm_target *ti,
static struct target_type linear_target = {
.name = "linear",
- .version = {1, 2, 0},
+ .version = {1, 2, 1},
.module = THIS_MODULE,
.ctr = linear_ctr,
.dtr = linear_dtr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 573bd04591b..51bb81676be 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -905,8 +905,8 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
goto bad;
}
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
return 0;
@@ -1378,8 +1378,8 @@ static void multipath_resume(struct dm_target *ti)
* [priority selector-name num_ps_args [ps_args]*
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
*/
-static int multipath_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void multipath_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int sz = 0;
unsigned long flags;
@@ -1485,8 +1485,6 @@ static int multipath_status(struct dm_target *ti, status_type_t type,
}
spin_unlock_irqrestore(&m->lock, flags);
-
- return 0;
}
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
@@ -1695,7 +1693,7 @@ out:
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 9e58dbd8d8c..9a01d1e4c78 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1151,7 +1151,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
@@ -1201,8 +1201,8 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_SUBMITTED;
}
-static int raid_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void raid_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct raid_set *rs = ti->private;
unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
@@ -1344,8 +1344,6 @@ static int raid_status(struct dm_target *ti, status_type_t type,
DMEMIT(" -");
}
}
-
- return 0;
}
static int raid_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data)
@@ -1405,7 +1403,7 @@ static void raid_resume(struct dm_target *ti)
static struct target_type raid_target = {
.name = "raid",
- .version = {1, 4, 1},
+ .version = {1, 4, 2},
.module = THIS_MODULE,
.ctr = raid_ctr,
.dtr = raid_dtr,
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index fa519185ebb..d053098c6a9 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -82,6 +82,9 @@ struct mirror_set {
struct mirror mirror[0];
};
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
+ "A percentage of time allocated for raid resynchronization");
+
static void wakeup_mirrord(void *context)
{
struct mirror_set *ms = context;
@@ -1072,8 +1075,8 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
goto err_free_context;
- ti->num_flush_requests = 1;
- ti->num_discard_requests = 1;
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
ti->per_bio_data_size = sizeof(struct dm_raid1_bio_record);
ti->discard_zeroes_data_unsupported = true;
@@ -1111,7 +1114,7 @@ static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto err_destroy_wq;
}
- ms->kcopyd_client = dm_kcopyd_client_create();
+ ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(ms->kcopyd_client)) {
r = PTR_ERR(ms->kcopyd_client);
goto err_destroy_wq;
@@ -1347,8 +1350,8 @@ static char device_status_char(struct mirror *m)
}
-static int mirror_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void mirror_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned int m, sz = 0;
struct mirror_set *ms = (struct mirror_set *) ti->private;
@@ -1383,8 +1386,6 @@ static int mirror_status(struct dm_target *ti, status_type_t type,
if (ms->features & DM_RAID1_HANDLE_ERRORS)
DMEMIT(" 1 handle_errors");
}
-
- return 0;
}
static int mirror_iterate_devices(struct dm_target *ti,
@@ -1403,7 +1404,7 @@ static int mirror_iterate_devices(struct dm_target *ti,
static struct target_type mirror_target = {
.name = "mirror",
- .version = {1, 13, 1},
+ .version = {1, 13, 2},
.module = THIS_MODULE,
.ctr = mirror_ctr,
.dtr = mirror_dtr,
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 10079e07edf..c0e07026a8d 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -124,6 +124,9 @@ struct dm_snapshot {
#define RUNNING_MERGE 0
#define SHUTDOWN_MERGE 1
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ "A percentage of time allocated for copy on write");
+
struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
return s->origin;
@@ -1037,7 +1040,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
int i;
int r = -EINVAL;
char *origin_path, *cow_path;
- unsigned args_used, num_flush_requests = 1;
+ unsigned args_used, num_flush_bios = 1;
fmode_t origin_mode = FMODE_READ;
if (argc != 4) {
@@ -1047,7 +1050,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
if (dm_target_is_snapshot_merge(ti)) {
- num_flush_requests = 2;
+ num_flush_bios = 2;
origin_mode = FMODE_WRITE;
}
@@ -1108,7 +1111,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_hash_tables;
}
- s->kcopyd_client = dm_kcopyd_client_create();
+ s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(s->kcopyd_client)) {
r = PTR_ERR(s->kcopyd_client);
ti->error = "Could not create kcopyd client";
@@ -1127,7 +1130,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
spin_lock_init(&s->tracked_chunk_lock);
ti->private = s;
- ti->num_flush_requests = num_flush_requests;
+ ti->num_flush_bios = num_flush_bios;
ti->per_bio_data_size = sizeof(struct dm_snap_tracked_chunk);
/* Add snapshot to the list of snapshots for this origin */
@@ -1691,7 +1694,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio);
if (bio->bi_rw & REQ_FLUSH) {
- if (!dm_bio_get_target_request_nr(bio))
+ if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev;
else
bio->bi_bdev = s->cow->bdev;
@@ -1836,8 +1839,8 @@ static void snapshot_merge_resume(struct dm_target *ti)
start_merge(s);
}
-static int snapshot_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void snapshot_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
unsigned sz = 0;
struct dm_snapshot *snap = ti->private;
@@ -1883,8 +1886,6 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
maxlen - sz);
break;
}
-
- return 0;
}
static int snapshot_iterate_devices(struct dm_target *ti,
@@ -2104,7 +2105,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->private = dev;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
return 0;
}
@@ -2138,8 +2139,8 @@ static void origin_resume(struct dm_target *ti)
ti->max_io_len = get_origin_minimum_chunksize(dev->bdev);
}
-static int origin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void origin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_dev *dev = ti->private;
@@ -2152,8 +2153,6 @@ static int origin_status(struct dm_target *ti, status_type_t type,
snprintf(result, maxlen, "%s", dev->name);
break;
}
-
- return 0;
}
static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
@@ -2180,7 +2179,7 @@ static int origin_iterate_devices(struct dm_target *ti,
static struct target_type origin_target = {
.name = "snapshot-origin",
- .version = {1, 8, 0},
+ .version = {1, 8, 1},
.module = THIS_MODULE,
.ctr = origin_ctr,
.dtr = origin_dtr,
@@ -2193,7 +2192,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 11, 0},
+ .version = {1, 11, 1},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
@@ -2306,3 +2305,5 @@ module_exit(dm_snapshot_exit);
MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("dm-snapshot-origin");
+MODULE_ALIAS("dm-snapshot-merge");
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index c89cde86d40..d8837d313f5 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -160,9 +160,9 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
if (r)
return r;
- ti->num_flush_requests = stripes;
- ti->num_discard_requests = stripes;
- ti->num_write_same_requests = stripes;
+ ti->num_flush_bios = stripes;
+ ti->num_discard_bios = stripes;
+ ti->num_write_same_bios = stripes;
sc->chunk_size = chunk_size;
if (chunk_size & (chunk_size - 1))
@@ -276,19 +276,19 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
{
struct stripe_c *sc = ti->private;
uint32_t stripe;
- unsigned target_request_nr;
+ unsigned target_bio_nr;
if (bio->bi_rw & REQ_FLUSH) {
- target_request_nr = dm_bio_get_target_request_nr(bio);
- BUG_ON(target_request_nr >= sc->stripes);
- bio->bi_bdev = sc->stripe[target_request_nr].dev->bdev;
+ target_bio_nr = dm_bio_get_target_bio_nr(bio);
+ BUG_ON(target_bio_nr >= sc->stripes);
+ bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev;
return DM_MAPIO_REMAPPED;
}
if (unlikely(bio->bi_rw & REQ_DISCARD) ||
unlikely(bio->bi_rw & REQ_WRITE_SAME)) {
- target_request_nr = dm_bio_get_target_request_nr(bio);
- BUG_ON(target_request_nr >= sc->stripes);
- return stripe_map_range(sc, bio, target_request_nr);
+ target_bio_nr = dm_bio_get_target_bio_nr(bio);
+ BUG_ON(target_bio_nr >= sc->stripes);
+ return stripe_map_range(sc, bio, target_bio_nr);
}
stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
@@ -312,8 +312,8 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
*
*/
-static int stripe_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void stripe_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct stripe_c *sc = (struct stripe_c *) ti->private;
char buffer[sc->stripes + 1];
@@ -340,7 +340,6 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
(unsigned long long)sc->stripe[i].physical_start);
break;
}
- return 0;
}
static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
@@ -428,7 +427,7 @@ static int stripe_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
static struct target_type stripe_target = {
.name = "striped",
- .version = {1, 5, 0},
+ .version = {1, 5, 1},
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index daf25d0890b..e50dad0c65f 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -217,7 +217,6 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
if (alloc_targets(t, num_targets)) {
kfree(t);
- t = NULL;
return -ENOMEM;
}
@@ -823,8 +822,8 @@ int dm_table_add_target(struct dm_table *t, const char *type,
t->highs[t->num_targets++] = tgt->begin + tgt->len - 1;
- if (!tgt->num_discard_requests && tgt->discards_supported)
- DMWARN("%s: %s: ignoring discards_supported because num_discard_requests is zero.",
+ if (!tgt->num_discard_bios && tgt->discards_supported)
+ DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
dm_device_name(t->md), type);
return 0;
@@ -1360,7 +1359,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_flush_requests)
+ if (!ti->num_flush_bios)
continue;
if (ti->flush_supported)
@@ -1439,7 +1438,7 @@ static bool dm_table_supports_write_same(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_write_same_requests)
+ if (!ti->num_write_same_bios)
return false;
if (!ti->type->iterate_devices ||
@@ -1657,7 +1656,7 @@ bool dm_table_supports_discards(struct dm_table *t)
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
- if (!ti->num_discard_requests)
+ if (!ti->num_discard_bios)
continue;
if (ti->discards_supported)
diff --git a/drivers/md/dm-target.c b/drivers/md/dm-target.c
index 617d21a7725..37ba5db71cd 100644
--- a/drivers/md/dm-target.c
+++ b/drivers/md/dm-target.c
@@ -116,7 +116,7 @@ static int io_err_ctr(struct dm_target *tt, unsigned int argc, char **args)
/*
* Return error for discards instead of -EOPNOTSUPP
*/
- tt->num_discard_requests = 1;
+ tt->num_discard_bios = 1;
return 0;
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 4d6e85367b8..00cee02f8fc 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -280,7 +280,7 @@ static void unpack_block_time(uint64_t v, dm_block_t *b, uint32_t *t)
*t = v & ((1 << 24) - 1);
}
-static void data_block_inc(void *context, void *value_le)
+static void data_block_inc(void *context, const void *value_le)
{
struct dm_space_map *sm = context;
__le64 v_le;
@@ -292,7 +292,7 @@ static void data_block_inc(void *context, void *value_le)
dm_sm_inc_block(sm, b);
}
-static void data_block_dec(void *context, void *value_le)
+static void data_block_dec(void *context, const void *value_le)
{
struct dm_space_map *sm = context;
__le64 v_le;
@@ -304,7 +304,7 @@ static void data_block_dec(void *context, void *value_le)
dm_sm_dec_block(sm, b);
}
-static int data_block_equal(void *context, void *value1_le, void *value2_le)
+static int data_block_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
uint64_t b1, b2;
@@ -318,7 +318,7 @@ static int data_block_equal(void *context, void *value1_le, void *value2_le)
return b1 == b2;
}
-static void subtree_inc(void *context, void *value)
+static void subtree_inc(void *context, const void *value)
{
struct dm_btree_info *info = context;
__le64 root_le;
@@ -329,7 +329,7 @@ static void subtree_inc(void *context, void *value)
dm_tm_inc(info->tm, root);
}
-static void subtree_dec(void *context, void *value)
+static void subtree_dec(void *context, const void *value)
{
struct dm_btree_info *info = context;
__le64 root_le;
@@ -341,7 +341,7 @@ static void subtree_dec(void *context, void *value)
DMERR("btree delete failed\n");
}
-static int subtree_equal(void *context, void *value1_le, void *value2_le)
+static int subtree_equal(void *context, const void *value1_le, const void *value2_le)
{
__le64 v1_le, v2_le;
memcpy(&v1_le, value1_le, sizeof(v1_le));
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5409607d487..009339d6282 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -26,6 +26,9 @@
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ
+DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
+ "A percentage of time allocated for copy on write");
+
/*
* The block size of the device holding pool data must be
* between 64KB and 1GB.
@@ -227,6 +230,78 @@ struct thin_c {
/*----------------------------------------------------------------*/
/*
+ * wake_worker() is used when new work is queued and when pool_resume is
+ * ready to continue deferred IO processing.
+ */
+static void wake_worker(struct pool *pool)
+{
+ queue_work(pool->wq, &pool->worker);
+}
+
+/*----------------------------------------------------------------*/
+
+static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
+ struct dm_bio_prison_cell **cell_result)
+{
+ int r;
+ struct dm_bio_prison_cell *cell_prealloc;
+
+ /*
+ * Allocate a cell from the prison's mempool.
+ * This might block but it can't fail.
+ */
+ cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
+
+ r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
+ if (r)
+ /*
+ * We reused an old cell; we can get rid of
+ * the new one.
+ */
+ dm_bio_prison_free_cell(pool->prison, cell_prealloc);
+
+ return r;
+}
+
+static void cell_release(struct pool *pool,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
+{
+ dm_cell_release(pool->prison, cell, bios);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_release_no_holder(struct pool *pool,
+ struct dm_bio_prison_cell *cell,
+ struct bio_list *bios)
+{
+ dm_cell_release_no_holder(pool->prison, cell, bios);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+static void cell_defer_no_holder_no_free(struct thin_c *tc,
+ struct dm_bio_prison_cell *cell)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ dm_cell_release_no_holder(pool->prison, cell, &pool->deferred_bios);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ wake_worker(pool);
+}
+
+static void cell_error(struct pool *pool,
+ struct dm_bio_prison_cell *cell)
+{
+ dm_cell_error(pool->prison, cell);
+ dm_bio_prison_free_cell(pool->prison, cell);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
* A global list of pools that uses a struct mapped_device as a key.
*/
static struct dm_thin_pool_table {
@@ -330,14 +405,20 @@ static void requeue_io(struct thin_c *tc)
* target.
*/
+static bool block_size_is_power_of_two(struct pool *pool)
+{
+ return pool->sectors_per_block_shift >= 0;
+}
+
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
+ struct pool *pool = tc->pool;
sector_t block_nr = bio->bi_sector;
- if (tc->pool->sectors_per_block_shift < 0)
- (void) sector_div(block_nr, tc->pool->sectors_per_block);
+ if (block_size_is_power_of_two(pool))
+ block_nr >>= pool->sectors_per_block_shift;
else
- block_nr >>= tc->pool->sectors_per_block_shift;
+ (void) sector_div(block_nr, pool->sectors_per_block);
return block_nr;
}
@@ -348,12 +429,12 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
sector_t bi_sector = bio->bi_sector;
bio->bi_bdev = tc->pool_dev->bdev;
- if (tc->pool->sectors_per_block_shift < 0)
- bio->bi_sector = (block * pool->sectors_per_block) +
- sector_div(bi_sector, pool->sectors_per_block);
- else
+ if (block_size_is_power_of_two(pool))
bio->bi_sector = (block << pool->sectors_per_block_shift) |
(bi_sector & (pool->sectors_per_block - 1));
+ else
+ bio->bi_sector = (block * pool->sectors_per_block) +
+ sector_div(bi_sector, pool->sectors_per_block);
}
static void remap_to_origin(struct thin_c *tc, struct bio *bio)
@@ -420,15 +501,6 @@ static void remap_and_issue(struct thin_c *tc, struct bio *bio,
issue(tc, bio);
}
-/*
- * wake_worker() is used when new work is queued and when pool_resume is
- * ready to continue deferred IO processing.
- */
-static void wake_worker(struct pool *pool)
-{
- queue_work(pool->wq, &pool->worker);
-}
-
/*----------------------------------------------------------------*/
/*
@@ -515,14 +587,14 @@ static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release(cell, &pool->deferred_bios);
+ cell_release(pool, cell, &pool->deferred_bios);
spin_unlock_irqrestore(&tc->pool->lock, flags);
wake_worker(pool);
}
/*
- * Same as cell_defer except it omits the original holder of the cell.
+ * Same as cell_defer above, except it omits the original holder of the cell.
*/
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
@@ -530,7 +602,7 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
- dm_cell_release_no_holder(cell, &pool->deferred_bios);
+ cell_release_no_holder(pool, cell, &pool->deferred_bios);
spin_unlock_irqrestore(&pool->lock, flags);
wake_worker(pool);
@@ -540,13 +612,15 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
if (m->bio)
m->bio->bi_end_io = m->saved_bi_end_io;
- dm_cell_error(m->cell);
+ cell_error(m->tc->pool, m->cell);
list_del(&m->list);
mempool_free(m, m->tc->pool->mapping_pool);
}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
+ struct pool *pool = tc->pool;
struct bio *bio;
int r;
@@ -555,7 +629,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
bio->bi_end_io = m->saved_bi_end_io;
if (m->err) {
- dm_cell_error(m->cell);
+ cell_error(pool, m->cell);
goto out;
}
@@ -567,7 +641,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
DMERR_LIMIT("dm_thin_insert_block() failed");
- dm_cell_error(m->cell);
+ cell_error(pool, m->cell);
goto out;
}
@@ -585,7 +659,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
out:
list_del(&m->list);
- mempool_free(m, tc->pool->mapping_pool);
+ mempool_free(m, pool->mapping_pool);
}
static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
@@ -736,7 +810,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
if (r < 0) {
mempool_free(m, pool->mapping_pool);
DMERR_LIMIT("dm_kcopyd_copy() failed");
- dm_cell_error(cell);
+ cell_error(pool, cell);
}
}
}
@@ -802,7 +876,7 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
if (r < 0) {
mempool_free(m, pool->mapping_pool);
DMERR_LIMIT("dm_kcopyd_zero() failed");
- dm_cell_error(cell);
+ cell_error(pool, cell);
}
}
}
@@ -908,13 +982,13 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void no_space(struct dm_bio_prison_cell *cell)
+static void no_space(struct pool *pool, struct dm_bio_prison_cell *cell)
{
struct bio *bio;
struct bio_list bios;
bio_list_init(&bios);
- dm_cell_release(cell, &bios);
+ cell_release(pool, cell, &bios);
while ((bio = bio_list_pop(&bios)))
retry_on_resume(bio);
@@ -932,7 +1006,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
struct dm_thin_new_mapping *m;
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+ if (bio_detain(tc->pool, &key, bio, &cell))
return;
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -944,7 +1018,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
* on this block.
*/
build_data_key(tc->td, lookup_result.block, &key2);
- if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
+ if (bio_detain(tc->pool, &key2, bio, &cell2)) {
cell_defer_no_holder(tc, cell);
break;
}
@@ -1020,13 +1094,13 @@ static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
break;
case -ENOSPC:
- no_space(cell);
+ no_space(tc->pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- dm_cell_error(cell);
+ cell_error(tc->pool, cell);
break;
}
}
@@ -1044,7 +1118,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
* of being broken so we have nothing further to do here.
*/
build_data_key(tc->td, lookup_result->block, &key);
- if (dm_bio_detain(pool->prison, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell))
return;
if (bio_data_dir(bio) == WRITE && bio->bi_size)
@@ -1065,12 +1139,13 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
{
int r;
dm_block_t data_block;
+ struct pool *pool = tc->pool;
/*
* Remap empty bios (flushes) immediately, without provisioning.
*/
if (!bio->bi_size) {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_and_issue(tc, bio, 0);
@@ -1097,14 +1172,14 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
break;
case -ENOSPC:
- no_space(cell);
+ no_space(pool, cell);
break;
default:
DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
__func__, r);
- set_pool_mode(tc->pool, PM_READ_ONLY);
- dm_cell_error(cell);
+ set_pool_mode(pool, PM_READ_ONLY);
+ cell_error(pool, cell);
break;
}
}
@@ -1112,6 +1187,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
static void process_bio(struct thin_c *tc, struct bio *bio)
{
int r;
+ struct pool *pool = tc->pool;
dm_block_t block = get_bio_block(tc, bio);
struct dm_bio_prison_cell *cell;
struct dm_cell_key key;
@@ -1122,7 +1198,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
* being provisioned so we have nothing further to do here.
*/
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
+ if (bio_detain(pool, &key, bio, &cell))
return;
r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
@@ -1130,9 +1206,9 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
case 0:
if (lookup_result.shared) {
process_shared_bio(tc, bio, block, &lookup_result);
- cell_defer_no_holder(tc, cell);
+ cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
} else {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_and_issue(tc, bio, lookup_result.block);
@@ -1141,7 +1217,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
case -ENODATA:
if (bio_data_dir(bio) == READ && tc->origin_dev) {
- inc_all_io_entry(tc->pool, bio);
+ inc_all_io_entry(pool, bio);
cell_defer_no_holder(tc, cell);
remap_to_origin_and_issue(tc, bio);
@@ -1378,7 +1454,8 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
dm_block_t block = get_bio_block(tc, bio);
struct dm_thin_device *td = tc->td;
struct dm_thin_lookup_result result;
- struct dm_bio_prison_cell *cell1, *cell2;
+ struct dm_bio_prison_cell cell1, cell2;
+ struct dm_bio_prison_cell *cell_result;
struct dm_cell_key key;
thin_hook_bio(tc, bio);
@@ -1420,18 +1497,18 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
}
build_virtual_key(tc->td, block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
return DM_MAPIO_SUBMITTED;
build_data_key(tc->td, result.block, &key);
- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
- cell_defer_no_holder(tc, cell1);
+ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
+ cell_defer_no_holder_no_free(tc, &cell1);
return DM_MAPIO_SUBMITTED;
}
inc_all_io_entry(tc->pool, bio);
- cell_defer_no_holder(tc, cell2);
- cell_defer_no_holder(tc, cell1);
+ cell_defer_no_holder_no_free(tc, &cell2);
+ cell_defer_no_holder_no_free(tc, &cell1);
remap(tc, bio, result.block);
return DM_MAPIO_REMAPPED;
@@ -1636,7 +1713,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
goto bad_prison;
}
- pool->copier = dm_kcopyd_client_create();
+ pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
if (IS_ERR(pool->copier)) {
r = PTR_ERR(pool->copier);
*error = "Error creating pool's kcopyd client";
@@ -1938,7 +2015,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->data_dev = data_dev;
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
/*
* Only need to enable discards if the pool should pass
@@ -1946,7 +2023,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
* processing will cause mappings to be removed from the btree.
*/
if (pf.discard_enabled && pf.discard_passdown) {
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
/*
* Setting 'discards_supported' circumvents the normal
@@ -2299,8 +2376,8 @@ static void emit_flags(struct pool_features *pf, char *result,
* <transaction id> <used metadata sectors>/<total metadata sectors>
* <used data sectors>/<total data sectors> <held metadata root>
*/
-static int pool_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void pool_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
unsigned sz = 0;
@@ -2326,32 +2403,41 @@ static int pool_status(struct dm_target *ti, status_type_t type,
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
(void) commit_or_fallback(pool);
- r = dm_pool_get_metadata_transaction_id(pool->pmd,
- &transaction_id);
- if (r)
- return r;
+ r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
+ if (r) {
+ DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
+ goto err;
+ }
- r = dm_pool_get_free_metadata_block_count(pool->pmd,
- &nr_free_blocks_metadata);
- if (r)
- return r;
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
+ if (r) {
+ DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
+ goto err;
+ }
r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_metadata_dev_size returned %d", r);
+ goto err;
+ }
- r = dm_pool_get_free_block_count(pool->pmd,
- &nr_free_blocks_data);
- if (r)
- return r;
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
+ if (r) {
+ DMERR("dm_pool_get_free_block_count returned %d", r);
+ goto err;
+ }
r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_data_dev_size returned %d", r);
+ goto err;
+ }
r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_pool_get_metadata_snap returned %d", r);
+ goto err;
+ }
DMEMIT("%llu %llu/%llu %llu/%llu ",
(unsigned long long)transaction_id,
@@ -2388,8 +2474,10 @@ static int pool_status(struct dm_target *ti, status_type_t type,
emit_flags(&pt->requested_pf, result, sz, maxlen);
break;
}
+ return;
- return 0;
+err:
+ DMEMIT("Error");
}
static int pool_iterate_devices(struct dm_target *ti,
@@ -2414,11 +2502,6 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}
-static bool block_size_is_power_of_two(struct pool *pool)
-{
- return pool->sectors_per_block_shift >= 0;
-}
-
static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
{
struct pool *pool = pt->pool;
@@ -2432,15 +2515,8 @@ static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
if (pt->adjusted_pf.discard_passdown) {
data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
limits->discard_granularity = data_limits->discard_granularity;
- } else if (block_size_is_power_of_two(pool))
+ } else
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
- else
- /*
- * Use largest power of 2 that is a factor of sectors_per_block
- * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
- */
- limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
- DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
}
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2468,7 +2544,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 6, 0},
+ .version = {1, 6, 1},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2588,17 +2664,17 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (r)
goto bad_thin_open;
- ti->num_flush_requests = 1;
+ ti->num_flush_bios = 1;
ti->flush_supported = true;
ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
/* In case the pool supports discards, pass them on. */
if (tc->pool->pf.discard_enabled) {
ti->discards_supported = true;
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
ti->discard_zeroes_data_unsupported = true;
- /* Discard requests must be split on a block boundary */
- ti->split_discard_requests = true;
+ /* Discard bios must be split on a block boundary */
+ ti->split_discard_bios = true;
}
dm_put(pool_md);
@@ -2676,8 +2752,8 @@ static void thin_postsuspend(struct dm_target *ti)
/*
* <nr mapped sectors> <highest mapped sector>
*/
-static int thin_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void thin_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
int r;
ssize_t sz = 0;
@@ -2687,7 +2763,7 @@ static int thin_status(struct dm_target *ti, status_type_t type,
if (get_pool_mode(tc->pool) == PM_FAIL) {
DMEMIT("Fail");
- return 0;
+ return;
}
if (!tc->td)
@@ -2696,12 +2772,16 @@ static int thin_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
r = dm_thin_get_mapped_count(tc->td, &mapped);
- if (r)
- return r;
+ if (r) {
+ DMERR("dm_thin_get_mapped_count returned %d", r);
+ goto err;
+ }
r = dm_thin_get_highest_mapped_block(tc->td, &highest);
- if (r < 0)
- return r;
+ if (r < 0) {
+ DMERR("dm_thin_get_highest_mapped_block returned %d", r);
+ goto err;
+ }
DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
if (r)
@@ -2721,7 +2801,10 @@ static int thin_status(struct dm_target *ti, status_type_t type,
}
}
- return 0;
+ return;
+
+err:
+ DMEMIT("Error");
}
static int thin_iterate_devices(struct dm_target *ti,
@@ -2748,7 +2831,7 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 7, 0},
+ .version = {1, 7, 1},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 52cde982164..6ad538375c3 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -508,8 +508,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
/*
* Status: V (valid) or C (corruption found)
*/
-static int verity_status(struct dm_target *ti, status_type_t type,
- unsigned status_flags, char *result, unsigned maxlen)
+static void verity_status(struct dm_target *ti, status_type_t type,
+ unsigned status_flags, char *result, unsigned maxlen)
{
struct dm_verity *v = ti->private;
unsigned sz = 0;
@@ -540,8 +540,6 @@ static int verity_status(struct dm_target *ti, status_type_t type,
DMEMIT("%02x", v->salt[x]);
break;
}
-
- return 0;
}
static int verity_ioctl(struct dm_target *ti, unsigned cmd,
@@ -860,7 +858,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 1, 0},
+ .version = {1, 1, 1},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index 69a5c3b3b34..c99003e0d47 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -25,7 +25,7 @@ static int zero_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/*
* Silently drop discards, avoiding -EOPNOTSUPP.
*/
- ti->num_discard_requests = 1;
+ ti->num_discard_bios = 1;
return 0;
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index bb2cd3ce9b0..7e469260fe5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -163,7 +163,6 @@ struct mapped_device {
* io objects are allocated from here.
*/
mempool_t *io_pool;
- mempool_t *tio_pool;
struct bio_set *bs;
@@ -197,7 +196,6 @@ struct mapped_device {
*/
struct dm_md_mempools {
mempool_t *io_pool;
- mempool_t *tio_pool;
struct bio_set *bs;
};
@@ -205,12 +203,6 @@ struct dm_md_mempools {
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
-/*
- * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
- * still used for _io_cache, I'm leaving this for a later cleanup
- */
-static struct kmem_cache *_rq_bio_info_cache;
-
static int __init local_init(void)
{
int r = -ENOMEM;
@@ -224,13 +216,9 @@ static int __init local_init(void)
if (!_rq_tio_cache)
goto out_free_io_cache;
- _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
- if (!_rq_bio_info_cache)
- goto out_free_rq_tio_cache;
-
r = dm_uevent_init();
if (r)
- goto out_free_rq_bio_info_cache;
+ goto out_free_rq_tio_cache;
_major = major;
r = register_blkdev(_major, _name);
@@ -244,8 +232,6 @@ static int __init local_init(void)
out_uevent_exit:
dm_uevent_exit();
-out_free_rq_bio_info_cache:
- kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
@@ -256,7 +242,6 @@ out_free_io_cache:
static void local_exit(void)
{
- kmem_cache_destroy(_rq_bio_info_cache);
kmem_cache_destroy(_rq_tio_cache);
kmem_cache_destroy(_io_cache);
unregister_blkdev(_major, _name);
@@ -448,12 +433,12 @@ static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
gfp_t gfp_mask)
{
- return mempool_alloc(md->tio_pool, gfp_mask);
+ return mempool_alloc(md->io_pool, gfp_mask);
}
static void free_rq_tio(struct dm_rq_target_io *tio)
{
- mempool_free(tio, tio->md->tio_pool);
+ mempool_free(tio, tio->md->io_pool);
}
static int md_in_flight(struct mapped_device *md)
@@ -985,12 +970,13 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
-static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct mapped_device *md;
struct bio *clone = &tio->clone;
+ struct dm_target *ti = tio->ti;
clone->bi_end_io = clone_endio;
clone->bi_private = tio;
@@ -1031,32 +1017,54 @@ struct clone_info {
unsigned short idx;
};
+static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
+{
+ bio->bi_sector = sector;
+ bio->bi_size = to_bytes(len);
+}
+
+static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
+{
+ bio->bi_idx = idx;
+ bio->bi_vcnt = idx + bv_count;
+ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+}
+
+static void clone_bio_integrity(struct bio *bio, struct bio *clone,
+ unsigned short idx, unsigned len, unsigned offset,
+ unsigned trim)
+{
+ if (!bio_integrity(bio))
+ return;
+
+ bio_integrity_clone(clone, bio, GFP_NOIO);
+
+ if (trim)
+ bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
+}
+
/*
* Creates a little bio that just does part of a bvec.
*/
-static void split_bvec(struct dm_target_io *tio, struct bio *bio,
- sector_t sector, unsigned short idx, unsigned int offset,
- unsigned int len, struct bio_set *bs)
+static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
+ sector_t sector, unsigned short idx,
+ unsigned offset, unsigned len)
{
struct bio *clone = &tio->clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
*clone->bi_io_vec = *bv;
- clone->bi_sector = sector;
+ bio_setup_sector(clone, sector, len);
+
clone->bi_bdev = bio->bi_bdev;
clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = 1;
- clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
clone->bi_io_vec->bv_len = clone->bi_size;
clone->bi_flags |= 1 << BIO_CLONED;
- if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
- bio_integrity_trim(clone,
- bio_sector_offset(bio, idx, offset), len);
- }
+ clone_bio_integrity(bio, clone, idx, len, offset, 1);
}
/*
@@ -1064,29 +1072,23 @@ static void split_bvec(struct dm_target_io *tio, struct bio *bio,
*/
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
sector_t sector, unsigned short idx,
- unsigned short bv_count, unsigned int len,
- struct bio_set *bs)
+ unsigned short bv_count, unsigned len)
{
struct bio *clone = &tio->clone;
+ unsigned trim = 0;
__bio_clone(clone, bio);
- clone->bi_sector = sector;
- clone->bi_idx = idx;
- clone->bi_vcnt = idx + bv_count;
- clone->bi_size = to_bytes(len);
- clone->bi_flags &= ~(1 << BIO_SEG_VALID);
-
- if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO);
-
- if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
- bio_integrity_trim(clone,
- bio_sector_offset(bio, idx, 0), len);
- }
+ bio_setup_sector(clone, sector, len);
+ bio_setup_bv(clone, idx, bv_count);
+
+ if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
+ trim = 1;
+ clone_bio_integrity(bio, clone, idx, len, 0, trim);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
- struct dm_target *ti, int nr_iovecs)
+ struct dm_target *ti, int nr_iovecs,
+ unsigned target_bio_nr)
{
struct dm_target_io *tio;
struct bio *clone;
@@ -1097,96 +1099,104 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
tio->io = ci->io;
tio->ti = ti;
memset(&tio->info, 0, sizeof(tio->info));
- tio->target_request_nr = 0;
+ tio->target_bio_nr = target_bio_nr;
return tio;
}
-static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
- unsigned request_nr, sector_t len)
+static void __clone_and_map_simple_bio(struct clone_info *ci,
+ struct dm_target *ti,
+ unsigned target_bio_nr, sector_t len)
{
- struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
+ struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
struct bio *clone = &tio->clone;
- tio->target_request_nr = request_nr;
-
/*
* Discard requests require the bio's inline iovecs be initialized.
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
-
__bio_clone(clone, ci->bio);
- if (len) {
- clone->bi_sector = ci->sector;
- clone->bi_size = to_bytes(len);
- }
+ if (len)
+ bio_setup_sector(clone, ci->sector, len);
- __map_bio(ti, tio);
+ __map_bio(tio);
}
-static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
- unsigned num_requests, sector_t len)
+static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
+ unsigned num_bios, sector_t len)
{
- unsigned request_nr;
+ unsigned target_bio_nr;
- for (request_nr = 0; request_nr < num_requests; request_nr++)
- __issue_target_request(ci, ti, request_nr, len);
+ for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
+ __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}
-static int __clone_and_map_empty_flush(struct clone_info *ci)
+static int __send_empty_flush(struct clone_info *ci)
{
unsigned target_nr = 0;
struct dm_target *ti;
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
- __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
+ __send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);
return 0;
}
-/*
- * Perform all io with a single clone.
- */
-static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
+ sector_t sector, int nr_iovecs,
+ unsigned short idx, unsigned short bv_count,
+ unsigned offset, unsigned len,
+ unsigned split_bvec)
{
struct bio *bio = ci->bio;
struct dm_target_io *tio;
+ unsigned target_bio_nr;
+ unsigned num_target_bios = 1;
+
+ /*
+ * Does the target want to receive duplicate copies of the bio?
+ */
+ if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
+ num_target_bios = ti->num_write_bios(ti, bio);
- tio = alloc_tio(ci, ti, bio->bi_max_vecs);
- clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
- ci->sector_count, ci->md->bs);
- __map_bio(ti, tio);
- ci->sector_count = 0;
+ for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
+ tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
+ if (split_bvec)
+ clone_split_bio(tio, bio, sector, idx, offset, len);
+ else
+ clone_bio(tio, bio, sector, idx, bv_count, len);
+ __map_bio(tio);
+ }
}
-typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);
+typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
-static unsigned get_num_discard_requests(struct dm_target *ti)
+static unsigned get_num_discard_bios(struct dm_target *ti)
{
- return ti->num_discard_requests;
+ return ti->num_discard_bios;
}
-static unsigned get_num_write_same_requests(struct dm_target *ti)
+static unsigned get_num_write_same_bios(struct dm_target *ti)
{
- return ti->num_write_same_requests;
+ return ti->num_write_same_bios;
}
typedef bool (*is_split_required_fn)(struct dm_target *ti);
static bool is_split_required_for_discard(struct dm_target *ti)
{
- return ti->split_discard_requests;
+ return ti->split_discard_bios;
}
-static int __clone_and_map_changing_extent_only(struct clone_info *ci,
- get_num_requests_fn get_num_requests,
- is_split_required_fn is_split_required)
+static int __send_changing_extent_only(struct clone_info *ci,
+ get_num_bios_fn get_num_bios,
+ is_split_required_fn is_split_required)
{
struct dm_target *ti;
sector_t len;
- unsigned num_requests;
+ unsigned num_bios;
do {
ti = dm_table_find_target(ci->map, ci->sector);
@@ -1199,8 +1209,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
* reconfiguration might also have changed that since the
* check was performed.
*/
- num_requests = get_num_requests ? get_num_requests(ti) : 0;
- if (!num_requests)
+ num_bios = get_num_bios ? get_num_bios(ti) : 0;
+ if (!num_bios)
return -EOPNOTSUPP;
if (is_split_required && !is_split_required(ti))
@@ -1208,7 +1218,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
else
len = min(ci->sector_count, max_io_len(ci->sector, ti));
- __issue_target_requests(ci, ti, num_requests, len);
+ __send_duplicate_bios(ci, ti, num_bios, len);
ci->sector += len;
} while (ci->sector_count -= len);
@@ -1216,108 +1226,129 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
return 0;
}
-static int __clone_and_map_discard(struct clone_info *ci)
+static int __send_discard(struct clone_info *ci)
{
- return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
- is_split_required_for_discard);
+ return __send_changing_extent_only(ci, get_num_discard_bios,
+ is_split_required_for_discard);
}
-static int __clone_and_map_write_same(struct clone_info *ci)
+static int __send_write_same(struct clone_info *ci)
{
- return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
+ return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}
-static int __clone_and_map(struct clone_info *ci)
+/*
+ * Find maximum number of sectors / bvecs we can process with a single bio.
+ */
+static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
{
struct bio *bio = ci->bio;
- struct dm_target *ti;
- sector_t len = 0, max;
- struct dm_target_io *tio;
-
- if (unlikely(bio->bi_rw & REQ_DISCARD))
- return __clone_and_map_discard(ci);
- else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
- return __clone_and_map_write_same(ci);
+ sector_t bv_len, total_len = 0;
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
+ for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
+ bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
- max = max_io_len(ci->sector, ti);
+ if (bv_len > max)
+ break;
- if (ci->sector_count <= max) {
- /*
- * Optimise for the simple case where we can do all of
- * the remaining io with a single clone.
- */
- __clone_and_map_simple(ci, ti);
+ max -= bv_len;
+ total_len += bv_len;
+ }
- } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
- /*
- * There are some bvecs that don't span targets.
- * Do as many of these as possible.
- */
- int i;
- sector_t remaining = max;
- sector_t bv_len;
+ return total_len;
+}
- for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
- bv_len = to_sector(bio->bi_io_vec[i].bv_len);
+static int __split_bvec_across_targets(struct clone_info *ci,
+ struct dm_target *ti, sector_t max)
+{
+ struct bio *bio = ci->bio;
+ struct bio_vec *bv = bio->bi_io_vec + ci->idx;
+ sector_t remaining = to_sector(bv->bv_len);
+ unsigned offset = 0;
+ sector_t len;
- if (bv_len > remaining)
- break;
+ do {
+ if (offset) {
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
- remaining -= bv_len;
- len += bv_len;
+ max = max_io_len(ci->sector, ti);
}
- tio = alloc_tio(ci, ti, bio->bi_max_vecs);
- clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
- ci->md->bs);
- __map_bio(ti, tio);
+ len = min(remaining, max);
+
+ __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
+ bv->bv_offset + offset, len, 1);
ci->sector += len;
ci->sector_count -= len;
- ci->idx = i;
+ offset += to_bytes(len);
+ } while (remaining -= len);
- } else {
- /*
- * Handle a bvec that must be split between two or more targets.
- */
- struct bio_vec *bv = bio->bi_io_vec + ci->idx;
- sector_t remaining = to_sector(bv->bv_len);
- unsigned int offset = 0;
+ ci->idx++;
+
+ return 0;
+}
+
+/*
+ * Select the correct strategy for processing a non-flush bio.
+ */
+static int __split_and_process_non_flush(struct clone_info *ci)
+{
+ struct bio *bio = ci->bio;
+ struct dm_target *ti;
+ sector_t len, max;
+ int idx;
+
+ if (unlikely(bio->bi_rw & REQ_DISCARD))
+ return __send_discard(ci);
+ else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ return __send_write_same(ci);
- do {
- if (offset) {
- ti = dm_table_find_target(ci->map, ci->sector);
- if (!dm_target_is_valid(ti))
- return -EIO;
+ ti = dm_table_find_target(ci->map, ci->sector);
+ if (!dm_target_is_valid(ti))
+ return -EIO;
- max = max_io_len(ci->sector, ti);
- }
+ max = max_io_len(ci->sector, ti);
- len = min(remaining, max);
+ /*
+ * Optimise for the simple case where we can do all of
+ * the remaining io with a single clone.
+ */
+ if (ci->sector_count <= max) {
+ __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+ ci->idx, bio->bi_vcnt - ci->idx, 0,
+ ci->sector_count, 0);
+ ci->sector_count = 0;
+ return 0;
+ }
- tio = alloc_tio(ci, ti, 1);
- split_bvec(tio, bio, ci->sector, ci->idx,
- bv->bv_offset + offset, len, ci->md->bs);
+ /*
+ * There are some bvecs that don't span targets.
+ * Do as many of these as possible.
+ */
+ if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
+ len = __len_within_target(ci, max, &idx);
- __map_bio(ti, tio);
+ __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
+ ci->idx, idx - ci->idx, 0, len, 0);
- ci->sector += len;
- ci->sector_count -= len;
- offset += to_bytes(len);
- } while (remaining -= len);
+ ci->sector += len;
+ ci->sector_count -= len;
+ ci->idx = idx;
- ci->idx++;
+ return 0;
}
- return 0;
+ /*
+ * Handle a bvec that must be split between two or more targets.
+ */
+ return __split_bvec_across_targets(ci, ti, max);
}
/*
- * Split the bio into several clones and submit it to targets.
+ * Entry point to split a bio into clones and submit them to the targets.
*/
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
@@ -1341,16 +1372,17 @@ static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
ci.idx = bio->bi_idx;
start_io_acct(ci.io);
+
if (bio->bi_rw & REQ_FLUSH) {
ci.bio = &ci.md->flush_bio;
ci.sector_count = 0;
- error = __clone_and_map_empty_flush(&ci);
+ error = __send_empty_flush(&ci);
/* dec_pending submits any data associated with flush */
} else {
ci.bio = bio;
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error)
- error = __clone_and_map(&ci);
+ error = __split_and_process_non_flush(&ci);
}
/* drop the extra reference count */
@@ -1923,8 +1955,6 @@ static void free_dev(struct mapped_device *md)
unlock_fs(md);
bdput(md->bdev);
destroy_workqueue(md->wq);
- if (md->tio_pool)
- mempool_destroy(md->tio_pool);
if (md->io_pool)
mempool_destroy(md->io_pool);
if (md->bs)
@@ -1947,24 +1977,33 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
struct dm_md_mempools *p = dm_table_get_md_mempools(t);
- if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
- /*
- * The md already has necessary mempools. Reload just the
- * bioset because front_pad may have changed because
- * a different table was loaded.
- */
- bioset_free(md->bs);
- md->bs = p->bs;
- p->bs = NULL;
+ if (md->io_pool && md->bs) {
+ /* The md already has necessary mempools. */
+ if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
+ /*
+ * Reload bioset because front_pad may have changed
+ * because a different table was loaded.
+ */
+ bioset_free(md->bs);
+ md->bs = p->bs;
+ p->bs = NULL;
+ } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
+ /*
+ * There's no need to reload with request-based dm
+ * because the size of front_pad doesn't change.
+ * Note for future: if you are to reload the bioset,
+ * prep-ed requests in the queue may refer
+ * to bios from the old bioset, so you must walk
+ * through the queue to unprep.
+ */
+ }
goto out;
}
- BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
+ BUG_ON(!p || md->io_pool || md->bs);
md->io_pool = p->io_pool;
p->io_pool = NULL;
- md->tio_pool = p->tio_pool;
- p->tio_pool = NULL;
md->bs = p->bs;
p->bs = NULL;
@@ -2395,7 +2434,7 @@ static void dm_queue_flush(struct mapped_device *md)
*/
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
- struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
+ struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
int r;
@@ -2418,10 +2457,12 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
dm_table_put(live_map);
}
- r = dm_calculate_queue_limits(table, &limits);
- if (r) {
- map = ERR_PTR(r);
- goto out;
+ if (!live_map) {
+ r = dm_calculate_queue_limits(table, &limits);
+ if (r) {
+ map = ERR_PTR(r);
+ goto out;
+ }
}
map = __bind(md, table, &limits);
@@ -2719,52 +2760,42 @@ EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
- struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
- unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;
+ struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
+ struct kmem_cache *cachep;
+ unsigned int pool_size;
+ unsigned int front_pad;
if (!pools)
return NULL;
- per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));
+ if (type == DM_TYPE_BIO_BASED) {
+ cachep = _io_cache;
+ pool_size = 16;
+ front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
+ } else if (type == DM_TYPE_REQUEST_BASED) {
+ cachep = _rq_tio_cache;
+ pool_size = MIN_IOS;
+ front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
+ /* per_bio_data_size is not used. See __bind_mempools(). */
+ WARN_ON(per_bio_data_size != 0);
+ } else
+ goto out;
- pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
- mempool_create_slab_pool(MIN_IOS, _io_cache) :
- mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
+ pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
if (!pools->io_pool)
- goto free_pools_and_out;
-
- pools->tio_pool = NULL;
- if (type == DM_TYPE_REQUEST_BASED) {
- pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
- if (!pools->tio_pool)
- goto free_io_pool_and_out;
- }
+ goto out;
- pools->bs = (type == DM_TYPE_BIO_BASED) ?
- bioset_create(pool_size,
- per_bio_data_size + offsetof(struct dm_target_io, clone)) :
- bioset_create(pool_size,
- offsetof(struct dm_rq_clone_bio_info, clone));
+ pools->bs = bioset_create(pool_size, front_pad);
if (!pools->bs)
- goto free_tio_pool_and_out;
+ goto out;
if (integrity && bioset_integrity_create(pools->bs, pool_size))
- goto free_bioset_and_out;
+ goto out;
return pools;
-free_bioset_and_out:
- bioset_free(pools->bs);
-
-free_tio_pool_and_out:
- if (pools->tio_pool)
- mempool_destroy(pools->tio_pool);
-
-free_io_pool_and_out:
- mempool_destroy(pools->io_pool);
-
-free_pools_and_out:
- kfree(pools);
+out:
+ dm_free_md_mempools(pools);
return NULL;
}
@@ -2777,9 +2808,6 @@ void dm_free_md_mempools(struct dm_md_mempools *pools)
if (pools->io_pool)
mempool_destroy(pools->io_pool);
- if (pools->tio_pool)
- mempool_destroy(pools->tio_pool);
-
if (pools->bs)
bioset_free(pools->bs);
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index ceb359050a5..19b26879541 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -1,6 +1,6 @@
config DM_PERSISTENT_DATA
tristate
- depends on BLK_DEV_DM && EXPERIMENTAL
+ depends on BLK_DEV_DM
select LIBCRC32C
select DM_BUFIO
---help---
diff --git a/drivers/md/persistent-data/Makefile b/drivers/md/persistent-data/Makefile
index d8e7cb767c1..ff528792c35 100644
--- a/drivers/md/persistent-data/Makefile
+++ b/drivers/md/persistent-data/Makefile
@@ -1,5 +1,7 @@
obj-$(CONFIG_DM_PERSISTENT_DATA) += dm-persistent-data.o
dm-persistent-data-objs := \
+ dm-array.o \
+ dm-bitset.o \
dm-block-manager.o \
dm-space-map-common.o \
dm-space-map-disk.o \
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
new file mode 100644
index 00000000000..172147eb1d4
--- /dev/null
+++ b/drivers/md/persistent-data/dm-array.c
@@ -0,0 +1,808 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-array.h"
+#include "dm-space-map.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "array"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The array is implemented as a fully populated btree, which points to
+ * blocks that contain the packed values. This is more space efficient
+ * than just using a btree since we don't store 1 key per value.
+ */
+struct array_block {
+ __le32 csum;
+ __le32 max_entries;
+ __le32 nr_entries;
+ __le32 value_size;
+ __le64 blocknr; /* Block this node is supposed to live in. */
+} __packed;
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Validator methods. As usual we calculate a checksum, and also write the
+ * block location into the header (paranoia about ssds remapping areas by
+ * mistake).
+ */
+#define CSUM_XOR 595846735
+
+static void array_block_prepare_for_write(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t size_of_block)
+{
+ struct array_block *bh_le = dm_block_data(b);
+
+ bh_le->blocknr = cpu_to_le64(dm_block_location(b));
+ bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+ size_of_block - sizeof(__le32),
+ CSUM_XOR));
+}
+
+static int array_block_check(struct dm_block_validator *v,
+ struct dm_block *b,
+ size_t size_of_block)
+{
+ struct array_block *bh_le = dm_block_data(b);
+ __le32 csum_disk;
+
+ if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
+ DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
+ (unsigned long long) le64_to_cpu(bh_le->blocknr),
+ (unsigned long long) dm_block_location(b));
+ return -ENOTBLK;
+ }
+
+ csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
+ size_of_block - sizeof(__le32),
+ CSUM_XOR));
+ if (csum_disk != bh_le->csum) {
+ DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
+ (unsigned) le32_to_cpu(csum_disk),
+ (unsigned) le32_to_cpu(bh_le->csum));
+ return -EILSEQ;
+ }
+
+ return 0;
+}
+
+static struct dm_block_validator array_validator = {
+ .name = "array",
+ .prepare_for_write = array_block_prepare_for_write,
+ .check = array_block_check
+};
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Functions for manipulating the array blocks.
+ */
+
+/*
+ * Returns a pointer to a value within an array block.
+ *
+ * index - The index into _this_ specific block.
+ */
+static void *element_at(struct dm_array_info *info, struct array_block *ab,
+ unsigned index)
+{
+ unsigned char *entry = (unsigned char *) (ab + 1);
+
+ entry += index * info->value_type.size;
+
+ return entry;
+}
+
+/*
+ * Utility function that calls one of the value_type methods on every value
+ * in an array block.
+ */
+static void on_entries(struct dm_array_info *info, struct array_block *ab,
+ void (*fn)(void *, const void *))
+{
+ unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);
+
+ for (i = 0; i < nr_entries; i++)
+ fn(info->value_type.context, element_at(info, ab, i));
+}
+
+/*
+ * Increment every value in an array block.
+ */
+static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ if (vt->inc)
+ on_entries(info, ab, vt->inc);
+}
+
+/*
+ * Decrement every value in an array block.
+ */
+static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
+{
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ if (vt->dec)
+ on_entries(info, ab, vt->dec);
+}
+
+/*
+ * Each array block can hold this many values.
+ */
+static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
+{
+ return (size_of_block - sizeof(struct array_block)) / value_size;
+}
+
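To make the header overhead concrete: struct array_block above packs to 24 bytes (four __le32 fields plus one __le64), so, assuming a 4KiB metadata block and 8-byte __le64 values, each array block holds (4096 - 24) / 8 = 509 entries. The block and value sizes are illustrative only; a quick sketch of the same arithmetic:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Mirrors the packed on-disk header above: 4 x u32 + 1 x u64 = 24 bytes. */
#define ARRAY_BLOCK_HEADER_SIZE 24

static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
{
	return (size_of_block - ARRAY_BLOCK_HEADER_SIZE) / value_size;
}

int main(void)
{
	/* Assumed 4KiB metadata block, __le64 values. */
	printf("%u entries per array block\n",
	       (unsigned) calc_max_entries(8, 4096));   /* prints 509 */
	return 0;
}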
+/*
+ * Allocate a new array block. The caller will need to unlock the block.
+ */
+static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
+ uint32_t max_entries,
+ struct dm_block **block, struct array_block **ab)
+{
+ int r;
+
+ r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
+ if (r)
+ return r;
+
+ (*ab) = dm_block_data(*block);
+ (*ab)->max_entries = cpu_to_le32(max_entries);
+ (*ab)->nr_entries = cpu_to_le32(0);
+ (*ab)->value_size = cpu_to_le32(info->value_type.size);
+
+ return 0;
+}
+
+/*
+ * Pad an array block out with a particular value. Every instance will
+ * cause an increment of the value_type. new_nr must always be more than
+ * the current number of entries.
+ */
+static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
+ const void *value, unsigned new_nr)
+{
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+ BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = nr_entries; i < new_nr; i++) {
+ if (vt->inc)
+ vt->inc(vt->context, value);
+ memcpy(element_at(info, ab, i), value, vt->size);
+ }
+ ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Remove some entries from the back of an array block. Every value
+ * removed will be decremented. new_nr must be <= the current number of
+ * entries.
+ */
+static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
+ unsigned new_nr)
+{
+ unsigned i;
+ uint32_t nr_entries;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
+ BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));
+
+ nr_entries = le32_to_cpu(ab->nr_entries);
+ for (i = nr_entries; i > new_nr; i--)
+ if (vt->dec)
+ vt->dec(vt->context, element_at(info, ab, i - 1));
+ ab->nr_entries = cpu_to_le32(new_nr);
+}
+
+/*
+ * Read locks a block, and coerces it to an array block. The caller must
+ * unlock 'block' when finished.
+ */
+static int get_ablock(struct dm_array_info *info, dm_block_t b,
+ struct dm_block **block, struct array_block **ab)
+{
+ int r;
+
+ r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ return 0;
+}
+
+/*
+ * Unlocks an array block.
+ */
+static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
+{
+ return dm_tm_unlock(info->btree_info.tm, block);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * Btree manipulation.
+ */
+
+/*
+ * Looks up an array block in the btree, and then read locks it.
+ *
+ * index is the index of the array_block (i.e. the array index
+ * / max_entries).
+ */
+static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
+ unsigned index, struct dm_block **block,
+ struct array_block **ab)
+{
+ int r;
+ uint64_t key = index;
+ __le64 block_le;
+
+ r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
+ if (r)
+ return r;
+
+ return get_ablock(info, le64_to_cpu(block_le), block, ab);
+}
+
+/*
+ * Insert an array block into the btree. The block is _not_ unlocked.
+ */
+static int insert_ablock(struct dm_array_info *info, uint64_t index,
+ struct dm_block *block, dm_block_t *root)
+{
+ __le64 block_le = cpu_to_le64(dm_block_location(block));
+
+ __dm_bless_for_disk(block_le);
+ return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
+}
+
+/*
+ * Looks up an array block in the btree. Then shadows it, and updates the
+ * btree to point to this new shadow. 'root' is an input/output parameter
+ * for both the current root block, and the new one.
+ */
+static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
+ unsigned index, struct dm_block **block,
+ struct array_block **ab)
+{
+ int r, inc;
+ uint64_t key = index;
+ dm_block_t b;
+ __le64 block_le;
+
+ /*
+ * lookup
+ */
+ r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
+ if (r)
+ return r;
+ b = le64_to_cpu(block_le);
+
+ /*
+ * shadow
+ */
+ r = dm_tm_shadow_block(info->btree_info.tm, b,
+ &array_validator, block, &inc);
+ if (r)
+ return r;
+
+ *ab = dm_block_data(*block);
+ if (inc)
+ inc_ablock_entries(info, *ab);
+
+ /*
+ * Reinsert.
+ *
+ * The shadow op will often be a noop. Only insert if it really
+ * copied data.
+ */
+ if (dm_block_location(*block) != b)
+ r = insert_ablock(info, index, *block, root);
+
+ return r;
+}
+
+/*
+ * Allocate a new array block, and fill it with some values.
+ */
+static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
+ uint32_t max_entries,
+ unsigned block_index, uint32_t nr,
+ const void *value, dm_block_t *root)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
+ if (r)
+ return r;
+
+ fill_ablock(info, ab, value, nr);
+ r = insert_ablock(info, block_index, block, root);
+ unlock_ablock(info, block);
+
+ return r;
+}
+
+static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
+ unsigned begin_block, unsigned end_block,
+ unsigned max_entries, const void *value,
+ dm_block_t *root)
+{
+ int r = 0;
+
+ for (; !r && begin_block != end_block; begin_block++)
+ r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);
+
+ return r;
+}
+
+/*
+ * There are a bunch of functions involved with resizing an array. This
+ * structure holds information that is commonly needed by them. Purely here
+ * to reduce parameter count.
+ */
+struct resize {
+ /*
+ * Describes the array.
+ */
+ struct dm_array_info *info;
+
+ /*
+ * The current root of the array. This gets updated.
+ */
+ dm_block_t root;
+
+ /*
+ * Metadata block size. Used to calculate the nr entries in an
+ * array block.
+ */
+ size_t size_of_block;
+
+ /*
+ * Maximum nr entries in an array block.
+ */
+ unsigned max_entries;
+
+ /*
+ * nr of completely full blocks in the array.
+ *
+ * 'old' refers to before the resize, 'new' after.
+ */
+ unsigned old_nr_full_blocks, new_nr_full_blocks;
+
+ /*
+ * Number of entries in the final block. 0 iff only full blocks in
+ * the array.
+ */
+ unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;
+
+ /*
+ * The default value used when growing the array.
+ */
+ const void *value;
+};
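As an illustration of how these fields split an array size into full blocks plus a partial tail (the per-block entry count and the array sizes below are made up, reusing the 509-entry figure from the earlier sketch): growing a 1000-entry array to 1300 entries gives old = 1 full block + 491 tail entries and new = 2 full blocks + 282 tail entries, exactly what array_resize() computes further down with '/' and '%':

#include <stdio.h>

int main(void)
{
	unsigned max_entries = 509;               /* illustrative, from the sketch above */
	unsigned old_size = 1000, new_size = 1300;

	printf("old: %u full + %u tail\n", old_size / max_entries, old_size % max_entries);
	printf("new: %u full + %u tail\n", new_size / max_entries, new_size % max_entries);
	return 0;
}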
+
+/*
+ * Removes a consecutive set of array blocks from the btree. The values
+ * in the blocks are decremented as a side effect of the btree remove.
+ *
+ * begin_index - the index of the first array block to remove.
+ * end_index - the one-past-the-end value. ie. this block is not removed.
+ */
+static int drop_blocks(struct resize *resize, unsigned begin_index,
+ unsigned end_index)
+{
+ int r;
+
+ while (begin_index != end_index) {
+ uint64_t key = begin_index++;
+ r = dm_btree_remove(&resize->info->btree_info, resize->root,
+ &key, &resize->root);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+/*
+ * Calculates how many blocks are needed for the array.
+ */
+static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
+ unsigned nr_entries_in_last_block)
+{
+ return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
+}
+
+/*
+ * Shrink an array.
+ */
+static int shrink(struct resize *resize)
+{
+ int r;
+ unsigned begin, end;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ /*
+ * Lose some blocks from the back?
+ */
+ if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
+ begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
+ resize->new_nr_entries_in_last_block);
+ end = total_nr_blocks_needed(resize->old_nr_full_blocks,
+ resize->old_nr_entries_in_last_block);
+
+ r = drop_blocks(resize, begin, end);
+ if (r)
+ return r;
+ }
+
+ /*
+ * Trim the new tail block
+ */
+ if (resize->new_nr_entries_in_last_block) {
+ r = shadow_ablock(resize->info, &resize->root,
+ resize->new_nr_full_blocks, &block, &ab);
+ if (r)
+ return r;
+
+ trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
+ unlock_ablock(resize->info, block);
+ }
+
+ return 0;
+}
+
+/*
+ * Grow an array.
+ */
+static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ r = shadow_ablock(resize->info, &resize->root,
+ resize->old_nr_full_blocks, &block, &ab);
+ if (r)
+ return r;
+
+ fill_ablock(resize->info, ab, resize->value, new_nr_entries);
+ unlock_ablock(resize->info, block);
+
+ return r;
+}
+
+static int grow_add_tail_block(struct resize *resize)
+{
+ return insert_new_ablock(resize->info, resize->size_of_block,
+ resize->max_entries,
+ resize->new_nr_full_blocks,
+ resize->new_nr_entries_in_last_block,
+ resize->value, &resize->root);
+}
+
+static int grow_needs_more_blocks(struct resize *resize)
+{
+ int r;
+
+ if (resize->old_nr_entries_in_last_block > 0) {
+ r = grow_extend_tail_block(resize, resize->max_entries);
+ if (r)
+ return r;
+ }
+
+ r = insert_full_ablocks(resize->info, resize->size_of_block,
+ resize->old_nr_full_blocks,
+ resize->new_nr_full_blocks,
+ resize->max_entries, resize->value,
+ &resize->root);
+ if (r)
+ return r;
+
+ if (resize->new_nr_entries_in_last_block)
+ r = grow_add_tail_block(resize);
+
+ return r;
+}
+
+static int grow(struct resize *resize)
+{
+ if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
+ return grow_needs_more_blocks(resize);
+
+ else if (resize->old_nr_entries_in_last_block)
+ return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);
+
+ else
+ return grow_add_tail_block(resize);
+}
+
+/*----------------------------------------------------------------*/
+
+/*
+ * These are the value_type functions for the btree elements, which point
+ * to array blocks.
+ */
+static void block_inc(void *context, const void *value)
+{
+ __le64 block_le;
+ struct dm_array_info *info = context;
+
+ memcpy(&block_le, value, sizeof(block_le));
+ dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
+}
+
+static void block_dec(void *context, const void *value)
+{
+ int r;
+ uint64_t b;
+ __le64 block_le;
+ uint32_t ref_count;
+ struct dm_block *block;
+ struct array_block *ab;
+ struct dm_array_info *info = context;
+
+ memcpy(&block_le, value, sizeof(block_le));
+ b = le64_to_cpu(block_le);
+
+ r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
+ if (r) {
+ DMERR_LIMIT("couldn't get reference count for block %llu",
+ (unsigned long long) b);
+ return;
+ }
+
+ if (ref_count == 1) {
+ /*
+ * We're about to drop the last reference to this ablock.
+ * So we need to decrement the ref count of the contents.
+ */
+ r = get_ablock(info, b, &block, &ab);
+ if (r) {
+ DMERR_LIMIT("couldn't get array block %llu",
+ (unsigned long long) b);
+ return;
+ }
+
+ dec_ablock_entries(info, ab);
+ unlock_ablock(info, block);
+ }
+
+ dm_tm_dec(info->btree_info.tm, b);
+}
+
+static int block_equal(void *context, const void *value1, const void *value2)
+{
+ return !memcmp(value1, value2, sizeof(__le64));
+}
+
+/*----------------------------------------------------------------*/
+
+void dm_array_info_init(struct dm_array_info *info,
+ struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ struct dm_btree_value_type *bvt = &info->btree_info.value_type;
+
+ memcpy(&info->value_type, vt, sizeof(info->value_type));
+ info->btree_info.tm = tm;
+ info->btree_info.levels = 1;
+
+ bvt->context = info;
+ bvt->size = sizeof(__le64);
+ bvt->inc = block_inc;
+ bvt->dec = block_dec;
+ bvt->equal = block_equal;
+}
+EXPORT_SYMBOL_GPL(dm_array_info_init);
+
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
+{
+ return dm_btree_empty(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_empty);
+
+static int array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+{
+ int r;
+ struct resize resize;
+
+ if (old_size == new_size)
+ return 0;
+
+ resize.info = info;
+ resize.root = root;
+ resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ resize.max_entries = calc_max_entries(info->value_type.size,
+ resize.size_of_block);
+
+ resize.old_nr_full_blocks = old_size / resize.max_entries;
+ resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
+ resize.new_nr_full_blocks = new_size / resize.max_entries;
+ resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
+ resize.value = value;
+
+ r = ((new_size > old_size) ? grow : shrink)(&resize);
+ if (r)
+ return r;
+
+ *new_root = resize.root;
+ return 0;
+}
+
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ int r = array_resize(info, root, old_size, new_size, value, new_root);
+ __dm_unbless_for_disk(value);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_resize);
+
+int dm_array_del(struct dm_array_info *info, dm_block_t root)
+{
+ return dm_btree_del(&info->btree_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_array_del);
+
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, void *value_le)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ size_t size_of_block;
+ unsigned entry, max_entries;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+ r = lookup_ablock(info, root, index / max_entries, &block, &ab);
+ if (r)
+ return r;
+
+ entry = index % max_entries;
+ if (entry >= le32_to_cpu(ab->nr_entries))
+ r = -ENODATA;
+ else
+ memcpy(value_le, element_at(info, ab, entry),
+ info->value_type.size);
+
+ unlock_ablock(info, block);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_get_value);
+
+static int array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+{
+ int r;
+ struct dm_block *block;
+ struct array_block *ab;
+ size_t size_of_block;
+ unsigned max_entries;
+ unsigned entry;
+ void *old_value;
+ struct dm_btree_value_type *vt = &info->value_type;
+
+ size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
+ max_entries = calc_max_entries(info->value_type.size, size_of_block);
+
+ r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
+ if (r)
+ return r;
+ *new_root = root;
+
+ entry = index % max_entries;
+ if (entry >= le32_to_cpu(ab->nr_entries)) {
+ r = -ENODATA;
+ goto out;
+ }
+
+ old_value = element_at(info, ab, entry);
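+ /*
+ * Drop the reference on the value being overwritten and take one on
+ * the new value, unless the two are considered equal.
+ */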
+ if (vt->dec &&
+ (!vt->equal || !vt->equal(vt->context, old_value, value))) {
+ vt->dec(vt->context, old_value);
+ if (vt->inc)
+ vt->inc(vt->context, value);
+ }
+
+ memcpy(old_value, value, info->value_type.size);
+
+out:
+ unlock_ablock(info, block);
+ return r;
+}
+
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value)
+{
+ int r;
+
+ r = array_set_value(info, root, index, value, new_root);
+ __dm_unbless_for_disk(value);
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_array_set_value);
+
+struct walk_info {
+ struct dm_array_info *info;
+ int (*fn)(void *context, uint64_t key, void *leaf);
+ void *context;
+};
+
+static int walk_ablock(void *context, uint64_t *keys, void *leaf)
+{
+ struct walk_info *wi = context;
+
+ int r;
+ unsigned i;
+ __le64 block_le;
+ unsigned nr_entries, max_entries;
+ struct dm_block *block;
+ struct array_block *ab;
+
+ memcpy(&block_le, leaf, sizeof(block_le));
+ r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
+ if (r)
+ return r;
+
+ max_entries = le32_to_cpu(ab->max_entries);
+ nr_entries = le32_to_cpu(ab->nr_entries);
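+ /* keys[0] is the index of this array block; derive global indices from it */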
+ for (i = 0; i < nr_entries; i++) {
+ r = wi->fn(wi->context, keys[0] * max_entries + i,
+ element_at(wi->info, ab, i));
+
+ if (r)
+ break;
+ }
+
+ unlock_ablock(wi->info, block);
+ return r;
+}
+
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+ int (*fn)(void *, uint64_t key, void *leaf),
+ void *context)
+{
+ struct walk_info wi;
+
+ wi.info = info;
+ wi.fn = fn;
+ wi.context = context;
+
+ return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
+}
+EXPORT_SYMBOL_GPL(dm_array_walk);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-array.h b/drivers/md/persistent-data/dm-array.h
new file mode 100644
index 00000000000..ea177d6fa58
--- /dev/null
+++ b/drivers/md/persistent-data/dm-array.h
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_ARRAY_H
+#define _LINUX_DM_ARRAY_H
+
+#include "dm-btree.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * The dm-array is a persistent version of an array. It packs the data
+ * more efficiently than a btree which will result in less disk space use,
+ * and a performance boost. The element get and set operations are still
+ * O(ln(n)), but with a much smaller constant.
+ *
+ * The value type structure is reused from the btree type to support proper
+ * reference counting of values.
+ *
+ * The arrays implicitly know their length, and bounds are checked for
+ * lookups and updates. The size isn't stored in an accessible place,
+ * because that would waste a whole metadata block. Make sure you store the
+ * size along with the array root in your encompassing data.
+ *
+ * Array entries are indexed via an unsigned integer starting from zero.
+ * Arrays are not sparse; if you resize an array to have 'n' entries then
+ * 'n - 1' will be the last valid index.
+ *
+ * Typical use:
+ *
+ * a) initialise a dm_array_info structure. This describes the array
+ * values and ties it into a specific transaction manager. It holds no
+ * instance data; the same info can be used for many similar arrays if
+ * you wish.
+ *
+ * b) Get yourself a root. The root is the index of a block of data on the
+ * disk that holds a particular instance of an array. You may have a
+ * pre existing root in your metadata that you wish to use, or you may
+ * want to create a brand new, empty array with dm_array_empty().
+ *
+ * Like the other data structures in this library, dm_array objects are
+ * immutable between transactions. Update functions will return you the
+ * root for a _new_ array. If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * c) resize an array with dm_array_resize().
+ *
+ * d) Get a value from the array with dm_array_get_value().
+ *
+ * e) Set a value in the array with dm_array_set_value().
+ *
+ * f) Walk an array of values in index order with dm_array_walk(). More
+ * efficient than making many calls to dm_array_get_value().
+ *
+ * g) Destroy the array with dm_array_del(). This tells the transaction
+ * manager that you're no longer using this data structure so it can
+ * recycle its blocks. (dm_array_dec() would be a better name for it,
+ * but del is in keeping with dm_btree_del()).
+ */
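+
+/*
+ * A minimal sketch of the steps above (illustrative only): error handling
+ * is omitted, and 'tm' plus the __le64 value type 'vt' are assumed to be
+ * supplied by the caller's metadata code.
+ *
+ *	struct dm_array_info info;
+ *	dm_block_t root;
+ *	__le64 value = cpu_to_le64(0);
+ *	int r;
+ *
+ *	dm_array_info_init(&info, tm, &vt);
+ *	r = dm_array_empty(&info, &root);
+ *
+ *	__dm_bless_for_disk(&value);
+ *	r = dm_array_resize(&info, root, 0, 128, &value, &root);
+ *
+ *	value = cpu_to_le64(42);
+ *	__dm_bless_for_disk(&value);
+ *	r = dm_array_set_value(&info, root, 7, &value, &root);
+ *
+ *	r = dm_array_get_value(&info, root, 7, &value);
+ *
+ *	r = dm_array_del(&info, root);
+ */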
+
+/*
+ * Describes an array. Don't initialise this structure yourself, use the
+ * init function below.
+ */
+struct dm_array_info {
+ struct dm_transaction_manager *tm;
+ struct dm_btree_value_type value_type;
+ struct dm_btree_info btree_info;
+};
+
+/*
+ * Sets up a dm_array_info structure. You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * info - the structure being filled in.
+ * tm - the transaction manager that should supervise this structure.
+ * vt - describes the leaf values.
+ */
+void dm_array_info_init(struct dm_array_info *info,
+ struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
+/*
+ * Create an empty, zero length array.
+ *
+ * info - describes the array
+ * root - on success this will be filled out with the root block
+ */
+int dm_array_empty(struct dm_array_info *info, dm_block_t *root);
+
+/*
+ * Resizes the array.
+ *
+ * info - describes the array
+ * root - the root block of the array on disk
+ * old_size - the caller is responsible for remembering the size of
+ * the array
+ * new_size - can be bigger or smaller than old_size
+ * value - if we're growing the array the new entries will have this value
+ * new_root - on success, points to the new root block
+ *
+ * If growing, the inc function for 'value' will be called the appropriate
+ * number of times. So if the caller is holding a reference they may want
+ * to drop it.
+ */
+int dm_array_resize(struct dm_array_info *info, dm_block_t root,
+ uint32_t old_size, uint32_t new_size,
+ const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
+
+/*
+ * Frees a whole array. The value_type's decrement operation will be called
+ * for all values in the array
+ */
+int dm_array_del(struct dm_array_info *info, dm_block_t root);
+
+/*
+ * Lookup a value in the array
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - the value to be read. Will be in on-disk format of course.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, void *value);
+
+/*
+ * Set an entry in the array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * index - array index
+ * value - value to be written to disk. Make sure you confirm the value is
+ * in on-disk format with __dm_bless_for_disk() before calling.
+ * new_root - the new root block
+ *
+ * The old value being overwritten will be decremented, the new value
+ * incremented.
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
+ uint32_t index, const void *value, dm_block_t *new_root)
+ __dm_written_to_disk(value);
+
+/*
+ * Walk through all the entries in an array.
+ *
+ * info - describes the array
+ * root - root block of the array
+ * fn - called back for every element
+ * context - passed to the callback
+ */
+int dm_array_walk(struct dm_array_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t key, void *leaf),
+ void *context);
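+
+/*
+ * A hypothetical callback, shown only to illustrate the expected signature;
+ * it assumes a __le64 value type, and 'leaf' points at the value in its
+ * on-disk (little-endian) format:
+ *
+ *	static int dump_entry(void *context, uint64_t key, void *leaf)
+ *	{
+ *		__le64 value_le;
+ *
+ *		memcpy(&value_le, leaf, sizeof(value_le));
+ *		pr_info("index %llu = %llu\n",
+ *			(unsigned long long) key,
+ *			(unsigned long long) le64_to_cpu(value_le));
+ *		return 0;
+ *	}
+ *
+ * A non-zero return value stops the walk and is passed back to the
+ * dm_array_walk() caller.
+ */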
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_ARRAY_H */
diff --git a/drivers/md/persistent-data/dm-bitset.c b/drivers/md/persistent-data/dm-bitset.c
new file mode 100644
index 00000000000..cd9a86d4cdf
--- /dev/null
+++ b/drivers/md/persistent-data/dm-bitset.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-bitset.h"
+#include "dm-transaction-manager.h"
+
+#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "bitset"
+#define BITS_PER_ARRAY_ENTRY 64
+
+/*----------------------------------------------------------------*/
+
+static struct dm_btree_value_type bitset_bvt = {
+ .context = NULL,
+ .size = sizeof(__le64),
+ .inc = NULL,
+ .dec = NULL,
+ .equal = NULL,
+};
+
+/*----------------------------------------------------------------*/
+
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+ struct dm_disk_bitset *info)
+{
+ dm_array_info_init(&info->array_info, tm, &bitset_bvt);
+ info->current_index_set = false;
+}
+EXPORT_SYMBOL_GPL(dm_disk_bitset_init);
+
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root)
+{
+ return dm_array_empty(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_empty);
+
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t old_nr_entries, uint32_t new_nr_entries,
+ bool default_value, dm_block_t *new_root)
+{
+ uint32_t old_blocks = dm_div_up(old_nr_entries, BITS_PER_ARRAY_ENTRY);
+ uint32_t new_blocks = dm_div_up(new_nr_entries, BITS_PER_ARRAY_ENTRY);
+ __le64 value = default_value ? cpu_to_le64(~0) : cpu_to_le64(0);
+
+ __dm_bless_for_disk(&value);
+ return dm_array_resize(&info->array_info, root, old_blocks, new_blocks,
+ &value, new_root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_resize);
+
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root)
+{
+ return dm_array_del(&info->array_info, root);
+}
+EXPORT_SYMBOL_GPL(dm_bitset_del);
+
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+ dm_block_t *new_root)
+{
+ int r;
+ __le64 value;
+
+ if (!info->current_index_set)
+ return 0;
+
+ value = cpu_to_le64(info->current_bits);
+
+ __dm_bless_for_disk(&value);
+ r = dm_array_set_value(&info->array_info, root, info->current_index,
+ &value, new_root);
+ if (r)
+ return r;
+
+ info->current_index_set = false;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_flush);
+
+static int read_bits(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t array_index)
+{
+ int r;
+ __le64 value;
+
+ r = dm_array_get_value(&info->array_info, root, array_index, &value);
+ if (r)
+ return r;
+
+ info->current_bits = le64_to_cpu(value);
+ info->current_index_set = true;
+ info->current_index = array_index;
+ return 0;
+}
+
+static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned array_index = index / BITS_PER_ARRAY_ENTRY;
+
+ if (info->current_index_set) {
+ if (info->current_index == array_index)
+ return 0;
+
+ r = dm_bitset_flush(info, root, new_root);
+ if (r)
+ return r;
+ }
+
+ return read_bits(info, root, array_index);
+}
+
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ set_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_set_bit);
+
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ clear_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_clear_bit);
+
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root, bool *result)
+{
+ int r;
+ unsigned b = index % BITS_PER_ARRAY_ENTRY;
+
+ r = get_array_entry(info, root, index, new_root);
+ if (r)
+ return r;
+
+ *result = test_bit(b, (unsigned long *) &info->current_bits);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dm_bitset_test_bit);
+
+/*----------------------------------------------------------------*/
diff --git a/drivers/md/persistent-data/dm-bitset.h b/drivers/md/persistent-data/dm-bitset.h
new file mode 100644
index 00000000000..e1b9bea14aa
--- /dev/null
+++ b/drivers/md/persistent-data/dm-bitset.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This file is released under the GPL.
+ */
+#ifndef _LINUX_DM_BITSET_H
+#define _LINUX_DM_BITSET_H
+
+#include "dm-array.h"
+
+/*----------------------------------------------------------------*/
+
+/*
+ * This bitset type is a thin wrapper round a dm_array of 64bit words. It
+ * uses a tiny, one word cache to reduce the number of array lookups and so
+ * increase performance.
+ *
+ * Like the dm-array that it's based on, the caller needs to keep track of
+ * the size of the bitset separately. The underlying dm-array implicitly
+ * knows how many words it's storing and will return -ENODATA if you try
+ * and access an out of bounds word. However, an out of bounds bit in the
+ * final word will _not_ be detected, you have been warned.
+ *
+ * Bits are indexed from zero.
+ *
+ * Typical use:
+ *
+ * a) Initialise a dm_disk_bitset structure with dm_disk_bitset_init().
+ * This describes the bitset and includes the cache. It's not called
+ * dm_bitset_info, in line with the other data structures, because it
+ * does include instance data.
+ *
+ * b) Get yourself a root. The root is the index of a block of data on the
+ * disk that holds a particular instance of a bitset. You may have a
+ * pre-existing root in your metadata that you wish to use, or you may
+ * want to create a brand new, empty bitset with dm_bitset_empty().
+ *
+ * Like the other data structures in this library, dm_bitset objects are
+ * immutable between transactions. Update functions will return you the
+ * root for a _new_ bitset. If you've incremented the old root, via
+ * dm_tm_inc(), before calling the update function you may continue to use
+ * it in parallel with the new root.
+ *
+ * Even read operations may trigger the cache to be flushed and as such
+ * return a root for a new, updated bitset.
+ *
+ * c) resize a bitset with dm_bitset_resize().
+ *
+ * d) Set a bit with dm_bitset_set_bit().
+ *
+ * e) Clear a bit with dm_bitset_clear_bit().
+ *
+ * f) Test a bit with dm_bitset_test_bit().
+ *
+ * g) Flush all updates from the cache with dm_bitset_flush().
+ *
+ * h) Destroy the bitset with dm_bitset_del(). This tells the transaction
+ * manager that you're no longer using this data structure so it can
+ * recycle its blocks. (dm_bitset_dec() would be a better name for it,
+ * but del is in keeping with dm_btree_del()).
+ */
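+
+/*
+ * A minimal sketch of the steps above (illustrative only): error handling
+ * is omitted and 'tm' is assumed to come from the caller's metadata code.
+ *
+ *	struct dm_disk_bitset bits;
+ *	dm_block_t root;
+ *	bool set;
+ *	int r;
+ *
+ *	dm_disk_bitset_init(tm, &bits);
+ *	r = dm_bitset_empty(&bits, &root);
+ *	r = dm_bitset_resize(&bits, root, 0, 1024, false, &root);
+ *
+ *	r = dm_bitset_set_bit(&bits, root, 17, &root);
+ *	r = dm_bitset_test_bit(&bits, root, 17, &root, &set);
+ *
+ *	r = dm_bitset_flush(&bits, root, &root);
+ *	r = dm_bitset_del(&bits, root);
+ */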
+
+/*
+ * Opaque object. Unlike dm_array_info, you should have one of these per
+ * bitset. Initialise with dm_disk_bitset_init().
+ */
+struct dm_disk_bitset {
+ struct dm_array_info array_info;
+
+ uint32_t current_index;
+ uint64_t current_bits;
+
+ bool current_index_set:1;
+};
+
+/*
+ * Sets up a dm_disk_bitset structure. You don't need to do anything with
+ * this structure when you finish using it.
+ *
+ * tm - the transaction manager that should supervise this structure
+ * info - the structure being initialised
+ */
+void dm_disk_bitset_init(struct dm_transaction_manager *tm,
+ struct dm_disk_bitset *info);
+
+/*
+ * Create an empty, zero length bitset.
+ *
+ * info - describes the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *new_root);
+
+/*
+ * Resize the bitset.
+ *
+ * info - describes the bitset
+ * old_root - the root block of the array on disk
+ * old_nr_entries - the number of bits in the old bitset
+ * new_nr_entries - the number of bits you want in the new bitset
+ * default_value - the value for any new bits
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t old_root,
+ uint32_t old_nr_entries, uint32_t new_nr_entries,
+ bool default_value, dm_block_t *new_root);
+
+/*
+ * Frees the bitset.
+ */
+int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root);
+
+/*
+ * Set a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root);
+
+/*
+ * Clears a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root);
+
+/*
+ * Tests a bit.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * index - the bit index
+ * new_root - on success, points to the new root block (cached values may have been written)
+ * result - the bit value you're after
+ *
+ * -ENODATA will be returned if the index is out of bounds.
+ */
+int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root,
+ uint32_t index, dm_block_t *new_root, bool *result);
+
+/*
+ * Flush any cached changes to disk.
+ *
+ * info - describes the bitset
+ * root - the root block of the bitset
+ * new_root - on success, points to the new root block
+ */
+int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root,
+ dm_block_t *new_root);
+
+/*----------------------------------------------------------------*/
+
+#endif /* _LINUX_DM_BITSET_H */
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 28c3ed072a7..81b513890e2 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -613,6 +613,7 @@ int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
return dm_bufio_write_dirty_buffers(bm->bufio);
}
+EXPORT_SYMBOL_GPL(dm_bm_flush_and_unlock);
void dm_bm_set_read_only(struct dm_block_manager *bm)
{
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index accbb05f17b..37d367bb9aa 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -64,6 +64,7 @@ struct ro_spine {
void init_ro_spine(struct ro_spine *s, struct dm_btree_info *info);
int exit_ro_spine(struct ro_spine *s);
int ro_step(struct ro_spine *s, dm_block_t new_child);
+void ro_pop(struct ro_spine *s);
struct btree_node *ro_node(struct ro_spine *s);
struct shadow_spine {
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index f199a0c4ed0..cf9fd676ae4 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -164,6 +164,13 @@ int ro_step(struct ro_spine *s, dm_block_t new_child)
return r;
}
+void ro_pop(struct ro_spine *s)
+{
+ BUG_ON(!s->count);
+ --s->count;
+ unlock_block(s->info, s->nodes[s->count]);
+}
+
struct btree_node *ro_node(struct ro_spine *s)
{
struct dm_block *block;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 4caf66918cd..35865425e4b 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -807,3 +807,55 @@ int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
return r ? r : count;
}
EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
+
+/*
+ * FIXME: We shouldn't use a recursive algorithm when we have limited stack
+ * space. Also this only works for single level trees.
+ */
+static int walk_node(struct ro_spine *s, dm_block_t block,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context)
+{
+ int r;
+ unsigned i, nr;
+ struct btree_node *n;
+ uint64_t keys;
+
+ r = ro_step(s, block);
+ n = ro_node(s);
+
+ nr = le32_to_cpu(n->header.nr_entries);
+ for (i = 0; i < nr; i++) {
+ if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
+ r = walk_node(s, value64(n, i), fn, context);
+ if (r)
+ goto out;
+ } else {
+ keys = le64_to_cpu(*key_ptr(n, i));
+ r = fn(context, &keys, value_ptr(n, i));
+ if (r)
+ goto out;
+ }
+ }
+
+out:
+ ro_pop(s);
+ return r;
+}
+
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context)
+{
+ int r;
+ struct ro_spine spine;
+
+ BUG_ON(info->levels > 1);
+
+ init_ro_spine(&spine, info);
+ r = walk_node(&spine, root, fn, context);
+ exit_ro_spine(&spine);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(dm_btree_walk);
diff --git a/drivers/md/persistent-data/dm-btree.h b/drivers/md/persistent-data/dm-btree.h
index a2cd50441ca..8672d159e0b 100644
--- a/drivers/md/persistent-data/dm-btree.h
+++ b/drivers/md/persistent-data/dm-btree.h
@@ -58,21 +58,21 @@ struct dm_btree_value_type {
* somewhere.) This method is _not_ called for insertion of a new
* value: It is assumed the ref count is already 1.
*/
- void (*inc)(void *context, void *value);
+ void (*inc)(void *context, const void *value);
/*
* This value is being deleted. The btree takes care of freeing
* the memory pointed to by @value. Often the del function just
* needs to decrement a reference count somewhere.
*/
- void (*dec)(void *context, void *value);
+ void (*dec)(void *context, const void *value);
/*
* A test for equality between two values. When a value is
* overwritten with a new one, the old one has the dec method
* called _unless_ the new and old value are deemed equal.
*/
- int (*equal)(void *context, void *value1, void *value2);
+ int (*equal)(void *context, const void *value1, const void *value2);
};
/*
@@ -142,4 +142,13 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int dm_btree_find_highest_key(struct dm_btree_info *info, dm_block_t root,
uint64_t *result_keys);
+/*
+ * Iterate through a btree, calling fn() on each entry.
+ * It only works for single level trees and is internally recursive, so
+ * monitor stack usage carefully.
+ */
+int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
+ int (*fn)(void *context, uint64_t *keys, void *leaf),
+ void *context);
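+
+/*
+ * A hypothetical callback, shown only to illustrate the expected signature;
+ * 'keys' points at the entry's 64bit key and 'leaf' at its value in
+ * on-disk format:
+ *
+ *	static int count_entry(void *context, uint64_t *keys, void *leaf)
+ *	{
+ *		unsigned *count = context;
+ *
+ *		(*count)++;
+ *		return 0;
+ *	}
+ *
+ * A non-zero return value stops the walk and is passed back to the
+ * dm_btree_walk() caller.
+ */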
+
#endif /* _LINUX_DM_BTREE_H */
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 3aa9a969b37..36f5d52775a 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -103,6 +103,7 @@
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
+#include <asm/sections.h>
#define v1printk(a...) do { \
if (verbose) \
@@ -222,6 +223,7 @@ static unsigned long lookup_addr(char *arg)
addr = (unsigned long)do_fork;
else if (!strcmp(arg, "hw_break_val"))
addr = (unsigned long)&hw_break_val;
+ addr = (unsigned long) dereference_function_descriptor((void *)addr);
return addr;
}
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 03f2eb5627e..557bec599f4 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -74,8 +74,8 @@ config MTD_REDBOOT_PARTS_READONLY
endif # MTD_REDBOOT_PARTS
config MTD_CMDLINE_PARTS
- bool "Command line partition table parsing"
- depends on MTD = "y"
+ tristate "Command line partition table parsing"
+ depends on MTD
---help---
Allow generic configuration of the MTD partition tables via the kernel
command line. Multiple flash resources are supported for hardware where
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index 7c057a05adb..ddc0a4287a4 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -142,7 +142,13 @@ static int __init ar7_parser_init(void)
return register_mtd_parser(&ar7_parser);
}
+static void __exit ar7_parser_exit(void)
+{
+ deregister_mtd_parser(&ar7_parser);
+}
+
module_init(ar7_parser_init);
+module_exit(ar7_parser_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR( "Felix Fietkau <nbd@openwrt.org>, "
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index e06d782489a..63feb75cc8e 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -14,17 +14,11 @@
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
/* 10 parts were found on sflash on Netgear WNDR4500 */
#define BCM47XXPART_MAX_PARTS 12
-/*
- * Amount of bytes we read when analyzing each block of flash memory.
- * Set it big enough to allow detecting partition and reading important data.
- */
-#define BCM47XXPART_BYTES_TO_READ 0x404
-
/* Magics */
#define BOARD_DATA_MAGIC 0x5246504D /* MPFR */
#define POT_MAGIC1 0x54544f50 /* POTT */
@@ -59,13 +53,21 @@ static int bcm47xxpart_parse(struct mtd_info *master,
uint32_t *buf;
size_t bytes_read;
uint32_t offset;
- uint32_t blocksize = 0x10000;
+ uint32_t blocksize = master->erasesize;
struct trx_header *trx;
+ int trx_part = -1;
+ int last_trx_part = -1;
+ int max_bytes_to_read = 0x8004;
+
+ if (blocksize <= 0x10000)
+ blocksize = 0x10000;
+ if (blocksize == 0x20000)
+ max_bytes_to_read = 0x18004;
/* Alloc */
parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS,
GFP_KERNEL);
- buf = kzalloc(BCM47XXPART_BYTES_TO_READ, GFP_KERNEL);
+ buf = kzalloc(max_bytes_to_read, GFP_KERNEL);
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
@@ -80,7 +82,7 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ if (mtd_read(master, offset, max_bytes_to_read,
&bytes_read, (uint8_t *)buf) < 0) {
pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
offset);
@@ -95,9 +97,16 @@ static int bcm47xxpart_parse(struct mtd_info *master,
}
/* Standard NVRAM */
- if (buf[0x000 / 4] == NVRAM_HEADER) {
+ if (buf[0x000 / 4] == NVRAM_HEADER ||
+ buf[0x1000 / 4] == NVRAM_HEADER ||
+ buf[0x8000 / 4] == NVRAM_HEADER ||
+ (blocksize == 0x20000 && (
+ buf[0x10000 / 4] == NVRAM_HEADER ||
+ buf[0x11000 / 4] == NVRAM_HEADER ||
+ buf[0x18000 / 4] == NVRAM_HEADER))) {
bcm47xxpart_add_part(&parts[curr_part++], "nvram",
offset, 0);
+ offset = rounddown(offset, blocksize);
continue;
}
@@ -131,6 +140,10 @@ static int bcm47xxpart_parse(struct mtd_info *master,
if (buf[0x000 / 4] == TRX_MAGIC) {
trx = (struct trx_header *)buf;
+ trx_part = curr_part;
+ bcm47xxpart_add_part(&parts[curr_part++], "firmware",
+ offset, 0);
+
i = 0;
/* We have LZMA loader if offset[2] points to sth */
if (trx->offset[2]) {
@@ -154,6 +167,8 @@ static int bcm47xxpart_parse(struct mtd_info *master,
offset + trx->offset[i], 0);
i++;
+ last_trx_part = curr_part - 1;
+
/*
* We have whole TRX scanned, skip to the next part. Use
* roundown (not roundup), as the loop will increase
@@ -169,11 +184,15 @@ static int bcm47xxpart_parse(struct mtd_info *master,
* Assume that partitions end at the beginning of the one they are
* followed by.
*/
- for (i = 0; i < curr_part - 1; i++)
- parts[i].size = parts[i + 1].offset - parts[i].offset;
- if (curr_part > 0)
- parts[curr_part - 1].size =
- master->size - parts[curr_part - 1].offset;
+ for (i = 0; i < curr_part; i++) {
+ u64 next_part_offset = (i < curr_part - 1) ?
+ parts[i + 1].offset : master->size;
+
+ parts[i].size = next_part_offset - parts[i].offset;
+ if (i == last_trx_part && trx_part >= 0)
+ parts[trx_part].size = next_part_offset -
+ parts[trx_part].offset;
+ }
*pparts = parts;
return curr_part;
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index b86197286f2..fff665d59a0 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -33,6 +33,8 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
@@ -74,6 +76,10 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
+
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
.probe = NULL, /* Not usable directly */
.destroy = cfi_amdstd_destroy,
@@ -496,6 +502,7 @@ static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
struct cfi_private *cfi = map->fldrv_priv;
+ struct device_node __maybe_unused *np = map->device_node;
struct mtd_info *mtd;
int i;
@@ -570,6 +577,17 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
cfi_tell_features(extp);
#endif
+#ifdef CONFIG_OF
+ if (np && of_property_read_bool(
+ np, "use-advanced-sector-protection")
+ && extp->BlkProtUnprot == 8) {
+ printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
+ mtd->_lock = cfi_ppb_lock;
+ mtd->_unlock = cfi_ppb_unlock;
+ mtd->_is_locked = cfi_ppb_is_locked;
+ }
+#endif
+
bootloc = extp->TopBottom;
if ((bootloc < 2) || (bootloc > 5)) {
printk(KERN_WARNING "%s: CFI contains unrecognised boot "
@@ -2172,6 +2190,205 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
+/*
+ * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
+ */
+
+struct ppb_lock {
+ struct flchip *chip;
+ loff_t offset;
+ int locked;
+};
+
+#define MAX_SECTORS 512
+
+#define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
+#define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
+#define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
+
+static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ struct flchip *chip,
+ unsigned long adr, int len, void *thunk)
+{
+ struct cfi_private *cfi = map->fldrv_priv;
+ unsigned long timeo;
+ int ret;
+
+ mutex_lock(&chip->mutex);
+ ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+ }
+
+ pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
+
+ cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
+ cfi->device_type, NULL);
+ /* PPB entry command */
+ cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
+ cfi->device_type, NULL);
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+ map_write(map, CMD(0xA0), chip->start + adr);
+ map_write(map, CMD(0x00), chip->start + adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+ * have to unlock all sectors of this device instead
+ */
+ chip->state = FL_UNLOCKING;
+ map_write(map, CMD(0x80), chip->start);
+ map_write(map, CMD(0x30), chip->start);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
+ chip->state = FL_JEDEC_QUERY;
+ /* Return locked status: 0->locked, 1->unlocked */
+ ret = !cfi_read_query(map, adr);
+ } else
+ BUG();
+
+ /*
+ * Wait for some time as unlocking of all sectors takes quite long
+ */
+ timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
+ for (;;) {
+ if (chip_ready(map, adr))
+ break;
+
+ if (time_after(jiffies, timeo)) {
+ printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
+ ret = -EIO;
+ break;
+ }
+
+ UDELAY(map, chip, adr, 1);
+ }
+
+ /* Exit BC commands */
+ map_write(map, CMD(0x90), chip->start);
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+ put_chip(map, chip, adr + chip->start);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+}
+
+static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ struct mtd_erase_region_info *regions = mtd->eraseregions;
+ struct map_info *map = mtd->priv;
+ struct cfi_private *cfi = map->fldrv_priv;
+ struct ppb_lock *sect;
+ unsigned long adr;
+ loff_t offset;
+ uint64_t length;
+ int chipnum;
+ int i;
+ int sectors;
+ int ret;
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors. So let's
+ * first check the locking status of all sectors and save
+ * it for future use.
+ */
+ sect = kzalloc(MAX_SECTORS * sizeof(struct ppb_lock), GFP_KERNEL);
+ if (!sect)
+ return -ENOMEM;
+
+ /*
+ * This code to walk all sectors is a slightly modified version
+ * of the cfi_varsize_frob() code.
+ */
+ i = 0;
+ chipnum = 0;
+ adr = 0;
+ sectors = 0;
+ offset = 0;
+ length = mtd->size;
+
+ while (length) {
+ int size = regions[i].erasesize;
+
+ /*
+ * Only test sectors that shall not be unlocked. The other
+ * sectors shall be unlocked, so let's keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+ if ((adr < ofs) || (adr >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+ sect[sectors].offset = offset;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+ }
+
+ adr += size;
+ offset += size;
+ length -= size;
+
+ if (offset == regions[i].offset + size * regions[i].numblocks)
+ i++;
+
+ if (adr >> cfi->chipshift) {
+ adr = 0;
+ chipnum++;
+
+ if (chipnum >= cfi->numchips)
+ break;
+ }
+
+ sectors++;
+ if (sectors >= MAX_SECTORS) {
+ printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
+ MAX_SECTORS);
+ kfree(sect);
+ return -EINVAL;
+ }
+ }
+
+ /* Now unlock the whole chip */
+ ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_UNLOCK);
+ if (ret) {
+ kfree(sect);
+ return ret;
+ }
+
+ /*
+ * PPB unlocking always unlocks all sectors of the flash chip.
+ * We need to re-lock all previously locked sectors.
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+ do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+ kfree(sect);
+ return ret;
+}
+
+static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
+ uint64_t len)
+{
+ return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
+ DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
+}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index c533f27d863..721caebbc5c 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -22,11 +22,22 @@
*
* mtdparts=<mtddef>[;<mtddef]
* <mtddef> := <mtd-id>:<partdef>[,<partdef>]
- * where <mtd-id> is the name from the "cat /proc/mtd" command
- * <partdef> := <size>[@offset][<name>][ro][lk]
+ * <partdef> := <size>[@<offset>][<name>][ro][lk]
* <mtd-id> := unique name used in mapping driver/device (mtd->name)
* <size> := standard linux memsize OR "-" to denote all remaining space
+ * size is automatically truncated at the end of the device
+ * if the specified or truncated size is 0 the part is skipped
+ * <offset> := standard linux memsize
+ * if omitted the part will immediately follow the previous part
+ * or start at 0 if it is the first part
* <name> := '(' NAME ')'
+ * NAME will appear in /proc/mtd
+ *
+ * <size> and <offset> can be specified such that the parts are out of order
+ * in physical memory and may even overlap.
+ *
+ * The parts are assigned MTD numbers in the order they are specified in the
+ * command line regardless of their order in physical memory.
*
* Examples:
*
@@ -70,6 +81,7 @@ struct cmdline_mtd_partition {
static struct cmdline_mtd_partition *partitions;
/* the command line passed to mtdpart_setup() */
+static char *mtdparts;
static char *cmdline;
static int cmdline_parsed;
@@ -330,6 +342,14 @@ static int parse_cmdline_partitions(struct mtd_info *master,
if (part->parts[i].size == SIZE_REMAINING)
part->parts[i].size = master->size - offset;
+ if (offset + part->parts[i].size > master->size) {
+ printk(KERN_WARNING ERRP
+ "%s: partitioning exceeds flash size, truncating\n",
+ part->mtd_id);
+ part->parts[i].size = master->size - offset;
+ }
+ offset += part->parts[i].size;
+
if (part->parts[i].size == 0) {
printk(KERN_WARNING ERRP
"%s: skipping zero sized partition\n",
@@ -337,16 +357,8 @@ static int parse_cmdline_partitions(struct mtd_info *master,
part->num_parts--;
memmove(&part->parts[i], &part->parts[i + 1],
sizeof(*part->parts) * (part->num_parts - i));
- continue;
- }
-
- if (offset + part->parts[i].size > master->size) {
- printk(KERN_WARNING ERRP
- "%s: partitioning exceeds flash size, truncating\n",
- part->mtd_id);
- part->parts[i].size = master->size - offset;
+ i--;
}
- offset += part->parts[i].size;
}
*pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
@@ -365,7 +377,7 @@ static int parse_cmdline_partitions(struct mtd_info *master,
*
* This function needs to be visible for bootloaders.
*/
-static int mtdpart_setup(char *s)
+static int __init mtdpart_setup(char *s)
{
cmdline = s;
return 1;
@@ -381,10 +393,21 @@ static struct mtd_part_parser cmdline_parser = {
static int __init cmdline_parser_init(void)
{
+ if (mtdparts)
+ mtdpart_setup(mtdparts);
return register_mtd_parser(&cmdline_parser);
}
+static void __exit cmdline_parser_exit(void)
+{
+ deregister_mtd_parser(&cmdline_parser);
+}
+
module_init(cmdline_parser_init);
+module_exit(cmdline_parser_exit);
+
+MODULE_PARM_DESC(mtdparts, "Partitioning specification");
+module_param(mtdparts, charp, 0);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <mag@sysgo.de>");
diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile
index 395733a30ef..369a1943ca2 100644
--- a/drivers/mtd/devices/Makefile
+++ b/drivers/mtd/devices/Makefile
@@ -17,8 +17,10 @@ obj-$(CONFIG_MTD_LART) += lart.o
obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o
obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o
obj-$(CONFIG_MTD_M25P80) += m25p80.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH) += elm.o
obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o
obj-$(CONFIG_MTD_SST25L) += sst25l.o
obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o
-CFLAGS_docg3.o += -I$(src)
\ No newline at end of file
+
+CFLAGS_docg3.o += -I$(src)
diff --git a/drivers/mtd/devices/bcm47xxsflash.c b/drivers/mtd/devices/bcm47xxsflash.c
index 4714584aa99..95266285acb 100644
--- a/drivers/mtd/devices/bcm47xxsflash.c
+++ b/drivers/mtd/devices/bcm47xxsflash.c
@@ -5,6 +5,8 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
+#include "bcm47xxsflash.h"
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Serial flash driver for BCMA bus");
@@ -13,26 +15,28 @@ static const char *probes[] = { "bcm47xxpart", NULL };
static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len,
size_t *retlen, u_char *buf)
{
- struct bcma_sflash *sflash = mtd->priv;
+ struct bcm47xxsflash *b47s = mtd->priv;
/* Check address range */
if ((from + len) > mtd->size)
return -EINVAL;
- memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(sflash->window + from),
+ memcpy_fromio(buf, (void __iomem *)KSEG0ADDR(b47s->window + from),
len);
+ *retlen = len;
return len;
}
-static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
- struct mtd_info *mtd)
+static void bcm47xxsflash_fill_mtd(struct bcm47xxsflash *b47s)
{
- mtd->priv = sflash;
+ struct mtd_info *mtd = &b47s->mtd;
+
+ mtd->priv = b47s;
mtd->name = "bcm47xxsflash";
mtd->owner = THIS_MODULE;
mtd->type = MTD_ROM;
- mtd->size = sflash->size;
+ mtd->size = b47s->size;
mtd->_read = bcm47xxsflash_read;
/* TODO: implement writing support and verify/change following code */
@@ -40,19 +44,30 @@ static void bcm47xxsflash_fill_mtd(struct bcma_sflash *sflash,
mtd->writebufsize = mtd->writesize = 1;
}
-static int bcm47xxsflash_probe(struct platform_device *pdev)
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static int bcm47xxsflash_bcma_probe(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s;
int err;
- sflash->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);
- if (!sflash->mtd) {
+ b47s = kzalloc(sizeof(*b47s), GFP_KERNEL);
+ if (!b47s) {
err = -ENOMEM;
goto out;
}
- bcm47xxsflash_fill_mtd(sflash, sflash->mtd);
+ sflash->priv = b47s;
- err = mtd_device_parse_register(sflash->mtd, probes, NULL, NULL, 0);
+ b47s->window = sflash->window;
+ b47s->blocksize = sflash->blocksize;
+ b47s->numblocks = sflash->numblocks;
+ b47s->size = sflash->size;
+ bcm47xxsflash_fill_mtd(b47s);
+
+ err = mtd_device_parse_register(&b47s->mtd, probes, NULL, NULL, 0);
if (err) {
pr_err("Failed to register MTD device: %d\n", err);
goto err_dev_reg;
@@ -61,34 +76,40 @@ static int bcm47xxsflash_probe(struct platform_device *pdev)
return 0;
err_dev_reg:
- kfree(sflash->mtd);
+ kfree(&b47s->mtd);
out:
return err;
}
-static int bcm47xxsflash_remove(struct platform_device *pdev)
+static int bcm47xxsflash_bcma_remove(struct platform_device *pdev)
{
struct bcma_sflash *sflash = dev_get_platdata(&pdev->dev);
+ struct bcm47xxsflash *b47s = sflash->priv;
- mtd_device_unregister(sflash->mtd);
- kfree(sflash->mtd);
+ mtd_device_unregister(&b47s->mtd);
+ kfree(b47s);
return 0;
}
static struct platform_driver bcma_sflash_driver = {
- .remove = bcm47xxsflash_remove,
+ .probe = bcm47xxsflash_bcma_probe,
+ .remove = bcm47xxsflash_bcma_remove,
.driver = {
.name = "bcma_sflash",
.owner = THIS_MODULE,
},
};
+/**************************************************
+ * Init
+ **************************************************/
+
static int __init bcm47xxsflash_init(void)
{
int err;
- err = platform_driver_probe(&bcma_sflash_driver, bcm47xxsflash_probe);
+ err = platform_driver_register(&bcma_sflash_driver);
if (err)
pr_err("Failed to register BCMA serial flash driver: %d\n",
err);
diff --git a/drivers/mtd/devices/bcm47xxsflash.h b/drivers/mtd/devices/bcm47xxsflash.h
new file mode 100644
index 00000000000..ebf6f710e23
--- /dev/null
+++ b/drivers/mtd/devices/bcm47xxsflash.h
@@ -0,0 +1,15 @@
+#ifndef __BCM47XXSFLASH_H
+#define __BCM47XXSFLASH_H
+
+#include <linux/mtd/mtd.h>
+
+struct bcm47xxsflash {
+ u32 window;
+ u32 blocksize;
+ u16 numblocks;
+ u32 size;
+
+ struct mtd_info mtd;
+};
+
+#endif /* __BCM47XXSFLASH_H */
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
new file mode 100644
index 00000000000..2ec5da9ee24
--- /dev/null
+++ b/drivers/mtd/devices/elm.c
@@ -0,0 +1,404 @@
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_IRQSTATUS 0x018
+#define ELM_IRQENABLE 0x01c
+#define ELM_LOCATION_CONFIG 0x020
+#define ELM_PAGE_CTRL 0x080
+#define ELM_SYNDROME_FRAGMENT_0 0x400
+#define ELM_SYNDROME_FRAGMENT_6 0x418
+#define ELM_LOCATION_STATUS 0x800
+#define ELM_ERROR_LOCATION_0 0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK 0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK BIT(8)
+#define ECC_NB_ERRORS_MASK 0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK 0x1fff
+
+#define ELM_ECC_SIZE 0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE 0x40
+#define ERROR_LOCATION_SIZE 0x100
+
+struct elm_info {
+ struct device *dev;
+ void __iomem *elm_base;
+ struct completion elm_completion;
+ struct list_head list;
+ enum bch_ecc bch_type;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+ writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+ return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev: ELM device
+ * @bch_type: Type of BCH ecc
+ */
+void elm_config(struct device *dev, enum bch_ecc bch_type)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_get_drvdata(dev);
+
+ reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+ elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+ info->bch_type = bch_type;
+}
+EXPORT_SYMBOL(elm_config);
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info: elm info
+ * @index: index number of syndrome fragment vector
+ * @enable: enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+ bool enable)
+{
+ u32 reg_val;
+
+ reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+ if (enable)
+ reg_val |= BIT(index); /* enable page mode */
+ else
+ reg_val &= ~BIT(index); /* disable page mode */
+
+ elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info: elm info
+ * @err_vec: elm error vectors
+ * @ecc: buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+ struct elm_errorvec *err_vec, u8 *ecc)
+{
+ int i, offset;
+ u32 val;
+
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ elm_configure_page_mode(info, i, true);
+ offset = ELM_SYNDROME_FRAGMENT_0 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+
+ /* BCH8 */
+ if (info->bch_type) {
+
+ /* syndrome fragment 0 = ecc[9-12B] */
+ val = cpu_to_be32(*(u32 *) &ecc[9]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[5-8B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[5]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 2 = ecc[1-4B] */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[1]);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 3 = ecc[0B] */
+ offset += 4;
+ val = ecc[0];
+ elm_write_reg(info, offset, val);
+ } else {
+ /* syndrome fragment 0 = ecc[20-52b] bits */
+ val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+ ((ecc[2] & 0xf) << 28);
+ elm_write_reg(info, offset, val);
+
+ /* syndrome fragment 1 = ecc[0-20b] bits */
+ offset += 4;
+ val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+ elm_write_reg(info, offset, val);
+ }
+ }
+
+ /* Update ecc pointer with ecc byte size */
+ ecc += info->bch_type ? BCH8_SIZE : BCH4_SIZE;
+ }
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * Set the syndrome valid bit for each syndrome fragment register that has
+ * been loaded. This enables the elm module to start processing those
+ * syndrome vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, offset;
+ u32 reg_val;
+
+ /*
+ * Set the syndrome vector valid bit, so that the ELM module
+ * will process the vectors for which an error is reported
+ */
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+ if (err_vec[i].error_reported) {
+ offset = ELM_SYNDROME_FRAGMENT_6 +
+ SYNDROME_FRAGMENT_REG_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+ reg_val |= ELM_SYNDROME_VALID;
+ elm_write_reg(info, offset, reg_val);
+ }
+ }
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info: elm info
+ * @err_vec: elm error vectors
+ *
+ * On completion of processing by the elm module, the error location status
+ * register is updated with correctable/uncorrectable error information.
+ * In case of correctable errors, the number of errors is read from the
+ * elm location status register and the error positions are read from the
+ * elm error location registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+ struct elm_errorvec *err_vec)
+{
+ int i, j, errors = 0;
+ int offset;
+ u32 reg_val;
+
+ for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+
+ /* Check error reported */
+ if (err_vec[i].error_reported) {
+ offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+ reg_val = elm_read_reg(info, offset);
+
+ /* Check correctable error or not */
+ if (reg_val & ECC_CORRECTABLE_MASK) {
+ offset = ELM_ERROR_LOCATION_0 +
+ ERROR_LOCATION_SIZE * i;
+
+ /* Read count of correctable errors */
+ err_vec[i].error_count = reg_val &
+ ECC_NB_ERRORS_MASK;
+
+ /* Update the error locations in error vector */
+ for (j = 0; j < err_vec[i].error_count; j++) {
+
+ reg_val = elm_read_reg(info, offset);
+ err_vec[i].error_loc[j] = reg_val &
+ ECC_ERROR_LOCATION_MASK;
+
+ /* Update error location register */
+ offset += 4;
+ }
+
+ errors += err_vec[i].error_count;
+ } else {
+ err_vec[i].error_uncorrectable = true;
+ }
+
+ /* Clearing interrupts for processed error vectors */
+ elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+ /* Disable page mode */
+ elm_configure_page_mode(info, i, false);
+ }
+ }
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev: device pointer
+ * @ecc_calc: calculated ECC bytes from GPMC
+ * @err_vec: elm error vectors
+ *
+ * Called with one or more vectors that have an error reported; such
+ * vectors are flagged in err_vec[].error_reported
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec)
+{
+ struct elm_info *info = dev_get_drvdata(dev);
+ u32 reg_val;
+
+ /* Enable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+ elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+ elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+ /* Load valid ecc byte to syndrome fragment register */
+ elm_load_syndrome(info, err_vec, ecc_calc);
+
+ /* Enable syndrome processing for which syndrome fragment is updated */
+ elm_start_processing(info, err_vec);
+
+ /* Wait for ELM module to finish locating error correction */
+ wait_for_completion(&info->elm_completion);
+
+ /* Disable page mode interrupt */
+ reg_val = elm_read_reg(info, ELM_IRQENABLE);
+ elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+ elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+ u32 reg_val;
+ struct elm_info *info = dev_id;
+
+ reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+ /* All error vectors processed */
+ if (reg_val & INTR_STATUS_PAGE_VALID) {
+ elm_write_reg(info, ELM_IRQSTATUS,
+ reg_val & INTR_STATUS_PAGE_VALID);
+ complete(&info->elm_completion);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res, *irq;
+ struct elm_info *info;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ info->dev = &pdev->dev;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENODEV;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "no memory resource defined\n");
+ return -ENODEV;
+ }
+
+ info->elm_base = devm_request_and_ioremap(&pdev->dev, res);
+ if (!info->elm_base)
+ return -EADDRNOTAVAIL;
+
+ ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+ pdev->name, info);
+ if (ret) {
+ dev_err(&pdev->dev, "failure requesting irq %i\n", irq->start);
+ return ret;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev)) {
+ ret = -EINVAL;
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+ }
+
+ init_completion(&info->elm_completion);
+ INIT_LIST_HEAD(&info->list);
+ list_add(&info->list, &elm_devices);
+ platform_set_drvdata(pdev, info);
+ return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+ { .compatible = "ti,am3352-elm" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+ .driver = {
+ .name = "elm",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(elm_of_match),
+ },
+ .probe = elm_probe,
+ .remove = elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform: elm");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 4eeeb2d7f6e..5b6b0728be2 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -565,6 +565,96 @@ time_out:
return ret;
}
+static int m25p80_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int res = 0;
+
+ mutex_lock(&flash->lock);
+ /* Wait until finished previous command */
+ if (wait_till_ready(flash)) {
+ res = 1;
+ goto err;
+ }
+
+ status_old = read_sr(flash);
+
+ if (offset < flash->mtd.size-(flash->mtd.size/2))
+ status_new = status_old | SR_BP2 | SR_BP1 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/4))
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+ else if (offset < flash->mtd.size-(flash->mtd.size/8))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/16))
+ status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+ else if (offset < flash->mtd.size-(flash->mtd.size/32))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset < flash->mtd.size-(flash->mtd.size/64))
+ status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+ else
+ status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+
+ /* Only modify protection if it will not unlock other areas */
+ if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) >
+ (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+ write_enable(flash);
+ if (write_sr(flash, status_new) < 0) {
+ res = 1;
+ goto err;
+ }
+ }
+
+err: mutex_unlock(&flash->lock);
+ return res;
+}
+
+static int m25p80_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+ struct m25p *flash = mtd_to_m25p(mtd);
+ uint32_t offset = ofs;
+ uint8_t status_old, status_new;
+ int res = 0;
+
+ mutex_lock(&flash->lock);
+ /* Wait until finished previous command */
+ if (wait_till_ready(flash)) {
+ res = 1;
+ goto err;
+ }
+
+ status_old = read_sr(flash);
+
+ if (offset+len > flash->mtd.size-(flash->mtd.size/64))
+ status_new = status_old & ~(SR_BP2|SR_BP1|SR_BP0);
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/32))
+ status_new = (status_old & ~(SR_BP2|SR_BP1)) | SR_BP0;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/16))
+ status_new = (status_old & ~(SR_BP2|SR_BP0)) | SR_BP1;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/8))
+ status_new = (status_old & ~SR_BP2) | SR_BP1 | SR_BP0;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/4))
+ status_new = (status_old & ~(SR_BP0|SR_BP1)) | SR_BP2;
+ else if (offset+len > flash->mtd.size-(flash->mtd.size/2))
+ status_new = (status_old & ~SR_BP1) | SR_BP2 | SR_BP0;
+ else
+ status_new = (status_old & ~SR_BP0) | SR_BP2 | SR_BP1;
+
+ /* Only modify protection if it will not lock other areas */
+ if ((status_new&(SR_BP2|SR_BP1|SR_BP0)) <
+ (status_old&(SR_BP2|SR_BP1|SR_BP0))) {
+ write_enable(flash);
+ if (write_sr(flash, status_new) < 0) {
+ res = 1;
+ goto err;
+ }
+ }
+
+err: mutex_unlock(&flash->lock);
+ return res;
+}
+
/****************************************************************************/
/*
@@ -642,6 +732,10 @@ static const struct spi_device_id m25p_ids[] = {
/* Everspin */
{ "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
+ /* GigaDevice */
+ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) },
+ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) },
+
/* Intel/Numonyx -- xxxs33b */
{ "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
{ "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
@@ -899,6 +993,12 @@ static int m25p_probe(struct spi_device *spi)
flash->mtd._erase = m25p80_erase;
flash->mtd._read = m25p80_read;
+ /* flash protection support for STmicro chips */
+ if (JEDEC_MFR(info->jedec_id) == CFI_MFR_ST) {
+ flash->mtd._lock = m25p80_lock;
+ flash->mtd._unlock = m25p80_unlock;
+ }
+
/* sst flash chips use AAI word program */
if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST)
flash->mtd._write = sst_write;
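Note: the lock/unlock handlers added above translate the span to be protected into the three SR_BP status-register bits; each step of the ladder nominally protects a progressively larger power-of-two fraction of the device from the top. A minimal stand-alone sketch of the same mapping (hypothetical helper, SR_BP* bit positions assumed to be the usual bits 2..4 of the status register):

#include <stdint.h>

#define SR_BP0	0x04	/* assumed block-protect bit positions */
#define SR_BP1	0x08
#define SR_BP2	0x10

/* Return the BP bits that protect at least the top 'len' bytes of a
 * 'size'-byte device, mirroring the ladder in m25p80_lock() above. */
static uint8_t bp_bits_for_top(uint64_t size, uint64_t len)
{
	if (len > size / 2)
		return SR_BP2 | SR_BP1 | SR_BP0;	/* whole device */
	if (len > size / 4)
		return SR_BP2 | SR_BP1;
	if (len > size / 8)
		return SR_BP2 | SR_BP0;
	if (len > size / 16)
		return SR_BP2;
	if (len > size / 32)
		return SR_BP1 | SR_BP0;
	if (len > size / 64)
		return SR_BP1;
	return SR_BP0;					/* top 1/64 */
}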
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 62ba82c396c..3ed17c4d435 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -429,7 +429,7 @@ config MTD_GPIO_ADDR
config MTD_UCLINUX
bool "Generic uClinux RAM/ROM filesystem support"
- depends on MTD_RAM=y && (!MMU || COLDFIRE)
+ depends on (MTD_RAM=y || MTD_ROM=y) && (!MMU || COLDFIRE)
help
Map driver to support image based filesystems for uClinux.
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7901d72c924..363939dfad0 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -68,9 +68,6 @@ static int of_flash_remove(struct platform_device *dev)
kfree(info->list[i].res);
}
}
-
- kfree(info);
-
return 0;
}
@@ -199,8 +196,9 @@ static int of_flash_probe(struct platform_device *dev)
map_indirect = of_property_read_bool(dp, "no-unaligned-direct-access");
err = -ENOMEM;
- info = kzalloc(sizeof(struct of_flash) +
- sizeof(struct of_flash_list) * count, GFP_KERNEL);
+ info = devm_kzalloc(&dev->dev,
+ sizeof(struct of_flash) +
+ sizeof(struct of_flash_list) * count, GFP_KERNEL);
if (!info)
goto err_flash_remove;
@@ -241,6 +239,7 @@ static int of_flash_probe(struct platform_device *dev)
info->list[i].map.phys = res.start;
info->list[i].map.size = res_size;
info->list[i].map.bankwidth = be32_to_cpup(width);
+ info->list[i].map.device_node = dp;
err = -ENOMEM;
info->list[i].map.virt = ioremap(info->list[i].map.phys,
diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c
index 299bf88a6f4..c1af83db520 100644
--- a/drivers/mtd/maps/uclinux.c
+++ b/drivers/mtd/maps/uclinux.c
@@ -23,12 +23,26 @@
/****************************************************************************/
+#ifdef CONFIG_MTD_ROM
+#define MAP_NAME "rom"
+#else
+#define MAP_NAME "ram"
+#endif
+
+/*
+ * Blackfin uses uclinux_ram_map during startup, so it must not be static.
+ * Provide a dummy declaration to make sparse happy.
+ */
+extern struct map_info uclinux_ram_map;
+
struct map_info uclinux_ram_map = {
- .name = "RAM",
- .phys = (unsigned long)__bss_stop,
+ .name = MAP_NAME,
.size = 0,
};
+static unsigned long physaddr = -1;
+module_param(physaddr, ulong, S_IRUGO);
+
static struct mtd_info *uclinux_ram_mtdinfo;
/****************************************************************************/
@@ -60,11 +74,17 @@ static int __init uclinux_mtd_init(void)
struct map_info *mapp;
mapp = &uclinux_ram_map;
+
+ if (physaddr == -1)
+ mapp->phys = (resource_size_t)__bss_stop;
+ else
+ mapp->phys = physaddr;
+
if (!mapp->size)
mapp->size = PAGE_ALIGN(ntohl(*((unsigned long *)(mapp->phys + 8))));
mapp->bankwidth = 4;
- printk("uclinux[mtd]: RAM probe address=0x%x size=0x%x\n",
+ printk("uclinux[mtd]: probe address=0x%x size=0x%x\n",
(int) mapp->phys, (int) mapp->size);
/*
@@ -82,7 +102,7 @@ static int __init uclinux_mtd_init(void)
simple_map_init(mapp);
- mtd = do_map_probe("map_ram", mapp);
+ mtd = do_map_probe("map_" MAP_NAME, mapp);
if (!mtd) {
printk("uclinux[mtd]: failed to find a mapping?\n");
return(-ENXIO);
@@ -118,6 +138,6 @@ module_exit(uclinux_mtd_cleanup);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Greg Ungerer <gerg@snapgear.com>");
-MODULE_DESCRIPTION("Generic RAM based MTD for uClinux");
+MODULE_DESCRIPTION("Generic MTD for uClinux");
/****************************************************************************/
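Note: with no physaddr= module parameter the map driver keeps probing the image placed directly after .bss, and when no size was supplied it reads the image length from offset 8 of the mapped area. A small sketch of that size read, assuming a romfs-style image whose superblock stores the full size big-endian right after the 8-byte magic:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Sketch: read the image length the way uclinux_mtd_init() does, assuming
 * a romfs-style header with a big-endian size word at offset 8. */
static unsigned long image_size_at(const void *base)
{
	const u8 *p = base;

	return be32_to_cpu(*(const __be32 *)(p + 8));
}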
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index c516a940808..ffcbcca2fd2 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -101,6 +101,8 @@ struct atmel_nand_host {
u8 pmecc_corr_cap;
u16 pmecc_sector_size;
u32 pmecc_lookup_table_offset;
+ u32 pmecc_lookup_table_offset_512;
+ u32 pmecc_lookup_table_offset_1024;
int pmecc_bytes_per_sector;
int pmecc_sector_number;
@@ -908,6 +910,84 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
}
+/*
+ * Get the ECC requirement from the ONFI parameters; returns -1 if the ONFI
+ * parameters are not supported.
+ * Returns 0 on success.
+ */
+static int get_onfi_ecc_param(struct nand_chip *chip,
+ int *ecc_bits, int *sector_size)
+{
+ *ecc_bits = *sector_size = 0;
+
+ if (chip->onfi_params.ecc_bits == 0xff)
+ /* TODO: the sector_size and ecc_bits need to be found in the
+ * extended ECC parameters; this is not supported yet.
+ */
+ return -1;
+
+ *ecc_bits = chip->onfi_params.ecc_bits;
+
+ /* The default sector size (ecc codeword size) is 512 */
+ *sector_size = 512;
+
+ return 0;
+}
+
+/*
+ * Get the ECC requirement from the ONFI parameters.
+ * If pmecc-cap or pmecc-sector-size is not specified in the DTS, this function
+ * sets it according to the ONFI ECC requirement. Otherwise, the value from
+ * the DTS file is used.
+ * Returns 0 on success, otherwise an error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+ int *cap, int *sector_size)
+{
+ /* Get ECC requirement from ONFI parameters */
+ *cap = *sector_size = 0;
+ if (host->nand_chip.onfi_version) {
+ if (!get_onfi_ecc_param(&host->nand_chip, cap, sector_size))
+ dev_info(host->dev, "ONFI params, minimum required ECC: %d bits in %d bytes\n",
+ *cap, *sector_size);
+ else
+ dev_info(host->dev, "NAND chip ECC reqirement is in Extended ONFI parameter, we don't support yet.\n");
+ } else {
+ dev_info(host->dev, "NAND chip is not ONFI compliant, assume ecc_bits is 2 in 512 bytes");
+ }
+ if (*cap == 0 && *sector_size == 0) {
+ *cap = 2;
+ *sector_size = 512;
+ }
+
+ /* If dts file doesn't specify then use the one in ONFI parameters */
+ if (host->pmecc_corr_cap == 0) {
+ /* use the closest supported ecc strength (round up) */
+ if (*cap <= 2)
+ host->pmecc_corr_cap = 2;
+ else if (*cap <= 4)
+ host->pmecc_corr_cap = 4;
+ else if (*cap < 8)
+ host->pmecc_corr_cap = 8;
+ else if (*cap < 12)
+ host->pmecc_corr_cap = 12;
+ else if (*cap < 24)
+ host->pmecc_corr_cap = 24;
+ else
+ return -EINVAL;
+ }
+ if (host->pmecc_sector_size == 0) {
+ /* use the closest supported sector size (round down) */
+ if (*sector_size >= 1024)
+ host->pmecc_sector_size = 1024;
+ else if (*sector_size >= 512)
+ host->pmecc_sector_size = 512;
+ else
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct atmel_nand_host *host)
{
@@ -916,8 +996,22 @@ static int __init atmel_pmecc_nand_init_params(struct platform_device *pdev,
struct resource *regs, *regs_pmerr, *regs_rom;
int cap, sector_size, err_no;
+ err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+ if (err_no) {
+ dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
+ return err_no;
+ }
+
+ if (cap != host->pmecc_corr_cap ||
+ sector_size != host->pmecc_sector_size)
+ dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
+
cap = host->pmecc_corr_cap;
sector_size = host->pmecc_sector_size;
+ host->pmecc_lookup_table_offset = (sector_size == 512) ?
+ host->pmecc_lookup_table_offset_512 :
+ host->pmecc_lookup_table_offset_1024;
+
dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
cap, sector_size);
@@ -1215,7 +1309,7 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
static int atmel_of_init_port(struct atmel_nand_host *host,
struct device_node *np)
{
- u32 val, table_offset;
+ u32 val;
u32 offset[2];
int ecc_mode;
struct atmel_nand_data *board = &host->board;
@@ -1259,42 +1353,41 @@ static int atmel_of_init_port(struct atmel_nand_host *host,
/* use PMECC, get correction capability, sector size and lookup
* table offset.
+ * If correction bits and sector size are not specified, then find
+ * them from NAND ONFI parameters.
*/
- if (of_property_read_u32(np, "atmel,pmecc-cap", &val) != 0) {
- dev_err(host->dev, "Cannot decide PMECC Capability\n");
- return -EINVAL;
- } else if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
- (val != 24)) {
- dev_err(host->dev,
- "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
- val);
- return -EINVAL;
+ if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+ if ((val != 2) && (val != 4) && (val != 8) && (val != 12) &&
+ (val != 24)) {
+ dev_err(host->dev,
+ "Unsupported PMECC correction capability: %d; should be 2, 4, 8, 12 or 24\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_corr_cap = (u8)val;
}
- host->pmecc_corr_cap = (u8)val;
- if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) != 0) {
- dev_err(host->dev, "Cannot decide PMECC Sector Size\n");
- return -EINVAL;
- } else if ((val != 512) && (val != 1024)) {
- dev_err(host->dev,
- "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
- val);
- return -EINVAL;
+ if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+ if ((val != 512) && (val != 1024)) {
+ dev_err(host->dev,
+ "Unsupported PMECC sector size: %d; should be 512 or 1024 bytes\n",
+ val);
+ return -EINVAL;
+ }
+ host->pmecc_sector_size = (u16)val;
}
- host->pmecc_sector_size = (u16)val;
if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
offset, 2) != 0) {
dev_err(host->dev, "Cannot get PMECC lookup table offset\n");
return -EINVAL;
}
- table_offset = host->pmecc_sector_size == 512 ? offset[0] : offset[1];
-
- if (!table_offset) {
+ if (!offset[0] && !offset[1]) {
dev_err(host->dev, "Invalid PMECC lookup table offset\n");
return -EINVAL;
}
- host->pmecc_lookup_table_offset = table_offset;
+ host->pmecc_lookup_table_offset_512 = offset[0];
+ host->pmecc_lookup_table_offset_1024 = offset[1];
return 0;
}
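Note: pmecc_choose_ecc() above maps the ONFI-reported requirement onto the discrete capabilities the PMECC supports: the correction strength is rounded up to {2, 4, 8, 12, 24} and the sector size down to {512, 1024}. A simplified sketch of the strength rounding (the driver's exact boundary checks are in the hunk above):

#include <linux/kernel.h>
#include <linux/errno.h>

static const int pmecc_caps[] = { 2, 4, 8, 12, 24 };

/* Round a required correction capability up to a value PMECC supports. */
static int pmecc_round_up_cap(int required)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pmecc_caps); i++)
		if (required <= pmecc_caps[i])
			return pmecc_caps[i];

	return -EINVAL;	/* stronger than the controller can provide */
}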
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
index 0bdb2ce4da7..c005a62330b 100644
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
@@ -1,6 +1,10 @@
#ifndef __BCM47XXNFLASH_H
#define __BCM47XXNFLASH_H
+#ifndef pr_fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#endif
+
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
index 8363a9a5fa3..7bae569fdc7 100644
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ b/drivers/mtd/nand/bcm47xxnflash/main.c
@@ -9,14 +9,14 @@
*
*/
+#include "bcm47xxnflash.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-#include "bcm47xxnflash.h"
-
MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rafał Miłecki");
@@ -77,6 +77,7 @@ static int bcm47xxnflash_remove(struct platform_device *pdev)
}
static struct platform_driver bcm47xxnflash_driver = {
+ .probe = bcm47xxnflash_probe,
.remove = bcm47xxnflash_remove,
.driver = {
.name = "bcma_nflash",
@@ -88,13 +89,10 @@ static int __init bcm47xxnflash_init(void)
{
int err;
- /*
- * Platform device "bcma_nflash" exists on SoCs and is registered very
- * early, it won't be added during runtime (use platform_driver_probe).
- */
- err = platform_driver_probe(&bcm47xxnflash_driver, bcm47xxnflash_probe);
+ err = platform_driver_register(&bcm47xxnflash_driver);
if (err)
- pr_err("Failed to register serial flash driver: %d\n", err);
+ pr_err("Failed to register bcm47xx nand flash driver: %d\n",
+ err);
return err;
}
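Note: the hunk above drops platform_driver_probe() in favour of a regular registration with .probe wired into the driver structure, so the driver can also bind to devices added after init. A generic sketch of that pattern (all names hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* acquire resources, register subdevices, ... */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,	/* kept, unlike with platform_driver_probe() */
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
	},
};
module_platform_driver(example_driver);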
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
index 595de4012e7..b2ab373c9ee 100644
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -9,13 +9,13 @@
*
*/
+#include "bcm47xxnflash.h"
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/bcma/bcma.h>
-#include "bcm47xxnflash.h"
-
/* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 have
 * shown ~1000 retries as a maximum. */
#define NFLASH_READY_RETRIES 10000
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index feae55c7b88..94e17af8e45 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -606,7 +606,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
if (pdev->id < 0 || pdev->id > 3)
return -ENODEV;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info) {
dev_err(&pdev->dev, "unable to allocate memory\n");
ret = -ENOMEM;
@@ -623,11 +623,11 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
goto err_nomem;
}
- vaddr = ioremap(res1->start, resource_size(res1));
- base = ioremap(res2->start, resource_size(res2));
+ vaddr = devm_request_and_ioremap(&pdev->dev, res1);
+ base = devm_request_and_ioremap(&pdev->dev, res2);
if (!vaddr || !base) {
dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EINVAL;
+ ret = -EADDRNOTAVAIL;
goto err_ioremap;
}
@@ -717,7 +717,7 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
}
info->chip.ecc.mode = ecc_mode;
- info->clk = clk_get(&pdev->dev, "aemif");
+ info->clk = devm_clk_get(&pdev->dev, "aemif");
if (IS_ERR(info->clk)) {
ret = PTR_ERR(info->clk);
dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
@@ -845,8 +845,6 @@ err_timing:
clk_disable_unprepare(info->clk);
err_clk_enable:
- clk_put(info->clk);
-
spin_lock_irq(&davinci_nand_lock);
if (ecc_mode == NAND_ECC_HW_SYNDROME)
ecc4_busy = false;
@@ -855,13 +853,7 @@ err_clk_enable:
err_ecc:
err_clk:
err_ioremap:
- if (base)
- iounmap(base);
- if (vaddr)
- iounmap(vaddr);
-
err_nomem:
- kfree(info);
return ret;
}
@@ -874,15 +866,9 @@ static int __exit nand_davinci_remove(struct platform_device *pdev)
ecc4_busy = false;
spin_unlock_irq(&davinci_nand_lock);
- iounmap(info->base);
- iounmap(info->vaddr);
-
nand_release(&info->mtd);
clk_disable_unprepare(info->clk);
- clk_put(info->clk);
-
- kfree(info);
return 0;
}
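Note: the davinci probe path above is converted to device-managed resources, which is why the matching kfree()/iounmap()/clk_put() calls disappear from the error and remove paths. A condensed sketch of the pattern using the devm_* helpers of this kernel generation (names hypothetical; devm_request_and_ioremap() returns NULL on failure here):

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

struct example_priv {
	void __iomem *base;
	struct clk *clk;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct resource *res;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_request_and_ioremap(&pdev->dev, res);
	if (!priv->base)
		return -EADDRNOTAVAIL;

	priv->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	platform_set_drvdata(pdev, priv);
	return 0;	/* everything above is released automatically on unbind */
}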
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index ad6222627fe..f1f7f12ab50 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -176,8 +176,8 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
ifc_nand_ctrl->page = page_addr;
/* Program ROW0/COL0 */
- out_be32(&ifc->ifc_nand.row0, page_addr);
- out_be32(&ifc->ifc_nand.col0, (oob ? IFC_NAND_COL_MS : 0) | column);
+ iowrite32be(page_addr, &ifc->ifc_nand.row0);
+ iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
buf_num = page_addr & priv->bufnum_mask;
@@ -239,18 +239,19 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int i;
/* set the chip select for NAND Transaction */
- out_be32(&ifc->ifc_nand.nand_csel, priv->bank << IFC_NAND_CSEL_SHIFT);
+ iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT,
+ &ifc->ifc_nand.nand_csel);
dev_vdbg(priv->dev,
"%s: fir0=%08x fcr0=%08x\n",
__func__,
- in_be32(&ifc->ifc_nand.nand_fir0),
- in_be32(&ifc->ifc_nand.nand_fcr0));
+ ioread32be(&ifc->ifc_nand.nand_fir0),
+ ioread32be(&ifc->ifc_nand.nand_fcr0));
ctrl->nand_stat = 0;
/* start read/write seq */
- out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -273,7 +274,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
int sector_end = sector + chip->ecc.steps - 1;
for (i = sector / 4; i <= sector_end / 4; i++)
- eccstat[i] = in_be32(&ifc->ifc_nand.nand_eccstat[i]);
+ eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]);
for (i = sector; i <= sector_end; i++) {
errors = check_read_ecc(mtd, ctrl, eccstat, i);
@@ -313,31 +314,33 @@ static void fsl_ifc_do_read(struct nand_chip *chip,
/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
if (mtd->writesize > 512) {
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
-
- out_be32(&ifc->ifc_nand.nand_fcr0,
- (NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT));
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
+
+ iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
} else {
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1, 0x0);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fir1);
if (oob)
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(NAND_CMD_READOOB <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
else
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(NAND_CMD_READ0 <<
+ IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
}
}
@@ -357,7 +360,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
switch (command) {
/* READ0 read the entire buffer to use hardware ECC. */
case NAND_CMD_READ0:
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, page_addr, 0);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -372,7 +375,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* READOOB reads only the OOB because no ECC is performed. */
case NAND_CMD_READOOB:
- out_be32(&ifc->ifc_nand.nand_fbcr, mtd->oobsize - column);
+ iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, column, page_addr, 1);
ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
@@ -388,19 +391,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
if (command == NAND_CMD_PARAM)
timing = IFC_FIR_OP_RBCD;
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (timing << IFC_NAND_FIR0_OP2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- command << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.row3, column);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (timing << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(column, &ifc->ifc_nand.row3);
/*
* although currently it's 8 bytes for READID, we always read
* the maximum 256 bytes(for PARAM)
*/
- out_be32(&ifc->ifc_nand.nand_fbcr, 256);
+ iowrite32be(256, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 256;
set_addr(mtd, 0, 0, 0);
@@ -415,16 +418,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* ERASE2 uses the block and page address from ERASE1 */
case NAND_CMD_ERASE2:
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT));
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
- out_be32(&ifc->ifc_nand.nand_fcr0,
- (NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT));
+ iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+ &ifc->ifc_nand.nand_fcr0);
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
ifc_nand_ctrl->read_bytes = 0;
fsl_ifc_run_command(mtd);
return;
@@ -440,26 +443,28 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD1_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT));
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
} else {
nand_fcr0 = ((NAND_CMD_PAGEPROG <<
IFC_NAND_FCR0_CMD1_SHIFT) |
(NAND_CMD_SEQIN <<
IFC_NAND_FCR0_CMD2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fir1,
- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT));
+ iowrite32be(
+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT,
+ &ifc->ifc_nand.nand_fir1);
if (column >= mtd->writesize)
nand_fcr0 |=
@@ -474,7 +479,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
column -= mtd->writesize;
ifc_nand_ctrl->oob = 1;
}
- out_be32(&ifc->ifc_nand.nand_fcr0, nand_fcr0);
+ iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
return;
}
@@ -482,10 +487,11 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
case NAND_CMD_PAGEPROG: {
if (ifc_nand_ctrl->oob) {
- out_be32(&ifc->ifc_nand.nand_fbcr,
- ifc_nand_ctrl->index - ifc_nand_ctrl->column);
+ iowrite32be(ifc_nand_ctrl->index -
+ ifc_nand_ctrl->column,
+ &ifc->ifc_nand.nand_fbcr);
} else {
- out_be32(&ifc->ifc_nand.nand_fbcr, 0);
+ iowrite32be(0, &ifc->ifc_nand.nand_fbcr);
}
fsl_ifc_run_command(mtd);
@@ -493,12 +499,12 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
}
case NAND_CMD_STATUS:
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
@@ -512,10 +518,10 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
return;
case NAND_CMD_RESET:
- out_be32(&ifc->ifc_nand.nand_fir0,
- IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT);
+ iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
fsl_ifc_run_command(mtd);
return;
@@ -639,18 +645,18 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
u32 nand_fsr;
/* Use READ_STATUS command, but wait for the device to be ready */
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0, NAND_CMD_STATUS <<
- IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.nand_fbcr, 1);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(1, &ifc->ifc_nand.nand_fbcr);
set_addr(mtd, 0, 0, 0);
ifc_nand_ctrl->read_bytes = 1;
fsl_ifc_run_command(mtd);
- nand_fsr = in_be32(&ifc->ifc_nand.nand_fsr);
+ nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr);
/*
* The chip always seems to report that it is
@@ -744,34 +750,34 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
uint32_t cs = priv->bank;
/* Save CSOR and CSOR_ext */
- csor = in_be32(&ifc->csor_cs[cs].csor);
- csor_ext = in_be32(&ifc->csor_cs[cs].csor_ext);
+ csor = ioread32be(&ifc->csor_cs[cs].csor);
+ csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext);
/* change PageSize to 8K and SpareSize to 1K */
csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
- out_be32(&ifc->csor_cs[cs].csor, csor_8k);
- out_be32(&ifc->csor_cs[cs].csor_ext, 0x0000400);
+ iowrite32be(csor_8k, &ifc->csor_cs[cs].csor);
+ iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext);
/* READID */
- out_be32(&ifc->ifc_nand.nand_fir0,
- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT));
- out_be32(&ifc->ifc_nand.nand_fcr0,
- NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT);
- out_be32(&ifc->ifc_nand.row3, 0x0);
+ iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+ &ifc->ifc_nand.nand_fir0);
+ iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+ &ifc->ifc_nand.nand_fcr0);
+ iowrite32be(0x0, &ifc->ifc_nand.row3);
- out_be32(&ifc->ifc_nand.nand_fbcr, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr);
/* Program ROW0/COL0 */
- out_be32(&ifc->ifc_nand.row0, 0x0);
- out_be32(&ifc->ifc_nand.col0, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.row0);
+ iowrite32be(0x0, &ifc->ifc_nand.col0);
/* set the chip select for NAND Transaction */
- out_be32(&ifc->ifc_nand.nand_csel, cs << IFC_NAND_CSEL_SHIFT);
+ iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel);
/* start read seq */
- out_be32(&ifc->ifc_nand.nandseq_strt, IFC_NAND_SEQ_STRT_FIR_STRT);
+ iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
/* wait for command complete flag or timeout */
wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
@@ -781,8 +787,8 @@ static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
/* Restore CSOR and CSOR_ext */
- out_be32(&ifc->csor_cs[cs].csor, csor);
- out_be32(&ifc->csor_cs[cs].csor_ext, csor_ext);
+ iowrite32be(csor, &ifc->csor_cs[cs].csor);
+ iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext);
}
static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
@@ -799,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
/* fill in nand_chip structure */
/* set up function call table */
- if ((in_be32(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
+ if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16)
chip->read_byte = fsl_ifc_read_byte16;
else
chip->read_byte = fsl_ifc_read_byte;
@@ -813,13 +819,13 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->bbt_td = &bbt_main_descr;
chip->bbt_md = &bbt_mirror_descr;
- out_be32(&ifc->ifc_nand.ncfgr, 0x0);
+ iowrite32be(0x0, &ifc->ifc_nand.ncfgr);
/* set up nand options */
chip->bbt_options = NAND_BBT_USE_FLASH;
- if (in_be32(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
+ if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) {
chip->read_byte = fsl_ifc_read_byte16;
chip->options |= NAND_BUSWIDTH_16;
} else {
@@ -832,7 +838,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.read_page = fsl_ifc_read_page;
chip->ecc.write_page = fsl_ifc_write_page;
- csor = in_be32(&ifc->csor_cs[priv->bank].csor);
+ csor = ioread32be(&ifc->csor_cs[priv->bank].csor);
/* Hardware generates ECC per 512 Bytes */
chip->ecc.size = 512;
@@ -884,7 +890,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
chip->ecc.mode = NAND_ECC_SOFT;
}
- ver = in_be32(&ifc->ifc_rev);
+ ver = ioread32be(&ifc->ifc_rev);
if (ver == FSL_IFC_V1_1_0)
fsl_ifc_sram_init(priv);
@@ -910,7 +916,7 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank,
phys_addr_t addr)
{
- u32 cspr = in_be32(&ifc->cspr_cs[bank].cspr);
+ u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr);
if (!(cspr & CSPR_V))
return 0;
@@ -997,17 +1003,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev)
dev_set_drvdata(priv->dev, priv);
- out_be32(&ifc->ifc_nand.nand_evter_en,
- IFC_NAND_EVTER_EN_OPC_EN |
- IFC_NAND_EVTER_EN_FTOER_EN |
- IFC_NAND_EVTER_EN_WPER_EN);
+ iowrite32be(IFC_NAND_EVTER_EN_OPC_EN |
+ IFC_NAND_EVTER_EN_FTOER_EN |
+ IFC_NAND_EVTER_EN_WPER_EN,
+ &ifc->ifc_nand.nand_evter_en);
/* enable NAND Machine Interrupts */
- out_be32(&ifc->ifc_nand.nand_evter_intr_en,
- IFC_NAND_EVTER_INTR_OPCIR_EN |
- IFC_NAND_EVTER_INTR_FTOERIR_EN |
- IFC_NAND_EVTER_INTR_WPERIR_EN);
-
+ iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN |
+ IFC_NAND_EVTER_INTR_FTOERIR_EN |
+ IFC_NAND_EVTER_INTR_WPERIR_EN,
+ &ifc->ifc_nand.nand_evter_intr_en);
priv->mtd.name = kasprintf(GFP_KERNEL, "%x.flash", (unsigned)res.start);
if (!priv->mtd.name) {
ret = -ENOMEM;
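Note: the fsl_ifc_nand conversion above swaps the powerpc-only out_be32()/in_be32() accessors for the portable iowrite32be()/ioread32be(); the argument order also flips from (address, value) to (value, address). A tiny sketch of the difference on a hypothetical register block:

#include <linux/io.h>
#include <linux/types.h>

struct example_regs {
	__be32 ctrl;
	__be32 status;
};

#define EXAMPLE_CTRL_START	0x1

static u32 example_start(struct example_regs __iomem *regs)
{
	/* was:  out_be32(&regs->ctrl, EXAMPLE_CTRL_START);   (addr, value) */
	iowrite32be(EXAMPLE_CTRL_START, &regs->ctrl);	/* (value, addr) */

	/* was:  return in_be32(&regs->status); */
	return ioread32be(&regs->status);
}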
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index a0924515c39..588f5374047 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -61,6 +61,16 @@
& BM_BCH_FLASH0LAYOUT0_ECC0) \
)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x) \
+ ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14) \
+ : 0 \
+ )
+
#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
@@ -93,6 +103,16 @@
& BM_BCH_FLASH0LAYOUT1_ECCN) \
)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14 10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14 \
+ (0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x) \
+ ((GPMI_IS_MX6Q(x) && ((v) == 14)) \
+ ? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14) \
+ & MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14) \
+ : 0 \
+ )
+
#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
@@ -103,4 +123,6 @@
? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
)
+
+#define HW_BCH_VERSION 0x00000160
#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index d84699c7968..4f8857fa48a 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -208,6 +208,11 @@ void gpmi_dump_info(struct gpmi_nand_data *this)
}
/* start to print out the BCH info */
+ pr_err("Show BCH registers :\n");
+ for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+ reg = readl(r->bch_regs + i * 0x10);
+ pr_err("offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+ }
pr_err("BCH Geometry :\n");
pr_err("GF length : %u\n", geo->gf_len);
pr_err("ECC Strength : %u\n", geo->ecc_strength);
@@ -232,6 +237,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
unsigned int metadata_size;
unsigned int ecc_strength;
unsigned int page_size;
+ unsigned int gf_len;
int ret;
if (common_nfc_set_geometry(this))
@@ -242,6 +248,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
metadata_size = bch_geo->metadata_size;
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
+ gf_len = bch_geo->gf_len;
ret = gpmi_enable_clk(this);
if (ret)
@@ -263,11 +270,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT0);
writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+ | BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
r->bch_regs + HW_BCH_FLASH0LAYOUT1);
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index e9b1c47e3cf..717881a3d1b 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -94,6 +94,25 @@ static inline int get_ecc_strength(struct gpmi_nand_data *this)
return round_down(ecc_strength, 2);
}
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+ struct bch_geometry *geo = &this->bch_geometry;
+
+ /* Do the sanity check. */
+ if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+ /* The mx23/mx28 only support the GF13. */
+ if (geo->gf_len == 14)
+ return false;
+
+ if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
+ return false;
+ } else if (GPMI_IS_MX6Q(this)) {
+ if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
+ return false;
+ }
+ return true;
+}
+
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
struct bch_geometry *geo = &this->bch_geometry;
@@ -112,17 +131,24 @@ int common_nfc_set_geometry(struct gpmi_nand_data *this)
/* The default for the length of Galois Field. */
geo->gf_len = 13;
- /* The default for chunk size. There is no oobsize greater then 512. */
+ /* The default for chunk size. */
geo->ecc_chunk_size = 512;
- while (geo->ecc_chunk_size < mtd->oobsize)
+ while (geo->ecc_chunk_size < mtd->oobsize) {
geo->ecc_chunk_size *= 2; /* keep C >= O */
+ geo->gf_len = 14;
+ }
geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
/* We use the same ECC strength for all chunks. */
geo->ecc_strength = get_ecc_strength(this);
- if (!geo->ecc_strength) {
- pr_err("wrong ECC strength.\n");
+ if (!gpmi_check_ecc(this)) {
+ dev_err(this->dev,
+ "We can not support this nand chip."
+ " Its required ecc strength(%d) is beyond our"
+ " capability(%d).\n", geo->ecc_strength,
+ (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX
+ : MXS_ECC_STRENGTH_MAX));
return -EINVAL;
}
@@ -920,8 +946,7 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
dma_addr_t auxiliary_phys;
unsigned int i;
unsigned char *status;
- unsigned int failed;
- unsigned int corrected;
+ unsigned int max_bitflips = 0;
int ret;
pr_debug("page number is : %d\n", page);
@@ -945,35 +970,25 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
payload_virt, payload_phys);
if (ret) {
pr_err("Error in ECC-based read: %d\n", ret);
- goto exit_nfc;
+ return ret;
}
/* handle the block mark swapping */
block_mark_swapping(this, payload_virt, auxiliary_virt);
/* Loop over status bytes, accumulating ECC status. */
- failed = 0;
- corrected = 0;
- status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
- failed++;
+ mtd->ecc_stats.failed++;
continue;
}
- corrected += *status;
- }
-
- /*
- * Propagate ECC status to the owning MTD only when failed or
- * corrected times nearly reaches our ECC correction threshold.
- */
- if (failed || corrected >= (nfc_geo->ecc_strength - 1)) {
- mtd->ecc_stats.failed += failed;
- mtd->ecc_stats.corrected += corrected;
+ mtd->ecc_stats.corrected += *status;
+ max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
if (oob_required) {
@@ -995,8 +1010,8 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
this->payload_virt, this->payload_phys,
nfc_geo->payload_size,
payload_virt, payload_phys);
-exit_nfc:
- return ret;
+
+ return max_bitflips;
}
static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1668,8 +1683,8 @@ exit_nfc_init:
release_resources(this);
exit_acquire_resources:
platform_set_drvdata(pdev, NULL);
- kfree(this);
dev_err(this->dev, "driver registration failed: %d\n", ret);
+ kfree(this);
return ret;
}
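Note: the gpmi_ecc_read_page() rework above switches to the now-standard reporting convention: uncorrectable chunks bump mtd->ecc_stats.failed, corrected bits go into ecc_stats.corrected, and the function returns the maximum bit flips seen in any single chunk so the MTD core can compare it against the bitflip threshold. A sketch of that accounting (status[] assumed to hold the per-chunk BCH results; STATUS_* values as in gpmi-nand.h, with STATUS_GOOD assumed to be 0x00):

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

#define STATUS_GOOD		0x00	/* assumed, as in gpmi-nand.h */
#define STATUS_ERASED		0xff
#define STATUS_UNCORRECTABLE	0xfe

static unsigned int report_chunk_status(struct mtd_info *mtd,
					const u8 *status, int chunks)
{
	unsigned int max_bitflips = 0;
	int i;

	for (i = 0; i < chunks; i++) {
		if (status[i] == STATUS_GOOD || status[i] == STATUS_ERASED)
			continue;
		if (status[i] == STATUS_UNCORRECTABLE) {
			mtd->ecc_stats.failed++;
			continue;
		}
		mtd->ecc_stats.corrected += status[i];
		max_bitflips = max_t(unsigned int, max_bitflips, status[i]);
	}

	return max_bitflips;	/* compared against mtd->bitflip_threshold */
}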
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index 3d93a5e3909..07294773127 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -284,6 +284,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
#define STATUS_ERASED 0xff
#define STATUS_UNCORRECTABLE 0xfe
+/* BCH's bit correction capability. */
+#define MXS_ECC_STRENGTH_MAX 20 /* mx23 and mx28 */
+#define MX6_ECC_STRENGTH_MAX 40
+
/* Use the platform_id to distinguish different Archs. */
#define IS_MX23 0x0
#define IS_MX28 0x1
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 60ac5b98b71..07e5784e5cd 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -530,12 +530,23 @@ static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
static void send_read_id_v3(struct mxc_nand_host *host)
{
+ struct nand_chip *this = &host->nand;
+
/* Read ID into main buffer */
writel(NFC_ID, NFC_V3_LAUNCH);
wait_op_done(host, true);
memcpy32_fromio(host->data_buf, host->main_area0, 16);
+
+ if (this->options & NAND_BUSWIDTH_16) {
+ /* compress the ID info */
+ host->data_buf[1] = host->data_buf[2];
+ host->data_buf[2] = host->data_buf[4];
+ host->data_buf[3] = host->data_buf[6];
+ host->data_buf[4] = host->data_buf[8];
+ host->data_buf[5] = host->data_buf[10];
+ }
}
/* Request the NANDFC to perform a read of the NAND device ID. */
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3766682a028..43214151b88 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -825,13 +825,8 @@ static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
{
- unsigned long timeo = jiffies;
int status, state = chip->state;
-
- if (state == FL_ERASING)
- timeo += (HZ * 400) / 1000;
- else
- timeo += (HZ * 20) / 1000;
+ unsigned long timeo = (state == FL_ERASING ? 400 : 20);
led_trigger_event(nand_led_trigger, LED_FULL);
@@ -849,6 +844,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
if (in_interrupt() || oops_in_progress)
panic_nand_wait(mtd, chip, timeo);
else {
+ timeo = jiffies + msecs_to_jiffies(timeo);
while (time_before(jiffies, timeo)) {
if (chip->dev_ready) {
if (chip->dev_ready(mtd))
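Note: nand_wait() above now keeps the timeout as a millisecond budget (400 ms for erase, 20 ms otherwise) and only converts it to a jiffies deadline just before the polling loop, so the panic path can be handed milliseconds directly. A minimal sketch of the deadline conversion and poll (hypothetical helper):

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Sketch: poll 'ready()' until it succeeds or 'ms' milliseconds elapse. */
static bool wait_ready_ms(bool (*ready)(void), unsigned int ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(ms);

	while (time_before(jiffies, deadline)) {
		if (ready())
			return true;
		cond_resched();
	}

	return ready();	/* one final check after the deadline */
}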
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index b7cfe0d3712..053c9a2d47c 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -55,8 +55,7 @@ struct mtd_info;
#define MODULE_AUTHOR(x) /* x */
#define MODULE_DESCRIPTION(x) /* x */
-#define printk printf
-#define KERN_ERR ""
+#define pr_err printf
#endif
/*
@@ -507,7 +506,7 @@ int __nand_correct_data(unsigned char *buf,
if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
return 1; /* error in ECC data; no action needed */
- printk(KERN_ERR "uncorrectable error : ");
+ pr_err("%s: uncorrectable ECC error", __func__);
return -1;
}
EXPORT_SYMBOL(__nand_correct_data);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 8f30d385bfa..891c52a30e6 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -1468,12 +1468,12 @@ int do_read_error(struct nandsim *ns, int num)
void do_bit_flips(struct nandsim *ns, int num)
{
- if (bitflips && random32() < (1 << 22)) {
+ if (bitflips && prandom_u32() < (1 << 22)) {
int flips = 1;
if (bitflips > 1)
- flips = (random32() % (int) bitflips) + 1;
+ flips = (prandom_u32() % (int) bitflips) + 1;
while (flips--) {
- int pos = random32() % (num * 8);
+ int pos = prandom_u32() % (num * 8);
ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
NS_WARN("read_page: flipping bit %d in page %d "
"reading from %d ecc: corrected=%u failed=%u\n",
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1d333497cfc..8e820ddf4e0 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -22,9 +22,12 @@
#include <linux/omap-dma.h>
#include <linux/io.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#ifdef CONFIG_MTD_NAND_OMAP_BCH
#include <linux/bch.h>
+#include <linux/platform_data/elm.h>
#endif
#include <linux/platform_data/mtd-nand-omap2.h>
@@ -117,6 +120,33 @@
#define OMAP24XX_DMA_GPMC 4
+#define BCH8_MAX_ERROR 8 /* up to 8 bits correctable */
+#define BCH4_MAX_ERROR 4 /* up to 4 bits correctable */
+
+#define SECTOR_BYTES 512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD 4
+#define BCH8_ECC_MAX ((SECTOR_BYTES + BCH8_ECC_OOB_BYTES) * 8)
+#define BCH4_ECC_MAX ((SECTOR_BYTES + BCH4_ECC_OOB_BYTES) * 8)
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1 1 /* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0 0x1a /* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1 0x2 /* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0 0xd /* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1 0x3 /* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6 6 /* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0 0x0 /* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1 0x20 /* ecc_size1 = 32 */
+
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+ 0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
+#endif
+
/* oob info generated runtime depending on ecc algorithm and layout selected */
static struct nand_ecclayout omap_oobinfo;
/* Define some generic bad / good block scan pattern which are used
@@ -156,6 +186,9 @@ struct omap_nand_info {
#ifdef CONFIG_MTD_NAND_OMAP_BCH
struct bch_control *bch;
struct nand_ecclayout ecclayout;
+ bool is_elm_used;
+ struct device *elm_dev;
+ struct device_node *of_node;
#endif
};
@@ -1031,6 +1064,13 @@ static int omap_dev_ready(struct mtd_info *mtd)
* omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
* @mtd: MTD device structure
* @mode: Read/Write mode
+ *
+ * When using BCH, sector size is hardcoded to 512 bytes.
+ * Wrapping mode 6 is used for both reading and writing if the ELM module is
+ * not used for error correction.
+ * On writing,
+ * eccsize0 = 0 (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
*/
static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
{
@@ -1039,32 +1079,57 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_chip *chip = mtd->priv;
- u32 val;
+ u32 val, wr_mode;
+ unsigned int ecc_size1, ecc_size0;
+
+ /* Using wrapping mode 6 for writing */
+ wr_mode = BCH_WRAPMODE_6;
- nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
- dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
- nsectors = 1;
/*
- * Program GPMC to perform correction on one 512-byte sector at a time.
- * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
- * gives a slight (5%) performance gain (but requires additional code).
+ * ECC engine enabled for valid ecc_size0 nibbles
+ * and disabled for ecc_size1 nibbles.
*/
+ ecc_size0 = BCH_ECC_SIZE0;
+ ecc_size1 = BCH_ECC_SIZE1;
+
+ /* Perform ecc calculation on 512-byte sector */
+ nsectors = 1;
+
+ /* Update number of error correction */
+ nerrors = info->nand.ecc.strength;
+
+ /* Multi sector reading/writing for NAND flash with page size < 4096 */
+ if (info->is_elm_used && (mtd->writesize <= 4096)) {
+ if (mode == NAND_ECC_READ) {
+ /* Using wrapping mode 1 for reading */
+ wr_mode = BCH_WRAPMODE_1;
+
+ /*
+ * ECC engine enabled for ecc_size0 nibbles
+ * and disabled for ecc_size1 nibbles.
+ */
+ ecc_size0 = (nerrors == 8) ?
+ BCH8R_ECC_SIZE0 : BCH4R_ECC_SIZE0;
+ ecc_size1 = (nerrors == 8) ?
+ BCH8R_ECC_SIZE1 : BCH4R_ECC_SIZE1;
+ }
+
+ /* Perform ecc calculation for one page (< 4096) */
+ nsectors = info->nand.ecc.steps;
+ }
writel(ECC1, info->reg.gpmc_ecc_control);
- /*
- * When using BCH, sector size is hardcoded to 512 bytes.
- * Here we are using wrapping mode 6 both for reading and writing, with:
- * size0 = 0 (no additional protected byte in spare area)
- * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
- */
- val = (32 << ECCSIZE1_SHIFT) | (0 << ECCSIZE0_SHIFT);
+ /* Configure ecc size for BCH */
+ val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
writel(val, info->reg.gpmc_ecc_size_config);
+ dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
/* BCH configuration */
val = ((1 << 16) | /* enable BCH */
(((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
- (0x06 << 8) | /* wrap mode = 6 */
+ (wr_mode << 8) | /* wrap mode */
(dev_width << 7) | /* bus width */
(((nsectors-1) & 0x7) << 4) | /* number of sectors */
(info->gpmc_cs << 1) | /* ECC CS */
@@ -1072,7 +1137,7 @@ static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
writel(val, info->reg.gpmc_ecc_config);
- /* clear ecc and enable bits */
+ /* Clear ecc and enable bits */
writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
}
@@ -1162,6 +1227,298 @@ static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
}
/**
+ * omap3_calculate_ecc_bch - Generate ECC bytes
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Supports calculation of BCH4/8 ECC vectors for the page
+ */
+static int omap3_calculate_ecc_bch(struct mtd_info *mtd, const u_char *dat,
+ u_char *ecc_code)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+ int i, eccbchtsel;
+
+ nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+ /*
+ * find BCH scheme used
+ * 0 -> BCH4
+ * 1 -> BCH8
+ */
+ eccbchtsel = ((readl(info->reg.gpmc_ecc_config) >> 12) & 0x3);
+
+ for (i = 0; i < nsectors; i++) {
+
+ /* Read hw-computed remainder */
+ bch_val1 = readl(info->reg.gpmc_bch_result0[i]);
+ bch_val2 = readl(info->reg.gpmc_bch_result1[i]);
+ if (eccbchtsel) {
+ bch_val3 = readl(info->reg.gpmc_bch_result2[i]);
+ bch_val4 = readl(info->reg.gpmc_bch_result3[i]);
+ }
+
+ if (eccbchtsel) {
+ /* BCH8 ecc scheme */
+ *ecc_code++ = (bch_val4 & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val3 & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val2 & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+ *ecc_code++ = (bch_val1 & 0xFF);
+ /*
+ * Setting 14th byte to zero to handle
+ * erased page & maintain compatibility
+ * with RBL
+ */
+ *ecc_code++ = 0x0;
+ } else {
+ /* BCH4 ecc scheme */
+ *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val2 & 0xF) << 4) |
+ ((bch_val1 >> 28) & 0xF);
+ *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+ *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+ *ecc_code++ = ((bch_val1 & 0xF) << 4);
+ /*
+ * Setting 8th byte to zero to handle
+ * erased page
+ */
+ *ecc_code++ = 0x0;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data: data sector buffer
+ * @oob: oob buffer
+ * @info: omap_nand_info
+ *
+ * Check whether the bit flips in an erased page fall below the correctable
+ * level. If they do, report the page as erased with correctable bit
+ * flips; otherwise report the page as uncorrectable.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+ struct omap_nand_info *info)
+{
+ int flip_bits = 0, i;
+
+ for (i = 0; i < info->nand.ecc.size; i++) {
+ flip_bits += hweight8(~data[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+ flip_bits += hweight8(~oob[i]);
+ if (flip_bits > info->nand.ecc.strength)
+ return 0;
+ }
+
+ /*
+ * Bit flips falls in correctable level.
+ * Fill data area with 0xFF
+ */
+ if (flip_bits) {
+ memset(data, 0xFF, info->nand.ecc.size);
+ memset(oob, 0xFF, info->nand.ecc.bytes);
+ }
+
+ return flip_bits;
+}
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd: MTD device structure
+ * @data: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * The calculated ecc vector is reported as zero for error-free pages.
+ * For erroneous or erased pages a non-zero error vector is reported.
+ * In case of a non-zero ecc vector, check read_ecc at a fixed offset
+ * (x = 13 for BCH8, x = 7 for BCH4) to find out whether the page is
+ * programmed or not. To tolerate bit flips in this byte, count the
+ * number of 0 bits in read_ecc[x] and compare the count against a
+ * threshold (at most 4): if it reaches the threshold the page is
+ * treated as programmed, otherwise as erased.
+ *
+ * 1. If the page is erased, check it against the standard ecc vector
+ * of an erased page to find any bit flips. If the check fails, bit
+ * flips are present in the erased page. Count them and, if they fall
+ * within the correctable level, report the page as 0xFF and update
+ * the corrected-bit statistics.
+ * 2. If an error is reported on a programmed page, update the elm error
+ * vector and correct the page with the ELM error correction routine.
+ *
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+ u_char *read_ecc, u_char *calc_ecc)
+{
+ struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
+ mtd);
+ int eccsteps = info->nand.ecc.steps;
+ int i , j, stat = 0;
+ int eccsize, eccflag, ecc_vector_size;
+ struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+ u_char *ecc_vec = calc_ecc;
+ u_char *spare_ecc = read_ecc;
+ u_char *erased_ecc_vec;
+ enum bch_ecc type;
+ bool is_error_reported = false;
+
+ /* Initialize elm error vector to zero */
+ memset(err_vec, 0, sizeof(err_vec));
+
+ if (info->nand.ecc.strength == BCH8_MAX_ERROR) {
+ type = BCH8_ECC;
+ erased_ecc_vec = bch8_vector;
+ } else {
+ type = BCH4_ECC;
+ erased_ecc_vec = bch4_vector;
+ }
+
+ ecc_vector_size = info->nand.ecc.bytes;
+
+ /*
+ * Remove extra byte padding for BCH8 RBL
+ * compatibility and erased page handling
+ */
+ eccsize = ecc_vector_size - 1;
+
+ for (i = 0; i < eccsteps ; i++) {
+ eccflag = 0; /* initialize eccflag */
+
+ /*
+ * Check whether any error was reported;
+ * in case of an error a non-zero ecc is reported.
+ */
+
+ for (j = 0; (j < eccsize); j++) {
+ if (calc_ecc[j] != 0) {
+ eccflag = 1; /* non zero ecc, error present */
+ break;
+ }
+ }
+
+ if (eccflag == 1) {
+ /*
+ * Set the threshold to the minimum of 4 and ecc.strength/2,
+ * limiting the tolerated bit flips in this byte to 4
+ */
+ unsigned int threshold = min_t(unsigned int, 4,
+ info->nand.ecc.strength / 2);
+
+ /*
+ * Check whether the data area is programmed by counting the
+ * number of 0 bits at a fixed offset in the spare area and
+ * comparing that count against the threshold: a programmed
+ * page is expected to have at least 'threshold' zeros in
+ * this byte, otherwise the page is treated as erased and
+ * checked for bit flips instead.
+ */
+ if (hweight8(~read_ecc[eccsize]) >= threshold) {
+ /*
+ * Update elm error vector as
+ * data area is programmed
+ */
+ err_vec[i].error_reported = true;
+ is_error_reported = true;
+ } else {
+ /* Error reported in erased page */
+ int bitflip_count;
+ u_char *buf = &data[info->nand.ecc.size * i];
+
+ if (memcmp(calc_ecc, erased_ecc_vec, eccsize)) {
+ bitflip_count = erased_sector_bitflips(
+ buf, read_ecc, info);
+
+ if (bitflip_count)
+ stat += bitflip_count;
+ else
+ return -EINVAL;
+ }
+ }
+ }
+
+ /* Update the ecc vector */
+ calc_ecc += ecc_vector_size;
+ read_ecc += ecc_vector_size;
+ }
+
+ /* Check if any error reported */
+ if (!is_error_reported)
+ return 0;
+
+ /* Decode BCH error using ELM module */
+ elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+ for (i = 0; i < eccsteps; i++) {
+ if (err_vec[i].error_reported) {
+ for (j = 0; j < err_vec[i].error_count; j++) {
+ u32 bit_pos, byte_pos, error_max, pos;
+
+ if (type == BCH8_ECC)
+ error_max = BCH8_ECC_MAX;
+ else
+ error_max = BCH4_ECC_MAX;
+
+ if (info->nand.ecc.strength == BCH8_MAX_ERROR)
+ pos = err_vec[i].error_loc[j];
+ else
+ /* Add 4 to take care 4 bit padding */
+ pos = err_vec[i].error_loc[j] +
+ BCH4_BIT_PAD;
+
+ /* Calculate bit position of error */
+ bit_pos = pos % 8;
+
+ /* Calculate byte position of error */
+ byte_pos = (error_max - pos - 1) / 8;
+
+ if (pos < error_max) {
+ if (byte_pos < 512)
+ data[byte_pos] ^= 1 << bit_pos;
+ else
+ spare_ecc[byte_pos - 512] ^=
+ 1 << bit_pos;
+ }
+ /* else, the error lies outside the area we correct */
+ }
+ }
+
+ /* Update number of correctable errors */
+ stat += err_vec[i].error_count;
+
+ /* Update page data with sector size */
+ data += info->nand.ecc.size;
+ spare_ecc += ecc_vector_size;
+ }
+
+ for (i = 0; i < eccsteps; i++)
+ /* Return error if uncorrectable error present */
+ if (err_vec[i].error_uncorrectable)
+ return -EINVAL;
+
+ return stat;
+}
+
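Note: omap_elm_correct_data() above relies on erased_sector_bitflips() to decide whether an apparently-erased sector merely suffered a few bit flips: it counts the 0 bits (hweight8 of the inverted bytes) and gives up once the ECC strength is exceeded. A stand-alone sketch of that counting idea (hypothetical helper, not part of the patch):

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch: count 0 bits in a nominally all-0xFF (erased) buffer; bail out
 * as soon as more than 'strength' flips are seen. */
static int count_erased_bitflips(const u8 *buf, int len, int strength)
{
	int flips = 0, i;

	for (i = 0; i < len; i++) {
		flips += hweight8((u8)~buf[i]);
		if (flips > strength)
			return -1;	/* too many: treat as uncorrectable */
	}

	return flips;
}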
+/**
* omap3_correct_data_bch - Decode received data and correct errors
* @mtd: MTD device structure
* @data: page data
@@ -1194,6 +1551,92 @@ static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
}
/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ *
+ * Custom write page method developed to support multi-sector writing in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ const uint8_t *buf, int oob_required)
+{
+ int i;
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+ /* Write data */
+ chip->write_buf(mtd, buf, mtd->writesize);
+
+ /* Update ecc vector from GPMC result registers */
+ chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+ for (i = 0; i < chip->ecc.total; i++)
+ chip->oob_poi[eccpos[i]] = ecc_calc[i];
+
+ /* Write ecc vector to OOB area */
+ chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+ return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * For the BCH ecc scheme, the GPMC is used for syndrome calculation and the
+ * ELM module is used for error correction.
+ * Custom method developed to support ELM error correction and multi-sector
+ * reading. On reading, the page data area is read along with the OOB data
+ * with the ecc engine enabled; the ecc vector is updated after the OOB data
+ * has been read. For error-free pages the ecc vector is reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+ uint8_t *buf, int oob_required, int page)
+{
+ uint8_t *ecc_calc = chip->buffers->ecccalc;
+ uint8_t *ecc_code = chip->buffers->ecccode;
+ uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *oob = &chip->oob_poi[eccpos[0]];
+ uint32_t oob_pos = mtd->writesize + chip->ecc.layout->eccpos[0];
+ int stat;
+ unsigned int max_bitflips = 0;
+
+ /* Enable GPMC ecc engine */
+ chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+ /* Read data */
+ chip->read_buf(mtd, buf, mtd->writesize);
+
+ /* Read oob bytes */
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_pos, -1);
+ chip->read_buf(mtd, oob, chip->ecc.total);
+
+ /* Calculate ecc bytes */
+ chip->ecc.calculate(mtd, buf, ecc_calc);
+
+ memcpy(ecc_code, &chip->oob_poi[eccpos[0]], chip->ecc.total);
+
+ stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+
+ return max_bitflips;
+}
+
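The value returned here is the per-page maximum number of corrected bitflips. Roughly speaking (a sketch of the idea, not the exact mtdcore code), the MTD core compares it against mtd->bitflip_threshold and tells callers that the data was recovered but the block should be scrubbed:

	if (ret_code >= mtd->bitflip_threshold)
		return -EUCLEAN;	/* data corrected, but consider moving it */
	return 0;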
+/**
* omap3_free_bch - Release BCH ecc resources
* @mtd: MTD device structure
*/
@@ -1218,43 +1661,86 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
#ifdef CONFIG_MTD_NAND_OMAP_BCH8
- const int hw_errors = 8;
+ const int hw_errors = BCH8_MAX_ERROR;
#else
- const int hw_errors = 4;
+ const int hw_errors = BCH4_MAX_ERROR;
#endif
+ enum bch_ecc bch_type;
+ const __be32 *parp;
+ int lenp;
+ struct device_node *elm_node;
+
info->bch = NULL;
- max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
+ max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ?
+ BCH8_MAX_ERROR : BCH4_MAX_ERROR;
if (max_errors != hw_errors) {
pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
max_errors, hw_errors);
goto fail;
}
- /* software bch library is only used to detect and locate errors */
- info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
- if (!info->bch)
- goto fail;
+ info->nand.ecc.size = 512;
+ info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
+ info->nand.ecc.mode = NAND_ECC_HW;
+ info->nand.ecc.strength = max_errors;
- info->nand.ecc.size = 512;
- info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
- info->nand.ecc.correct = omap3_correct_data_bch;
- info->nand.ecc.mode = NAND_ECC_HW;
+ if (hw_errors == BCH8_MAX_ERROR)
+ bch_type = BCH8_ECC;
+ else
+ bch_type = BCH4_ECC;
- /*
- * The number of corrected errors in an ecc block that will trigger
- * block scrubbing defaults to the ecc strength (4 or 8).
- * Set mtd->bitflip_threshold here to define a custom threshold.
- */
+ /* Detect availability of ELM module */
+ parp = of_get_property(info->of_node, "elm_id", &lenp);
+ if ((parp == NULL) && (lenp != (sizeof(void *) * 2))) {
+ pr_err("Missing elm_id property, falling back to software BCH\n");
+ info->is_elm_used = false;
+ } else {
+ struct platform_device *pdev;
- if (max_errors == 8) {
- info->nand.ecc.strength = 8;
- info->nand.ecc.bytes = 13;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ elm_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ pdev = of_find_device_by_node(elm_node);
+ info->elm_dev = &pdev->dev;
+ elm_config(info->elm_dev, bch_type);
+ info->is_elm_used = true;
+ }
+
+ if (info->is_elm_used && (mtd->writesize <= 4096)) {
+
+ if (hw_errors == BCH8_MAX_ERROR)
+ info->nand.ecc.bytes = BCH8_SIZE;
+ else
+ info->nand.ecc.bytes = BCH4_SIZE;
+
+ info->nand.ecc.correct = omap_elm_correct_data;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch;
+ info->nand.ecc.read_page = omap_read_page_bch;
+ info->nand.ecc.write_page = omap_write_page_bch;
} else {
- info->nand.ecc.strength = 4;
- info->nand.ecc.bytes = 7;
- info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ /*
+ * software bch library is only used to detect and
+ * locate errors
+ */
+ info->bch = init_bch(13, max_errors,
+ 0x201b /* hw polynomial */);
+ if (!info->bch)
+ goto fail;
+
+ info->nand.ecc.correct = omap3_correct_data_bch;
+
+ /*
+ * The number of corrected errors in an ecc block that will
+ * trigger block scrubbing defaults to the ecc strength (4 or 8)
+ * Set mtd->bitflip_threshold here to define a custom threshold.
+ */
+
+ if (max_errors == 8) {
+ info->nand.ecc.bytes = 13;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
+ } else {
+ info->nand.ecc.bytes = 7;
+ info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
+ }
}
pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
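In the non-ELM branch the software decoder created by init_bch() does the locating, and omap3_correct_data_bch() flips the reported bits. A minimal sketch of how decode_bch() from lib/bch.c is typically driven for a 512-byte sector (error handling trimmed; this is not the driver code itself):

	#include <linux/bch.h>

	static int soft_bch_correct(struct bch_control *bch, u8 *data,
				    u8 *read_ecc, u8 *calc_ecc)
	{
		unsigned int errloc[8];	/* at most 8 errors for BCH8 */
		int i, count;

		count = decode_bch(bch, NULL, 512, read_ecc, calc_ecc, NULL, errloc);
		for (i = 0; i < count; i++)
			if (errloc[i] < 512 * 8)	/* error in data, not in the ECC bytes */
				data[errloc[i] / 8] ^= 1 << (errloc[i] & 7);
		return count;	/* negative when the errors are uncorrectable */
	}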
@@ -1270,7 +1756,7 @@ fail:
*/
static int omap3_init_bch_tail(struct mtd_info *mtd)
{
- int i, steps;
+ int i, steps, offset;
struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
mtd);
struct nand_ecclayout *layout = &info->ecclayout;
@@ -1292,11 +1778,21 @@ static int omap3_init_bch_tail(struct mtd_info *mtd)
goto fail;
}
+ /* ECC layout compatible with RBL for BCH8 */
+ if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+ offset = 2;
+ else
+ offset = mtd->oobsize - layout->eccbytes;
+
/* put ecc bytes at oob tail */
for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
+ layout->eccpos[i] = offset + i;
+
+ if (info->is_elm_used && (info->nand.ecc.bytes == BCH8_SIZE))
+ layout->oobfree[0].offset = 2 + layout->eccbytes * steps;
+ else
+ layout->oobfree[0].offset = 2;
- layout->oobfree[0].offset = 2;
layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
info->nand.ecc.layout = layout;
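A worked layout example may help here (illustrative numbers: a 2 KiB page with a 64-byte OOB area, four 512-byte ECC steps, and an assumed BCH8_SIZE of 14, which is not spelled out in this hunk):

	/* ELM/RBL-compatible BCH8: ecc.bytes = 14, eccbytes = 56          */
	/*   eccpos = 2 .. 57,  oobfree = { .offset = 58, .length = 6 }    */
	/* software BCH8 fallback:  ecc.bytes = 13, eccbytes = 52          */
	/*   eccpos = 12 .. 63, oobfree = { .offset = 2,  .length = 10 }   */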
@@ -1360,6 +1856,9 @@ static int omap_nand_probe(struct platform_device *pdev)
info->nand.options = pdata->devsize;
info->nand.options |= NAND_SKIP_BBTSCAN;
+#ifdef CONFIG_MTD_NAND_OMAP_BCH
+ info->of_node = pdata->of_node;
+#endif
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index dbd3aa574ea..30bd907a260 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -174,7 +174,14 @@ out:
return rc;
}
+static void __exit ofpart_parser_exit(void)
+{
+ deregister_mtd_parser(&ofpart_parser);
+ deregister_mtd_parser(&ofoldpart_parser);
+}
+
module_init(ofpart_parser_init);
+module_exit(ofpart_parser_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Parser for MTD partitioning information in device tree");
diff --git a/drivers/mtd/tests/mtd_nandecctest.c b/drivers/mtd/tests/mtd_nandecctest.c
index db2f22e7966..70106607c24 100644
--- a/drivers/mtd/tests/mtd_nandecctest.c
+++ b/drivers/mtd/tests/mtd_nandecctest.c
@@ -44,7 +44,7 @@ struct nand_ecc_test {
static void single_bit_error_data(void *error_data, void *correct_data,
size_t size)
{
- unsigned int offset = random32() % (size * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (size * BITS_PER_BYTE);
memcpy(error_data, correct_data, size);
__change_bit_le(offset, error_data);
@@ -55,9 +55,9 @@ static void double_bit_error_data(void *error_data, void *correct_data,
{
unsigned int offset[2];
- offset[0] = random32() % (size * BITS_PER_BYTE);
+ offset[0] = prandom_u32() % (size * BITS_PER_BYTE);
do {
- offset[1] = random32() % (size * BITS_PER_BYTE);
+ offset[1] = prandom_u32() % (size * BITS_PER_BYTE);
} while (offset[0] == offset[1]);
memcpy(error_data, correct_data, size);
@@ -68,7 +68,7 @@ static void double_bit_error_data(void *error_data, void *correct_data,
static unsigned int random_ecc_bit(size_t size)
{
- unsigned int offset = random32() % (3 * BITS_PER_BYTE);
+ unsigned int offset = prandom_u32() % (3 * BITS_PER_BYTE);
if (size == 256) {
/*
@@ -76,7 +76,7 @@ static unsigned int random_ecc_bit(size_t size)
* and 17th bit) in ECC code for 256 byte data block
*/
while (offset == 16 || offset == 17)
- offset = random32() % (3 * BITS_PER_BYTE);
+ offset = prandom_u32() % (3 * BITS_PER_BYTE);
}
return offset;
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 2d7e6cffd6d..787f539d16c 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -55,7 +55,7 @@ static int rand_eb(void)
unsigned int eb;
again:
- eb = random32();
+ eb = prandom_u32();
/* Read or write up 2 eraseblocks at a time - hence 'ebcnt - 1' */
eb %= (ebcnt - 1);
if (bbt[eb])
@@ -67,7 +67,7 @@ static int rand_offs(void)
{
unsigned int offs;
- offs = random32();
+ offs = prandom_u32();
offs %= bufsize;
return offs;
}
@@ -76,7 +76,7 @@ static int rand_len(int offs)
{
unsigned int len;
- len = random32();
+ len = prandom_u32();
len %= (bufsize - offs);
return len;
}
@@ -191,7 +191,7 @@ static int do_write(void)
static int do_operation(void)
{
- if (random32() & 1)
+ if (prandom_u32() & 1)
return do_read();
else
return do_write();
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index c4cde1e9edd..3a9f6a6a79f 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -208,7 +208,7 @@ static inline int write_pattern(int ebnum, void *buf)
static int __init tort_init(void)
{
int err = 0, i, infinite = !cycles_count;
- int bad_ebs[ebcnt];
+ int *bad_ebs;
printk(KERN_INFO "\n");
printk(KERN_INFO "=================================================\n");
@@ -250,28 +250,24 @@ static int __init tort_init(void)
err = -ENOMEM;
patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_5A5) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_5A5)
goto out_mtd;
- }
patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_A5A) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_A5A)
goto out_patt_5A5;
- }
patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!patt_FF) {
- pr_err("error: cannot allocate memory\n");
+ if (!patt_FF)
goto out_patt_A5A;
- }
check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
- if (!check_buf) {
- pr_err("error: cannot allocate memory\n");
+ if (!check_buf)
goto out_patt_FF;
- }
+
+ bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
+ if (!bad_ebs)
+ goto out_check_buf;
err = 0;
@@ -290,7 +286,6 @@ static int __init tort_init(void)
/*
* Check if there is a bad eraseblock among those we are going to test.
*/
- memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
if (mtd_can_have_bb(mtd)) {
for (i = eb; i < eb + ebcnt; i++) {
err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
@@ -394,6 +389,8 @@ out:
pr_info("finished after %u erase cycles\n",
erase_cycles);
+ kfree(bad_ebs);
+out_check_buf:
kfree(check_buf);
out_patt_FF:
kfree(patt_FF);
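Two things happen in the bad_ebs conversion above: the ebcnt-sized array moves off the kernel stack (where a variable-length array is risky) onto the heap, and because kcalloc() returns zeroed memory the old memset() becomes redundant. A sketch of the equivalence (just the semantics, not kernel internals):

	/* bad_ebs = kcalloc(ebcnt, sizeof(*bad_ebs), GFP_KERNEL); behaves like: */
	bad_ebs = kmalloc_array(ebcnt, sizeof(*bad_ebs), GFP_KERNEL);
	if (bad_ebs)
		memset(bad_ebs, 0, ebcnt * sizeof(*bad_ebs));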
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 33f8f3b2c9b..cba89fcd158 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -86,7 +86,7 @@ static inline int ubi_dbg_is_bgt_disabled(const struct ubi_device *ubi)
static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_bitflips)
- return !(random32() % 200);
+ return !(prandom_u32() % 200);
return 0;
}
@@ -100,7 +100,7 @@ static inline int ubi_dbg_is_bitflip(const struct ubi_device *ubi)
static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(random32() % 500);
+ return !(prandom_u32() % 500);
return 0;
}
@@ -114,7 +114,7 @@ static inline int ubi_dbg_is_write_failure(const struct ubi_device *ubi)
static inline int ubi_dbg_is_erase_failure(const struct ubi_device *ubi)
{
if (ubi->dbg.emulate_io_failures)
- return !(random32() % 400);
+ return !(prandom_u32() % 400);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index a7efec29303..9b017d9c58e 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -381,7 +381,7 @@ static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
}
#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
static void b44_wap54g10_workaround(struct b44 *bp)
{
char buf[20];
@@ -393,7 +393,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
* see https://dev.openwrt.org/ticket/146
* check and reset bit "isolate"
*/
- if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+ if (bcm47xx_nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
return;
if (simple_strtoul(buf, NULL, 0) == 2) {
err = __b44_readphy(bp, 0, MII_BMCR, &val);
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index bf985c04524..639049d7e92 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -15,7 +15,7 @@
#include <linux/mii.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
static const struct bcma_device_id bgmac_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
@@ -908,7 +908,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CHIPCTL_1_IF_TYPE_RMII;
char buf[2];
- if (nvram_getenv("et_swtype", buf, 1) > 0) {
+ if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
@@ -1386,7 +1386,7 @@ static int bgmac_probe(struct bcma_device *core)
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
- if (nvram_getenv("et0_no_txint", NULL, 0) == 0)
+ if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
/* TODO: reset the external phy. Specs are needed */
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7ab0b2fba50..3338437b559 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -79,6 +79,17 @@ config ASUS_LAPTOP
If you have an ACPI-compatible ASUS laptop, say Y or M here.
+config CHROMEOS_LAPTOP
+ tristate "Chrome OS Laptop"
+ depends on I2C
+ depends on DMI
+ ---help---
+ This driver instantiates i2c and smbus devices such as
+ light sensors and touchpads.
+
+ If you have a supported Chromebook, choose Y or M here.
+ The module will be called chromeos_laptop.
+
config DELL_LAPTOP
tristate "Dell Laptop Extras"
depends on X86
@@ -288,9 +299,11 @@ config IDEAPAD_LAPTOP
depends on ACPI
depends on RFKILL && INPUT
depends on SERIO_I8042
+ depends on BACKLIGHT_CLASS_DEVICE
select INPUT_SPARSEKMAP
help
- This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
+ This is a driver for Lenovo IdeaPad netbooks. It contains drivers for
+ the rfkill switch, hotkeys, fan control and backlight control.
config THINKPAD_ACPI
tristate "ThinkPad ACPI Laptop Extras"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index bf7e4f935b1..ace2b38942f 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -50,3 +50,4 @@ obj-$(CONFIG_INTEL_MID_POWER_BUTTON) += intel_mid_powerbtn.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
+obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index afed7018a2b..c9076bdaf2c 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -511,6 +511,24 @@ static struct dmi_system_id acer_quirks[] = {
},
.driver_data = &quirk_fujitsu_amilo_li_1718,
},
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205-10382JG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "10382JG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "Lenovo Ideapad S205-1038DPG",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "1038DPG"),
+ },
+ .driver_data = &quirk_lenovo_ideapad_s205,
+ },
{}
};
@@ -1204,6 +1222,9 @@ static acpi_status WMID_set_capabilities(void)
devices = *((u32 *) obj->buffer.pointer);
} else if (obj->type == ACPI_TYPE_INTEGER) {
devices = (u32) obj->integer.value;
+ } else {
+ kfree(out.pointer);
+ return AE_ERROR;
}
} else {
kfree(out.pointer);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index d9f9a0dbc6f..0eea09c1c13 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -128,10 +128,12 @@ MODULE_PARM_DESC(als_status, "Set the ALS status on boot "
/*
* Some events we use, same for all Asus
*/
-#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
-#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = britghness level */
-#define ATKD_BR_MIN ATKD_BR_UP
-#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
+#define ATKD_BRNUP_MIN 0x10
+#define ATKD_BRNUP_MAX 0x1f
+#define ATKD_BRNDOWN_MIN 0x20
+#define ATKD_BRNDOWN_MAX 0x2f
+#define ATKD_BRNDOWN 0x20
+#define ATKD_BRNUP 0x2f
#define ATKD_LCD_ON 0x33
#define ATKD_LCD_OFF 0x34
@@ -301,40 +303,65 @@ static const struct key_entry asus_keymap[] = {
{KE_KEY, 0x17, { KEY_ZOOM } },
{KE_KEY, 0x1f, { KEY_BATTERY } },
/* End of Lenovo SL Specific keycodes */
+ {KE_KEY, ATKD_BRNDOWN, { KEY_BRIGHTNESSDOWN } },
+ {KE_KEY, ATKD_BRNUP, { KEY_BRIGHTNESSUP } },
{KE_KEY, 0x30, { KEY_VOLUMEUP } },
{KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{KE_KEY, 0x32, { KEY_MUTE } },
- {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x33, { KEY_DISPLAYTOGGLE } }, /* LCD on */
+ {KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
{KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{KE_KEY, 0x41, { KEY_NEXTSONG } },
- {KE_KEY, 0x43, { KEY_STOPCD } },
+ {KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
{KE_KEY, 0x45, { KEY_PLAYPAUSE } },
- {KE_KEY, 0x4c, { KEY_MEDIA } },
+ {KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
{KE_KEY, 0x50, { KEY_EMAIL } },
{KE_KEY, 0x51, { KEY_WWW } },
{KE_KEY, 0x55, { KEY_CALC } },
+ {KE_IGNORE, 0x57, }, /* Battery mode */
+ {KE_IGNORE, 0x58, }, /* AC mode */
{KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
- {KE_KEY, 0x5D, { KEY_WLAN } },
- {KE_KEY, 0x5E, { KEY_WLAN } },
- {KE_KEY, 0x5F, { KEY_WLAN } },
- {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
- {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
+ {KE_KEY, 0x5D, { KEY_WLAN } }, /* WLAN Toggle */
+ {KE_KEY, 0x5E, { KEY_WLAN } }, /* WLAN Enable */
+ {KE_KEY, 0x5F, { KEY_WLAN } }, /* WLAN Disable */
+ {KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+ {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+ {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+ {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+ {KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+ {KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+ {KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+ {KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
+ {KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, /* Lock Touchpad */
{KE_KEY, 0x6C, { KEY_SLEEP } }, /* Suspend */
{KE_KEY, 0x6D, { KEY_SLEEP } }, /* Hibernate */
- {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
- {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ {KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ {KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ {KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
{KE_KEY, 0x82, { KEY_CAMERA } },
- {KE_KEY, 0x88, { KEY_WLAN } },
- {KE_KEY, 0x8A, { KEY_PROG1 } },
+ {KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ {KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+ {KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+ {KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+ {KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+ {KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+ {KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+ {KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+ {KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+ {KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
{KE_KEY, 0x95, { KEY_MEDIA } },
{KE_KEY, 0x99, { KEY_PHONE } },
- {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
- {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
- {KE_KEY, 0xb5, { KEY_CALC } },
+ {KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
+ {KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
+ {KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
+ {KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
+ {KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+ {KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+ {KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+ {KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ {KE_KEY, 0xB5, { KEY_CALC } },
+ {KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ {KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{KE_END, 0},
};
@@ -1521,15 +1548,19 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
dev_name(&asus->device->dev), event,
count);
- /* Brightness events are special */
- if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
+ if (event >= ATKD_BRNUP_MIN && event <= ATKD_BRNUP_MAX)
+ event = ATKD_BRNUP;
+ else if (event >= ATKD_BRNDOWN_MIN &&
+ event <= ATKD_BRNDOWN_MAX)
+ event = ATKD_BRNDOWN;
- /* Ignore them completely if the acpi video driver is used */
+ /* Brightness events are special */
+ if (event == ATKD_BRNDOWN || event == ATKD_BRNUP) {
if (asus->backlight_device != NULL) {
/* Update the backlight device. */
asus_backlight_notify(asus);
+ return ;
}
- return ;
}
/* Accelerometer "coarse orientation change" event */
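A worked example of the new normalisation (the "low nibble is the level" detail comes from the old ATKD_BR_* comments removed above):

	event = 0x25;		/* raw ACPI event, within [0x20, 0x2f]            */
	/* brightness level = event & 0xf = 5, consumed by asus_backlight_notify */
	event = ATKD_BRNDOWN;	/* collapsed to 0x20 before the keymap lookup     */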
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index be790402e0f..210b5b87212 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -59,6 +59,17 @@ static struct quirk_entry quirk_asus_unknown = {
.wapf = 0,
};
+/*
+ * For machines that need software control of the bt/wifi status, cannot
+ * adjust brightness through the ACPI interface, and emit duplicate events
+ * (ACPI and WMI) for the display toggle
+ */
+static struct quirk_entry quirk_asus_x55u = {
+ .wapf = 4,
+ .wmi_backlight_power = true,
+ .no_display_toggle = true,
+};
+
static struct quirk_entry quirk_asus_x401u = {
.wapf = 4,
};
@@ -77,6 +88,15 @@ static struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X401U"),
},
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X401A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X401A"),
+ },
.driver_data = &quirk_asus_x401u,
},
{
@@ -95,6 +115,15 @@ static struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X501U"),
},
+ .driver_data = &quirk_asus_x55u,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X501A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X501A"),
+ },
.driver_data = &quirk_asus_x401u,
},
{
@@ -131,7 +160,7 @@ static struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X55U"),
},
- .driver_data = &quirk_asus_x401u,
+ .driver_data = &quirk_asus_x55u,
},
{
.callback = dmi_matched,
@@ -161,6 +190,8 @@ static void asus_nb_wmi_quirks(struct asus_wmi_driver *driver)
}
static const struct key_entry asus_nb_wmi_keymap[] = {
+ { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
{ KE_KEY, 0x32, { KEY_MUTE } },
@@ -168,9 +199,9 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x34, { KEY_DISPLAY_OFF } }, /* LCD off */
{ KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
{ KE_KEY, 0x41, { KEY_NEXTSONG } },
- { KE_KEY, 0x43, { KEY_STOPCD } },
+ { KE_KEY, 0x43, { KEY_STOPCD } }, /* Stop/Eject */
{ KE_KEY, 0x45, { KEY_PLAYPAUSE } },
- { KE_KEY, 0x4c, { KEY_MEDIA } },
+ { KE_KEY, 0x4c, { KEY_MEDIA } }, /* WMP Key */
{ KE_KEY, 0x50, { KEY_EMAIL } },
{ KE_KEY, 0x51, { KEY_WWW } },
{ KE_KEY, 0x55, { KEY_CALC } },
@@ -180,25 +211,42 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
{ KE_KEY, 0x5D, { KEY_WLAN } }, /* Wireless console Toggle */
{ KE_KEY, 0x5E, { KEY_WLAN } }, /* Wireless console Enable */
{ KE_KEY, 0x5F, { KEY_WLAN } }, /* Wireless console Disable */
- { KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
- { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
+ { KE_KEY, 0x60, { KEY_TOUCHPAD_ON } },
+ { KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD only */
+ { KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT only */
+ { KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT */
+ { KE_KEY, 0x64, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV */
+ { KE_KEY, 0x65, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV */
+ { KE_KEY, 0x66, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV */
+ { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */
{ KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } },
- { KE_KEY, 0x7D, { KEY_BLUETOOTH } },
- { KE_KEY, 0x7E, { KEY_BLUETOOTH } },
+ { KE_IGNORE, 0x6E, }, /* Low Battery notification */
+ { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
+ { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
{ KE_KEY, 0x82, { KEY_CAMERA } },
- { KE_KEY, 0x88, { KEY_RFKILL } },
- { KE_KEY, 0x8A, { KEY_PROG1 } },
+ { KE_KEY, 0x88, { KEY_RFKILL } }, /* Radio Toggle Key */
+ { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
+ { KE_KEY, 0x8C, { KEY_SWITCHVIDEOMODE } }, /* SDSP DVI only */
+ { KE_KEY, 0x8D, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + DVI */
+ { KE_KEY, 0x8E, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + DVI */
+ { KE_KEY, 0x8F, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + DVI */
+ { KE_KEY, 0x90, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + DVI */
+ { KE_KEY, 0x91, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + DVI */
+ { KE_KEY, 0x92, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + DVI */
+ { KE_KEY, 0x93, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + DVI */
{ KE_KEY, 0x95, { KEY_MEDIA } },
{ KE_KEY, 0x99, { KEY_PHONE } },
{ KE_KEY, 0xA0, { KEY_SWITCHVIDEOMODE } }, /* SDSP HDMI only */
{ KE_KEY, 0xA1, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + HDMI */
{ KE_KEY, 0xA2, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + HDMI */
{ KE_KEY, 0xA3, { KEY_SWITCHVIDEOMODE } }, /* SDSP TV + HDMI */
- { KE_KEY, 0xb5, { KEY_CALC } },
- { KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
- { KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
+ { KE_KEY, 0xA4, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + HDMI */
+ { KE_KEY, 0xA5, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + TV + HDMI */
+ { KE_KEY, 0xA6, { KEY_SWITCHVIDEOMODE } }, /* SDSP CRT + TV + HDMI */
+ { KE_KEY, 0xA7, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV + HDMI */
+ { KE_KEY, 0xB5, { KEY_CALC } },
+ { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
+ { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
{ KE_END, 0},
};
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index f80ae4d10f6..c11b2426dac 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -187,6 +187,8 @@ struct asus_wmi {
struct device *hwmon_device;
struct platform_device *platform_device;
+ struct led_classdev wlan_led;
+ int wlan_led_wk;
struct led_classdev tpd_led;
int tpd_led_wk;
struct led_classdev kbd_led;
@@ -194,6 +196,7 @@ struct asus_wmi {
struct workqueue_struct *led_workqueue;
struct work_struct tpd_led_work;
struct work_struct kbd_led_work;
+ struct work_struct wlan_led_work;
struct asus_rfkill wlan;
struct asus_rfkill bluetooth;
@@ -456,12 +459,65 @@ static enum led_brightness kbd_led_get(struct led_classdev *led_cdev)
return value;
}
+static int wlan_led_unknown_state(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_UNKNOWN_BIT;
+}
+
+static int wlan_led_presence(struct asus_wmi *asus)
+{
+ u32 result;
+
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_PRESENCE_BIT;
+}
+
+static void wlan_led_update(struct work_struct *work)
+{
+ int ctrl_param;
+ struct asus_wmi *asus;
+
+ asus = container_of(work, struct asus_wmi, wlan_led_work);
+
+ ctrl_param = asus->wlan_led_wk;
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_WIRELESS_LED, ctrl_param, NULL);
+}
+
+static void wlan_led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct asus_wmi *asus;
+
+ asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+
+ asus->wlan_led_wk = !!value;
+ queue_work(asus->led_workqueue, &asus->wlan_led_work);
+}
+
+static enum led_brightness wlan_led_get(struct led_classdev *led_cdev)
+{
+ struct asus_wmi *asus;
+ u32 result;
+
+ asus = container_of(led_cdev, struct asus_wmi, wlan_led);
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WIRELESS_LED, &result);
+
+ return result & ASUS_WMI_DSTS_BRIGHTNESS_MASK;
+}
+
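Two details are worth noting about the new wlan LED. First, the set hook only records the requested state and defers the WMI call to a workqueue, presumably because LED set callbacks may run in atomic context while WMI calls can sleep. Second, the LED is wired to the WLAN rfkill switch through an LED trigger; a sketch of the pairing as it appears in this patch (field names per asus-wmi):

	asus->wlan_led.default_trigger = "asus-wlan";		/* LED side    */
	rfkill_set_led_trigger_name(rfkill, "asus-wlan");	/* rfkill side */
	/* the rfkill core then drives the LED to follow the radio state */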
static void asus_wmi_led_exit(struct asus_wmi *asus)
{
if (!IS_ERR_OR_NULL(asus->kbd_led.dev))
led_classdev_unregister(&asus->kbd_led);
if (!IS_ERR_OR_NULL(asus->tpd_led.dev))
led_classdev_unregister(&asus->tpd_led);
+ if (!IS_ERR_OR_NULL(asus->wlan_led.dev))
+ led_classdev_unregister(&asus->wlan_led);
if (asus->led_workqueue)
destroy_workqueue(asus->led_workqueue);
}
@@ -498,6 +554,23 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
rv = led_classdev_register(&asus->platform_device->dev,
&asus->kbd_led);
+ if (rv)
+ goto error;
+ }
+
+ if (wlan_led_presence(asus)) {
+ INIT_WORK(&asus->wlan_led_work, wlan_led_update);
+
+ asus->wlan_led.name = "asus::wlan";
+ asus->wlan_led.brightness_set = wlan_led_set;
+ if (!wlan_led_unknown_state(asus))
+ asus->wlan_led.brightness_get = wlan_led_get;
+ asus->wlan_led.flags = LED_CORE_SUSPENDRESUME;
+ asus->wlan_led.max_brightness = 1;
+ asus->wlan_led.default_trigger = "asus-wlan";
+
+ rv = led_classdev_register(&asus->platform_device->dev,
+ &asus->wlan_led);
}
error:
@@ -813,6 +886,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
if (!*rfkill)
return -EINVAL;
+ if (dev_id == ASUS_WMI_DEVID_WLAN)
+ rfkill_set_led_trigger_name(*rfkill, "asus-wlan");
+
rfkill_init_sw_state(*rfkill, !result);
result = rfkill_register(*rfkill);
if (result) {
@@ -1265,6 +1341,18 @@ static void asus_wmi_backlight_exit(struct asus_wmi *asus)
asus->backlight_device = NULL;
}
+static int is_display_toggle(int code)
+{
+ /* display toggle keys */
+ if ((code >= 0x61 && code <= 0x67) ||
+ (code >= 0x8c && code <= 0x93) ||
+ (code >= 0xa0 && code <= 0xa7) ||
+ (code >= 0xd0 && code <= 0xd5))
+ return 1;
+
+ return 0;
+}
+
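Usage is straightforward; for instance:

	is_display_toggle(0x63);	/* 1: SDSP LCD + CRT hotkey       */
	is_display_toggle(0x30);	/* 0: volume up, always reported  */

Combined with the x55u quirk below, a display-toggle WMI event is swallowed so that only the duplicate ACPI event produces a key press.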
static void asus_wmi_notify(u32 value, void *context)
{
struct asus_wmi *asus = context;
@@ -1298,16 +1386,24 @@ static void asus_wmi_notify(u32 value, void *context)
}
if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
- code = NOTIFY_BRNUP_MIN;
+ code = ASUS_WMI_BRN_UP;
else if (code >= NOTIFY_BRNDOWN_MIN &&
code <= NOTIFY_BRNDOWN_MAX)
- code = NOTIFY_BRNDOWN_MIN;
+ code = ASUS_WMI_BRN_DOWN;
- if (code == NOTIFY_BRNUP_MIN || code == NOTIFY_BRNDOWN_MIN) {
- if (!acpi_video_backlight_support())
+ if (code == ASUS_WMI_BRN_DOWN || code == ASUS_WMI_BRN_UP) {
+ if (!acpi_video_backlight_support()) {
asus_wmi_backlight_notify(asus, orig_code);
- } else if (!sparse_keymap_report_event(asus->inputdev, code,
- key_value, autorelease))
+ goto exit;
+ }
+ }
+
+ if (is_display_toggle(code) &&
+ asus->driver->quirks->no_display_toggle)
+ goto exit;
+
+ if (!sparse_keymap_report_event(asus->inputdev, code,
+ key_value, autorelease))
pr_info("Unknown key %x pressed\n", code);
exit:
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index 4c9bd38bb0a..4da4c8bafe7 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -30,6 +30,8 @@
#include <linux/platform_device.h>
#define ASUS_WMI_KEY_IGNORE (-1)
+#define ASUS_WMI_BRN_DOWN 0x20
+#define ASUS_WMI_BRN_UP 0x2f
struct module;
struct key_entry;
@@ -41,6 +43,13 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
int wapf;
+ /*
+ * Machines with AMD graphics chips send out both a WMI event and an
+ * ACPI interrupt when a hotkey is pressed. To keep things simple we
+ * just ignore the WMI event and let the ACPI interrupt deliver the
+ * key event.
+ */
+ int no_display_toggle;
};
struct asus_wmi_driver {
diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
new file mode 100644
index 00000000000..93d66809355
--- /dev/null
+++ b/drivers/platform/x86/chromeos_laptop.c
@@ -0,0 +1,371 @@
+/*
+ * chromeos_laptop.c - Driver to instantiate Chromebook i2c/smbus devices.
+ *
+ * Author : Benson Leung <bleung@chromium.org>
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/dmi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#define ATMEL_TP_I2C_ADDR 0x4b
+#define ATMEL_TP_I2C_BL_ADDR 0x25
+#define ATMEL_TS_I2C_ADDR 0x4a
+#define ATMEL_TS_I2C_BL_ADDR 0x26
+#define CYAPA_TP_I2C_ADDR 0x67
+#define ISL_ALS_I2C_ADDR 0x44
+#define TAOS_ALS_I2C_ADDR 0x29
+
+static struct i2c_client *als;
+static struct i2c_client *tp;
+static struct i2c_client *ts;
+
+const char *i2c_adapter_names[] = {
+ "SMBus I801 adapter",
+ "i915 gmbus vga",
+ "i915 gmbus panel",
+};
+
+/* Keep this enum consistent with i2c_adapter_names */
+enum i2c_adapter_type {
+ I2C_ADAPTER_SMBUS = 0,
+ I2C_ADAPTER_VGADDC,
+ I2C_ADAPTER_PANEL,
+};
+
+static struct i2c_board_info __initdata cyapa_device = {
+ I2C_BOARD_INFO("cyapa", CYAPA_TP_I2C_ADDR),
+ .flags = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_board_info __initdata isl_als_device = {
+ I2C_BOARD_INFO("isl29018", ISL_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata tsl2583_als_device = {
+ I2C_BOARD_INFO("tsl2583", TAOS_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata tsl2563_als_device = {
+ I2C_BOARD_INFO("tsl2563", TAOS_ALS_I2C_ADDR),
+};
+
+static struct i2c_board_info __initdata atmel_224s_tp_device = {
+ I2C_BOARD_INFO("atmel_mxt_tp", ATMEL_TP_I2C_ADDR),
+ .platform_data = NULL,
+ .flags = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_board_info __initdata atmel_1664s_device = {
+ I2C_BOARD_INFO("atmel_mxt_ts", ATMEL_TS_I2C_ADDR),
+ .platform_data = NULL,
+ .flags = I2C_CLIENT_WAKE,
+};
+
+static struct i2c_client __init *__add_probed_i2c_device(
+ const char *name,
+ int bus,
+ struct i2c_board_info *info,
+ const unsigned short *addrs)
+{
+ const struct dmi_device *dmi_dev;
+ const struct dmi_dev_onboard *dev_data;
+ struct i2c_adapter *adapter;
+ struct i2c_client *client;
+
+ if (bus < 0)
+ return NULL;
+ /*
+ * If a name is specified, look for irq platform information stashed
+ * in DMI_DEV_TYPE_DEV_ONBOARD by the Chrome OS custom system firmware.
+ */
+ if (name) {
+ dmi_dev = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, name, NULL);
+ if (!dmi_dev) {
+ pr_err("%s failed to dmi find device %s.\n",
+ __func__,
+ name);
+ return NULL;
+ }
+ dev_data = (struct dmi_dev_onboard *)dmi_dev->device_data;
+ if (!dev_data) {
+ pr_err("%s failed to get data from dmi for %s.\n",
+ __func__, name);
+ return NULL;
+ }
+ info->irq = dev_data->instance;
+ }
+
+ adapter = i2c_get_adapter(bus);
+ if (!adapter) {
+ pr_err("%s failed to get i2c adapter %d.\n", __func__, bus);
+ return NULL;
+ }
+
+ /* add the i2c device */
+ client = i2c_new_probed_device(adapter, info, addrs, NULL);
+ if (!client)
+ pr_err("%s failed to register device %d-%02x\n",
+ __func__, bus, info->addr);
+ else
+ pr_debug("%s added i2c device %d-%02x\n",
+ __func__, bus, info->addr);
+
+ i2c_put_adapter(adapter);
+ return client;
+}
+
+static int __init __find_i2c_adap(struct device *dev, void *data)
+{
+ const char *name = data;
+ static const char *prefix = "i2c-";
+ struct i2c_adapter *adapter;
+ if (strncmp(dev_name(dev), prefix, strlen(prefix)) != 0)
+ return 0;
+ adapter = to_i2c_adapter(dev);
+ return (strncmp(adapter->name, name, strlen(name)) == 0);
+}
+
+static int __init find_i2c_adapter_num(enum i2c_adapter_type type)
+{
+ struct device *dev = NULL;
+ struct i2c_adapter *adapter;
+ const char *name = i2c_adapter_names[type];
+ /* find the adapter by name */
+ dev = bus_find_device(&i2c_bus_type, NULL, (void *)name,
+ __find_i2c_adap);
+ if (!dev) {
+ pr_err("%s: i2c adapter %s not found on system.\n", __func__,
+ name);
+ return -ENODEV;
+ }
+ adapter = to_i2c_adapter(dev);
+ return adapter->nr;
+}
+
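For illustration, a hypothetical match on a machine whose SMBus controller is the Intel I801 (the exact adapter-name suffix varies with the controller's base address, hence the prefix comparison above):

	/* dev_name(dev)  -> "i2c-0"                              (passes the "i2c-" filter)  */
	/* adapter->name  -> "SMBus I801 adapter at 0400"         (example value)             */
	/* strncmp(adapter->name, "SMBus I801 adapter", 18) == 0  -> adapter->nr is returned  */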
+/*
+ * Takes a list of addresses in addrs as such :
+ * { addr1, ... , addrn, I2C_CLIENT_END };
+ * add_probed_i2c_device will use i2c_new_probed_device
+ * and probe for devices at all of the addresses listed.
+ * Returns NULL if no devices found.
+ * See Documentation/i2c/instantiating-devices for more information.
+ */
+static __init struct i2c_client *add_probed_i2c_device(
+ const char *name,
+ enum i2c_adapter_type type,
+ struct i2c_board_info *info,
+ const unsigned short *addrs)
+{
+ return __add_probed_i2c_device(name,
+ find_i2c_adapter_num(type),
+ info,
+ addrs);
+}
+
+/*
+ * Probes for a device at a single address, the one provided by
+ * info->addr.
+ * Returns NULL if no device found.
+ */
+static __init struct i2c_client *add_i2c_device(const char *name,
+ enum i2c_adapter_type type,
+ struct i2c_board_info *info)
+{
+ const unsigned short addr_list[] = { info->addr, I2C_CLIENT_END };
+ return __add_probed_i2c_device(name,
+ find_i2c_adapter_num(type),
+ info,
+ addr_list);
+}
+
+
+static struct i2c_client __init *add_smbus_device(const char *name,
+ struct i2c_board_info *info)
+{
+ return add_i2c_device(name, I2C_ADAPTER_SMBUS, info);
+}
+
+static int __init setup_cyapa_smbus_tp(const struct dmi_system_id *id)
+{
+ /* add cyapa touchpad on smbus */
+ tp = add_smbus_device("trackpad", &cyapa_device);
+ return 0;
+}
+
+static int __init setup_atmel_224s_tp(const struct dmi_system_id *id)
+{
+ const unsigned short addr_list[] = { ATMEL_TP_I2C_BL_ADDR,
+ ATMEL_TP_I2C_ADDR,
+ I2C_CLIENT_END };
+
+ /* add atmel mxt touchpad on VGA DDC GMBus */
+ tp = add_probed_i2c_device("trackpad", I2C_ADAPTER_VGADDC,
+ &atmel_224s_tp_device, addr_list);
+ return 0;
+}
+
+static int __init setup_atmel_1664s_ts(const struct dmi_system_id *id)
+{
+ const unsigned short addr_list[] = { ATMEL_TS_I2C_BL_ADDR,
+ ATMEL_TS_I2C_ADDR,
+ I2C_CLIENT_END };
+
+ /* add atmel mxt touch device on PANEL GMBus */
+ ts = add_probed_i2c_device("touchscreen", I2C_ADAPTER_PANEL,
+ &atmel_1664s_device, addr_list);
+ return 0;
+}
+
+
+static int __init setup_isl29018_als(const struct dmi_system_id *id)
+{
+ /* add isl29018 light sensor */
+ als = add_smbus_device("lightsensor", &isl_als_device);
+ return 0;
+}
+
+static int __init setup_isl29023_als(const struct dmi_system_id *id)
+{
+ /* add isl29023 light sensor on Panel GMBus */
+ als = add_i2c_device("lightsensor", I2C_ADAPTER_PANEL,
+ &isl_als_device);
+ return 0;
+}
+
+static int __init setup_tsl2583_als(const struct dmi_system_id *id)
+{
+ /* add tsl2583 light sensor on smbus */
+ als = add_smbus_device(NULL, &tsl2583_als_device);
+ return 0;
+}
+
+static int __init setup_tsl2563_als(const struct dmi_system_id *id)
+{
+ /* add tsl2563 light sensor on smbus */
+ als = add_smbus_device(NULL, &tsl2563_als_device);
+ return 0;
+}
+
+static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
+ {
+ .ident = "Samsung Series 5 550 - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ },
+ .callback = setup_cyapa_smbus_tp,
+ },
+ {
+ .ident = "Chromebook Pixel - Touchscreen",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .callback = setup_atmel_1664s_ts,
+ },
+ {
+ .ident = "Chromebook Pixel - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .callback = setup_atmel_224s_tp,
+ },
+ {
+ .ident = "Samsung Series 5 550 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Lumpy"),
+ },
+ .callback = setup_isl29018_als,
+ },
+ {
+ .ident = "Chromebook Pixel - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Link"),
+ },
+ .callback = setup_isl29023_als,
+ },
+ {
+ .ident = "Acer C7 Chromebook - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Parrot"),
+ },
+ .callback = setup_cyapa_smbus_tp,
+ },
+ {
+ .ident = "HP Pavilion 14 Chromebook - Touchpad",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Butterfly"),
+ },
+ .callback = setup_cyapa_smbus_tp,
+ },
+ {
+ .ident = "Samsung Series 5 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Alex"),
+ },
+ .callback = setup_tsl2583_als,
+ },
+ {
+ .ident = "Cr-48 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Mario"),
+ },
+ .callback = setup_tsl2563_als,
+ },
+ {
+ .ident = "Acer AC700 - Light Sensor",
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ .callback = setup_tsl2563_als,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, chromeos_laptop_dmi_table);
+
+static int __init chromeos_laptop_init(void)
+{
+ if (!dmi_check_system(chromeos_laptop_dmi_table)) {
+ pr_debug("%s unsupported system.\n", __func__);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static void __exit chromeos_laptop_exit(void)
+{
+ if (als)
+ i2c_unregister_device(als);
+ if (tp)
+ i2c_unregister_device(tp);
+ if (ts)
+ i2c_unregister_device(ts);
+}
+
+module_init(chromeos_laptop_init);
+module_exit(chromeos_laptop_exit);
+
+MODULE_DESCRIPTION("Chrome OS Laptop driver");
+MODULE_AUTHOR("Benson Leung <bleung@chromium.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 60cb76a5b51..af67e6e56eb 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -63,6 +63,8 @@ MODULE_PARM_DESC(hotplug_wireless,
#define HOME_RELEASE 0xe5
static const struct key_entry eeepc_wmi_keymap[] = {
+ { KE_KEY, ASUS_WMI_BRN_DOWN, { KEY_BRIGHTNESSDOWN } },
+ { KE_KEY, ASUS_WMI_BRN_UP, { KEY_BRIGHTNESSUP } },
/* Sleep already handled via generic ACPI code */
{ KE_KEY, 0x30, { KEY_VOLUMEUP } },
{ KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1dde7accf27..45cacf79f3a 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -60,6 +60,7 @@ enum hp_wmi_radio {
HPWMI_WIFI = 0,
HPWMI_BLUETOOTH = 1,
HPWMI_WWAN = 2,
+ HPWMI_GPS = 3,
};
enum hp_wmi_event_ids {
@@ -72,10 +73,6 @@ enum hp_wmi_event_ids {
HPWMI_LOCK_SWITCH = 7,
};
-static int hp_wmi_bios_setup(struct platform_device *device);
-static int __exit hp_wmi_bios_remove(struct platform_device *device);
-static int hp_wmi_resume_handler(struct device *device);
-
struct bios_args {
u32 signature;
u32 command;
@@ -137,6 +134,7 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -147,6 +145,7 @@ static struct platform_device *hp_wmi_platform_dev;
static struct rfkill *wifi_rfkill;
static struct rfkill *bluetooth_rfkill;
static struct rfkill *wwan_rfkill;
+static struct rfkill *gps_rfkill;
struct rfkill2_device {
u8 id;
@@ -157,21 +156,6 @@ struct rfkill2_device {
static int rfkill2_count;
static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES];
-static const struct dev_pm_ops hp_wmi_pm_ops = {
- .resume = hp_wmi_resume_handler,
- .restore = hp_wmi_resume_handler,
-};
-
-static struct platform_driver hp_wmi_driver = {
- .driver = {
- .name = "hp-wmi",
- .owner = THIS_MODULE,
- .pm = &hp_wmi_pm_ops,
- },
- .probe = hp_wmi_bios_setup,
- .remove = hp_wmi_bios_remove,
-};
-
/*
* hp_wmi_perform_query
*
@@ -543,6 +527,10 @@ static void hp_wmi_notify(u32 value, void *context)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
+ if (gps_rfkill)
+ rfkill_set_states(gps_rfkill,
+ hp_wmi_get_sw_state(HPWMI_GPS),
+ hp_wmi_get_hw_state(HPWMI_GPS));
break;
case HPWMI_CPU_BATTERY_THROTTLE:
pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n");
@@ -670,7 +658,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
(void *) HPWMI_WWAN);
if (!wwan_rfkill) {
err = -ENOMEM;
- goto register_bluetooth_error;
+ goto register_gps_error;
}
rfkill_init_sw_state(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN));
@@ -681,10 +669,33 @@ static int hp_wmi_rfkill_setup(struct platform_device *device)
goto register_wwan_err;
}
+ if (wireless & 0x8) {
+ gps_rfkill = rfkill_alloc("hp-gps", &device->dev,
+ RFKILL_TYPE_GPS,
+ &hp_wmi_rfkill_ops,
+ (void *) HPWMI_GPS);
+ if (!gps_rfkill) {
+ err = -ENOMEM;
+ goto register_bluetooth_error;
+ }
+ rfkill_init_sw_state(gps_rfkill,
+ hp_wmi_get_sw_state(HPWMI_GPS));
+ rfkill_set_hw_state(gps_rfkill,
+ hp_wmi_get_hw_state(HPWMI_GPS));
+ err = rfkill_register(gps_rfkill);
+ if (err)
+ goto register_gps_error;
+ }
+
return 0;
register_wwan_err:
rfkill_destroy(wwan_rfkill);
wwan_rfkill = NULL;
+ if (gps_rfkill)
+ rfkill_unregister(gps_rfkill);
+register_gps_error:
+ rfkill_destroy(gps_rfkill);
+ gps_rfkill = NULL;
if (bluetooth_rfkill)
rfkill_unregister(bluetooth_rfkill);
register_bluetooth_error:
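Judging from the existing wifi/bluetooth/wwan checks and the new (wireless & 0x8) test, the wireless capability word appears to use one bit per radio. Hypothetical names for the bits (these macros do not exist in the driver; they only document the assumption):

	#define HP_WIRELESS_HAS_WIFI	BIT(0)
	#define HP_WIRELESS_HAS_BT	BIT(1)
	#define HP_WIRELESS_HAS_WWAN	BIT(2)
	#define HP_WIRELESS_HAS_GPS	BIT(3)	/* the new (wireless & 0x8) case */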
@@ -729,6 +740,10 @@ static int hp_wmi_rfkill2_setup(struct platform_device *device)
type = RFKILL_TYPE_WWAN;
name = "hp-wwan";
break;
+ case HPWMI_GPS:
+ type = RFKILL_TYPE_GPS;
+ name = "hp-gps";
+ break;
default:
pr_warn("unknown device type 0x%x\n",
state.device[i].radio_type);
@@ -778,7 +793,7 @@ fail:
return err;
}
-static int hp_wmi_bios_setup(struct platform_device *device)
+static int __init hp_wmi_bios_setup(struct platform_device *device)
{
int err;
@@ -786,6 +801,7 @@ static int hp_wmi_bios_setup(struct platform_device *device)
wifi_rfkill = NULL;
bluetooth_rfkill = NULL;
wwan_rfkill = NULL;
+ gps_rfkill = NULL;
rfkill2_count = 0;
if (hp_wmi_rfkill_setup(device))
@@ -835,6 +851,10 @@ static int __exit hp_wmi_bios_remove(struct platform_device *device)
rfkill_unregister(wwan_rfkill);
rfkill_destroy(wwan_rfkill);
}
+ if (gps_rfkill) {
+ rfkill_unregister(gps_rfkill);
+ rfkill_destroy(gps_rfkill);
+ }
return 0;
}
@@ -870,51 +890,70 @@ static int hp_wmi_resume_handler(struct device *device)
rfkill_set_states(wwan_rfkill,
hp_wmi_get_sw_state(HPWMI_WWAN),
hp_wmi_get_hw_state(HPWMI_WWAN));
+ if (gps_rfkill)
+ rfkill_set_states(gps_rfkill,
+ hp_wmi_get_sw_state(HPWMI_GPS),
+ hp_wmi_get_hw_state(HPWMI_GPS));
return 0;
}
+static const struct dev_pm_ops hp_wmi_pm_ops = {
+ .resume = hp_wmi_resume_handler,
+ .restore = hp_wmi_resume_handler,
+};
+
+static struct platform_driver hp_wmi_driver = {
+ .driver = {
+ .name = "hp-wmi",
+ .owner = THIS_MODULE,
+ .pm = &hp_wmi_pm_ops,
+ },
+ .remove = __exit_p(hp_wmi_bios_remove),
+};
+
static int __init hp_wmi_init(void)
{
int err;
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID);
+ if (!bios_capable && !event_capable)
+ return -ENODEV;
+
if (event_capable) {
err = hp_wmi_input_setup();
if (err)
return err;
+
+ /* Enable magic for hotkeys that run on the SMBus */
+ ec_write(0xe6, 0x6e);
}
if (bios_capable) {
- err = platform_driver_register(&hp_wmi_driver);
- if (err)
- goto err_driver_reg;
- hp_wmi_platform_dev = platform_device_alloc("hp-wmi", -1);
- if (!hp_wmi_platform_dev) {
- err = -ENOMEM;
- goto err_device_alloc;
+ hp_wmi_platform_dev =
+ platform_device_register_simple("hp-wmi", -1, NULL, 0);
+ if (IS_ERR(hp_wmi_platform_dev)) {
+ err = PTR_ERR(hp_wmi_platform_dev);
+ goto err_destroy_input;
}
- err = platform_device_add(hp_wmi_platform_dev);
+
+ err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);
if (err)
- goto err_device_add;
+ goto err_unregister_device;
}
- if (!bios_capable && !event_capable)
- return -ENODEV;
-
return 0;
-err_device_add:
- platform_device_put(hp_wmi_platform_dev);
-err_device_alloc:
- platform_driver_unregister(&hp_wmi_driver);
-err_driver_reg:
+err_unregister_device:
+ platform_device_unregister(hp_wmi_platform_dev);
+err_destroy_input:
if (event_capable)
hp_wmi_input_destroy();
return err;
}
+module_init(hp_wmi_init);
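The reordering above follows from how platform_driver_probe() works: it binds only to devices that are already registered and allows the probe routine to live in __init memory. The device is therefore registered first and the driver probed once afterwards; a sketch of the new bring-up order (error paths trimmed):

	pdev = platform_device_register_simple("hp-wmi", -1, NULL, 0);	  /* device first   */
	err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup);  /* then bind once */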
static void __exit hp_wmi_exit(void)
{
@@ -926,6 +965,4 @@ static void __exit hp_wmi_exit(void)
platform_driver_unregister(&hp_wmi_driver);
}
}
-
-module_init(hp_wmi_init);
module_exit(hp_wmi_exit);
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index 2111dbb7e1e..6b229387567 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -82,8 +82,19 @@
#define MSI_STANDARD_EC_SCM_LOAD_ADDRESS 0x2d
#define MSI_STANDARD_EC_SCM_LOAD_MASK (1 << 0)
-#define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4
+#define MSI_STANDARD_EC_FUNCTIONS_ADDRESS 0xe4
+/* Power LED is orange - Turbo mode */
+#define MSI_STANDARD_EC_TURBO_MASK (1 << 1)
+/* Power LED is green - ECO mode */
+#define MSI_STANDARD_EC_ECO_MASK (1 << 3)
+/* Touchpad is turned on */
#define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4)
+/* If this bit is not equal to bit 1 (turbo mode), turbo mode can't be toggled */
+#define MSI_STANDARD_EC_TURBO_COOLDOWN_MASK (1 << 7)
+
+#define MSI_STANDARD_EC_FAN_ADDRESS 0x33
+/* If zero, fan rotates at maximal speed */
+#define MSI_STANDARD_EC_AUTOFAN_MASK (1 << 0)
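A hypothetical EC read, just to illustrate the bit layout defined above (the value 0x12 is made up):

	u8 rdata = 0x12;					/* 0001 0010 binary  */
	turbo    = !!(rdata & MSI_STANDARD_EC_TURBO_MASK);	/* bit 1 set   -> 1  */
	eco      = !!(rdata & MSI_STANDARD_EC_ECO_MASK);	/* bit 3 clear -> 0  */
	touchpad = !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK);	/* bit 4 set   -> 1  */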
#ifdef CONFIG_PM_SLEEP
static int msi_laptop_resume(struct device *device);
@@ -108,23 +119,38 @@ static const struct key_entry msi_laptop_keymap[] = {
static struct input_dev *msi_laptop_input_dev;
-static bool old_ec_model;
static int wlan_s, bluetooth_s, threeg_s;
static int threeg_exists;
-
-/* Some MSI 3G netbook only have one fn key to control Wlan/Bluetooth/3G,
- * those netbook will load the SCM (windows app) to disable the original
- * Wlan/Bluetooth control by BIOS when user press fn key, then control
- * Wlan/Bluetooth/3G by SCM (software control by OS). Without SCM, user
- * cann't on/off 3G module on those 3G netbook.
- * On Linux, msi-laptop driver will do the same thing to disable the
- * original BIOS control, then might need use HAL or other userland
- * application to do the software control that simulate with SCM.
- * e.g. MSI N034 netbook
- */
-static bool load_scm_model;
static struct rfkill *rfk_wlan, *rfk_bluetooth, *rfk_threeg;
+/* MSI laptop quirks */
+struct quirk_entry {
+ bool old_ec_model;
+
+ /* Some MSI 3G netbooks have only one fn key to control
+ * Wlan/Bluetooth/3G. These netbooks load the SCM (a Windows app) to
+ * disable the original BIOS Wlan/Bluetooth control when the user
+ * presses the fn key, and then control Wlan/Bluetooth/3G through the
+ * SCM (software control by the OS). Without the SCM, the user cannot
+ * switch the 3G module on/off on these netbooks.
+ * On Linux, the msi-laptop driver does the same thing to disable the
+ * original BIOS control; userland (e.g. HAL) may then be needed to do
+ * the software control that the SCM would normally provide.
+ * e.g. the MSI N034 netbook
+ */
+ bool load_scm_model;
+
+ /* Some MSI laptops need delay before reading from EC */
+ bool ec_delay;
+
+ /* Some MSI Wind netbooks (e.g. MSI Wind U100) need the SCM loaded to
+ * get some features working (e.g. ECO mode), but the Wlan/Bluetooth
+ * state cannot be changed in software; it can only be read.
+ */
+ bool ec_read_only;
+};
+
+static struct quirk_entry *quirks;
+
/* Hardware access */
static int set_lcd_level(int level)
@@ -195,10 +221,13 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
if (sscanf(buf, "%i", &status) != 1 || (status < 0 || status > 1))
return -EINVAL;
+ if (quirks->ec_read_only)
+ return -EOPNOTSUPP;
+
/* read current device state */
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
if (result < 0)
- return -EINVAL;
+ return result;
if (!!(rdata & mask) != status) {
/* reverse device bit */
@@ -209,7 +238,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask)
result = ec_write(MSI_STANDARD_EC_COMMAND_ADDRESS, wdata);
if (result < 0)
- return -EINVAL;
+ return result;
}
return count;
@@ -222,7 +251,7 @@ static int get_wireless_state(int *wlan, int *bluetooth)
result = ec_transaction(MSI_EC_COMMAND_WIRELESS, &wdata, 1, &rdata, 1);
if (result < 0)
- return -1;
+ return result;
if (wlan)
*wlan = !!(rdata & 8);
@@ -240,7 +269,7 @@ static int get_wireless_state_ec_standard(void)
result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata);
if (result < 0)
- return -1;
+ return result;
wlan_s = !!(rdata & MSI_STANDARD_EC_WLAN_MASK);
@@ -258,7 +287,7 @@ static int get_threeg_exists(void)
result = ec_read(MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS, &rdata);
if (result < 0)
- return -1;
+ return result;
threeg_exists = !!(rdata & MSI_STANDARD_EC_3G_MASK);
@@ -291,9 +320,9 @@ static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret, enabled;
+ int ret, enabled = 0;
- if (old_ec_model) {
+ if (quirks->old_ec_model) {
ret = get_wireless_state(&enabled, NULL);
} else {
ret = get_wireless_state_ec_standard();
@@ -315,9 +344,9 @@ static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
- int ret, enabled;
+ int ret, enabled = 0;
- if (old_ec_model) {
+ if (quirks->old_ec_model) {
ret = get_wireless_state(NULL, &enabled);
} else {
ret = get_wireless_state_ec_standard();
@@ -342,8 +371,8 @@ static ssize_t show_threeg(struct device *dev,
int ret;
/* old msi ec not support 3G */
- if (old_ec_model)
- return -1;
+ if (quirks->old_ec_model)
+ return -ENODEV;
ret = get_wireless_state_ec_standard();
if (ret < 0)
@@ -417,18 +446,119 @@ static ssize_t store_auto_brightness(struct device *dev,
return count;
}
+static ssize_t show_touchpad(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK));
+}
+
+static ssize_t show_turbo(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_TURBO_MASK));
+}
+
+static ssize_t show_eco(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_ECO_MASK));
+}
+
+static ssize_t show_turbo_cooldown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", (!!(rdata & MSI_STANDARD_EC_TURBO_MASK)) |
+ (!!(rdata & MSI_STANDARD_EC_TURBO_COOLDOWN_MASK) << 1));
+}
+
+static ssize_t show_auto_fan(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+
+ u8 rdata;
+ int result;
+
+ result = ec_read(MSI_STANDARD_EC_FAN_ADDRESS, &rdata);
+ if (result < 0)
+ return result;
+
+ return sprintf(buf, "%i\n", !!(rdata & MSI_STANDARD_EC_AUTOFAN_MASK));
+}
+
+static ssize_t store_auto_fan(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+
+ int enable, result;
+
+ if (sscanf(buf, "%i", &enable) != 1 || (enable != (enable & 1)))
+ return -EINVAL;
+
+ result = ec_write(MSI_STANDARD_EC_FAN_ADDRESS, enable);
+ if (result < 0)
+ return result;
+
+ return count;
+}
+
static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
static DEVICE_ATTR(auto_brightness, 0644, show_auto_brightness,
store_auto_brightness);
static DEVICE_ATTR(bluetooth, 0444, show_bluetooth, NULL);
static DEVICE_ATTR(wlan, 0444, show_wlan, NULL);
static DEVICE_ATTR(threeg, 0444, show_threeg, NULL);
+static DEVICE_ATTR(touchpad, 0444, show_touchpad, NULL);
+static DEVICE_ATTR(turbo_mode, 0444, show_turbo, NULL);
+static DEVICE_ATTR(eco_mode, 0444, show_eco, NULL);
+static DEVICE_ATTR(turbo_cooldown, 0444, show_turbo_cooldown, NULL);
+static DEVICE_ATTR(auto_fan, 0644, show_auto_fan, store_auto_fan);
static struct attribute *msipf_attributes[] = {
- &dev_attr_lcd_level.attr,
- &dev_attr_auto_brightness.attr,
&dev_attr_bluetooth.attr,
&dev_attr_wlan.attr,
+ &dev_attr_touchpad.attr,
+ &dev_attr_turbo_mode.attr,
+ &dev_attr_eco_mode.attr,
+ &dev_attr_turbo_cooldown.attr,
+ &dev_attr_auto_fan.attr,
+ NULL
+};
+
+static struct attribute *msipf_old_attributes[] = {
+ &dev_attr_lcd_level.attr,
+ &dev_attr_auto_brightness.attr,
NULL
};
@@ -436,6 +566,10 @@ static struct attribute_group msipf_attribute_group = {
.attrs = msipf_attributes
};
+static struct attribute_group msipf_old_attribute_group = {
+ .attrs = msipf_old_attributes
+};
+
static struct platform_driver msipf_driver = {
.driver = {
.name = "msi-laptop-pf",
@@ -448,9 +582,26 @@ static struct platform_device *msipf_device;
/* Initialization */
-static int dmi_check_cb(const struct dmi_system_id *id)
+static struct quirk_entry quirk_old_ec_model = {
+ .old_ec_model = true,
+};
+
+static struct quirk_entry quirk_load_scm_model = {
+ .load_scm_model = true,
+ .ec_delay = true,
+};
+
+static struct quirk_entry quirk_load_scm_ro_model = {
+ .load_scm_model = true,
+ .ec_read_only = true,
+};
+
+static int dmi_check_cb(const struct dmi_system_id *dmi)
{
- pr_info("Identified laptop model '%s'\n", id->ident);
+ pr_info("Identified laptop model '%s'\n", dmi->ident);
+
+ quirks = dmi->driver_data;
+
return 1;
}
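
The change above replaces the old boolean module flags with per-model quirk structs: each dmi_system_id entry carries a driver_data pointer, and the match callback copies it into the global quirks pointer before the rest of probing runs. A stand-alone sketch of the same table-driven selection (a simplified user-space analogue of the DMI matching, with illustrative product strings, not the kernel DMI API):

/* Sketch: table-driven quirk selection, modelled on the DMI matching above. */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>

struct quirk_entry {
	bool old_ec_model;
	bool load_scm_model;
	bool ec_read_only;
};

static struct quirk_entry quirk_old_ec = { .old_ec_model = true };
static struct quirk_entry quirk_scm    = { .load_scm_model = true };
static struct quirk_entry quirk_scm_ro = { .load_scm_model = true,
					   .ec_read_only = true };

struct model_id {
	const char *product;			/* analogue of DMI_PRODUCT_NAME */
	struct quirk_entry *driver_data;
};

static const struct model_id table[] = {
	{ "MS-1013",     &quirk_old_ec },
	{ "U270 series", &quirk_scm },
	{ "U90/U100",    &quirk_scm_ro },
	{ NULL, NULL }
};

static struct quirk_entry *quirks;

int main(void)
{
	const char *product = "U90/U100";	/* pretend DMI string */
	const struct model_id *m;

	for (m = table; m->product; m++)
		if (!strcmp(m->product, product)) {
			quirks = m->driver_data;	/* what dmi_check_cb() does */
			break;
		}
	if (!quirks)
		quirks = &quirk_scm;	/* default when nothing matches, as in msi_init() */

	printf("old_ec=%d scm=%d ec_ro=%d\n", quirks->old_ec_model,
	       quirks->load_scm_model, quirks->ec_read_only);
	return 0;
}
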
@@ -464,6 +615,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INT'L CO.,LTD")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
@@ -474,6 +626,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "0581"),
DMI_MATCH(DMI_BOARD_NAME, "MS-1058")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
@@ -484,6 +637,7 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_BOARD_VENDOR, "MSI"),
DMI_MATCH(DMI_BOARD_NAME, "MS-1412")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
{
@@ -495,12 +649,9 @@ static struct dmi_system_id __initdata msi_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INT'L CO.,LTD")
},
+ .driver_data = &quirk_old_ec_model,
.callback = dmi_check_cb
},
- { }
-};
-
-static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
{
.ident = "MSI N034",
.matches = {
@@ -510,6 +661,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD")
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -521,6 +673,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
DMI_MATCH(DMI_CHASSIS_VENDOR,
"MICRO-STAR INTERNATIONAL CO., LTD")
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -530,6 +683,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
"MICRO-STAR INTERNATIONAL CO., LTD"),
DMI_MATCH(DMI_PRODUCT_NAME, "MS-N014"),
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -539,6 +693,7 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
"Micro-Star International"),
DMI_MATCH(DMI_PRODUCT_NAME, "CR620"),
},
+ .driver_data = &quirk_load_scm_model,
.callback = dmi_check_cb
},
{
@@ -548,6 +703,17 @@ static struct dmi_system_id __initdata msi_load_scm_models_dmi_table[] = {
"Micro-Star International Co., Ltd."),
DMI_MATCH(DMI_PRODUCT_NAME, "U270 series"),
},
+ .driver_data = &quirk_load_scm_model,
+ .callback = dmi_check_cb
+ },
+ {
+ .ident = "MSI U90/U100",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "MICRO-STAR INTERNATIONAL CO., LTD"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "U90/U100"),
+ },
+ .driver_data = &quirk_load_scm_ro_model,
.callback = dmi_check_cb
},
{ }
@@ -560,32 +726,26 @@ static int rfkill_bluetooth_set(void *data, bool blocked)
* blocked == false is on
* blocked == true is off
*/
- if (blocked)
- set_device_state("0", 0, MSI_STANDARD_EC_BLUETOOTH_MASK);
- else
- set_device_state("1", 0, MSI_STANDARD_EC_BLUETOOTH_MASK);
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_BLUETOOTH_MASK);
- return 0;
+ return min(result, 0);
}
static int rfkill_wlan_set(void *data, bool blocked)
{
- if (blocked)
- set_device_state("0", 0, MSI_STANDARD_EC_WLAN_MASK);
- else
- set_device_state("1", 0, MSI_STANDARD_EC_WLAN_MASK);
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_WLAN_MASK);
- return 0;
+ return min(result, 0);
}
static int rfkill_threeg_set(void *data, bool blocked)
{
- if (blocked)
- set_device_state("0", 0, MSI_STANDARD_EC_3G_MASK);
- else
- set_device_state("1", 0, MSI_STANDARD_EC_3G_MASK);
+ int result = set_device_state(blocked ? "0" : "1", 0,
+ MSI_STANDARD_EC_3G_MASK);
- return 0;
+ return min(result, 0);
}
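
set_device_state() (used elsewhere in the driver by the sysfs store handlers) returns a positive byte count on success and a negative errno on failure, while an rfkill set_block callback must return 0 or a negative error. min(result, 0) collapses any positive success value to 0 and passes errors through unchanged. A small stand-alone check of that mapping:

/* Sketch: why min(result, 0) is the right return value for the
 * rfkill set_block callbacks above. */
#include <stdio.h>
#include <errno.h>

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int results[] = { 5, 0, -EIO };	/* byte count, zero, error */
	size_t i;

	for (i = 0; i < sizeof(results) / sizeof(results[0]); i++)
		printf("set_device_state() -> %d, callback returns %d\n",
		       results[i], min_int(results[i], 0));
	return 0;
}
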
static const struct rfkill_ops rfkill_bluetooth_ops = {
@@ -618,25 +778,34 @@ static void rfkill_cleanup(void)
}
}
+static bool msi_rfkill_set_state(struct rfkill *rfkill, bool blocked)
+{
+ if (quirks->ec_read_only)
+ return rfkill_set_hw_state(rfkill, blocked);
+ else
+ return rfkill_set_sw_state(rfkill, blocked);
+}
+
static void msi_update_rfkill(struct work_struct *ignored)
{
get_wireless_state_ec_standard();
if (rfk_wlan)
- rfkill_set_sw_state(rfk_wlan, !wlan_s);
+ msi_rfkill_set_state(rfk_wlan, !wlan_s);
if (rfk_bluetooth)
- rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s);
+ msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s);
if (rfk_threeg)
- rfkill_set_sw_state(rfk_threeg, !threeg_s);
+ msi_rfkill_set_state(rfk_threeg, !threeg_s);
}
-static DECLARE_DELAYED_WORK(msi_rfkill_work, msi_update_rfkill);
+static DECLARE_DELAYED_WORK(msi_rfkill_dwork, msi_update_rfkill);
+static DECLARE_WORK(msi_rfkill_work, msi_update_rfkill);
static void msi_send_touchpad_key(struct work_struct *ignored)
{
u8 rdata;
int result;
- result = ec_read(MSI_STANDARD_EC_TOUCHPAD_ADDRESS, &rdata);
+ result = ec_read(MSI_STANDARD_EC_FUNCTIONS_ADDRESS, &rdata);
if (result < 0)
return;
@@ -644,7 +813,8 @@ static void msi_send_touchpad_key(struct work_struct *ignored)
(rdata & MSI_STANDARD_EC_TOUCHPAD_MASK) ?
KEY_TOUCHPAD_ON : KEY_TOUCHPAD_OFF, 1, true);
}
-static DECLARE_DELAYED_WORK(msi_touchpad_work, msi_send_touchpad_key);
+static DECLARE_DELAYED_WORK(msi_touchpad_dwork, msi_send_touchpad_key);
+static DECLARE_WORK(msi_touchpad_work, msi_send_touchpad_key);
static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
struct serio *port)
@@ -662,14 +832,20 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str,
extended = false;
switch (data) {
case 0xE4:
- schedule_delayed_work(&msi_touchpad_work,
- round_jiffies_relative(0.5 * HZ));
+ if (quirks->ec_delay) {
+ schedule_delayed_work(&msi_touchpad_dwork,
+ round_jiffies_relative(0.5 * HZ));
+ } else
+ schedule_work(&msi_touchpad_work);
break;
case 0x54:
case 0x62:
case 0x76:
- schedule_delayed_work(&msi_rfkill_work,
- round_jiffies_relative(0.5 * HZ));
+ if (quirks->ec_delay) {
+ schedule_delayed_work(&msi_rfkill_dwork,
+ round_jiffies_relative(0.5 * HZ));
+ } else
+ schedule_work(&msi_rfkill_work);
break;
}
}
@@ -736,8 +912,11 @@ static int rfkill_init(struct platform_device *sdev)
}
/* schedule to run rfkill state initial */
- schedule_delayed_work(&msi_rfkill_init,
- round_jiffies_relative(1 * HZ));
+ if (quirks->ec_delay) {
+ schedule_delayed_work(&msi_rfkill_init,
+ round_jiffies_relative(1 * HZ));
+ } else
+ schedule_work(&msi_rfkill_work);
return 0;
@@ -761,7 +940,7 @@ static int msi_laptop_resume(struct device *device)
u8 data;
int result;
- if (!load_scm_model)
+ if (!quirks->load_scm_model)
return 0;
/* set load SCM to disable hardware control by fn key */
@@ -819,13 +998,15 @@ static int __init load_scm_model_init(struct platform_device *sdev)
u8 data;
int result;
- /* allow userland write sysfs file */
- dev_attr_bluetooth.store = store_bluetooth;
- dev_attr_wlan.store = store_wlan;
- dev_attr_threeg.store = store_threeg;
- dev_attr_bluetooth.attr.mode |= S_IWUSR;
- dev_attr_wlan.attr.mode |= S_IWUSR;
- dev_attr_threeg.attr.mode |= S_IWUSR;
+ if (!quirks->ec_read_only) {
+		/* allow userland to write the sysfs files */
+		dev_attr_bluetooth.store = store_bluetooth;
+		/* allow userland to write the sysfs files */
+ dev_attr_wlan.store = store_wlan;
+ dev_attr_threeg.store = store_threeg;
+ dev_attr_bluetooth.attr.mode |= S_IWUSR;
+ dev_attr_wlan.attr.mode |= S_IWUSR;
+ dev_attr_threeg.attr.mode |= S_IWUSR;
+ }
/* disable hardware control by fn key */
result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
@@ -874,21 +1055,22 @@ static int __init msi_init(void)
if (acpi_disabled)
return -ENODEV;
- if (force || dmi_check_system(msi_dmi_table))
- old_ec_model = 1;
+ dmi_check_system(msi_dmi_table);
+ if (!quirks)
+ /* quirks may be NULL if no match in DMI table */
+ quirks = &quirk_load_scm_model;
+ if (force)
+ quirks = &quirk_old_ec_model;
- if (!old_ec_model)
+ if (!quirks->old_ec_model)
get_threeg_exists();
- if (!old_ec_model && dmi_check_system(msi_load_scm_models_dmi_table))
- load_scm_model = 1;
-
if (auto_brightness < 0 || auto_brightness > 2)
return -EINVAL;
/* Register backlight stuff */
- if (acpi_video_backlight_support()) {
+ if (!quirks->old_ec_model || acpi_video_backlight_support()) {
pr_info("Brightness ignored, must be controlled by ACPI video driver\n");
} else {
struct backlight_properties props;
@@ -918,7 +1100,7 @@ static int __init msi_init(void)
if (ret)
goto fail_platform_device1;
- if (load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
+ if (quirks->load_scm_model && (load_scm_model_init(msipf_device) < 0)) {
ret = -EINVAL;
goto fail_platform_device1;
}
@@ -928,20 +1110,25 @@ static int __init msi_init(void)
if (ret)
goto fail_platform_device2;
- if (!old_ec_model) {
+ if (!quirks->old_ec_model) {
if (threeg_exists)
ret = device_create_file(&msipf_device->dev,
&dev_attr_threeg);
if (ret)
goto fail_platform_device2;
- }
+ } else {
+ ret = sysfs_create_group(&msipf_device->dev.kobj,
+ &msipf_old_attribute_group);
+ if (ret)
+ goto fail_platform_device2;
- /* Disable automatic brightness control by default because
- * this module was probably loaded to do brightness control in
- * software. */
+ /* Disable automatic brightness control by default because
+ * this module was probably loaded to do brightness control in
+ * software. */
- if (auto_brightness != 2)
- set_auto_brightness(auto_brightness);
+ if (auto_brightness != 2)
+ set_auto_brightness(auto_brightness);
+ }
pr_info("driver " MSI_DRIVER_VERSION " successfully loaded\n");
@@ -949,9 +1136,10 @@ static int __init msi_init(void)
fail_platform_device2:
- if (load_scm_model) {
+ if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
- cancel_delayed_work_sync(&msi_rfkill_work);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
}
platform_device_del(msipf_device);
@@ -973,23 +1161,26 @@ fail_backlight:
static void __exit msi_cleanup(void)
{
- if (load_scm_model) {
+ if (quirks->load_scm_model) {
i8042_remove_filter(msi_laptop_i8042_filter);
msi_laptop_input_destroy();
- cancel_delayed_work_sync(&msi_rfkill_work);
+ cancel_delayed_work_sync(&msi_rfkill_dwork);
+ cancel_work_sync(&msi_rfkill_work);
rfkill_cleanup();
}
sysfs_remove_group(&msipf_device->dev.kobj, &msipf_attribute_group);
- if (!old_ec_model && threeg_exists)
+ if (!quirks->old_ec_model && threeg_exists)
device_remove_file(&msipf_device->dev, &dev_attr_threeg);
platform_device_unregister(msipf_device);
platform_driver_unregister(&msipf_driver);
backlight_device_unregister(msibl_device);
- /* Enable automatic brightness control again */
- if (auto_brightness != 2)
- set_auto_brightness(1);
+ if (quirks->old_ec_model) {
+ /* Enable automatic brightness control again */
+ if (auto_brightness != 2)
+ set_auto_brightness(1);
+ }
pr_info("driver unloaded\n");
}
@@ -1011,3 +1202,4 @@ MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N051:*");
MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnMS-N014:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnCR620:*");
MODULE_ALIAS("dmi:*:svnMicro-StarInternational*:pnU270series:*");
+MODULE_ALIAS("dmi:*:svnMICRO-STARINTERNATIONAL*:pnU90/U100:*");
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 2264331bd48..70222f265f6 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -34,29 +34,65 @@ MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
MODULE_DESCRIPTION("MSI laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("wmi:551A1F84-FBDD-4125-91DB-3EA8F44F1D45");
-MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
-
#define DRV_NAME "msi-wmi"
#define MSIWMI_BIOS_GUID "551A1F84-FBDD-4125-91DB-3EA8F44F1D45"
-#define MSIWMI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
-
-#define SCANCODE_BASE 0xD0
-#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
-#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
-#define MSI_WMI_VOLUMEUP (SCANCODE_BASE + 2)
-#define MSI_WMI_VOLUMEDOWN (SCANCODE_BASE + 3)
-#define MSI_WMI_MUTE (SCANCODE_BASE + 4)
+#define MSIWMI_MSI_EVENT_GUID "B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2"
+#define MSIWMI_WIND_EVENT_GUID "5B3CC38A-40D9-7245-8AE6-1145B751BE3F"
+
+MODULE_ALIAS("wmi:" MSIWMI_BIOS_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_MSI_EVENT_GUID);
+MODULE_ALIAS("wmi:" MSIWMI_WIND_EVENT_GUID);
+
+enum msi_scancodes {
+ /* Generic MSI keys (not present on MSI Wind) */
+ MSI_KEY_BRIGHTNESSUP = 0xD0,
+ MSI_KEY_BRIGHTNESSDOWN,
+ MSI_KEY_VOLUMEUP,
+ MSI_KEY_VOLUMEDOWN,
+ MSI_KEY_MUTE,
+ /* MSI Wind keys */
+ WIND_KEY_TOUCHPAD = 0x08, /* Fn+F3 touchpad toggle */
+ WIND_KEY_BLUETOOTH = 0x56, /* Fn+F11 Bluetooth toggle */
+ WIND_KEY_CAMERA, /* Fn+F6 webcam toggle */
+ WIND_KEY_WLAN = 0x5f, /* Fn+F11 Wi-Fi toggle */
+ WIND_KEY_TURBO, /* Fn+F10 turbo mode toggle */
+ WIND_KEY_ECO = 0x69, /* Fn+F10 ECO mode toggle */
+};
static struct key_entry msi_wmi_keymap[] = {
- { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
- { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
- { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} },
- { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
- { KE_KEY, MSI_WMI_MUTE, {KEY_MUTE} },
- { KE_END, 0}
+ { KE_KEY, MSI_KEY_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
+ { KE_KEY, MSI_KEY_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
+ { KE_KEY, MSI_KEY_VOLUMEUP, {KEY_VOLUMEUP} },
+ { KE_KEY, MSI_KEY_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
+ { KE_KEY, MSI_KEY_MUTE, {KEY_MUTE} },
+
+ /* These keys work without WMI. Ignore them to avoid double keycodes */
+ { KE_IGNORE, WIND_KEY_TOUCHPAD, {KEY_TOUCHPAD_TOGGLE} },
+ { KE_IGNORE, WIND_KEY_BLUETOOTH, {KEY_BLUETOOTH} },
+ { KE_IGNORE, WIND_KEY_CAMERA, {KEY_CAMERA} },
+ { KE_IGNORE, WIND_KEY_WLAN, {KEY_WLAN} },
+
+ /* These are unknown WMI events found on MSI Wind */
+ { KE_IGNORE, 0x00 },
+ { KE_IGNORE, 0x62 },
+ { KE_IGNORE, 0x63 },
+
+ /* These are MSI Wind keys that should be handled via WMI */
+ { KE_KEY, WIND_KEY_TURBO, {KEY_PROG1} },
+ { KE_KEY, WIND_KEY_ECO, {KEY_PROG2} },
+
+ { KE_END, 0 }
+};
+
+static ktime_t last_pressed;
+
+static const struct {
+ const char *guid;
+ bool quirk_last_pressed;
+} *event_wmi, event_wmis[] = {
+ { MSIWMI_MSI_EVENT_GUID, true },
+ { MSIWMI_WIND_EVENT_GUID, false },
};
-static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
static struct backlight_device *backlight;
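
With the second event GUID, last_pressed becomes a single timestamp guarded by quirk_last_pressed: on the original MSI event GUID one key press can fan out into many GPEs, so any event arriving within 50 ms of the previous one is dropped, while the Wind GUID delivers clean events and skips the check. A stand-alone sketch of the same debounce, using a monotonic clock in place of ktime:

/* Sketch: 50 ms debounce as applied to MSIWMI_MSI_EVENT_GUID events above. */
#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static int64_t last_pressed_us;

/* Returns true if the event should be reported, false if suppressed. */
static bool debounce_event(bool quirk_last_pressed)
{
	int64_t cur = now_us();

	if (quirk_last_pressed) {
		if (cur - last_pressed_us < 50 * 1000)
			return false;		/* within 50 ms: drop it */
		last_pressed_us = cur;
	}
	return true;
}

int main(void)
{
	/* Two back-to-back events: the second one is suppressed. */
	printf("first:  %s\n", debounce_event(true) ? "report" : "drop");
	printf("second: %s\n", debounce_event(true) ? "report" : "drop");
	return 0;
}
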
@@ -149,7 +185,6 @@ static void msi_wmi_notify(u32 value, void *context)
struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
static struct key_entry *key;
union acpi_object *obj;
- ktime_t cur;
acpi_status status;
status = wmi_get_event_data(value, &response);
@@ -165,39 +200,67 @@ static void msi_wmi_notify(u32 value, void *context)
pr_debug("Eventcode: 0x%x\n", eventcode);
key = sparse_keymap_entry_from_scancode(msi_wmi_input_dev,
eventcode);
- if (key) {
- ktime_t diff;
- cur = ktime_get_real();
- diff = ktime_sub(cur, last_pressed[key->code -
- SCANCODE_BASE]);
- /* Ignore event if the same event happened in a 50 ms
+ if (!key) {
+ pr_info("Unknown key pressed - %x\n", eventcode);
+ goto msi_wmi_notify_exit;
+ }
+
+ if (event_wmi->quirk_last_pressed) {
+ ktime_t cur = ktime_get_real();
+ ktime_t diff = ktime_sub(cur, last_pressed);
+ /* Ignore event if any event happened in a 50 ms
timeframe -> Key press may result in 10-20 GPEs */
if (ktime_to_us(diff) < 1000 * 50) {
pr_debug("Suppressed key event 0x%X - "
"Last press was %lld us ago\n",
key->code, ktime_to_us(diff));
- return;
- }
- last_pressed[key->code - SCANCODE_BASE] = cur;
-
- if (key->type == KE_KEY &&
- /* Brightness is served via acpi video driver */
- (!acpi_video_backlight_support() ||
- (key->code != MSI_WMI_BRIGHTNESSUP &&
- key->code != MSI_WMI_BRIGHTNESSDOWN))) {
- pr_debug("Send key: 0x%X - "
- "Input layer keycode: %d\n",
- key->code, key->keycode);
- sparse_keymap_report_entry(msi_wmi_input_dev,
- key, 1, true);
+ goto msi_wmi_notify_exit;
}
- } else
- pr_info("Unknown key pressed - %x\n", eventcode);
+ last_pressed = cur;
+ }
+
+ if (key->type == KE_KEY &&
+ /* Brightness is served via acpi video driver */
+ (backlight ||
+ (key->code != MSI_KEY_BRIGHTNESSUP &&
+ key->code != MSI_KEY_BRIGHTNESSDOWN))) {
+ pr_debug("Send key: 0x%X - Input layer keycode: %d\n",
+ key->code, key->keycode);
+ sparse_keymap_report_entry(msi_wmi_input_dev, key, 1,
+ true);
+ }
} else
pr_info("Unknown event received\n");
+
+msi_wmi_notify_exit:
kfree(response.pointer);
}
+static int __init msi_wmi_backlight_setup(void)
+{
+ int err;
+ struct backlight_properties props;
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
+ props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+ backlight = backlight_device_register(DRV_NAME, NULL, NULL,
+ &msi_backlight_ops,
+ &props);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+
+ err = bl_get(NULL);
+ if (err < 0) {
+ backlight_device_unregister(backlight);
+ return err;
+ }
+
+ backlight->props.brightness = err;
+
+ return 0;
+}
+
static int __init msi_wmi_input_setup(void)
{
int err;
@@ -219,7 +282,7 @@ static int __init msi_wmi_input_setup(void)
if (err)
goto err_free_keymap;
- memset(last_pressed, 0, sizeof(last_pressed));
+ last_pressed = ktime_set(0, 0);
return 0;
@@ -233,61 +296,66 @@ err_free_dev:
static int __init msi_wmi_init(void)
{
int err;
+ int i;
- if (!wmi_has_guid(MSIWMI_EVENT_GUID)) {
- pr_err("This machine doesn't have MSI-hotkeys through WMI\n");
- return -ENODEV;
- }
- err = wmi_install_notify_handler(MSIWMI_EVENT_GUID,
- msi_wmi_notify, NULL);
- if (ACPI_FAILURE(err))
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(event_wmis); i++) {
+ if (!wmi_has_guid(event_wmis[i].guid))
+ continue;
- err = msi_wmi_input_setup();
- if (err)
- goto err_uninstall_notifier;
-
- if (!acpi_video_backlight_support()) {
- struct backlight_properties props;
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_PLATFORM;
- props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
- backlight = backlight_device_register(DRV_NAME, NULL, NULL,
- &msi_backlight_ops,
- &props);
- if (IS_ERR(backlight)) {
- err = PTR_ERR(backlight);
+ err = msi_wmi_input_setup();
+ if (err) {
+ pr_err("Unable to setup input device\n");
+ return err;
+ }
+
+ err = wmi_install_notify_handler(event_wmis[i].guid,
+ msi_wmi_notify, NULL);
+ if (ACPI_FAILURE(err)) {
+ pr_err("Unable to setup WMI notify handler\n");
goto err_free_input;
}
- err = bl_get(NULL);
- if (err < 0)
- goto err_free_backlight;
+ pr_debug("Event handler installed\n");
+ event_wmi = &event_wmis[i];
+ break;
+ }
- backlight->props.brightness = err;
+ if (wmi_has_guid(MSIWMI_BIOS_GUID) && !acpi_video_backlight_support()) {
+ err = msi_wmi_backlight_setup();
+ if (err) {
+ pr_err("Unable to setup backlight device\n");
+ goto err_uninstall_handler;
+ }
+ pr_debug("Backlight device created\n");
+ }
+
+ if (!event_wmi && !backlight) {
+		pr_err("This machine has neither MSI-hotkeys nor backlight through WMI\n");

+ return -ENODEV;
}
- pr_debug("Event handler installed\n");
return 0;
-err_free_backlight:
- backlight_device_unregister(backlight);
+err_uninstall_handler:
+ if (event_wmi)
+ wmi_remove_notify_handler(event_wmi->guid);
err_free_input:
- sparse_keymap_free(msi_wmi_input_dev);
- input_unregister_device(msi_wmi_input_dev);
-err_uninstall_notifier:
- wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ if (event_wmi) {
+ sparse_keymap_free(msi_wmi_input_dev);
+ input_unregister_device(msi_wmi_input_dev);
+ }
return err;
}
static void __exit msi_wmi_exit(void)
{
- if (wmi_has_guid(MSIWMI_EVENT_GUID)) {
- wmi_remove_notify_handler(MSIWMI_EVENT_GUID);
+ if (event_wmi) {
+ wmi_remove_notify_handler(event_wmi->guid);
sparse_keymap_free(msi_wmi_input_dev);
input_unregister_device(msi_wmi_input_dev);
- backlight_device_unregister(backlight);
}
+ if (backlight)
+ backlight_device_unregister(backlight);
}
module_init(msi_wmi_init);
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8da21876a79..14d4dced1de 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -158,6 +158,11 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd);
static int sony_nc_lid_resume_setup(struct platform_device *pd);
static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+ unsigned int handle);
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd);
+static int __sony_nc_gfx_switch_status_get(void);
+
static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
@@ -1241,17 +1246,13 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
/* Hybrid GFX switching */
sony_call_snc_handle(handle, 0x0000, &result);
dprintk("GFX switch event received (reason: %s)\n",
- (result & 0x01) ?
- "switch change" : "unknown");
-
- /* verify the switch state
- * 1: discrete GFX
- * 0: integrated GFX
- */
- sony_call_snc_handle(handle, 0x0100, &result);
+ (result == 0x1) ? "switch change" :
+ (result == 0x2) ? "output switch" :
+ (result == 0x3) ? "output switch" :
+ "");
ev_type = GFX_SWITCH;
- real_ev = result & 0xff;
+ real_ev = __sony_nc_gfx_switch_status_get();
break;
default:
@@ -1350,6 +1351,13 @@ static void sony_nc_function_setup(struct acpi_device *device,
pr_err("couldn't set up thermal profile function (%d)\n",
result);
break;
+ case 0x0128:
+ case 0x0146:
+ result = sony_nc_gfx_switch_setup(pf_device, handle);
+ if (result)
+ pr_err("couldn't set up GFX Switch status (%d)\n",
+ result);
+ break;
case 0x0131:
result = sony_nc_highspeed_charging_setup(pf_device);
if (result)
@@ -1365,6 +1373,8 @@ static void sony_nc_function_setup(struct acpi_device *device,
break;
case 0x0137:
case 0x0143:
+ case 0x014b:
+ case 0x014c:
result = sony_nc_kbd_backlight_setup(pf_device, handle);
if (result)
pr_err("couldn't set up keyboard backlight function (%d)\n",
@@ -1414,6 +1424,10 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
case 0x0122:
sony_nc_thermal_cleanup(pd);
break;
+ case 0x0128:
+ case 0x0146:
+ sony_nc_gfx_switch_cleanup(pd);
+ break;
case 0x0131:
sony_nc_highspeed_charging_cleanup(pd);
break;
@@ -1423,6 +1437,8 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
break;
case 0x0137:
case 0x0143:
+ case 0x014b:
+ case 0x014c:
sony_nc_kbd_backlight_cleanup(pd);
break;
default:
@@ -1467,6 +1483,8 @@ static void sony_nc_function_resume(void)
break;
case 0x0137:
case 0x0143:
+ case 0x014b:
+ case 0x014c:
sony_nc_kbd_backlight_resume();
break;
default:
@@ -1534,7 +1552,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
int argument = sony_rfkill_address[(long) data] + 0x100;
if (!blocked)
- argument |= 0x030000;
+ argument |= 0x070000;
return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
}
@@ -2333,7 +2351,7 @@ static int sony_nc_lid_resume_setup(struct platform_device *pd)
return 0;
liderror:
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
kfree(lid_ctl);
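
The liderror path must unwind only the attribute files that were actually created: when device_create_file() fails for index i, entry i was never added, so cleanup has to start at i - 1 and run down to 0; the previous loop removed attrs[i] (never created) and skipped attrs[0]. A small stand-alone illustration of the corrected bounds:

/* Sketch: unwinding partially-created entries, as in the liderror path above. */
#include <stdio.h>

#define NR_ATTRS 3

int main(void)
{
	int created[NR_ATTRS] = { 0 };
	int i;

	/* Pretend creation fails at index 2. */
	for (i = 0; i < NR_ATTRS; i++) {
		if (i == 2)
			break;		/* device_create_file() failed here */
		created[i] = 1;
	}

	/* Corrected unwind: remove exactly the entries that were created. */
	for (i--; i >= 0; i--) {
		printf("removing attrs[%d]\n", i);
		created[i] = 0;
	}

	for (i = 0; i < NR_ATTRS; i++)
		if (created[i])
			printf("leaked attrs[%d]!\n", i);
	return 0;
}
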
@@ -2355,6 +2373,97 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
}
}
+/* GFX Switch position */
+enum gfx_switch {
+ SPEED,
+ STAMINA,
+ AUTO
+};
+struct snc_gfx_switch_control {
+ struct device_attribute attr;
+ unsigned int handle;
+};
+static struct snc_gfx_switch_control *gfxs_ctl;
+
+/* returns 0 for speed, 1 for stamina */
+static int __sony_nc_gfx_switch_status_get(void)
+{
+ unsigned int result;
+
+ if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result))
+ return -EIO;
+
+ switch (gfxs_ctl->handle) {
+ case 0x0146:
+ /* 1: discrete GFX (speed)
+ * 0: integrated GFX (stamina)
+ */
+ return result & 0x1 ? SPEED : STAMINA;
+ break;
+ case 0x0128:
+		/* it's a more elaborate bitmask, for now:
+ * 2: integrated GFX (stamina)
+ * 0: discrete GFX (speed)
+ */
+ dprintk("GFX Status: 0x%x\n", result);
+ return result & 0x80 ? AUTO :
+ result & 0x02 ? STAMINA : SPEED;
+ break;
+ }
+ return -EINVAL;
+}
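
For handle 0x0146 the status is a single bit (1 = discrete/speed, 0 = integrated/stamina); for handle 0x0128 the result is a wider bitmask where, judging from this patch rather than any Sony documentation, bit 7 (0x80) appears to indicate automatic switching and bit 1 (0x02) the integrated GPU. A small stand-alone decoder for the 0x0128 interpretation used above:

/* Sketch: decoding the handle-0x0128 GFX switch status as done above. */
#include <stdio.h>

enum gfx_switch { SPEED, STAMINA, AUTO };

static enum gfx_switch decode_0128(unsigned int result)
{
	if (result & 0x80)
		return AUTO;		/* automatic switching */
	if (result & 0x02)
		return STAMINA;		/* integrated GFX */
	return SPEED;			/* discrete GFX */
}

int main(void)
{
	static const char *names[] = { "speed", "stamina", "auto" };
	unsigned int samples[] = { 0x00, 0x02, 0x80, 0x82 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("status 0x%02x -> %s\n", samples[i],
		       names[decode_0128(samples[i])]);
	return 0;
}
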
+
+static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buffer)
+{
+ int pos = __sony_nc_gfx_switch_status_get();
+
+ if (pos < 0)
+ return pos;
+
+ return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+}
+
+static int sony_nc_gfx_switch_setup(struct platform_device *pd,
+ unsigned int handle)
+{
+ unsigned int result;
+
+ gfxs_ctl = kzalloc(sizeof(struct snc_gfx_switch_control), GFP_KERNEL);
+ if (!gfxs_ctl)
+ return -ENOMEM;
+
+ gfxs_ctl->handle = handle;
+
+ sysfs_attr_init(&gfxs_ctl->attr.attr);
+ gfxs_ctl->attr.attr.name = "gfx_switch_status";
+ gfxs_ctl->attr.attr.mode = S_IRUGO;
+ gfxs_ctl->attr.show = sony_nc_gfx_switch_status_show;
+
+ result = device_create_file(&pd->dev, &gfxs_ctl->attr);
+ if (result)
+ goto gfxerror;
+
+ return 0;
+
+gfxerror:
+ kfree(gfxs_ctl);
+ gfxs_ctl = NULL;
+
+ return result;
+}
+
+static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
+{
+ if (gfxs_ctl) {
+ device_remove_file(&pd->dev, &gfxs_ctl->attr);
+
+ kfree(gfxs_ctl);
+ gfxs_ctl = NULL;
+ }
+}
+
/* High speed charging function */
static struct device_attribute *hsc_handle;
@@ -2533,6 +2642,8 @@ static void sony_nc_backlight_ng_read_limits(int handle,
lvl_table_len = 9;
break;
case 0x143:
+ case 0x14b:
+ case 0x14c:
lvl_table_len = 16;
break;
}
@@ -2584,6 +2695,18 @@ static void sony_nc_backlight_setup(void)
sony_nc_backlight_ng_read_limits(0x143, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+ } else if (sony_find_snc_handle(0x14b) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x14b, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
+ } else if (sony_find_snc_handle(0x14c) >= 0) {
+ ops = &sony_backlight_ng_ops;
+ sony_bl_props.cmd_base = 0x3000;
+ sony_nc_backlight_ng_read_limits(0x14c, &sony_bl_props);
+ max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
+
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
ops = &sony_backlight_ops;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f4f8408f3b5..9a907567f41 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -209,9 +209,8 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
- TP_HKEY_EV_UNK_6040 = 0x6040, /* Related to AC change?
- some sort of APM hint,
- W520 */
+ /* AC-related events */
+ TP_HKEY_EV_AC_CHANGED = 0x6040, /* AC status changed */
/* Misc */
TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
@@ -3629,6 +3628,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+ case TP_HKEY_EV_AC_CHANGED:
+ /* X120e, X121e, X220, X220i, X220t, X230, T420, T420s, W520:
+ * AC status changed; can be triggered by plugging or
+ * unplugging AC adapter, docking or undocking. */
+
+ /* fallthrough */
case TP_HKEY_EV_KEY_NUMLOCK:
case TP_HKEY_EV_KEY_FN:
@@ -8574,7 +8579,8 @@ static bool __pure __init tpacpi_is_valid_fw_id(const char* const s,
return s && strlen(s) >= 8 &&
tpacpi_is_fw_digit(s[0]) &&
tpacpi_is_fw_digit(s[1]) &&
- s[2] == t && s[3] == 'T' &&
+ s[2] == t &&
+ (s[3] == 'T' || s[3] == 'N') &&
tpacpi_is_fw_digit(s[4]) &&
tpacpi_is_fw_digit(s[5]);
}
@@ -8607,7 +8613,8 @@ static int __must_check __init get_thinkpad_model_data(
return -ENOMEM;
/* Really ancient ThinkPad 240X will fail this, which is fine */
- if (!tpacpi_is_valid_fw_id(tp->bios_version_str, 'E'))
+ if (!(tpacpi_is_valid_fw_id(tp->bios_version_str, 'E') ||
+ tpacpi_is_valid_fw_id(tp->bios_version_str, 'C')))
return 0;
tp->bios_model = tp->bios_version_str[0]
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b021ec6325..e2e349204e7 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib)
fib->hw_fib_va->header.StructType = FIB_MAGIC2;
fib->hw_fib_va->header.SenderFibAddress = (u32)address;
fib->hw_fib_va->header.u.TimeStamp = 0;
- BUG_ON((u32)(address >> 32) != 0L);
+ BUG_ON(upper_32_bits(address) != 0L);
address |= fibsize;
} else {
/* Calculate the amount to the fibsize bits */
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib)
address |= fibsize;
}
- src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
return 0;
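
upper_32_bits() is the kernel helper for taking the high half of a 64-bit value (its definition shifts by 16 twice so 32-bit builds never see an undefined 32-bit shift); the replacement keeps the BUG_ON and IQ_H write semantics unchanged. A small stand-alone check of the split, with the helper open-coded along the lines of its kernel definition:

/* Sketch: splitting a 64-bit FIB address the way the driver does above. */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;
typedef uint64_t u64;

#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))

int main(void)
{
	u64 address = 0x0000000123456780ULL;	/* example address */

	printf("IQ_H <- 0x%08x\n", (unsigned int)(upper_32_bits(address) & 0xffffffff));
	printf("IQ_L <- 0x%08x\n", (unsigned int)lower_32_bits(address));
	return 0;
}
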
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 6401db494ef..2daf4b0da43 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -62,6 +62,10 @@ static int bnx2fc_destroy(struct net_device *net_device);
static int bnx2fc_enable(struct net_device *netdev);
static int bnx2fc_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev);
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
static void bnx2fc_recv_frame(struct sk_buff *skb);
static void bnx2fc_start_disc(struct bnx2fc_interface *interface);
@@ -89,7 +93,6 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
static void bnx2fc_stop(struct bnx2fc_interface *interface);
static int __init bnx2fc_mod_init(void);
static void __exit bnx2fc_mod_exit(void);
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
unsigned int bnx2fc_debug_level;
module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -107,44 +110,6 @@ static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport)
((struct fcoe_port *)lport_priv(lport))->priv)->netdev;
}
-/**
- * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void bnx2fc_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = bnx2fc_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = bnx2fc_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
-
static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
@@ -741,35 +706,6 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev)
return 0;
}
-static void bnx2fc_link_speed_update(struct fc_lport *lport)
-{
- struct fcoe_port *port = lport_priv(lport);
- struct bnx2fc_interface *interface = port->priv;
- struct net_device *netdev = interface->netdev;
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
-
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_2500:
- lport->link_speed = FC_PORTSPEED_2GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- }
-}
static int bnx2fc_link_ok(struct fc_lport *lport)
{
struct fcoe_port *port = lport_priv(lport);
@@ -827,7 +763,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
port->fcoe_pending_queue_active = 0;
setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
if (!lport->vport) {
if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
@@ -871,6 +807,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
u16 vlan_id)
{
struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport;
struct fc_lport *vport;
struct bnx2fc_interface *interface, *tmp;
@@ -930,30 +867,47 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
interface->netdev->name, event);
- bnx2fc_link_speed_update(lport);
+ fcoe_link_speed_update(lport);
+
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
if (link_possible && !bnx2fc_link_ok(lport)) {
- /* Reset max recv frame size to default */
- fc_set_mfs(lport, BNX2FC_MFS);
- /*
- * ctlr link up will only be handled during
- * enable to avoid sending discovery solicitation
- * on a stale vlan
- */
- if (interface->enabled)
- fcoe_ctlr_link_up(ctlr);
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ /* Reset max recv frame size to default */
+ fc_set_mfs(lport, BNX2FC_MFS);
+ /*
+ * ctlr link up will only be handled during
+ * enable to avoid sending discovery
+ * solicitation on a stale vlan
+ */
+ if (interface->enabled)
+ fcoe_ctlr_link_up(ctlr);
+ };
} else if (fcoe_ctlr_link_down(ctlr)) {
- mutex_lock(&lport->lp_mutex);
- list_for_each_entry(vport, &lport->vports, list)
- fc_host_port_type(vport->host) =
- FC_PORTTYPE_UNKNOWN;
- mutex_unlock(&lport->lp_mutex);
- fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
- per_cpu_ptr(lport->stats,
- get_cpu())->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
- wait_for_upload = 1;
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ mutex_lock(&lport->lp_mutex);
+ list_for_each_entry(vport, &lport->vports, list)
+ fc_host_port_type(vport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ mutex_unlock(&lport->lp_mutex);
+ fc_host_port_type(lport->host) =
+ FC_PORTTYPE_UNKNOWN;
+ per_cpu_ptr(lport->stats,
+ get_cpu())->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ wait_for_upload = 1;
+ };
}
}
mutex_unlock(&bnx2fc_dev_lock);
@@ -1484,6 +1438,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port = lport_priv(lport);
port->lport = lport;
port->priv = interface;
+ port->get_netdev = bnx2fc_netdev;
INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
@@ -2003,7 +1958,9 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
set_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic);
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_disable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
@@ -2029,7 +1986,9 @@ static int bnx2fc_disable(struct net_device *netdev)
return rc;
}
-
+/**
+ * Deprecated: Use bnx2fc_ctlr_enabled()
+ */
static int bnx2fc_enable(struct net_device *netdev)
{
struct bnx2fc_interface *interface;
@@ -2055,17 +2014,57 @@ static int bnx2fc_enable(struct net_device *netdev)
}
/**
- * bnx2fc_create - Create bnx2fc FCoE interface
+ * bnx2fc_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int bnx2fc_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = bnx2fc_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return bnx2fc_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return bnx2fc_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+enum bnx2fc_create_link_state {
+ BNX2FC_CREATE_LINK_DOWN,
+ BNX2FC_CREATE_LINK_UP,
+};
+
+/**
+ * _bnx2fc_create() - Create bnx2fc FCoE interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * @buffer: The name of Ethernet interface to create on
- * @kp: The associated kernel param
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Called from sysfs.
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*
* Returns: 0 for success
*/
-static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _bnx2fc_create(struct net_device *netdev,
+ enum fip_state fip_mode,
+ enum bnx2fc_create_link_state link_state)
{
+ struct fcoe_ctlr_device *cdev;
struct fcoe_ctlr *ctlr;
struct bnx2fc_interface *interface;
struct bnx2fc_hba *hba;
@@ -2160,7 +2159,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this master N_port */
ctlr->lp = lport;
- if (!bnx2fc_link_ok(lport)) {
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ cdev->enabled = FCOE_CTLR_ENABLED;
+ else
+ cdev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP &&
+ !bnx2fc_link_ok(lport)) {
fcoe_ctlr_link_up(ctlr);
fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
@@ -2168,7 +2175,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
BNX2FC_HBA_DBG(lport, "create: START DISC\n");
bnx2fc_start_disc(interface);
- interface->enabled = true;
+
+ if (link_state == BNX2FC_CREATE_LINK_UP)
+ interface->enabled = true;
+
/*
* Release from kref_init in bnx2fc_interface_setup, on success
* lport should be holding a reference taken in bnx2fc_if_create
@@ -2194,6 +2204,37 @@ mod_err:
}
/**
+ * bnx2fc_create() - Create a bnx2fc interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _bnx2fc_create(netdev, fip_mode, BNX2FC_CREATE_LINK_UP);
+}
+
+/**
+ * bnx2fc_ctlr_alloc() - Allocate a bnx2fc interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
+ *
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
+ */
+static int bnx2fc_ctlr_alloc(struct net_device *netdev)
+{
+ return _bnx2fc_create(netdev, FIP_MODE_FABRIC,
+ BNX2FC_CREATE_LINK_DOWN);
+}
+
+/**
* bnx2fc_find_hba_for_cnic - maps cnic instance to bnx2fc hba instance
*
* @cnic: Pointer to cnic device instance
@@ -2318,6 +2359,7 @@ static struct fcoe_transport bnx2fc_transport = {
.name = {"bnx2fc"},
.attached = false,
.list = LIST_HEAD_INIT(bnx2fc_transport.list),
+ .alloc = bnx2fc_ctlr_alloc,
.match = bnx2fc_match,
.create = bnx2fc_create,
.destroy = bnx2fc_destroy,
@@ -2562,13 +2604,13 @@ module_init(bnx2fc_mod_init);
module_exit(bnx2fc_mod_exit);
static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
- .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
- .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
+ .set_fcoe_ctlr_enabled = bnx2fc_ctlr_enabled,
+ .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
+ .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
.get_fcoe_fcf_selected = fcoe_fcf_get_selected,
.get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
@@ -2675,7 +2717,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
.elsct_send = bnx2fc_elsct_send,
.fcp_abort_io = bnx2fc_abort_io,
.fcp_cleanup = bnx2fc_cleanup,
- .get_lesb = bnx2fc_get_lesb,
+ .get_lesb = fcoe_get_lesb,
.rport_event_callback = bnx2fc_rport_event_handler,
};
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 865c64fa923..fed486bfd3f 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
dcb->max_command = 1;
dcb->target_id = target;
dcb->target_lun = lun;
+ dcb->dev_mode = eeprom->target[target].cfg0;
#ifndef DC395x_NO_DISCONNECT
dcb->identify_msg =
IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
#else
dcb->identify_msg = IDENTIFY(0, lun);
#endif
- dcb->dev_mode = eeprom->target[target].cfg0;
dcb->inquiry7 = 0;
dcb->sync_mode = 0;
dcb->min_nego_period = clock_period[period_index];
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 666b7ac4475..b5d92fc93c7 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -82,11 +82,11 @@ static int fcoe_rcv(struct sk_buff *, struct net_device *,
struct packet_type *, struct net_device *);
static int fcoe_percpu_receive_thread(void *);
static void fcoe_percpu_clean(struct fc_lport *);
-static int fcoe_link_speed_update(struct fc_lport *);
static int fcoe_link_ok(struct fc_lport *);
static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
static int fcoe_hostlist_add(const struct fc_lport *);
+static void fcoe_hostlist_del(const struct fc_lport *);
static int fcoe_device_notification(struct notifier_block *, ulong, void *);
static void fcoe_dev_setup(void);
@@ -117,6 +117,11 @@ static int fcoe_destroy(struct net_device *netdev);
static int fcoe_enable(struct net_device *netdev);
static int fcoe_disable(struct net_device *netdev);
+/* fcoe_sysfs control interface handlers */
+static int fcoe_ctlr_alloc(struct net_device *netdev);
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev);
+
+
static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
u32 did, struct fc_frame *,
unsigned int op,
@@ -126,8 +131,6 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *,
void *, u32 timeout);
static void fcoe_recv_frame(struct sk_buff *skb);
-static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
-
/* notification function for packets from net device */
static struct notifier_block fcoe_notifier = {
.notifier_call = fcoe_device_notification,
@@ -151,11 +154,11 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
static int fcoe_vport_disable(struct fc_vport *, bool disable);
static void fcoe_set_vport_symbolic_name(struct fc_vport *);
static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
- .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
+ .set_fcoe_ctlr_mode = fcoe_ctlr_set_fip_mode,
+ .set_fcoe_ctlr_enabled = fcoe_ctlr_enabled,
.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
.get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
@@ -1112,10 +1115,17 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
port = lport_priv(lport);
port->lport = lport;
port->priv = fcoe;
+ port->get_netdev = fcoe_netdev;
port->max_queue_depth = FCOE_MAX_QUEUE_DEPTH;
port->min_queue_depth = FCOE_MIN_QUEUE_DEPTH;
INIT_WORK(&port->destroy_work, fcoe_destroy_work);
+ /*
+ * Need to add the lport to the hostlist
+ * so we catch NETDEV_CHANGE events.
+ */
+ fcoe_hostlist_add(lport);
+
/* configure a fc_lport including the exchange manager */
rc = fcoe_lport_config(lport);
if (rc) {
@@ -1187,6 +1197,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
out_lp_destroy:
fc_exch_mgr_free(lport);
out_host_put:
+ fcoe_hostlist_del(lport);
scsi_host_put(lport->host);
out:
return ERR_PTR(rc);
@@ -1964,6 +1975,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
static int fcoe_device_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
+ struct fcoe_ctlr_device *cdev;
struct fc_lport *lport = NULL;
struct net_device *netdev = ptr;
struct fcoe_ctlr *ctlr;
@@ -2020,13 +2032,29 @@ static int fcoe_device_notification(struct notifier_block *notifier,
fcoe_link_speed_update(lport);
- if (link_possible && !fcoe_link_ok(lport))
- fcoe_ctlr_link_up(ctlr);
- else if (fcoe_ctlr_link_down(ctlr)) {
- stats = per_cpu_ptr(lport->stats, get_cpu());
- stats->LinkFailureCount++;
- put_cpu();
- fcoe_clean_pending_queue(lport);
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
+ if (link_possible && !fcoe_link_ok(lport)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link up while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ fcoe_ctlr_link_up(ctlr);
+ };
+ } else if (fcoe_ctlr_link_down(ctlr)) {
+ switch (cdev->enabled) {
+ case FCOE_CTLR_DISABLED:
+ pr_info("Link down while interface is disabled.\n");
+ break;
+ case FCOE_CTLR_ENABLED:
+ case FCOE_CTLR_UNUSED:
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->LinkFailureCount++;
+ put_cpu();
+ fcoe_clean_pending_queue(lport);
+ };
}
out:
return rc;
@@ -2039,6 +2067,8 @@ out:
* Called from fcoe transport.
*
* Returns: 0 for success
+ *
+ * Deprecated: use fcoe_ctlr_enabled()
*/
static int fcoe_disable(struct net_device *netdev)
{
@@ -2098,6 +2128,33 @@ out:
}
/**
+ * fcoe_ctlr_enabled() - Enable or disable an FCoE Controller
+ * @cdev: The FCoE Controller that is being enabled or disabled
+ *
+ * fcoe_sysfs will ensure that the state of 'enabled' has
+ * changed, so no checking is necessary here. This routine simply
+ * calls fcoe_enable or fcoe_disable, both of which are deprecated.
+ * When those routines are removed the functionality can be merged
+ * here.
+ */
+static int fcoe_ctlr_enabled(struct fcoe_ctlr_device *cdev)
+{
+ struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(cdev);
+ struct fc_lport *lport = ctlr->lp;
+ struct net_device *netdev = fcoe_netdev(lport);
+
+ switch (cdev->enabled) {
+ case FCOE_CTLR_ENABLED:
+ return fcoe_enable(netdev);
+ case FCOE_CTLR_DISABLED:
+ return fcoe_disable(netdev);
+ case FCOE_CTLR_UNUSED:
+ default:
+ return -ENOTSUPP;
+ };
+}
+
+/**
* fcoe_destroy() - Destroy a FCoE interface
* @netdev : The net_device object the Ethernet interface to create on
*
@@ -2139,8 +2196,31 @@ static void fcoe_destroy_work(struct work_struct *work)
{
struct fcoe_port *port;
struct fcoe_interface *fcoe;
+ struct Scsi_Host *shost;
+ struct fc_host_attrs *fc_host;
+ unsigned long flags;
+ struct fc_vport *vport;
+ struct fc_vport *next_vport;
port = container_of(work, struct fcoe_port, destroy_work);
+ shost = port->lport->host;
+ fc_host = shost_to_fc_host(shost);
+
+ /* Loop through all the vports and mark them for deletion */
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers) {
+ if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+ continue;
+ } else {
+ vport->flags |= FC_VPORT_DELETING;
+ queue_work(fc_host_work_q(shost),
+ &vport->vport_delete_work);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ flush_workqueue(fc_host_work_q(shost));
+
mutex_lock(&fcoe_config_mutex);
fcoe = port->priv;
@@ -2204,16 +2284,26 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
#endif
}
+enum fcoe_create_link_state {
+ FCOE_CREATE_LINK_DOWN,
+ FCOE_CREATE_LINK_UP,
+};
+
/**
- * fcoe_create() - Create a fcoe interface
- * @netdev : The net_device object the Ethernet interface to create on
- * @fip_mode: The FIP mode for this creation
+ * _fcoe_create() - (internal) Create a fcoe interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ * @link_state: The ctlr link state on creation
*
- * Called from fcoe transport
+ * Called from either the libfcoe 'create' module parameter
+ * via fcoe_create or from fcoe_sysfs's ctlr_create file.
*
- * Returns: 0 for success
+ * libfcoe's 'create' module parameter is deprecated so some
+ * consolidation of code can be done when that interface is
+ * removed.
*/
-static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
+ enum fcoe_create_link_state link_state)
{
int rc = 0;
struct fcoe_ctlr_device *ctlr_dev;
@@ -2254,13 +2344,29 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);
- /* add to lports list */
- fcoe_hostlist_add(lport);
-
/* start FIP Discovery and FLOGI */
lport->boot_time = jiffies;
fc_fabric_login(lport);
- if (!fcoe_link_ok(lport)) {
+
+ /*
+ * If the fcoe_ctlr_device is to be set to DISABLED
+ * it must be done after the lport is added to the
+ * hostlist, but before the rtnl_lock is released.
+ * This is because the rtnl_lock protects the
+ * hostlist that fcoe_device_notification uses. If
+	 * DISABLED then 'enabled' needs to be considered
+	 * when handling link events. 'enabled' must be set
+ * handling link events. 'enabled' must be set
+ * before the lport can be found in the hostlist
+ * when a link up event is received.
+ */
+ if (link_state == FCOE_CREATE_LINK_UP)
+ ctlr_dev->enabled = FCOE_CTLR_ENABLED;
+ else
+ ctlr_dev->enabled = FCOE_CTLR_DISABLED;
+
+ if (link_state == FCOE_CREATE_LINK_UP &&
+ !fcoe_link_ok(lport)) {
rtnl_unlock();
fcoe_ctlr_link_up(ctlr);
mutex_unlock(&fcoe_config_mutex);
@@ -2275,37 +2381,34 @@ out_nortnl:
}
/**
- * fcoe_link_speed_update() - Update the supported and actual link speeds
- * @lport: The local port to update speeds for
+ * fcoe_create() - Create a fcoe interface
+ * @netdev : The net_device object the Ethernet interface to create on
+ * @fip_mode: The FIP mode for this creation
+ *
+ * Called from fcoe transport
+ *
+ * Returns: 0 for success
+ */
+static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
+{
+ return _fcoe_create(netdev, fip_mode, FCOE_CREATE_LINK_UP);
+}
+
+/**
+ * fcoe_ctlr_alloc() - Allocate a fcoe interface from fcoe_sysfs
+ * @netdev: The net_device to be used by the allocated FCoE Controller
*
- * Returns: 0 if the ethtool query was successful
- * -1 if the ethtool query failed
+ * This routine is called from fcoe_sysfs. It will start the fcoe_ctlr
+ * in a link_down state. This allows the user an opportunity to configure
+ * the FCoE Controller from sysfs before enabling the FCoE Controller.
+ *
+ * Creating with this routine starts the FCoE Controller in Fabric
+ * mode. The user can change to VN2VN or another mode before enabling.
*/
-static int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_ctlr_alloc(struct net_device *netdev)
{
- struct net_device *netdev = fcoe_netdev(lport);
- struct ethtool_cmd ecmd;
-
- if (!__ethtool_get_settings(netdev, &ecmd)) {
- lport->link_supported_speeds &=
- ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
- if (ecmd.supported & (SUPPORTED_1000baseT_Half |
- SUPPORTED_1000baseT_Full))
- lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
- if (ecmd.supported & SUPPORTED_10000baseT_Full)
- lport->link_supported_speeds |=
- FC_PORTSPEED_10GBIT;
- switch (ethtool_cmd_speed(&ecmd)) {
- case SPEED_1000:
- lport->link_speed = FC_PORTSPEED_1GBIT;
- break;
- case SPEED_10000:
- lport->link_speed = FC_PORTSPEED_10GBIT;
- break;
- }
- return 0;
- }
- return -1;
+ return _fcoe_create(netdev, FIP_MODE_FABRIC,
+ FCOE_CREATE_LINK_DOWN);
}
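
The allocation path above exists so that fcoe_sysfs can create a controller that starts link-down: the user first writes the netdev name to the bus-level create knob, adjusts attributes such as the FIP mode, and only then flips 'enabled', which lands in fcoe_ctlr_enabled()/bnx2fc_ctlr_enabled(). A user-space sketch of that sequence; the sysfs paths, the controller index and the "VN2VN" mode string are assumptions based on the fcoe_sysfs bus interface and may differ on a given system:

/* Sketch: create an FCoE controller link-down via fcoe_sysfs, set its
 * mode, then enable it.  Run as root; adjust paths as needed. */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *netdev = "eth0";			   /* example interface */
	const char *ctlr = "/sys/bus/fcoe/devices/ctlr_0"; /* discovered index */
	char path[256];

	/* 1. Allocate the controller; it starts with the link down. */
	if (write_str("/sys/bus/fcoe/ctlr_create", netdev))
		return 1;

	/* 2. Configure before enabling, e.g. switch the FIP mode. */
	snprintf(path, sizeof(path), "%s/mode", ctlr);
	write_str(path, "VN2VN");

	/* 3. Enable: this ends up in the set_fcoe_ctlr_enabled handler. */
	snprintf(path, sizeof(path), "%s/enabled", ctlr);
	return write_str(path, "1") ? 1 : 0;
}
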
/**
@@ -2375,10 +2478,13 @@ static int fcoe_reset(struct Scsi_Host *shost)
struct fcoe_port *port = lport_priv(lport);
struct fcoe_interface *fcoe = port->priv;
struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
fcoe_ctlr_link_down(ctlr);
fcoe_clean_pending_queue(ctlr->lp);
- if (!fcoe_link_ok(ctlr->lp))
+
+ if (cdev->enabled != FCOE_CTLR_DISABLED &&
+ !fcoe_link_ok(ctlr->lp))
fcoe_ctlr_link_up(ctlr);
return 0;
}
@@ -2445,12 +2551,31 @@ static int fcoe_hostlist_add(const struct fc_lport *lport)
return 0;
}
+/**
+ * fcoe_hostlist_del() - Remove the FCoE interface identified by a local
+ *			 port from the hostlist
+ * @lport: The local port that identifies the FCoE interface to be removed
+ *
+ * Locking: must be called with the RTNL mutex held
+ *
+ */
+static void fcoe_hostlist_del(const struct fc_lport *lport)
+{
+ struct fcoe_interface *fcoe;
+ struct fcoe_port *port;
+
+ port = lport_priv(lport);
+ fcoe = port->priv;
+ list_del(&fcoe->list);
+ return;
+}
static struct fcoe_transport fcoe_sw_transport = {
.name = {FCOE_TRANSPORT_DEFAULT},
.attached = false,
.list = LIST_HEAD_INIT(fcoe_sw_transport.list),
.match = fcoe_match,
+ .alloc = fcoe_ctlr_alloc,
.create = fcoe_create,
.destroy = fcoe_destroy,
.enable = fcoe_enable,
@@ -2534,9 +2659,9 @@ static void __exit fcoe_exit(void)
/* releases the associated fcoe hosts */
rtnl_lock();
list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
- list_del(&fcoe->list);
ctlr = fcoe_to_ctlr(fcoe);
port = lport_priv(ctlr->lp);
+ fcoe_hostlist_del(port->lport);
queue_work(fcoe_wq, &port->destroy_work);
}
rtnl_unlock();
@@ -2776,43 +2901,6 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport)
NULL, NULL, 3 * lport->r_a_tov);
}
-/**
- * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
- * @lport: the local port
- * @fc_lesb: the link error status block
- */
-static void fcoe_get_lesb(struct fc_lport *lport,
- struct fc_els_lesb *fc_lesb)
-{
- struct net_device *netdev = fcoe_netdev(lport);
-
- __fcoe_get_lesb(lport, fc_lesb, netdev);
-}
-
-static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
-{
- struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
- struct net_device *netdev = fcoe_netdev(fip->lp);
- struct fcoe_fc_els_lesb *fcoe_lesb;
- struct fc_els_lesb fc_lesb;
-
- __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
- fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
-
- ctlr_dev->lesb.lesb_link_fail =
- ntohl(fcoe_lesb->lesb_link_fail);
- ctlr_dev->lesb.lesb_vlink_fail =
- ntohl(fcoe_lesb->lesb_vlink_fail);
- ctlr_dev->lesb.lesb_miss_fka =
- ntohl(fcoe_lesb->lesb_miss_fka);
- ctlr_dev->lesb.lesb_symb_err =
- ntohl(fcoe_lesb->lesb_symb_err);
- ctlr_dev->lesb.lesb_err_block =
- ntohl(fcoe_lesb->lesb_err_block);
- ctlr_dev->lesb.lesb_fcs_error =
- ntohl(fcoe_lesb->lesb_fcs_error);
-}
-
static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
{
struct fcoe_ctlr_device *ctlr_dev =
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index b42dc32cb5e..2b53672bf93 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -55,12 +55,12 @@ do { \
#define FCOE_DBG(fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_LOGGING, \
- printk(KERN_INFO "fcoe: " fmt, ##args);)
+ pr_info("fcoe: " fmt, ##args);)
#define FCOE_NETDEV_DBG(netdev, fmt, args...) \
FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING, \
- printk(KERN_INFO "fcoe: %s: " fmt, \
- netdev->name, ##args);)
+ pr_info("fcoe: %s: " fmt, \
+ netdev->name, ##args);)
/**
* struct fcoe_interface - A FCoE interface
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 4a909d7cfde..08c3bc398da 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1291,8 +1291,16 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
LIBFCOE_FIP_DBG(fip, "Clear Virtual Link received\n");
- if (!fcf || !lport->port_id)
+ if (!fcf || !lport->port_id) {
+ /*
+ * We have not yet selected the best FCF, but a CVL arrived in the
+ * meantime. Reset the ctlr and let it rediscover the FCF.
+ */
+ mutex_lock(&fip->ctlr_mutex);
+ fcoe_ctlr_reset(fip);
+ mutex_unlock(&fip->ctlr_mutex);
return;
+ }
/*
* mask of required descriptors. Validating each one clears its bit.
@@ -1551,15 +1559,6 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
fcf->fabric_name, fcf->vfid, fcf->fcf_mac,
fcf->fc_map, fcoe_ctlr_mtu_valid(fcf),
fcf->flogi_sent, fcf->pri);
- if (fcf->fabric_name != first->fabric_name ||
- fcf->vfid != first->vfid ||
- fcf->fc_map != first->fc_map) {
- LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
- "or FC-MAP\n");
- return NULL;
- }
- if (fcf->flogi_sent)
- continue;
if (!fcoe_ctlr_fcf_usable(fcf)) {
LIBFCOE_FIP_DBG(fip, "FCF for fab %16.16llx "
"map %x %svalid %savailable\n",
@@ -1569,6 +1568,15 @@ static struct fcoe_fcf *fcoe_ctlr_select(struct fcoe_ctlr *fip)
"" : "un");
continue;
}
+ if (fcf->fabric_name != first->fabric_name ||
+ fcf->vfid != first->vfid ||
+ fcf->fc_map != first->fc_map) {
+ LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
+ "or FC-MAP\n");
+ return NULL;
+ }
+ if (fcf->flogi_sent)
+ continue;
if (!best || fcf->pri < best->pri || best->flogi_sent)
best = fcf;
}
@@ -2864,22 +2872,21 @@ void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
}
EXPORT_SYMBOL(fcoe_fcf_get_selected);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
mutex_lock(&ctlr->ctlr_mutex);
- switch (ctlr->mode) {
- case FIP_MODE_FABRIC:
- ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
- break;
- case FIP_MODE_VN2VN:
- ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
+ switch (ctlr_dev->mode) {
+ case FIP_CONN_TYPE_VN2VN:
+ ctlr->mode = FIP_MODE_VN2VN;
break;
+ case FIP_CONN_TYPE_FABRIC:
default:
- ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
+ ctlr->mode = FIP_MODE_FABRIC;
break;
}
+
mutex_unlock(&ctlr->ctlr_mutex);
}
-EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
+EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
index 5e751689a08..8c05ae017f5 100644
--- a/drivers/scsi/fcoe/fcoe_sysfs.c
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -21,8 +21,17 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/etherdevice.h>
+#include <linux/ctype.h>
#include <scsi/fcoe_sysfs.h>
+#include <scsi/libfcoe.h>
+
+/*
+ * OK to include local libfcoe.h for debug_logging, but cannot include
+ * <scsi/libfcoe.h>, otherwise non-netdev based fcoe solutions would
+ * have to include more than fcoe_sysfs.h.
+ */
+#include "libfcoe.h"
static atomic_t ctlr_num;
static atomic_t fcf_num;
@@ -71,6 +80,8 @@ MODULE_PARM_DESC(fcf_dev_loss_tmo,
((x)->lesb.lesb_err_block)
#define fcoe_ctlr_fcs_error(x) \
((x)->lesb.lesb_fcs_error)
+#define fcoe_ctlr_enabled(x) \
+ ((x)->enabled)
#define fcoe_fcf_state(x) \
((x)->state)
#define fcoe_fcf_fabric_name(x) \
@@ -210,25 +221,34 @@ static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
#define fcoe_enum_name_search(title, table_type, table) \
static const char *get_fcoe_##title##_name(enum table_type table_key) \
{ \
- int i; \
- char *name = NULL; \
- \
- for (i = 0; i < ARRAY_SIZE(table); i++) { \
- if (table[i].value == table_key) { \
- name = table[i].name; \
- break; \
- } \
- } \
- return name; \
+ if (table_key < 0 || table_key >= ARRAY_SIZE(table)) \
+ return NULL; \
+ return table[table_key]; \
+}
+
+static char *fip_conn_type_names[] = {
+ [ FIP_CONN_TYPE_UNKNOWN ] = "Unknown",
+ [ FIP_CONN_TYPE_FABRIC ] = "Fabric",
+ [ FIP_CONN_TYPE_VN2VN ] = "VN2VN",
+};
+fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
+
+static enum fip_conn_type fcoe_parse_mode(const char *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fip_conn_type_names); i++) {
+ if (strcasecmp(buf, fip_conn_type_names[i]) == 0)
+ return i;
+ }
+
+ return FIP_CONN_TYPE_UNKNOWN;
}
-static struct {
- enum fcf_state value;
- char *name;
-} fcf_state_names[] = {
- { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
- { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
- { FCOE_FCF_STATE_CONNECTED, "Connected" },
+static char *fcf_state_names[] = {
+ [ FCOE_FCF_STATE_UNKNOWN ] = "Unknown",
+ [ FCOE_FCF_STATE_DISCONNECTED ] = "Disconnected",
+ [ FCOE_FCF_STATE_CONNECTED ] = "Connected",
};
fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
#define FCOE_FCF_STATE_MAX_NAMELEN 50
@@ -246,17 +266,7 @@ static ssize_t show_fcf_state(struct device *dev,
}
static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
-static struct {
- enum fip_conn_type value;
- char *name;
-} fip_conn_type_names[] = {
- { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
- { FIP_CONN_TYPE_FABRIC, "Fabric" },
- { FIP_CONN_TYPE_VN2VN, "VN2VN" },
-};
-fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
-#define FCOE_CTLR_MODE_MAX_NAMELEN 50
-
+#define FCOE_MAX_MODENAME_LEN 20
static ssize_t show_ctlr_mode(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -264,17 +274,116 @@ static ssize_t show_ctlr_mode(struct device *dev,
struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
const char *name;
- if (ctlr->f->get_fcoe_ctlr_mode)
- ctlr->f->get_fcoe_ctlr_mode(ctlr);
-
name = get_fcoe_ctlr_mode_name(ctlr->mode);
if (!name)
return -EINVAL;
- return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
+ return snprintf(buf, FCOE_MAX_MODENAME_LEN,
+ "%s\n", name);
+}
+
+static ssize_t store_ctlr_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ char mode[FCOE_MAX_MODENAME_LEN + 1];
+
+ if (count > FCOE_MAX_MODENAME_LEN)
+ return -EINVAL;
+
+ strncpy(mode, buf, count);
+
+ if (mode[count - 1] == '\n')
+ mode[count - 1] = '\0';
+ else
+ mode[count] = '\0';
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+ return -EBUSY;
+ case FCOE_CTLR_DISABLED:
+ if (!ctlr->f->set_fcoe_ctlr_mode) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Mode change not supported by LLD.");
+ return -ENOTSUPP;
+ }
+
+ ctlr->mode = fcoe_parse_mode(mode);
+ if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
+ LIBFCOE_SYSFS_DBG(ctlr,
+ "Unknown mode %s provided.", buf);
+ return -EINVAL;
+ }
+
+ ctlr->f->set_fcoe_ctlr_mode(ctlr);
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+
+ return count;
+ case FCOE_CTLR_UNUSED:
+ default:
+ LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+ return -ENOTSUPP;
+ };
+}
+
+static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO | S_IWUSR,
+ show_ctlr_mode, store_ctlr_mode);
+
+static ssize_t store_ctlr_enabled(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ int rc;
+
+ switch (ctlr->enabled) {
+ case FCOE_CTLR_ENABLED:
+ if (*buf == '1')
+ return count;
+ ctlr->enabled = FCOE_CTLR_DISABLED;
+ break;
+ case FCOE_CTLR_DISABLED:
+ if (*buf == '0')
+ return count;
+ ctlr->enabled = FCOE_CTLR_ENABLED;
+ break;
+ case FCOE_CTLR_UNUSED:
+ return -ENOTSUPP;
+ };
+
+ rc = ctlr->f->set_fcoe_ctlr_enabled(ctlr);
+ if (rc)
+ return rc;
+
+ return count;
+}
+
+static char *ctlr_enabled_state_names[] = {
+ [ FCOE_CTLR_ENABLED ] = "1",
+ [ FCOE_CTLR_DISABLED ] = "0",
+};
+fcoe_enum_name_search(ctlr_enabled_state, ctlr_enabled_state,
+ ctlr_enabled_state_names)
+#define FCOE_CTLR_ENABLED_MAX_NAMELEN 50
+
+static ssize_t show_ctlr_enabled_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
+ const char *name;
+
+ name = get_fcoe_ctlr_enabled_state_name(ctlr->enabled);
+ if (!name)
+ return -EINVAL;
+ return snprintf(buf, FCOE_CTLR_ENABLED_MAX_NAMELEN,
"%s\n", name);
}
-static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
- show_ctlr_mode, NULL);
+
+static FCOE_DEVICE_ATTR(ctlr, enabled, S_IRUGO | S_IWUSR,
+ show_ctlr_enabled_state,
+ store_ctlr_enabled);
static ssize_t
store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
@@ -359,6 +468,7 @@ static struct attribute_group fcoe_ctlr_lesb_attr_group = {
static struct attribute *fcoe_ctlr_attrs[] = {
&device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
+ &device_attr_fcoe_ctlr_enabled.attr,
&device_attr_fcoe_ctlr_mode.attr,
NULL,
};
@@ -443,9 +553,16 @@ struct device_type fcoe_fcf_device_type = {
.release = fcoe_fcf_device_release,
};
+struct bus_attribute fcoe_bus_attr_group[] = {
+ __ATTR(ctlr_create, S_IWUSR, NULL, fcoe_ctlr_create_store),
+ __ATTR(ctlr_destroy, S_IWUSR, NULL, fcoe_ctlr_destroy_store),
+ __ATTR_NULL
+};
+
struct bus_type fcoe_bus_type = {
.name = "fcoe",
.match = &fcoe_bus_match,
+ .bus_attrs = fcoe_bus_attr_group,
};
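The bus attributes registered above (ctlr_create/ctlr_destroy) together with the per-controller mode and enabled attributes form the new fcoe_sysfs control path. Below is a minimal userspace sketch of how that path might be driven; the sysfs paths are assumptions inferred from the attribute and bus names in this patch, not verified against a running system.

/*
 * Hypothetical userspace sketch (not part of this patch): drive the
 * fcoe_sysfs control path added above. The sysfs paths below are
 * assumptions inferred from the bus attributes (ctlr_create) and the
 * per-controller attributes (mode, enabled); adjust to the layout on
 * a real system.
 */
#include <stdio.h>

static int sysfs_write(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(val, f) == EOF) {
		perror(path);
		fclose(f);
		return -1;
	}
	return fclose(f);
}

int main(void)
{
	/* Allocate a controller on eth0 (handled by ft->alloc above). */
	if (sysfs_write("/sys/bus/fcoe/ctlr_create", "eth0"))
		return 1;
	/* Pick the FIP mode while still disabled; store_ctlr_mode()
	 * returns -EBUSY once the controller has been enabled. */
	sysfs_write("/sys/bus/fcoe/devices/ctlr_0/mode", "Fabric");
	/* Flip the new "enabled" attribute to start the controller. */
	sysfs_write("/sys/bus/fcoe/devices/ctlr_0/enabled", "1");
	return 0;
}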
/**
@@ -566,6 +683,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
ctlr->id = atomic_inc_return(&ctlr_num) - 1;
ctlr->f = f;
+ ctlr->mode = FIP_CONN_TYPE_FABRIC;
INIT_LIST_HEAD(&ctlr->fcfs);
mutex_init(&ctlr->lock);
ctlr->dev.parent = parent;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index ac76d8a042d..f3a5a53e863 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -83,6 +83,50 @@ static struct notifier_block libfcoe_notifier = {
.notifier_call = libfcoe_device_notification,
};
+/**
+ * fcoe_link_speed_update() - Update the supported and actual link speeds
+ * @lport: The local port to update speeds for
+ *
+ * Returns: 0 if the ethtool query was successful
+ * -1 if the ethtool query failed
+ */
+int fcoe_link_speed_update(struct fc_lport *lport)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+ struct ethtool_cmd ecmd;
+
+ if (!__ethtool_get_settings(netdev, &ecmd)) {
+ lport->link_supported_speeds &=
+ ~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full))
+ lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+ if (ecmd.supported & SUPPORTED_10000baseT_Full)
+ lport->link_supported_speeds |=
+ FC_PORTSPEED_10GBIT;
+ switch (ethtool_cmd_speed(&ecmd)) {
+ case SPEED_1000:
+ lport->link_speed = FC_PORTSPEED_1GBIT;
+ break;
+ case SPEED_10000:
+ lport->link_speed = FC_PORTSPEED_10GBIT;
+ break;
+ }
+ return 0;
+ }
+ return -1;
+}
+EXPORT_SYMBOL_GPL(fcoe_link_speed_update);
+
+/**
+ * __fcoe_get_lesb() - Get the Link Error Status Block (LESB) for a given lport
+ * @lport: The local port whose LESB is to be filled
+ * @fc_lesb: Pointer to the LESB to be filled up
+ * @netdev: Pointer to the netdev that is associated with the lport
+ *
+ * Note, the Link Error Status Block (LESB) for FCoE is defined in FC-BB-6
+ * Clause 7.11 in v1.04.
+ */
void __fcoe_get_lesb(struct fc_lport *lport,
struct fc_els_lesb *fc_lesb,
struct net_device *netdev)
@@ -112,6 +156,51 @@ void __fcoe_get_lesb(struct fc_lport *lport,
}
EXPORT_SYMBOL_GPL(__fcoe_get_lesb);
+/**
+ * fcoe_get_lesb() - Fill the FCoE Link Error Status Block
+ * @lport: the local port
+ * @fc_lesb: the link error status block
+ */
+void fcoe_get_lesb(struct fc_lport *lport,
+ struct fc_els_lesb *fc_lesb)
+{
+ struct net_device *netdev = fcoe_get_netdev(lport);
+
+ __fcoe_get_lesb(lport, fc_lesb, netdev);
+}
+EXPORT_SYMBOL_GPL(fcoe_get_lesb);
+
+/**
+ * fcoe_ctlr_get_lesb() - Get the Link Error Status Block (LESB) for a given
+ * fcoe controller device
+ * @ctlr_dev: The given fcoe controller device
+ *
+ */
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
+{
+ struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
+ struct net_device *netdev = fcoe_get_netdev(fip->lp);
+ struct fcoe_fc_els_lesb *fcoe_lesb;
+ struct fc_els_lesb fc_lesb;
+
+ __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
+ fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
+
+ ctlr_dev->lesb.lesb_link_fail =
+ ntohl(fcoe_lesb->lesb_link_fail);
+ ctlr_dev->lesb.lesb_vlink_fail =
+ ntohl(fcoe_lesb->lesb_vlink_fail);
+ ctlr_dev->lesb.lesb_miss_fka =
+ ntohl(fcoe_lesb->lesb_miss_fka);
+ ctlr_dev->lesb.lesb_symb_err =
+ ntohl(fcoe_lesb->lesb_symb_err);
+ ctlr_dev->lesb.lesb_err_block =
+ ntohl(fcoe_lesb->lesb_err_block);
+ ctlr_dev->lesb.lesb_fcs_error =
+ ntohl(fcoe_lesb->lesb_fcs_error);
+}
+EXPORT_SYMBOL_GPL(fcoe_ctlr_get_lesb);
+
void fcoe_wwn_to_str(u64 wwn, char *buf, int len)
{
u8 wwpn[8];
@@ -627,6 +716,110 @@ static int libfcoe_device_notification(struct notifier_block *notifier,
return NOTIFY_OK;
}
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+ struct fcoe_ctlr_device *ctlr_dev = NULL;
+ int rc = 0;
+ int err;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("Invalid device %s.\n", buf);
+ rc = -ENODEV;
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (ft) {
+ LIBFCOE_TRANSPORT_DBG("transport %s already has existing "
+ "FCoE instance on %s.\n",
+ ft->name, netdev->name);
+ rc = -EEXIST;
+ goto out_putdev;
+ }
+
+ ft = fcoe_transport_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ /* pass to transport create */
+ err = ft->alloc ? ft->alloc(netdev) : -ENODEV;
+ if (err) {
+ fcoe_del_netdev_mapping(netdev);
+ rc = -ENOMEM;
+ goto out_putdev;
+ }
+
+ err = fcoe_add_netdev_mapping(netdev, ft);
+ if (err) {
+ LIBFCOE_TRANSPORT_DBG("failed to add new netdev mapping "
+ "for FCoE transport %s for %s.\n",
+ ft->name, netdev->name);
+ rc = -ENODEV;
+ goto out_putdev;
+ }
+
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to create fcoe on %s.\n",
+ ft->name, (ctlr_dev) ? "succeeded" : "failed",
+ netdev->name);
+
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ if (rc)
+ return rc;
+ return count;
+}
+
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count)
+{
+ int rc = -ENODEV;
+ struct net_device *netdev = NULL;
+ struct fcoe_transport *ft = NULL;
+
+ mutex_lock(&ft_mutex);
+
+ netdev = fcoe_if_to_netdev(buf);
+ if (!netdev) {
+ LIBFCOE_TRANSPORT_DBG("invalid device %s.\n", buf);
+ goto out_nodev;
+ }
+
+ ft = fcoe_netdev_map_lookup(netdev);
+ if (!ft) {
+ LIBFCOE_TRANSPORT_DBG("no FCoE transport found for %s.\n",
+ netdev->name);
+ goto out_putdev;
+ }
+
+ /* pass to transport destroy */
+ rc = ft->destroy(netdev);
+ if (rc)
+ goto out_putdev;
+
+ fcoe_del_netdev_mapping(netdev);
+ LIBFCOE_TRANSPORT_DBG("transport %s %s to destroy fcoe on %s.\n",
+ ft->name, (rc) ? "failed" : "succeeded",
+ netdev->name);
+ rc = count; /* required for successful return */
+out_putdev:
+ dev_put(netdev);
+out_nodev:
+ mutex_unlock(&ft_mutex);
+ return rc;
+}
+EXPORT_SYMBOL(fcoe_ctlr_destroy_store);
/**
* fcoe_transport_create() - Create a fcoe interface
@@ -769,11 +962,7 @@ out_putdev:
dev_put(netdev);
out_nodev:
mutex_unlock(&ft_mutex);
-
- if (rc == -ERESTARTSYS)
- return restart_syscall();
- else
- return rc;
+ return rc;
}
/**
diff --git a/drivers/scsi/fcoe/libfcoe.h b/drivers/scsi/fcoe/libfcoe.h
index 6af5fc3a17d..d3bb16d1140 100644
--- a/drivers/scsi/fcoe/libfcoe.h
+++ b/drivers/scsi/fcoe/libfcoe.h
@@ -2,9 +2,10 @@
#define _FCOE_LIBFCOE_H_
extern unsigned int libfcoe_debug_logging;
-#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
-#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
-#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_LOGGING 0x01 /* General logging, not categorized */
+#define LIBFCOE_FIP_LOGGING 0x02 /* FIP logging */
+#define LIBFCOE_TRANSPORT_LOGGING 0x04 /* FCoE transport logging */
+#define LIBFCOE_SYSFS_LOGGING 0x08 /* fcoe_sysfs logging */
#define LIBFCOE_CHECK_LOGGING(LEVEL, CMD) \
do { \
@@ -16,16 +17,19 @@ do { \
#define LIBFCOE_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING, \
- printk(KERN_INFO "libfcoe: " fmt, ##args);)
+ pr_info("libfcoe: " fmt, ##args);)
#define LIBFCOE_FIP_DBG(fip, fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_FIP_LOGGING, \
- printk(KERN_INFO "host%d: fip: " fmt, \
- (fip)->lp->host->host_no, ##args);)
+ pr_info("host%d: fip: " fmt, \
+ (fip)->lp->host->host_no, ##args);)
#define LIBFCOE_TRANSPORT_DBG(fmt, args...) \
LIBFCOE_CHECK_LOGGING(LIBFCOE_TRANSPORT_LOGGING, \
- printk(KERN_INFO "%s: " fmt, \
- __func__, ##args);)
+ pr_info("%s: " fmt, __func__, ##args);)
+
+#define LIBFCOE_SYSFS_DBG(cdev, fmt, args...) \
+ LIBFCOE_CHECK_LOGGING(LIBFCOE_SYSFS_LOGGING, \
+ pr_info("ctlr_%d: " fmt, cdev->id, ##args);)
#endif /* _FCOE_LIBFCOE_H_ */
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 37c3440bc17..383598fadf0 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -7,6 +7,8 @@ fnic-y := \
fnic_res.o \
fnic_fcs.o \
fnic_scsi.o \
+ fnic_trace.o \
+ fnic_debugfs.o \
vnic_cq.o \
vnic_dev.o \
vnic_intr.o \
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 95a5ba29320..98436c36303 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -26,6 +26,7 @@
#include <scsi/libfcoe.h>
#include "fnic_io.h"
#include "fnic_res.h"
+#include "fnic_trace.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
@@ -56,6 +57,34 @@
#define FNIC_NO_TAG -1
/*
+ * Command flags to identify the type of command and for other future
+ * use.
+ */
+#define FNIC_NO_FLAGS 0
+#define FNIC_IO_INITIALIZED BIT(0)
+#define FNIC_IO_ISSUED BIT(1)
+#define FNIC_IO_DONE BIT(2)
+#define FNIC_IO_REQ_NULL BIT(3)
+#define FNIC_IO_ABTS_PENDING BIT(4)
+#define FNIC_IO_ABORTED BIT(5)
+#define FNIC_IO_ABTS_ISSUED BIT(6)
+#define FNIC_IO_TERM_ISSUED BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8)
+#define FNIC_IO_ABT_TERM_DONE BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11)
+#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */
+#define FNIC_DEV_RST_ISSUED BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED BIT(16)
+#define FNIC_DEV_RST_DONE BIT(17)
+#define FNIC_DEV_RST_REQ_NULL BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE BIT(19)
+#define FNIC_DEV_RST_TERM_DONE BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+
+/*
* Usage of the scsi_cmnd scratchpad.
* These fields are locked by the hashed io_req_lock.
*/
@@ -64,6 +93,7 @@
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
@@ -71,9 +101,28 @@
#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
+/**
+ * state_flags to identify host state along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE (0)
+#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \
+ __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 1)
+
extern unsigned int fnic_log_level;
#define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +219,9 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
+ atomic_t in_flight; /* io counter */
+ u32 _reserved; /* fill hole */
+ unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+ return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
#endif /* _FNIC_H_ */
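The state_flags introduced above gate the IO path: FNIC_FLAGS_FWRESET implies __FNIC_FLAGS_BLOCK_IO, so while a firmware reset is pending fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED) is true and the queuecommand/abort paths back off. The following is a small, purely illustrative sketch of that flag arithmetic (userspace, no host lock); it is not kernel code.

/*
 * Self-contained illustration (userspace, no locking) of the
 * state_flags arithmetic above: FNIC_FLAGS_FWRESET also sets
 * __FNIC_FLAGS_BLOCK_IO, so checking FNIC_FLAGS_IO_BLOCKED tells the
 * IO path to back off while a firmware reset is pending.
 */
#include <stdio.h>

#define BIT(n)			(1UL << (n))
#define __FNIC_FLAGS_FWRESET	BIT(0)
#define __FNIC_FLAGS_BLOCK_IO	BIT(1)
#define FNIC_FLAGS_FWRESET	(__FNIC_FLAGS_FWRESET | __FNIC_FLAGS_BLOCK_IO)
#define FNIC_FLAGS_IO_BLOCKED	(__FNIC_FLAGS_BLOCK_IO)

/* Same test as fnic_chk_state_flags_locked(), minus the host lock. */
static int chk_state_flags(unsigned long state, unsigned long st_flags)
{
	return (state & st_flags) == st_flags;
}

int main(void)
{
	unsigned long state_flags = 0;

	state_flags |= FNIC_FLAGS_FWRESET;	/* fw reset issued */
	printf("io blocked: %d\n",
	       chk_state_flags(state_flags, FNIC_FLAGS_IO_BLOCKED));

	state_flags &= ~FNIC_FLAGS_FWRESET;	/* reset completed */
	printf("io blocked: %d\n",
	       chk_state_flags(state_flags, FNIC_FLAGS_IO_BLOCKED));
	return 0;
}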
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644
index 00000000000..adc1f7f471f
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the trace_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens the debugfs file trace_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
+
+/*
+ * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the variable fnic_tracing_enabled
+ * and stores it into the local @buf. It will start reading the file at
+ * @ppos and copy up to @cnt bytes of data to @ubuf from @buf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int len;
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies data from the user buffer @ubuf into the local
+ * buffer @buf and sets fnic_tracing_enabled according to the user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic_tracing_enabled = val;
+ (*ppos)++;
+
+ return cnt;
+}
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt;
+ fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+ if (!fnic_dbg_prt)
+ return -ENOMEM;
+
+ fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ (3*(trace_max_pages * PAGE_SIZE)));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ file->private_data = fnic_dbg_prt;
+ return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+ loff_t offset,
+ int howto)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ loff_t pos = -1;
+
+ switch (howto) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos = file->f_pos + offset;
+ break;
+ case 2:
+ pos = fnic_dbg_prt->buffer_len - offset;
+ }
+ return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+ -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ fnic_dbg_prt->buffer,
+ fnic_dbg_prt->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+ vfree(fnic_dbg_prt->buffer);
+ kfree(fnic_dbg_prt);
+ return 0;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_debugfs_open,
+ .llseek = fnic_trace_debugfs_lseek,
+ .read = fnic_trace_debugfs_read,
+ .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic debugfs
+ * file system. If not already created, this routine will create the
+ * fnic directory. It will create the file "trace" to log the fnic trace
+ * buffer output into debugfs, and it will also create the file
+ * "tracing_enable" to enable/disable logging into the trace buffer.
+ */
+int fnic_trace_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+ fnic_trace_enable = debugfs_create_file("tracing_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL, &fnic_trace_ctrl_fops);
+
+ if (!fnic_trace_enable) {
+ printk(KERN_DEBUG "Cannot create trace_enable file"
+ " under debugfs");
+ return rc;
+ }
+
+ fnic_trace_debugfs_file = debugfs_create_file("trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL,
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_trace_debugfs_file) {
+ printk(KERN_DEBUG "Cannot create trace file under debugfs");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured this routine removes debugfs file system
+ * elements that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+ if (fnic_trace_debugfs_file) {
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+ }
+ if (fnic_trace_enable) {
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+ }
+ if (fnic_trace_debugfs_root) {
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+ }
+}
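A hedged usage sketch for the two debugfs files created above follows; it assumes debugfs is mounted at /sys/kernel/debug and uses the file names passed to debugfs_create_file() in fnic_trace_debugfs_init() ("fnic/tracing_enable" and "fnic/trace").

/*
 * Hypothetical userspace sketch (not part of this patch): enable fnic
 * tracing and dump the trace buffer through the debugfs files created
 * above. Assumes debugfs is mounted at /sys/kernel/debug; the file
 * names come from the debugfs_create_file() calls in
 * fnic_trace_debugfs_init().
 */
#include <stdio.h>

int main(void)
{
	FILE *f;
	char line[256];

	f = fopen("/sys/kernel/debug/fnic/tracing_enable", "w");
	if (f) {
		fputs("1\n", f);	/* sets fnic_tracing_enabled */
		fclose(f);
	}

	f = fopen("/sys/kernel/debug/fnic/trace", "r");
	if (!f) {
		perror("fnic/trace");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* formatted trace entries */
	fclose(f);
	return 0;
}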
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index f0b896988cd..c35b8f1889e 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -21,7 +21,7 @@
#include <scsi/fc/fc_fcp.h>
#define FNIC_DFLT_SG_DESC_CNT 32
-#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
struct host_sg_desc {
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
};
enum fnic_ioreq_state {
- FNIC_IOREQ_CMD_PENDING = 0,
+ FNIC_IOREQ_NOT_INITED = 0,
+ FNIC_IOREQ_CMD_PENDING,
FNIC_IOREQ_ABTS_PENDING,
FNIC_IOREQ_ABTS_COMPLETE,
FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
u8 sgl_type; /* device DMA descriptor list type */
u8 io_completed:1; /* set to 1 when fw completes IO */
u32 port_id; /* remote port DID */
+ unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
};
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fbf3ac6e0c5..d601ac543c5 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -68,6 +68,10 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+ "for fnic trace buffer");
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
@@ -624,6 +628,9 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
fnic->state = FNIC_IN_FC_MODE;
+ atomic_set(&fnic->in_flight, 0);
+ fnic->state_flags = FNIC_FLAGS_NONE;
+
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
@@ -858,6 +865,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Allocate memory for trace buffer */
+ err = fnic_trace_buf_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Trace buffer initialization Failed "
+ "Fnic Tracing utility is disabled\n");
+ fnic_trace_free();
+ }
+
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -928,6 +943,7 @@ err_create_fnic_ioreq_slab:
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
+ fnic_trace_free();
return err;
}
@@ -939,6 +955,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
+ fnic_trace_free();
}
module_init(fnic_init_module);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index c40ce52ed7c..be99e7549d8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
};
static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
}
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+ unsigned long clearbits)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+ int sh_locked = spin_is_locked(host->host_lock);
+ unsigned long flags = 0;
+
+ if (!sh_locked)
+ spin_lock_irqsave(host->host_lock, flags);
+
+ if (clearbits)
+ fnic->state_flags &= ~st_flags;
+ else
+ fnic->state_flags |= st_flags;
+
+ if (!sh_locked)
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return;
+}
+
+
/*
* fnic_fw_reset_handler
* Routine to send reset msg to fw
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
int ret = 0;
unsigned long flags;
+ /* indicate fwreset to io path */
+ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+ schedule_timeout(msecs_to_jiffies(1));
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret)
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else
+ else {
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
+ }
+
return ret;
}
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_queue_wq_copy_desc failure - no descriptors\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
- struct fc_lport *lp;
+ struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
- struct fnic_io_req *io_req;
- struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fnic *fnic = lport_priv(lp);
struct vnic_wq_copy *wq;
int ret;
- int sg_count;
+ u64 cmd_trace;
+ int sg_count = 0;
unsigned long flags;
unsigned long ptr;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
return 0;
}
- lp = shost_priv(sc->device->host);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
+ atomic_inc(&fnic->in_flight);
+
/*
* Release host lock, use driver resource specific locks from here.
* Don't re-enable interrupts in case they were disabled prior to the
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
+ CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
- fnic = lport_priv(lp);
-
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Map the data buffer */
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, sc->cmnd[0],
+ sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* initialize rest of io_req */
io_req->port_id = rport->port_id;
+ io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
+ CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
sc->scsi_done = done;
/* create copy wq desc and enqueue it */
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, 0, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
+ } else {
+ /* REVISIT: Use per IO lock in the final code */
+ CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
out:
+ cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+ sc->cmnd[5]);
+
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, io_req,
+ sg_count, cmd_trace,
+ (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic_flush_tx(fnic);
reset_cmpl_handler_end:
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
return ret;
}
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
+ u64 *ox_id_tag = (u64 *)(void *)desc;
/* mark the ack state */
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
fnic->fw_ack_recd[0] = 1;
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ FNIC_TRACE(fnic_fcpio_ack_handler,
+ fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ ox_id_tag[4], ox_id_tag[5]);
}
/*
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ u64 cmd_trace;
+ unsigned long start_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl sc is null - "
+ "hdr status = %s tag = 0x%x desc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, desc);
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ fnic->lport->host->host_no, id,
+ ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+ (u64)icmnd_cmpl->_resvd0[0]),
+ ((u64)hdr_status << 16 |
+ (u64)icmnd_cmpl->scsi_status << 8 |
+ (u64)icmnd_cmpl->flags), desc,
+ (u64)icmnd_cmpl->residual, 0);
return;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl ABTS pending hdr status = %s "
+ "sc 0x%p scsi_status %x residual %d\n",
+ fnic_fcpio_status_to_str(hdr_status), sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
+ break;
+ case FCPIO_ABORTED:
+ CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ break;
+ default:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc);
+ break;
+ }
return;
}
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
spin_unlock_irqrestore(io_lock, flags);
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
+ cmd_trace = ((u64)hdr_status << 56) |
+ (u64)icmnd_cmpl->scsi_status << 48 |
+ (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+ (u64)icmnd_cmpl->_resvd0[0] << 48 |
+ jiffies_to_msecs(jiffies - start_time)),
+ desc, cmd_trace,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
-
}
/* fnic_fcpio_itmf_cmpl_handler
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
struct fnic_io_req *io_req;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+ if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+ fnic_fcpio_status_to_str(hdr_status), id);
return;
-
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
- if (id & FNIC_TAG_ABORT) {
+ if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+ /* Abort and terminate completion of device reset req */
+ /* REVISIT : Add asserts about various flags */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset abts cmpl recd. id %x status %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
sc->scsi_done(sc);
+ }
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Terminate pending "
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ /* Need to wait for terminate completion */
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd after time out. "
+ "id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
struct fcpio_fw_req *desc)
{
struct fnic *fnic = vnic_dev_priv(vdev);
- int ret = 0;
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
- ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
break;
case FCPIO_RESET_CMPL: /* fw completed reset */
- ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
break;
default:
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
}
- return ret;
+ return 0;
}
/*
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
if (i == exclude_id)
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We will be here only when FW completes reset
+ * without sending completions for outstanding ios.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd:
" DID_TRANSPORT_DISRUPTED\n");
/* Complete the command to SCSI */
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, i, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
}
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
unsigned long flags;
+ spin_lock_irqsave(host->host_lock, flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_queue_abort_io_req: failure: no descriptors\n");
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
fnic->config.ra_tov, fnic->config.ed_tov);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+
return 0;
}
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
- "fnic_rport_reset_exch called portid 0x%06x\n",
+ "fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n",
+ sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
- " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
- rport->port_name, rport->node_name,
+ " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ rport->port_name, rport->node_name, rport,
rport->port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_terminate_rport_io: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_terminate_rport_io "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
int ret = SUCCESS;
- u32 task_req;
+ u32 task_req = 0;
struct scsi_lun fc_lun;
+ int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic = lport_priv(lp);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
- rport->port_id, sc->device->lun, sc->request->tag);
+ tag = sc->request->tag;
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+ rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ if (task_req == FCPIO_ITMF_ABT_TASK)
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
/*
* We queued an abort IO, wait for its completion.
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
fnic_abort_cmd_end:
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Returning from abort cmd %s\n",
+ "Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
return ret;
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ spin_lock_irqsave(host->host_lock, intr_flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+ return FAILED;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "queue_dr_io_req failure - no descriptors\n");
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ atomic_dec(&fnic->in_flight);
return ret;
}
@@ -1412,7 +1773,7 @@ lr_io_req_end:
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc)
{
- int tag;
+ int tag, abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_lun fc_lun;
struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ enum fnic_ioreq_state old_ioreq_state;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
- BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ /*
+	 * Any pending IO issued prior to the reset is expected to be
+	 * in the abts pending state; if not, we set
+	 * FNIC_IOREQ_ABTS_PENDING to mark the IO as abort pending.
+	 * When the IO completes, it will be handed over and
+	 * handled in this function.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ BUG_ON(io_req->abts_done);
+
+ abt_tag = tag;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
ret = 1;
goto clean_pending_aborts_end;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- ret = 1;
- goto clean_pending_aborts_end;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ continue;
}
io_req->abts_done = NULL;
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* if abort is still pending with fw, fail */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
}
+ schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+ /* walk again to check, if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+ ret = FAILED;
+
clean_pending_aborts_end:
return ret;
}
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates a tag id from the host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag, ret = SCSI_NO_TAG;
+
+ BUG_ON(!bqt);
+ if (!bqt) {
+ pr_err("Tags are not supported\n");
+ goto end;
+ }
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+ if (tag >= bqt->max_depth) {
+ pr_err("Tag allocation failure\n");
+ goto end;
+ }
+ } while (test_and_set_bit(tag, bqt->tag_map));
+
+ bqt->tag_index[tag] = sc->request;
+ sc->request->tag = tag;
+ sc->tag = tag;
+ if (!sc->request->special)
+ sc->request->special = sc;
+
+ ret = tag;
+
+end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * Frees the tag allocated by fnic_scsi_host_start_tag.
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag = sc->request->tag;
+
+ if (tag == SCSI_NO_TAG)
+ return;
+
+ BUG_ON(!bqt || !bqt->tag_index[tag]);
+ if (!bqt)
+ return;
+
+ bqt->tag_index[tag] = NULL;
+ clear_bit(tag, bqt->tag_map);
+
+ return;
+}
+
/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
int ret = FAILED;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
+ struct scsi_lun fc_lun;
+ int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%x\n",
- rport->port_id, sc->device->lun);
+ "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+ rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport))
goto fnic_device_reset_end;
+ CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ /* Allocate tag if not present */
+
+ tag = sc->request->tag;
+ if (unlikely(tag < 0)) {
+ tag = fnic_scsi_host_start_tag(fnic, sc);
+ if (unlikely(tag == SCSI_NO_TAG))
+ goto fnic_device_reset_end;
+ tag_gen_flag = 1;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
- sc->request->tag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
status = CMD_LR_STATUS(sc);
- spin_unlock_irqrestore(io_lock, flags);
/*
* If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status == FCPIO_INVALID_CODE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- goto fnic_device_reset_end;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ spin_unlock_irqrestore(io_lock, flags);
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ /*
+ * Issue abort and terminate on the device reset request.
+		 * If queuing the abort fails, retry it after a delay.
+ */
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic,
+ tag | FNIC_TAG_DEV_RST,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Abort and terminate issued on Device reset "
+ "tag 0x%x sc 0x%p\n", tag, sc);
+ break;
+ }
+ }
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ break;
+ } else {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req->abts_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ }
+ } else {
+ spin_unlock_irqrestore(io_lock, flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean:
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
fnic_device_reset_end:
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ /* free tag if it is allocated */
+ if (unlikely(tag_gen_flag))
+ fnic_scsi_host_end_tag(fnic, sc);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
DECLARE_COMPLETION_ONSTACK(remove_wait);
/* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+
fnic->remove_wait = &remove_wait;
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
struct fnic *fnic = lport_priv(lp);
/* issue fw reset */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset:
fc_exch_mgr_reset(lp, sid, did);
}
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through the tag map to check whether any IOs are still pending;
+ * it returns 1 (true) if at least one is found, otherwise 0 (false).
+ * If @lr_sc is non-NULL, it checks only the IOs on that particular LUN;
+ * otherwise, it checks all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_device *lun_dev = NULL;
+
+ if (lr_sc)
+ lun_dev = lr_sc->device;
+
+ /* walk again to check, if IOs are still pending in fw */
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ ret = 1;
+ continue;
+ }
+ }
+
+ return ret;
+}
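fnic_is_abts_pending() above uses the same per-tag scan pattern as fnic_rport_exch_reset(), fnic_terminate_rport_io() and fnic_clean_pending_aborts(): walk every possible tag, take that command's io_lock, inspect its state, and decide. A minimal user-space sketch of that pattern follows; all names are illustrative (not fnic symbols) and a plain pthread mutex stands in for the driver's per-hash io_lock.

/* Sketch only: per-slot lock + full tag scan, reporting whether any I/O
 * on a given device is still waiting for its abort to complete. */
#include <pthread.h>
#include <stdio.h>

#define MAX_IO_REQ 16

enum ioreq_state { IOREQ_NOT_INITED, IOREQ_CMD_PENDING, IOREQ_ABTS_PENDING };

struct io_slot {
	pthread_mutex_t lock;
	int in_use;
	int device_id;
	enum ioreq_state state;
};

static struct io_slot slots[MAX_IO_REQ];

/* Return 1 if any in-use I/O on @device_id is still abort-pending. */
static int any_abts_pending(int device_id)
{
	int tag, ret = 0;

	for (tag = 0; tag < MAX_IO_REQ; tag++) {
		pthread_mutex_lock(&slots[tag].lock);
		if (slots[tag].in_use &&
		    slots[tag].device_id == device_id &&
		    slots[tag].state == IOREQ_ABTS_PENDING)
			ret = 1;
		pthread_mutex_unlock(&slots[tag].lock);
	}
	return ret;
}

int main(void)
{
	int tag;

	for (tag = 0; tag < MAX_IO_REQ; tag++)
		pthread_mutex_init(&slots[tag].lock, NULL);

	slots[3].in_use = 1;
	slots[3].device_id = 7;
	slots[3].state = IOREQ_ABTS_PENDING;

	printf("abort pending on dev 7: %d\n", any_abts_pending(7));
	printf("abort pending on dev 9: %d\n", any_abts_pending(9));
	return 0;
}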
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644
index 00000000000..23a60e3d852
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
+ *
+ * Description:
+ * This routine gets the next available trace buffer entry location @wr_idx
+ * from the allocated trace buffer pages and gives that memory location
+ * to the caller to store the trace information.
+ *
+ * Return Value:
+ * This routine returns a pointer to the next available trace entry
+ * @fnic_buf_head for the caller to fill with trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+ unsigned long fnic_buf_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+
+ /*
+ * Get next available memory location for writing trace information
+ * at @wr_idx and increment @wr_idx
+ */
+ fnic_buf_head =
+ fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+ fnic_trace_entries.wr_idx++;
+
+ /*
+	 * If the trace buffer is full, wrap @wr_idx around to
+	 * start from zero
+ */
+ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.wr_idx = 0;
+
+ /*
+	 * If the write index @wr_idx has caught up with the read index
+	 * @rd_idx, increment @rd_idx to move to the next entry in the buffer
+ */
+ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+ fnic_trace_entries.rd_idx++;
+ if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.rd_idx = 0;
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+ int rd_idx;
+ int wr_idx;
+ int len = 0;
+ unsigned long flags;
+ char str[KSYM_SYMBOL_LEN];
+ struct timespec val;
+ fnic_trace_data_t *tbp;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+ rd_idx = fnic_trace_entries.rd_idx;
+ wr_idx = fnic_trace_entries.wr_idx;
+ if (wr_idx < rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+				 * If rd_idx has reached the maximum number of
+				 * trace entries, then wrap rd_idx back to zero
+ */
+ if (rd_idx > (fnic_max_trace_entries-1))
+ rd_idx = 0;
+ /*
+				 * Continue dumping trace buffer entries into
+				 * the memory file till rd_idx reaches the
+				 * write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ } else if (wr_idx > rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+				 * Continue dumping trace buffer entries into
+				 * the memory file till rd_idx reaches the
+				 * write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return len;
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize trace buffer data structure by allocating required memory and
+ * setting page_offset information for every trace entry by adding trace entry
+ * length to previous page_offset value.
+ */
+int fnic_trace_buf_init(void)
+{
+ unsigned long fnic_buf_head;
+ int i;
+ int err = 0;
+
+ trace_max_pages = fnic_trace_max_pages;
+ fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+ FNIC_ENTRY_SIZE_BYTES;
+
+ fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ if (!fnic_trace_buf_p) {
+ printk(KERN_ERR PFX "Failed to allocate memory "
+ "for fnic_trace_buf_p\n");
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+ fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+ sizeof(unsigned long));
+ if (!fnic_trace_entries.page_offset) {
+ printk(KERN_ERR PFX "Failed to allocate memory for"
+ " page_offset\n");
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_entries.page_offset, 0,
+ (fnic_max_trace_entries * sizeof(unsigned long)));
+ fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+ fnic_buf_head = fnic_trace_buf_p;
+
+ /*
+ * Set page_offset field of fnic_trace_entries struct by
+ * calculating memory location for every trace entry using
+ * length of each trace entry
+ */
+ for (i = 0; i < fnic_max_trace_entries; i++) {
+ fnic_trace_entries.page_offset[i] = fnic_buf_head;
+ fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+ }
+ err = fnic_trace_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+ goto err_fnic_trace_debugfs_init;
+ }
+ printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+ return err;
+err_fnic_trace_debugfs_init:
+ fnic_trace_free();
+err_fnic_trace_buf_init:
+ return err;
+}
+
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+ fnic_tracing_enabled = 0;
+ fnic_trace_debugfs_terminate();
+ if (fnic_trace_entries.page_offset) {
+ vfree((void *)fnic_trace_entries.page_offset);
+ fnic_trace_entries.page_offset = NULL;
+ }
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
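fnic_trace_get_buf() above hands out trace slots from a circular buffer: the write index always advances, and when it catches the read index the oldest unread entry is overwritten by nudging rd_idx forward. A small user-space model of that wrap logic, assuming a tiny four-entry buffer and hypothetical names:

/* Sketch only: circular trace buffer where the writer never blocks and
 * the oldest unread record is dropped when the writer laps the reader. */
#include <stdio.h>

#define MAX_ENTRIES 4

static int wr_idx, rd_idx;

/* Return the slot the next trace record should be written to. */
static int trace_get_slot(void)
{
	int slot = wr_idx;

	wr_idx = (wr_idx + 1) % MAX_ENTRIES;          /* wrap write index */
	if (wr_idx == rd_idx)                         /* writer caught reader */
		rd_idx = (rd_idx + 1) % MAX_ENTRIES;  /* drop oldest entry */
	return slot;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("record %d -> slot %d (rd_idx now %d)\n",
		       i, trace_get_slot(), rd_idx);
	return 0;
}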
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644
index 00000000000..cef42b4c4d6
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+ size_t count,
+ loff_t *ppos,
+ const void *from,
+ size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+ int wr_idx;
+ int rd_idx;
+ unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+ int buffer_len;
+ char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ };
+ u64 val;
+ } timestamp, fnaddr;
+ u32 host_no;
+ u32 tag;
+ u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+#define FNIC_TRACE_ENTRY_SIZE \
+ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \
+ if (unlikely(fnic_tracing_enabled)) { \
+ fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+ if (trace_buf) { \
+ if (sizeof(unsigned long) < 8) { \
+ trace_buf->timestamp.low = jiffies; \
+ trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+ } else { \
+ trace_buf->timestamp.val = jiffies; \
+ trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+ } \
+ trace_buf->host_no = _hn; \
+ trace_buf->tag = _t; \
+ trace_buf->data[0] = (u64)(unsigned long)_a; \
+ trace_buf->data[1] = (u64)(unsigned long)_b; \
+ trace_buf->data[2] = (u64)(unsigned long)_c; \
+ trace_buf->data[3] = (u64)(unsigned long)_d; \
+ trace_buf->data[4] = (u64)(unsigned long)_e; \
+ } \
+ }
+
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
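The FNIC_TRACE() macro stores five opaque u64 words per record; the call sites added in fnic_scsi.c pack selected CDB bytes into one word and the command flags/state pair into another, as the expressions in fnic_abort_cmd() and fnic_device_reset() show. A standalone sketch of that packing, with the matching unpack a reader of the trace would perform; the CDB and flag values below are made up for illustration:

/* Sketch only: fold CDB bytes and flags/state into single u64 trace words. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t cmnd[6] = { 0x2a, 0x00, 0x12, 0x34, 0x56, 0x78 }; /* WRITE(10) */
	uint32_t flags = 0x41, state = 0x5;

	/* same layout as the FNIC_TRACE() arguments in the patch */
	uint64_t cdb_word = ((uint64_t)cmnd[0] << 32) |
			    ((uint64_t)cmnd[2] << 24) | ((uint64_t)cmnd[3] << 16) |
			    ((uint64_t)cmnd[4] << 8) | cmnd[5];
	uint64_t fs_word = ((uint64_t)flags << 32) | state;

	printf("cdb word   = 0x%llx\n", (unsigned long long)cdb_word);
	printf("flag/state = 0x%llx\n", (unsigned long long)fs_word);

	/* unpacking on the reader side */
	printf("opcode=0x%x flags=0x%x state=0x%x\n",
	       (unsigned)(cdb_word >> 32), (unsigned)(fs_word >> 32),
	       (unsigned)(fs_word & 0xffffffff));
	return 0;
}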
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4f338061b5c..7f4f790a3d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
@@ -1131,7 +1131,7 @@ clean:
return -ENOMEM;
}
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h,
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ chain_sg->Addr.lower = 0;
+ chain_sg->Addr.upper = 0;
+ return -1;
+ }
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+ return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
}
}
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev,
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
- return;
+ return 0;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ if (dma_mapping_error(&pdev->dev, addr64)) {
+ /* Prevent subsequent unmap of something never mapped */
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return -1;
+ }
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ return 0;
}
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
return -ENOMEM;
}
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no data buffer to map. */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+ NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
- fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
- buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
- hpsa_map_sg_chain_block(h, cp);
+ if (hpsa_map_sg_chain_block(h, cp)) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
return 0;
}
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
- /* Send the Test Unit Ready */
- fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no buffer to map */
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ 0, 0, scsi3addr, TYPE_MSG);
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
+ int rc = 0;
if (!argp)
return -EINVAL;
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out_kfree;
}
} else {
memset(buff, 0, iocommand.buf_size);
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c = cmd_special_alloc(h);
if (c == NULL) {
- kfree(buff);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_kfree;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[0].Addr.lower = 0;
+ c->SG[0].Addr.upper = 0;
+ c->SG[0].Len = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
}
- kfree(buff);
+out:
cmd_special_free(h, c);
- return 0;
+out_kfree:
+ kfree(buff);
+ return rc;
}
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[i].Addr.lower = 0;
+ c->SG[i].Addr.upper = 0;
+ c->SG[i].Len = 0;
+ hpsa_pci_unmap(h->pdev, c, i,
+ PCI_DMA_BIDIRECTIONAL);
+ status = -ENOMEM;
+ goto cleanup1;
+ }
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
@@ -3190,7 +3238,8 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ /* fill_cmd can't fail here, no data buffer to map */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
@@ -3202,7 +3251,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
return 0;
}
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);
BUG();
- return;
+ return -1;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
-
- hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
- return;
+ if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ return -1;
+ return 0;
}
/*
@@ -4882,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
- fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
- RAID_CTLR_LUNID, TYPE_CMD);
+ if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD)) {
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
+out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 8fa79b83f2d..f328089a106 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6112,7 +6112,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead)) {
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -6121,7 +6121,7 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
- if (unlikely(hrrq->ioa_is_dead || !res)) {
+ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
goto err_nodev;
}
@@ -6741,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ipr_trace;
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -8494,7 +8497,8 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
wmb();
- scsi_block_requests(ioa_cfg->host);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ scsi_block_requests(ioa_cfg->host);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
@@ -8549,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_fail_all_ops(ioa_cfg);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
return;
} else {
ioa_cfg->in_ioa_bringdown = 1;
@@ -9695,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev)
{
unsigned long host_lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9704,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].removing_ioa = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 1a9a246932a..21a6ff1ed5c 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -493,6 +493,7 @@ struct ipr_hrr_queue {
u8 allow_interrupts:1;
u8 ioa_is_dead:1;
u8 allow_cmds:1;
+ u8 removing_ioa:1;
struct blk_iopoll iopoll;
};
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index fcb9d0b20ee..09c81b2f216 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1381,10 +1381,10 @@ static void fc_fcp_timeout(unsigned long data)
fsp->state |= FC_SRB_FCP_PROCESSING_TMO;
- if (fsp->state & FC_SRB_RCV_STATUS)
- fc_fcp_complete_locked(fsp);
- else if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
+ if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_rec(fsp);
+ else if (fsp->state & FC_SRB_RCV_STATUS)
+ fc_fcp_complete_locked(fsp);
else
fc_fcp_recovery(fsp, FC_TIMED_OUT);
fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index c2830cc66d6..b74189d8932 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -41,25 +41,25 @@ extern unsigned int fc_debug_logging;
#define FC_LIBFC_DBG(fmt, args...) \
FC_CHECK_LOGGING(FC_LIBFC_LOGGING, \
- printk(KERN_INFO "libfc: " fmt, ##args))
+ pr_info("libfc: " fmt, ##args))
#define FC_LPORT_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_LPORT_LOGGING, \
- printk(KERN_INFO "host%u: lport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (lport)->port_id, ##args))
+ pr_info("host%u: lport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (lport)->port_id, ##args))
-#define FC_DISC_DBG(disc, fmt, args...) \
- FC_CHECK_LOGGING(FC_DISC_LOGGING, \
- printk(KERN_INFO "host%u: disc: " fmt, \
- fc_disc_lport(disc)->host->host_no, \
- ##args))
+#define FC_DISC_DBG(disc, fmt, args...) \
+ FC_CHECK_LOGGING(FC_DISC_LOGGING, \
+ pr_info("host%u: disc: " fmt, \
+ fc_disc_lport(disc)->host->host_no, \
+ ##args))
#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...) \
FC_CHECK_LOGGING(FC_RPORT_LOGGING, \
- printk(KERN_INFO "host%u: rport %6.6x: " fmt, \
- (lport)->host->host_no, \
- (port_id), ##args))
+ pr_info("host%u: rport %6.6x: " fmt, \
+ (lport)->host->host_no, \
+ (port_id), ##args))
#define FC_RPORT_DBG(rdata, fmt, args...) \
FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
@@ -70,13 +70,13 @@ extern unsigned int fc_debug_logging;
if ((pkt)->seq_ptr) { \
struct fc_exch *_ep = NULL; \
_ep = fc_seq_exch((pkt)->seq_ptr); \
- printk(KERN_INFO "host%u: fcp: %6.6x: " \
+ pr_info("host%u: fcp: %6.6x: " \
"xid %04x-%04x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, \
(_ep)->oxid, (_ep)->rxid, ##args); \
} else { \
- printk(KERN_INFO "host%u: fcp: %6.6x: " fmt, \
+ pr_info("host%u: fcp: %6.6x: " fmt, \
(pkt)->lp->host->host_no, \
(pkt)->rport->port_id, ##args); \
} \
@@ -84,14 +84,14 @@ extern unsigned int fc_debug_logging;
#define FC_EXCH_DBG(exch, fmt, args...) \
FC_CHECK_LOGGING(FC_EXCH_LOGGING, \
- printk(KERN_INFO "host%u: xid %4x: " fmt, \
- (exch)->lp->host->host_no, \
- exch->xid, ##args))
+ pr_info("host%u: xid %4x: " fmt, \
+ (exch)->lp->host->host_no, \
+ exch->xid, ##args))
#define FC_SCSI_DBG(lport, fmt, args...) \
FC_CHECK_LOGGING(FC_SCSI_LOGGING, \
- printk(KERN_INFO "host%u: scsi: " fmt, \
- (lport)->host->host_no, ##args))
+ pr_info("host%u: scsi: " fmt, \
+ (lport)->host->host_no, ##args))
/*
* FC-4 Providers.
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 83aa1efec87..d518d17e940 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -582,7 +582,7 @@ static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
struct fc_frame *fp)
{
- unsigned long delay = FC_DEF_E_D_TOV;
+ unsigned long delay = msecs_to_jiffies(FC_DEF_E_D_TOV);
/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (PTR_ERR(fp) == -FC_EX_CLOSED)
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 55b6fc83ad7..74b67d98e95 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6644,7 +6644,7 @@ static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
- MAILBOX_t *mb;
+ MAILBOX_t *mbx;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy, hc_copy;
@@ -6698,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
psli = &phba->sli;
- mb = &pmbox->u.mb;
+ mbx = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6713,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6767,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -6777,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
return MBX_BUSY;
@@ -6796,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
- (mb->mbxCommand != MBX_KILL_BOARD)) {
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
@@ -6818,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
- if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
}
@@ -6842,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
- mb->mbxOwner = OWN_CHIP;
+ mbx->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= (uint8_t *)phba->mbox_ext
- (uint8_t *)phba->mbox;
}
@@ -6859,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
} else {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
@@ -6873,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->context2, pmbox->in_ext_byte_len);
}
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((uint32_t *)mb);
+ ldata = *((uint32_t *)mbx);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
@@ -6965,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
@@ -6992,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -7001,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
} else {
/* First copy command data */
- lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -7016,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- status = mb->mbxStatus;
+ status = mbx->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 3b2365c8eab..408d2548a74 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.504.01.00-rc1"
-#define MEGASAS_RELDATE "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "06.506.00.00-rc1"
+#define MEGASAS_RELDATE "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013"
/*
* Device IDs
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 66a0fec0437..9d53540207e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v06.504.01.00-rc1
+ * Version : v06.506.00.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 74030aff69a..a7d56687bfc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+ io_request->EEDPBlockSize = scp->device->sector_size;
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
- io_request->DevHandle =
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index a7c64f05199..f68a3cd11d5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -61,7 +61,6 @@
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
-#define MEGASAS_EEDPBLOCKSIZE 512
/*
* Raid context flags
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 5e24e7e7371..bcb23d28b3e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -2023,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25LB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ break;
default:
break;
}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index c6ee7aad750..4caaac13682 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -165,6 +165,10 @@
"Intel(R) Integrated RAID Module RMS25KB080"
#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
"Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB080"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
@@ -180,6 +184,8 @@
#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 078c63913b5..532110f4562 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
int elem, rc, i;
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct domain_device *dev = task->dev;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct scatterlist *sg_req, *sg_resp;
u32 req_len, resp_len, tag = tei->tag;
void *buf_tmp;
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
TXQ_MODE_I | tag |
- (sas_port->phy_mask << TXQ_PHY_SHIFT));
+ (MVS_PHY_ID << TXQ_PHY_SHIFT));
hdr->flags |= flags;
hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+ (MVS_PHY_ID << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 2ae77a0394b..9f3cc13a5ce 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -76,6 +76,7 @@ extern struct kmem_cache *mvs_task_list_cache;
(__mc) != 0 ; \
(++__lseq), (__mc) >>= 1)
+#define MVS_PHY_ID (1U << sas_phy->id)
#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
#define UNASSOC_D2H_FIS(id) \
((void *) mvi->rx_fis + 0x100 * id)
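
The new MVS_PHY_ID macro expands to (1U << sas_phy->id), so it only works where a local struct asd_sas_phy *sas_phy is in scope -- which is why mvs_task_prep_smp() and mvs_task_prep_ata() above gain the sha/sphy/sas_phy locals. A minimal sketch of how the pieces fit together, using only the fields visible in the hunks above (the helper name is illustrative, not part of the driver):

/* Sketch: how MVS_PHY_ID resolves inside a prep routine. */
static u32 example_tx_phy_bits(struct mvs_info *mvi, struct domain_device *dev)
{
	struct sas_ha_struct *sha = mvi->sas;
	struct sas_phy *sphy = dev->phy;
	struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];

	/* MVS_PHY_ID == (1U << sas_phy->id): a single-phy mask instead of
	 * the whole port's phy_mask that was used before this change. */
	return MVS_PHY_ID << TXQ_PHY_SHIFT;
}
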
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index c06b8e5aa2c..d8293f25ca3 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od,
odi->osdname_len = get_attrs[a].len;
/* Avoid NULL for memcmp optimization 0-length is good enough */
odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+ if (!odi->osdname) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (odi->osdname_len)
memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 4c9fe733fe8..3d5e522e00f 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -140,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
pci_free_consistent(pm8001_ha->pdev,
- pm8001_ha->memoryMap.region[i].element_size,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
pm8001_ha->memoryMap.region[i].phys_addr);
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 83d798428c1..1d82eef4e1e 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval = QLA_FUNCTION_FAILED;
- uint16_t temp, frac;
+ uint16_t temp = 0;
- if (!vha->hw->flags.thermal_supported)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (!vha->hw->thermal_support) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Thermal not supported by this card.\n");
+ goto done;
+ }
- temp = frac = 0;
- if (qla2x00_reset_active(vha))
- ql_log(ql_log_warn, vha, 0x707b,
- "ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
- if (rval != QLA_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (qla2x00_reset_active(vha)) {
+ ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+ goto done;
+ }
+
+ if (vha->hw->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+ goto done;
+ }
+
+ if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 9f34dedcdad..ad54099cb80 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -27,7 +27,7 @@ void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- mempool_free(sp, vha->hw->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
}
int
@@ -368,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_unmap_sg;
}
@@ -515,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_free_fcport;
}
@@ -531,6 +531,75 @@ done_unmap_sg:
done:
return rval;
}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ int wait, int wait2)
+{
+ int ret = 0;
+ int rval = 0;
+ uint16_t new_config[4];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ goto done_reset_internal;
+
+ memset(new_config, 0 , sizeof(new_config));
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
+ new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
+
+ ha->notify_dcbx_comp = wait;
+ ha->notify_lb_portup_comp = wait2;
+
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "DCBX completion not received.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "DCBX completion received.\n");
+
+ if (wait2 &&
+ !wait_for_completion_timeout(&ha->lb_portup_comp,
+ (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x70c5,
+ "Port up completion not received.\n");
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x70c6,
+ "Port up completion received.\n");
+
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ }
+done_reset_internal:
+ return rval;
+}
+
/*
* Set the port configuration to enable the internal or external loopback
* depending on the loopback mode.
@@ -566,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+ if (!wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
ql_dbg(ql_dbg_user, vha, 0x7022,
- "State change notification not received.\n");
+ "DCBX completion not received.\n");
+ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+ /*
+ * If the reset of the loopback mode doesn't work take a FCoE
+ * dump and reset the chip.
+ */
+ if (ret) {
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
rval = -EINVAL;
} else {
if (ha->flags.idc_compl_status) {
@@ -578,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
ha->flags.idc_compl_status = 0;
} else
ql_dbg(ql_dbg_user, vha, 0x7023,
- "State change received.\n");
+ "DCBX completion received.\n");
}
ha->notify_dcbx_comp = 0;
@@ -587,57 +666,6 @@ done_set_internal:
return rval;
}
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
- int wait)
-{
- int ret = 0;
- int rval = 0;
- uint16_t new_config[4];
- struct qla_hw_data *ha = vha->hw;
-
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
- goto done_reset_internal;
-
- memset(new_config, 0 , sizeof(new_config));
- if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_INTERNAL_LOOPBACK ||
- (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_EXTERNAL_LOOPBACK) {
- new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
- ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
- (new_config[0] & INTERNAL_LOOPBACK_MASK));
- memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
- ha->notify_dcbx_comp = wait;
- ret = qla81xx_set_port_config(vha, new_config);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7025,
- "Set port config failed.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- }
-
- /* Wait for DCBX complete event */
- if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
- (20 * HZ))) {
- ql_dbg(ql_dbg_user, vha, 0x7026,
- "State change notification not received.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- } else
- ql_dbg(ql_dbg_user, vha, 0x7027,
- "State change received.\n");
-
- ha->notify_dcbx_comp = 0;
- }
-done_reset_internal:
- return rval;
-}
-
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
@@ -739,6 +767,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
+
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
@@ -746,6 +775,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
goto done_free_dma_rsp;
}
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+ ql_dbg(ql_dbg_user, vha, 0x70c4,
+ "Loopback operation already in "
+ "progress.\n");
+ rval = -EAGAIN;
+ goto done_free_dma_rsp;
+ }
+
ql_dbg(ql_dbg_user, vha, 0x70c0,
"elreq.options=%04x\n", elreq.options);
@@ -755,7 +792,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
config, new_config, elreq.options);
else
rval = qla81xx_reset_loopback_mode(vha,
- config, 1);
+ config, 1, 0);
else
rval = qla81xx_set_loopback_mode(vha, config,
new_config, elreq.options);
@@ -772,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
- if (new_config[0]) {
- /* Revert back to original port config
- * Also clear internal loopback
- */
- qla81xx_reset_loopback_mode(vha,
- new_config, 0);
- }
-
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
ql_log(ql_log_warn, vha, 0x7029,
@@ -788,15 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
- if (qla81xx_restart_mpi_firmware(vha) !=
- QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x702a,
- "MPI reset failed.\n");
+ if (IS_QLA81XX(ha)) {
+ if (qla81xx_restart_mpi_firmware(vha) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
+ }
}
rval = -EIO;
goto done_free_dma_rsp;
}
+
+ if (new_config[0]) {
+ int ret;
+
+ /* Revert back to original port config
+ * Also clear internal loopback
+ */
+ ret = qla81xx_reset_loopback_mode(vha,
+ new_config, 0, 1);
+ if (ret) {
+ /*
+ * If the reset of the loopback mode
+ * doesn't work take FCoE dump and then
+ * reset the chip.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+
+ }
+
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
ql_dbg(ql_dbg_user, vha, 0x702b,
@@ -1950,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
@@ -1985,6 +2038,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
return 0;
}
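
The reworked qla81xx_reset_loopback_mode() above takes two wait flags: wait gates the DCBX-completion wait and the new wait2 gates a port-up wait on ha->lb_portup_comp, bounded by DCBX_COMP_TIMEOUT and LB_PORTUP_COMP_TIMEOUT seconds respectively. A minimal sketch of that two-stage wait, assuming the completions are signalled from the async-event path as in the qla_isr.c hunks further down (the helper name is illustrative):

/* Sketch: wait for up to two firmware events after a port-config change. */
static int example_wait_port_events(struct qla_hw_data *ha, int wait_dcbx,
	int wait_portup)
{
	if (wait_dcbx &&
	    !wait_for_completion_timeout(&ha->dcbx_comp,
	    DCBX_COMP_TIMEOUT * HZ))
		return -EINVAL;	/* no DCBX-complete event in time */

	if (wait_portup &&
	    !wait_for_completion_timeout(&ha->lb_portup_comp,
	    LB_PORTUP_COMP_TIMEOUT * HZ))
		return -EINVAL;	/* link did not come back up in time */

	return 0;
}
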
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 37b8b7ba742..e9f6b9bbf29 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 53f9e492f9d..1626de52e32 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -11,20 +11,21 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0125 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x114f | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
+ * | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
@@ -35,11 +36,11 @@
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe06f | |
- * | Target Mode Management | 0xf071 | |
+ * | Target Mode | 0xe070 | |
+ * | Target Mode Management | 0xf072 | |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 8f911c0b1e7..35e20b4f8b6 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 6e7727f46d4..c6509911772 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -37,6 +37,7 @@
#include "qla_nx.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
+#define QLA2XXX_MANUFACTURER "QLogic Corporation"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -253,8 +254,8 @@
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS 1024
+#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
@@ -537,6 +538,8 @@ struct device_reg_25xxmq {
uint32_t req_q_out;
uint32_t rsp_q_in;
uint32_t rsp_q_out;
+ uint32_t atio_q_in;
+ uint32_t atio_q_out;
};
typedef union {
@@ -563,6 +566,9 @@ typedef union {
&(reg)->u.isp2100.mailbox5 : \
&(reg)->u.isp2300.rsp_q_out)
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
#define MAILBOX_REG(ha, reg, num) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
(num < 8 ? \
@@ -762,8 +768,8 @@ typedef struct {
#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
-#define MBC_DATA_RATE 0x5d /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */
+#define MBC_DATA_RATE 0x5d /* Data Rate */
#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */
/* Initialization Procedure */
@@ -809,6 +815,7 @@ typedef struct {
#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */
#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
#define MBC_PORT_RESET 0x120 /* Port Reset */
#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
@@ -856,6 +863,9 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
+#define RNID_TYPE_ASIC_TEMP 0xC
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1841,9 +1851,6 @@ typedef struct fc_port {
uint8_t scan_state;
} fc_port_t;
-#define QLA_FCPORT_SCAN_NONE 0
-#define QLA_FCPORT_SCAN_FOUND 1
-
/*
* Fibre channel port/lun states.
*/
@@ -2533,8 +2540,10 @@ struct req_que {
uint16_t qos;
uint16_t vp_idx;
struct rsp_que *rsp;
- srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+ srb_t **outstanding_cmds;
uint32_t current_outstanding_cmd;
+ uint16_t num_outstanding_cmds;
+#define MAX_Q_DEPTH 32
int max_q_depth;
};
@@ -2557,11 +2566,13 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
+ uint32_t __iomem *atio_q_in;
+ uint32_t __iomem *atio_q_out;
void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt *qla_tgt;
- struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
@@ -2618,7 +2629,6 @@ struct qla_hw_data {
uint32_t nic_core_hung:1;
uint32_t quiesce_owner:1;
- uint32_t thermal_supported:1;
uint32_t nic_core_reset_hdlr_active:1;
uint32_t nic_core_reset_owner:1;
uint32_t isp82xx_no_md_cap:1;
@@ -2788,6 +2798,8 @@ struct qla_hw_data {
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
/* HBA serial number */
uint8_t serial0;
@@ -2870,7 +2882,13 @@ struct qla_hw_data {
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
+ struct completion lb_portup_comp; /* Used to wait for link up during
+ * loopback */
+#define DCBX_COMP_TIMEOUT 20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
int notify_dcbx_comp;
+ int notify_lb_portup_comp;
struct mutex selflogin_lock;
/* Basic firmware related information. */
@@ -2887,6 +2905,7 @@ struct qla_hw_data {
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
uint16_t fw_xcb_count;
+ uint16_t fw_iocb_count;
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
@@ -3056,7 +3075,16 @@ struct qla_hw_data {
struct work_struct idc_state_handler;
struct work_struct nic_core_unrecoverable;
+#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ)
+ unsigned long host_last_rampdown_time;
+ unsigned long host_last_rampup_time;
+ int cfg_lun_q_depth;
+
struct qlt_hw_data tgt;
+ uint16_t thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
};
/*
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host {
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
+#define HOST_RAMP_UP_QUEUE_DEPTH 23
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map {
#define NVRAM_DELAY() udelay(10)
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
/*
* Flash support definitions
*/
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 706c4f7bc7c..792a29294b6 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index be6d61a89ed..1ac2b0e3a0e 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -300,7 +300,8 @@ struct init_cb_24xx {
uint32_t prio_request_q_address[2];
uint16_t msix;
- uint8_t reserved_2[6];
+ uint16_t msix_atio;
+ uint8_t reserved_2[4];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
@@ -1387,9 +1388,7 @@ struct qla_flt_header {
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
#define FLT_REG_FCOE_FW 0xA4
-#define FLT_REG_FCOE_VPD_0 0xA9
#define FLT_REG_FCOE_NVRAM_0 0xAA
-#define FLT_REG_FCOE_VPD_1 0xAB
#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2411d1a12b2..eb3ca21a7f1 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
extern void qla84xx_put_chip(struct scsi_qla_host *);
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
extern void qla83xx_reset_ownership(scsi_qla_host_t *);
extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -94,6 +97,7 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -278,6 +282,9 @@ extern int
qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
+extern int
qla2x00_lip_reset(scsi_qla_host_t *);
extern int
@@ -351,6 +358,9 @@ extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 01efc0e9cc3..9b455250c10 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1328,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
- strcpy(eiter->a.manufacturer, "QLogic Corporation");
- alen = strlen(eiter->a.manufacturer);
+ alen = strlen(QLA2XXX_MANUFACTURER);
+ strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1649,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
- alen = strlen(eiter->a.os_dev_name);
+ alen = strlen(QLA2XXX_DRIVER_NAME);
+ strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 563eee3fa92..edf4d14a133 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- mempool_free(sp, vha->hw->srb_mempool);
-
- QLA_VHA_MARK_NOT_BUSY(vha);
+ qla2x00_rel_sp(vha, sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->flags.thermal_supported = 1;
+ ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1559,6 +1559,47 @@ done:
return rval;
}
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+ /* Don't try to reallocate the array */
+ if (req->outstanding_cmds)
+ return QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+ (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+ else {
+ if (ha->fw_xcb_count <= ha->fw_iocb_count)
+ req->num_outstanding_cmds = ha->fw_xcb_count;
+ else
+ req->num_outstanding_cmds = ha->fw_iocb_count;
+ }
+
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ /*
+ * Try to allocate a minimal size just so we can get through
+ * initialization.
+ */
+ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ ql_log(ql_log_fatal, NULL, 0x0126,
+ "Failed to allocate memory for "
+ "outstanding_cmds for req_que %p.\n", req);
+ req->num_outstanding_cmds = 0;
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ return QLA_SUCCESS;
+}
+
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @ha: HA context
@@ -1628,9 +1669,18 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha, NULL,
- &ha->fw_xcb_count, NULL, NULL,
+ &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
&ha->max_npiv_vports, NULL);
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha,
+ vha->req);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
if (!fw_major_version && ql2xallocfwdump
&& !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
@@ -1914,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
- qlt_24xx_config_rings(vha, reg);
+ qlt_24xx_config_rings(vha);
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
@@ -1948,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@@ -2157,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -2170,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
} else {
ql_log(ql_log_warn, vha, 0x2009,
"Unable to get host loop ID.\n");
+ if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+ ql_log(ql_log_warn, vha, 0x1151,
+ "Doing link init.\n");
+ if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+ return rval;
+ }
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
@@ -2690,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -3103,7 +3160,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport;
+ fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -3141,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- break;
+ return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -3173,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /* Add new ports to existing port list */
- list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
+ /*
+ * Logout all previous fabric devices marked lost, except
+ * FCP2 devices.
+ */
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3190,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- /* Logout lost/gone fabric devices (non-FCP2) */
- if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3204,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
+ fcport->loop_id = FC_NO_LOOP_ID;
}
- continue;
}
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
- /* Login fabric devices that need a login */
- if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
- atomic_read(&vha->loop_down_timer) == 0) {
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- continue;
- }
+ }
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
+ /*
+ * Scan through our port list and login entries that need to be
+ * logged in.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
}
}
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ }
+
+ /* Exit if out of loop IDs. */
+ if (rval != QLA_SUCCESS) {
+ break;
+ }
+
+ /*
+ * Login and add the new devices to our port list.
+ */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+ list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3263,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
int first_dev, last_dev;
port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
rval = QLA_SUCCESS;
@@ -3377,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
continue;
/* Bypass virtual ports of the same host. */
- found = 0;
- if (ha->num_vhosts) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (new_fcport->d_id.b24 == vp->d_id.b24) {
- found = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- if (found)
- continue;
- }
+ if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+ continue;
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
@@ -3417,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
@@ -5004,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return rval;
}
-#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
@@ -5529,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (IS_T10_PI_CAPABLE(ha))
nv->frame_payload_size &= ~7;
+ qlt_81xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -5569,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLE8XXX");
+ qlt_81xx_config_nvram_stage2(vha, icb);
+
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index c0462c04c88..68e2c4afc13 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -198,6 +198,13 @@ done:
}
static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+ mempool_free(sp, vha->hw->srb_mempool);
+ QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
+static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
init_timer(&sp->u.iocb_cmd.timer);
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
{
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
+
+static inline void
+qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
+{
+ if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
+ if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
+ HOST_QUEUE_RAMPDOWN_INTERVAL)))
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
+ if (time_before(jiffies, (vha->hw->host_last_rampup_time +
+ HOST_QUEUE_RAMPUP_INTERVAL)))
+ return;
+
+ set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
+}
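
qla2x00_do_host_ramp_up() above only requests a ramp-up when the configured depth has not yet been reached and both time gates have expired: HOST_QUEUE_RAMPDOWN_INTERVAL since the last ramp-down and HOST_QUEUE_RAMPUP_INTERVAL since the last ramp-up; the actual depth change is deferred to the DPC thread via HOST_RAMP_UP_QUEUE_DEPTH. A generic sketch of one such time gate, assuming only jiffies arithmetic (the helper is illustrative, not part of the driver):

/* Sketch: a single ramp gate.  last_event is a jiffies timestamp recorded
 * when the previous ramp happened; interval is a duration in jiffies. */
static bool example_ramp_gate_open(unsigned long last_event,
	unsigned long interval)
{
	/* time_before() is wrap-safe; the gate stays closed until
	 * "now" reaches last_event + interval. */
	return !time_before(jiffies, last_event + interval);
}
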
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a481684479c..d2630317cce 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
- }
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Compute number of required data segments */
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on outstanding cmd array.\n");
goto queuing_error;
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2767,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
rval = EXT_STATUS_BUSY;
goto queuing_error;
}
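
Each start-I/O path above replaces the compile-time MAX_OUTSTANDING_COMMANDS bound with the per-queue req->num_outstanding_cmds, but the search itself is unchanged: a circular scan of req->outstanding_cmds that begins just after the last handle issued, with handle 0 reserved. A sketch of that scan factored into one helper (the helper does not exist in the driver; it only restates the loop the hunks repeat):

/* Sketch: find a free IOCB handle in the dynamically sized array.
 * Returns 0 when the array is full (handle 0 is never used). */
static uint32_t example_find_free_handle(struct req_que *req)
{
	uint32_t handle = req->current_outstanding_cmd;
	uint16_t index;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}
	return 0;	/* no room in the outstanding command array */
}
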
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 873c82014b1..e9dbd74c20d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -13,6 +13,8 @@
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
+#include "qla_target.h"
+
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
@@ -489,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
ql_log(ql_log_info, vha, 0x506a,
"IDC Device-State changed = 0x%x.\n", mb[4]);
+ if (ha->flags.nic_core_reset_owner)
+ return;
qla83xx_schedule_work(vha, MBA_IDC_AEN);
}
}
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ uint32_t vp_did;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ha->num_vhosts)
+ return ret;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ vp_did = vp->d_id.b24;
+ if (vp_did == rscn_entry) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return ret;
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -899,6 +928,10 @@ skip_rio:
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
+ /* Skip RSCNs for virtual ports on the same physical port */
+ if (qla2x00_is_a_vp_did(vha, rscn_entry))
+ break;
+
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -983,14 +1016,25 @@ skip_rio:
mb[1], mb[2], mb[3]);
break;
case MBA_IDC_NOTIFY:
- /* See if we need to quiesce any I/O */
- if (IS_QLA8031(vha->hw))
- if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
- (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+ if (IS_QLA8031(vha->hw)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+ (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+ (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ /*
+ * Extend loop down timer since port is active.
+ */
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
qla2xxx_wake_dpc(vha);
}
+ }
case MBA_IDC_COMPLETE:
+ if (ha->notify_lb_portup_comp)
+ complete(&ha->lb_portup_comp);
+ /* Fallthru */
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
qla81xx_idc_event(vha, mb[0], mb[1]);
@@ -1029,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x3014,
"Invalid SCSI command index (%x).\n", index);
@@ -1067,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
uint16_t index;
index = LSW(pkt->handle);
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
"Invalid command index (%x).\n", index);
if (IS_QLA82XX(ha))
@@ -1740,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
sts24 = (struct sts_entry_24xx *) pkt;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x70af,
"Invalid SCSI completion handle 0x%x.\n", index);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1910,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
req = ha->req_q_map[que];
/* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS) {
+ if (handle < req->num_outstanding_cmds)
sp = req->outstanding_cmds[handle];
- } else
+ else
sp = NULL;
if (sp == NULL) {
@@ -1934,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_do_host_ramp_up(vha);
qla2x00_process_completed_request(vha, req, handle);
return;
@@ -2193,6 +2238,9 @@ out:
cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
+ if (!res)
+ qla2x00_do_host_ramp_up(vha);
+
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
}
@@ -2747,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2827,9 +2881,13 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- if (IS_QLA82XX(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[i].handler,
+ 0, qla83xx_msix_entries[i].name, rsp);
+ } else if (IS_QLA82XX(ha)) {
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 68c55eaa318..186dd59ce4f 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* command not found */
return QLA_FUNCTION_FAILED;
}
@@ -1633,6 +1633,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
}
/*
+ * qla24xx_link_initialize
+ * Issue link initialization mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_LINK_INITIALIZATION;
+ mcp->mb[1] = BIT_6|BIT_4;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
* qla2x00_lip_reset
* Issue LIP reset mailbox command.
*
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
}
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags;
+ int found;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3128,13 +3177,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
return;
}
+ found = 0;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list)
- if (vp_idx == vp->vp_idx)
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp_idx == vp->vp_idx) {
+ found = 1;
break;
+ }
+ }
spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!vp)
+ if (!found)
return;
vp->d_id.b.domain = rptid_entry->port_id[2];
@@ -3814,6 +3867,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
}
int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ *temp = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
}
int
-qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
- int rval;
- uint8_t byte;
+ int rval = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = vha->hw;
+ uint8_t byte;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
"Entered %s.\n", __func__);
- /* Integer part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
+ *temp = byte;
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x10c9,
+ "Thermal not supported by I2C.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
}
- *temp = byte;
- /* Fraction part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
+ rval = qla2x00_read_asic_temperature(vha, temp);
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x1019,
+ "Thermal not supported by ISP.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
}
- *frac = (byte >> 6) * 25;
+ ql_log(ql_log_warn, vha, 0x1150,
+ "Thermal not supported by this card "
+ "(ignoring further requests).\n");
+ return rval;
+
+done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
"Done %s.\n", __func__);
-fail:
return rval;
}
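
qla2x00_get_thermal_temp() now returns a whole-degree reading and tries two sources in order, clearing the matching thermal_support bit when a source fails so it is not retried: first the SFP register over I2C via qla2x00_read_sfp(), then the ASIC temperature through the new MBC_GET_RNID_PARAMS / RNID_TYPE_ASIC_TEMP mailbox. A condensed sketch of that fallback (the helper name is illustrative; the calls and flag bits are the ones added above):

/* Sketch: ordered thermal sources with sticky capability bits. */
static int example_read_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
	struct qla_hw_data *ha = vha->hw;
	uint8_t byte;

	if ((ha->thermal_support & THERMAL_SUPPORT_I2C) &&
	    qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x1, 1,
	    BIT_13|BIT_12|BIT_0) == QLA_SUCCESS) {
		*temp = byte;
		return QLA_SUCCESS;
	}
	ha->thermal_support &= ~THERMAL_SUPPORT_I2C;

	if ((ha->thermal_support & THERMAL_SUPPORT_ISP) &&
	    qla2x00_read_asic_temperature(vha, temp) == QLA_SUCCESS)
		return QLA_SUCCESS;
	ha->thermal_support &= ~THERMAL_SUPPORT_ISP;

	return QLA_FUNCTION_FAILED;	/* neither source is available */
}
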
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 20fd974f903..f868a9f98af 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
}
+ kfree(req->outstanding_cmds);
kfree(req);
req = NULL;
}
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
+ ret = qla2x00_alloc_outstanding_cmds(ha, req);
+ if (ret != QLA_SUCCESS)
+ goto que_failed;
+
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 3e3f593bada..10754f51830 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -847,14 +847,21 @@ static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla82xx_rom_lock_timeout)
+ if (timeout >= qla82xx_rom_lock_timeout) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_dbg(ql_dbg_p3p, vha, 0xb085,
+ "Failed to acquire rom lock, acquired by %d.\n",
+ lock_owner);
return -1;
+ }
timeout++;
}
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
@@ -3629,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (!sp->u.scmd.ctx ||
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6c953e8c08f..d268e8406fd 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -897,7 +897,7 @@ struct ct6_dsd {
#define FLT_REG_BOOT_CODE_82XX 0x78
#define FLT_REG_FW_82XX 0x74
#define FLT_REG_GOLD_FW_82XX 0x75
-#define FLT_REG_VPD_82XX 0x81
+#define FLT_REG_VPD_8XXX 0x81
#define FA_VPD_SIZE_82XX 0x400
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 10d23f8b703..2c6dd3dfe0f 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -111,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
-#define MAX_Q_DEPTH 32
-static int ql2xmaxqdepth = MAX_Q_DEPTH;
+int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
@@ -360,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
+ if (req)
+ kfree(req->outstanding_cmds);
+
kfree(req);
req = NULL;
}
@@ -628,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
}
CMD_SP(cmd) = NULL;
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(sp->fcport->vha, sp);
}
static void
@@ -716,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
- if (!sp)
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp) {
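+ /* No srb available: request a host-wide queue depth ramp down */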
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy;
+ }
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -731,6 +735,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy_free_sp;
}
@@ -1010,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
spin_lock_irqsave(&ha->hardware_lock, flags);
req = vha->req;
for (cnt = 1; status == QLA_SUCCESS &&
- cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
@@ -1300,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 0);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
"full_login_lip=%d.\n", ret);
}
- atomic_set(&vha->loop_state, LOOP_DOWN);
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 0);
}
if (ha->flags.enable_lip_reset) {
@@ -1337,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
@@ -1453,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
return tag_type;
}
+static void
+qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampdown_time = jiffies;
+
+ if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
+ return;
+
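+ /* Halve the adapter-wide LUN queue depth, never going below cmd_per_lun */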
+ if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
+ ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
+ else
+ ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
+
+ /*
+ * Geometrically ramp down the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > shost->cmd_per_lun) {
+ if (sdev->queue_depth < ha->cfg_lun_q_depth)
+ continue;
+ ql_log(ql_log_warn, vp, 0x3031,
+ "%ld:%d:%d: Ramping down queue depth to %d",
+ vp->host_no, sdev->id, sdev->lun,
+ ha->cfg_lun_q_depth);
+ qla2x00_change_queue_depth(sdev,
+ ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
+static void
+qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampup_time = jiffies;
+ ha->cfg_lun_q_depth++;
+
+ /*
+ * Linearly ramp up the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > ha->cfg_lun_q_depth)
+ continue;
+ qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
+ SCSI_QDEPTH_RAMP_UP);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@@ -1730,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+
+ qlt_83xx_iospace_config(ha);
+
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
"MSIX Count:%d.\n", ha->msix_count);
return 0;
@@ -2230,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->cfg_lun_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -2307,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2338,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2377,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
init_completion(&ha->dcbx_comp);
+ init_completion(&ha->lb_portup_comp);
set_bit(0, (unsigned long *) ha->vp_idx_map);
@@ -2720,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
vha = pci_get_drvdata(pdev);
ha = vha->hw;
@@ -3974,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
uint32_t idc_lck_rcvry_stage_mask = 0x3;
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
struct qla_hw_data *ha = base_vha->hw;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+ "Trying force recovery of the IDC lock.\n");
rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
if (rval)
@@ -4065,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
uint16_t options = (requester_id << 15) | BIT_6;
uint32_t data;
+ uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
/* IDC-lock implementation using driver-lock/lock-id remote registers */
@@ -4076,8 +4171,11 @@ retry_lock:
qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
ha->portnum);
} else {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ &lock_owner);
ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
- "Failed to acquire IDC lock. retrying...\n");
+ "Failed to acquire IDC lock, acquired by %d, "
+ "retrying...\n", lock_owner);
/* Retry/Perform IDC-Lock recovery */
if (qla83xx_idc_lock_recovery(base_vha)
@@ -4605,6 +4703,18 @@ qla2x00_do_dpc(void *data)
qla2xxx_flash_npiv_conf(base_vha);
}
+ if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
+ &base_vha->dpc_flags)) {
+ /* Prevents simultaneous ramp up and down */
+ clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags);
+ qla2x00_host_ramp_down_queuedepth(base_vha);
+ }
+
+ if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags))
+ qla2x00_host_ramp_up_queuedepth(base_vha);
+
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@@ -4733,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
cpu_flags);
req = ha->req_q_map[0];
for (index = 1;
- index < MAX_OUTSTANDING_COMMANDS;
+ index < req->num_outstanding_cmds;
index++) {
fc_port_t *sfcp;
@@ -4802,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -4815,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, host_ramp_down_needed=%d "
+ "host_ramp_up_needed=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags),
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 892a81e457b..46ef0ac48f4 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 32fdc2a66dd..3bef6736d88 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_BOOTLOAD_82XX:
ha->flt_region_bootload = start;
break;
- case FLT_REG_VPD_82XX:
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_0:
- if (!IS_QLA8031(ha))
- break;
- ha->flt_region_vpd_nvram = start;
- if (ha->flags.port0)
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_1:
- if (!IS_QLA8031(ha))
- break;
- if (!ha->flags.port0)
+ case FLT_REG_VPD_8XXX:
+ if (IS_CNA_CAPABLE(ha))
ha->flt_region_vpd = start;
break;
case FLT_REG_FCOE_NVRAM_0:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 80f4b849e2b..61b5d8c2b5d 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode,
"\"disabled\" - initiator mode will never be enabled; "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
/*
* From scsi/fc/fc_fcp.h
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
@@ -1570,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
/* always increment cmd handle */
do {
++h;
- if (h > MAX_OUTSTANDING_COMMANDS)
+ if (h > DEFAULT_OUTSTANDING_COMMANDS)
h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
if (h == ha->tgt.current_handle) {
ql_dbg(ql_dbg_tgt, vha, 0xe04e,
@@ -2441,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
/* handle-1 is actually used */
- if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
@@ -4305,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
if (!QLA_TGT_MODE_ENABLED())
return 0;
+ if (!IS_TGT_MODE_CAPABLE(ha)) {
+ ql_log(ql_log_warn, base_vha, 0xe070,
+ "This adapter does not support target mode.\n");
+ return 0;
+ }
+
ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
"Registering target for host %ld(%p)", base_vha->host_no, ha);
@@ -4666,7 +4673,6 @@ void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct atio_from_isp *pkt;
int cnt, i;
@@ -4694,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
}
/* Adjust ring index */
- WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
-qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
-/* FIXME: atio_q in/out for ha->mqenable=1..? */
- if (ha->mqenable) {
-#if 0
- WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp25mq.atio_q_out);
-#endif
- } else {
- /* Setup APTIO registers for target mode */
- WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp24.atio_q_out);
+ WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+ RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+ if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
}
}
@@ -4796,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |=
+ __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC tapes support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_81xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ ha->msix_count += 1; /* For ATIO Q */
+}
+
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt)
@@ -4828,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ } else {
+ ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+ }
+
mutex_init(&ha->tgt.tgt_mutex);
mutex_init(&ha->tgt.tgt_host_action_mutex);
qlt_clear_mode(base_vha);
}
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+ struct rsp_que *rsp;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index bad749561ec..ff9ccb9fd03 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -60,8 +60,9 @@
* multi-complete should come to the tgt driver or be handled there by qla2xxx
*/
#define CTIO_COMPLETION_HANDLE_MARK BIT_29
-#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+ "DEFAULT_OUTSTANDING_COMMANDS"
#endif
#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -217,7 +218,7 @@ struct nack_to_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
* is not set. Right now, ha value is ignored.
*/
#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
{
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
-extern void qlt_24xx_config_rings(struct scsi_qla_host *,
- device_reg_t __iomem *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);
extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_81xx *);
extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
extern void qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 49697ca41e7..2b6e478d9e3 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 81e738d61ec..160d3369721 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1420,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
- if (chap_table == NULL) {
- ret = -ENOMEM;
- goto exit_get_chap;
- }
+ if (chap_table == NULL)
+ return -ENOMEM;
chap_size = sizeof(struct ql4_chap_table);
memset(chap_table, 0, chap_size);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 270b3cf6f37..16a3a0cc967 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -201,6 +201,7 @@ enum storvsc_request_type {
#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
/*
@@ -295,6 +296,25 @@ struct storvsc_scan_work {
uint lun;
};
+static void storvsc_device_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ uint lun;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ lun = wrk->lun;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+ if (!sdev)
+ goto done;
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+
+done:
+ kfree(wrk);
+}
+
static void storvsc_bus_scan(struct work_struct *work)
{
struct storvsc_scan_work *wrk;
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
if (!bounce_sgl)
return NULL;
+ sg_init_table(bounce_sgl, num_pages);
for (i = 0; i < num_pages; i++) {
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
@@ -760,6 +781,66 @@ cleanup:
return ret;
}
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ struct scsi_cmnd *scmnd,
+ struct Scsi_Host *host,
+ u8 asc, u8 ascq)
+{
+ struct storvsc_scan_work *wrk;
+ void (*process_err_fn)(struct work_struct *work);
+ bool do_work = false;
+
+ switch (vm_srb->srb_status) {
+ case SRB_STATUS_ERROR:
+ /*
+ * If there is an error, offline the device since all
+ * error recovery strategies would have already been
+ * deployed on the host side. However, if the command
+ * is a pass-through command, deal with it appropriately.
+ */
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+ break;
+ case SRB_STATUS_INVALID_LUN:
+ do_work = true;
+ process_err_fn = storvsc_remove_lun;
+ break;
+ case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
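+ /* ASC 0x2a/ASCQ 0x09: capacity data has changed; rescan the LUN */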
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ do_work = true;
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ }
+ break;
+ }
+
+ if (!do_work)
+ return;
+
+ /*
+ * We need to schedule work to process this error; schedule it.
+ */
+ wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!wrk) {
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ return;
+ }
+
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, process_err_fn);
+ schedule_work(&wrk->work);
+}
+
static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
struct stor_mem_pools *memp = scmnd->device->hostdata;
+ struct Scsi_Host *host;
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
vm_srb = &cmd_request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
cmd_request->bounce_sgl_count);
}
- /*
- * If there is an error; offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side. However, if the command
- * were a pass-through command deal with it appropriately.
- */
scmnd->result = vm_srb->scsi_status;
- if (vm_srb->srb_status == SRB_STATUS_ERROR) {
- switch (scmnd->cmnd[0]) {
- case ATA_16:
- case ATA_12:
- set_host_byte(scmnd, DID_PASSTHROUGH);
- break;
- default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
- }
- }
-
-
- /*
- * If the LUN is invalid; remove the device.
- */
- if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+ sense_hdr.ascq);
+
scsi_set_resid(scmnd,
cmd_request->data_buffer.len -
vm_srb->data_transfer_length);
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
+ sdevice->no_write_same = 1;
+
return 0;
}
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
u8 scsi_op = scmnd->cmnd[0];
switch (scsi_op) {
+ /* the host does not handle WRITE_SAME, log accidental usage */
+ case WRITE_SAME:
/*
* smartd sends this command and the host does not handle
* this. So, don't send it.
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 8f27f9d6f91..0371047c592 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -2,48 +2,58 @@
# Kernel configuration file for the UFS Host Controller
#
# This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011 Samsung Samsung India Software Operations
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
#
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
-# NO WARRANTY
-# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
-# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
-# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
-# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
-# solely responsible for determining the appropriateness of using and
-# distributing the Program and assumes all risks associated with its
-# exercise of rights under this Agreement, including but not limited to
-# the risks and costs of program errors, damage to or loss of data,
-# programs or equipment, and unavailability or interruption of operations.
-
-# DISCLAIMER OF LIABILITY
-# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
-# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller Driver Core"
+ depends on SCSI
+ ---help---
+ This selects the support for UFS devices in Linux. Say Y and make
+ sure that you know the name of your UFS host adapter (the card
+ inside your computer that "speaks" the UFS protocol, also
+ called the UFS Host Controller), because you will be asked for it.
+ The module will be called ufshcd.
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
-# USA.
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.txt>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
-config SCSI_UFSHCD
- tristate "Universal Flash Storage host controller driver"
- depends on PCI && SCSI
+config SCSI_UFSHCD_PCI
+ tristate "PCI bus based UFS Controller support"
+ depends on SCSI_UFSHCD && PCI
---help---
- This is a generic driver which supports PCIe UFS Host controllers.
+ This selects the PCI UFS Host Controller Interface. Select this
+ if you have a UFS Host Controller with a PCI interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index adf7895a6a9..9eda0dfbd6d 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,2 +1,3 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index b207529f8d5..139bc0647b4 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFS_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644
index 00000000000..5cb1d75f586
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -0,0 +1,211 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_suspend
+ * 2. Do bus specific power management
+ */
+
+ return -ENOSYS;
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_resume(struct pci_dev *pdev)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_resume.
+ * 2. Do bus specific wake up
+ */
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate PCI/SCSI host and host memory space
+ * data structure memory
+ * @pdev - pointer to PCI handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ disable_irq(pdev->irq);
+ free_irq(pdev->irq, hba);
+ ufshcd_remove(hba);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @pdev: PCI device structure
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct pci_dev *pdev)
+{
+ int err;
+
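+ /* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails */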
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ return 0;
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ return err;
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto out_error;
+ }
+
+ pci_set_master(pdev);
+
+
+ err = pci_request_regions(pdev, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request regions failed\n");
+ goto out_disable;
+ }
+
+ mmio_base = pci_ioremap_bar(pdev, 0);
+ if (!mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ err = ufshcd_set_dma_mask(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ goto out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_iounmap:
+ iounmap(mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+out_error:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_pci_probe,
+ .remove = ufshcd_pci_remove,
+ .shutdown = ufshcd_pci_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+#endif
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 91a4046ca9b..60fd40c4e4c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1,77 +1,39 @@
/*
- * Universal Flash Storage Host controller driver
+ * Universal Flash Storage Host controller driver Core
*
* This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.1"
+#include "ufshcd.h"
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -102,121 +64,6 @@ enum {
};
/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- int result;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @pdev: PCI device handle
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
- * @tm_condition: condition variable for task management
- * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
- * @errors: HBA errors
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct pci_dev *pdev;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 ufs_version;
-
- struct uic_command active_uic_cmd;
- wait_queue_head_t ufshcd_tm_wait_queue;
- unsigned long tm_condition;
-
- u32 ufshcd_state;
- u32 int_enable_mask;
-
- /* Work Queues */
- struct work_struct uic_workq;
- struct work_struct feh_workq;
-
- /* HBA Errors */
- u32 errors;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_cmd *ucd_cmd_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- struct scsi_cmnd *cmd;
- u8 *sense_buffer;
- unsigned int sense_bufflen;
- int scsi_status;
-
- int command_type;
- int task_tag;
- unsigned int lun;
-};
-
-/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba - Pointer to adapter instance
*
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
if (hba->utmrdl_base_addr) {
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+ dma_free_coherent(hba->dev, utmrdl_size,
hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
}
if (hba->utrdl_base_addr) {
utrdl_size =
(sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, utrdl_size,
+ dma_free_coherent(hba->dev, utrdl_size,
hba->utrdl_base_addr, hba->utrdl_dma_addr);
}
if (hba->ucdl_base_addr) {
ucdl_size =
(sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, ucdl_size,
+ dma_free_coherent(hba->dev, ucdl_size,
hba->ucdl_base_addr, hba->ucdl_dma_addr);
}
}
@@ -429,15 +276,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
}
/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
-}
-
-/**
* ufshcd_hba_start - Start controller initialization sequence
* @hba: per adapter instance
*/
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for UTP command descriptors */
ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
GFP_KERNEL);
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
*/
if (!hba->ucdl_base_addr ||
WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
}
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
utrdl_size,
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
utmrdl_size,
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for local reference block */
hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
if (!hba->lrb) {
- dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
goto out;
}
return 0;
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
/* check if controller is ready to accept UIC commands */
if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
UIC_COMMAND_READY) == 0x0) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller not ready"
" to accept UIC commands\n");
return -EIO;
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
/* check if device present */
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
if (!ufshcd_is_device_present(reg)) {
- dev_err(&hba->pdev->dev, "cc: Device not present\n");
+ dev_err(hba->dev, "cc: Device not present\n");
err = -ENXIO;
goto out;
}
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
if (!(ufshcd_get_lists_status(reg))) {
ufshcd_enable_run_stop_reg(hba);
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
goto out;
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
if (retry) {
retry--;
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller enable failed\n");
return -EIO;
}
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
/* start the initialization process */
if (ufshcd_initialize_hba(hba)) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Reset: Controller initialization failed\n");
return FAILED;
}
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
task_result = SUCCESS;
} else {
task_result = FAILED;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"trc: Invalid ocs = %x\n", ocs_value);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/* check if the returned transfer response is valid */
result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
if (result) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Invalid response = %x\n", result);
break;
}
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case OCS_FATAL_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"OCS error from controller = %x\n", ocs);
break;
} /* end of switch */
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work)
!(ufshcd_get_uic_cmd_result(hba))) {
if (ufshcd_make_hba_operational(hba))
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"cc: hba not operational state\n");
return;
}
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
free_slot = ufshcd_get_tm_free_slot(hba);
if (free_slot >= hba->nutmrs) {
spin_unlock_irqrestore(host->host_lock, flags);
- dev_err(&hba->pdev->dev, "Task management queue full\n");
+ dev_err(hba->dev, "Task management queue full\n");
err = FAILED;
goto out;
}
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
&hba->tm_condition) != 0),
60 * HZ);
if (!err) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task management command timed-out\n");
err = FAILED;
goto out;
@@ -1688,23 +1526,13 @@ static struct scsi_host_template ufshcd_driver_template = {
};
/**
- * ufshcd_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_shutdown(struct pci_dev *pdev)
-{
- ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-#ifdef CONFIG_PM
-/**
* ufshcd_suspend - suspend power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
* @state: power state
*
* Returns -ENOSYS
*/
-static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
/*
* TODO:
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(ufshcd_suspend);
/**
* ufshcd_resume - resume power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
*
* Returns -ENOSYS
*/
-static int ufshcd_resume(struct pci_dev *pdev)
+int ufshcd_resume(struct ufs_hba *hba)
{
/*
* TODO:
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev)
return -ENOSYS;
}
-#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ufshcd_resume);
/**
* ufshcd_hba_free - free allocated memory for
@@ -1748,107 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba)
{
iounmap(hba->mmio_base);
ufshcd_free_hba_memory(hba);
- pci_release_regions(hba->pdev);
}
/**
- * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
- * @pdev - pointer to PCI handle
+ * @hba - per adapter instance
*/
-static void ufshcd_remove(struct pci_dev *pdev)
+void ufshcd_remove(struct ufs_hba *hba)
{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
/* disable interrupts */
ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
- free_irq(pdev->irq, hba);
ufshcd_hba_stop(hba);
ufshcd_hba_free(hba);
scsi_remove_host(hba->host);
scsi_host_put(hba->host);
- pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-}
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
- int err;
- u64 dma_mask;
-
- /*
- * If controller supports 64 bit addressing mode, then set the DMA
- * mask to 64-bit, else set the DMA mask to 32-bit
- */
- if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
- dma_mask = DMA_BIT_MASK(64);
- else
- dma_mask = DMA_BIT_MASK(32);
-
- err = pci_set_dma_mask(hba->pdev, dma_mask);
- if (err)
- return err;
-
- err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
-
- return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
- * ufshcd_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
+ * ufshcd_init - Driver initialization routine
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
* Returns 0 on success, non-zero value on failure
*/
-static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
+ void __iomem *mmio_base, unsigned int irq)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
int err;
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
goto out_error;
}
- pci_set_master(pdev);
+ if (!mmio_base) {
+ dev_err(dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
host = scsi_host_alloc(&ufshcd_driver_template,
sizeof(struct ufs_hba));
if (!host) {
- dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ dev_err(dev, "scsi_host_alloc failed\n");
err = -ENOMEM;
- goto out_disable;
+ goto out_error;
}
hba = shost_priv(host);
-
- err = pci_request_regions(pdev, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request regions failed\n");
- goto out_host_put;
- }
-
- hba->mmio_base = pci_ioremap_bar(pdev, 0);
- if (!hba->mmio_base) {
- dev_err(&pdev->dev, "memory map failed\n");
- err = -ENOMEM;
- goto out_release_regions;
- }
-
hba->host = host;
- hba->pdev = pdev;
+ hba->dev = dev;
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
/* Read capabilities registers */
ufshcd_hba_capabilities(hba);
@@ -1856,17 +1645,11 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
- err = ufshcd_set_dma_mask(hba);
- if (err) {
- dev_err(&pdev->dev, "set dma mask failed\n");
- goto out_iounmap;
- }
-
/* Allocate memory for host memory space */
err = ufshcd_memory_alloc(hba);
if (err) {
- dev_err(&pdev->dev, "Memory allocation failed\n");
- goto out_iounmap;
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
}
/* Configure LRB */
@@ -1888,76 +1671,50 @@ static int ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
/* IRQ registration */
- err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
- dev_err(&pdev->dev, "request irq failed\n");
+ dev_err(hba->dev, "request irq failed\n");
goto out_lrb_free;
}
/* Enable SCSI tag mapping */
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
- dev_err(&pdev->dev, "init shared queue failed\n");
+ dev_err(hba->dev, "init shared queue failed\n");
goto out_free_irq;
}
- pci_set_drvdata(pdev, hba);
-
- err = scsi_add_host(host, &pdev->dev);
+ err = scsi_add_host(host, hba->dev);
if (err) {
- dev_err(&pdev->dev, "scsi_add_host failed\n");
+ dev_err(hba->dev, "scsi_add_host failed\n");
goto out_free_irq;
}
/* Initialization routine */
err = ufshcd_initialize_hba(hba);
if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- goto out_free_irq;
+ dev_err(hba->dev, "Initialization failed\n");
+ goto out_remove_scsi_host;
}
+ *hba_handle = hba;
return 0;
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
out_free_irq:
- free_irq(pdev->irq, hba);
+ free_irq(irq, hba);
out_lrb_free:
ufshcd_free_hba_memory(hba);
-out_iounmap:
- iounmap(hba->mmio_base);
-out_release_regions:
- pci_release_regions(pdev);
-out_host_put:
- scsi_host_put(host);
out_disable:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
+ scsi_host_put(host);
out_error:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_init);
-static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
- { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
- .name = UFSHCD,
- .id_table = ufshcd_pci_tbl,
- .probe = ufshcd_probe,
- .remove = ufshcd_remove,
- .shutdown = ufshcd_shutdown,
-#ifdef CONFIG_PM
- .suspend = ufshcd_suspend,
- .resume = ufshcd_resume,
-#endif
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
- "Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644
index 00000000000..6b99a42f581
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -0,0 +1,202 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: SCSI, UFS, Query.
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ unsigned int lun;
+};
+
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS Version to which controller complies
+ * @irq: Irq number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+ unsigned int irq;
+
+ struct uic_command active_uic_cmd;
+ wait_queue_head_t ufshcd_tm_wait_queue;
+ unsigned long tm_condition;
+
+ u32 ufshcd_state;
+ u32 int_enable_mask;
+
+ /* Work Queues */
+ struct work_struct uic_workq;
+ struct work_struct feh_workq;
+
+ /* HBA Errors */
+ u32 errors;
+};
+
+int ufshcd_init(struct device *, struct ufs_hba **, void __iomem *,
+ unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+ writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
+}
+
+#endif /* End of Header */
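The header above completes the split of ufshcd into a bus-agnostic core: the glue driver owns the bus resources and hands the core a struct device, the ioremapped register space and the interrupt line through ufshcd_init(), then tears everything down through ufshcd_remove(). A rough sketch of how a PCI glue module might sit on top of this interface is given below; the function names (ufshcd_pci_probe, ufshcd_pci_remove) and the error-path details are illustrative assumptions, not the actual glue driver added elsewhere in this series.

	/* Hypothetical PCI glue on top of the exported core API (sketch only). */
	static int ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		struct ufs_hba *hba;
		void __iomem *mmio_base;
		int err;

		err = pci_enable_device(pdev);
		if (err)
			return err;
		pci_set_master(pdev);

		err = pci_request_regions(pdev, UFSHCD);
		if (err)
			goto out_disable;

		mmio_base = pci_ioremap_bar(pdev, 0);
		if (!mmio_base) {
			err = -ENOMEM;
			goto out_release;
		}

		/* DMA mask setup (formerly ufshcd_set_dma_mask()) is now the glue's job; omitted here. */

		err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
		if (err)
			goto out_iounmap;

		pci_set_drvdata(pdev, hba);
		return 0;

	out_iounmap:
		iounmap(mmio_base);
	out_release:
		pci_release_regions(pdev);
	out_disable:
		pci_disable_device(pdev);
		return err;
	}

	static void ufshcd_pci_remove(struct pci_dev *pdev)
	{
		struct ufs_hba *hba = pci_get_drvdata(pdev);

		ufshcd_remove(hba);	/* unmaps the registers via ufshcd_hba_free() */
		/*
		 * The core requests the IRQ in ufshcd_init() but, as of this patch,
		 * does not release it in ufshcd_remove(), so the glue frees it here
		 * together with the PCI resources it allocated itself.
		 */
		free_irq(pdev->irq, hba);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
	}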
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6e3510f7116..0c164847a3e 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFSHCI_H
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index a43415a7fbe..4c0f6d883dd 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -14,7 +14,7 @@
#include <linux/delay.h>
#include <linux/export.h>
#ifdef CONFIG_BCM47XX
-#include <asm/mach-bcm47xx/nvram.h>
+#include <bcm47xx_nvram.h>
#endif
#include "ssb_private.h"
@@ -322,7 +322,7 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
if (bus->bustype == SSB_BUSTYPE_SSB) {
#ifdef CONFIG_BCM47XX
char buf[20];
- if (nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
+ if (bcm47xx_nvram_getenv("xtalfreq", buf, sizeof(buf)) >= 0)
crystalfreq = simple_strtoul(buf, NULL, 0);
#endif
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9435a3d369a..7ea246a0773 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3584,6 +3584,10 @@ check_rsp_state:
spin_lock_bh(&cmd->istate_lock);
cmd->i_state = ISTATE_SENT_STATUS;
spin_unlock_bh(&cmd->istate_lock);
+
+ if (atomic_read(&conn->check_immediate_queue))
+ return 1;
+
continue;
} else if (ret == 2) {
/* Still must send status,
@@ -3673,7 +3677,7 @@ check_rsp_state:
}
if (atomic_read(&conn->check_immediate_queue))
- break;
+ return 1;
}
return 0;
@@ -3717,12 +3721,15 @@ restart:
signal_pending(current))
goto transport_err;
+get_immediate:
ret = handle_immediate_queue(conn);
if (ret < 0)
goto transport_err;
ret = handle_response_queue(conn);
- if (ret == -EAGAIN)
+ if (ret == 1)
+ goto get_immediate;
+ else if (ret == -EAGAIN)
goto restart;
else if (ret < 0)
goto transport_err;
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 6917a9e938e..d3536f57444 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -2598,7 +2598,7 @@ static int __init sbp_init(void)
return 0;
};
-static void sbp_exit(void)
+static void __exit sbp_exit(void)
{
sbp_deregister_configfs();
};
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index d226c10a985..17a6acbc3ab 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -631,7 +631,7 @@ static int __init fileio_module_init(void)
return transport_subsystem_register(&fileio_template);
}
-static void fileio_module_exit(void)
+static void __exit fileio_module_exit(void)
{
transport_subsystem_release(&fileio_template);
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c73f4a950e2..8bcc514ec8b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -821,7 +821,7 @@ static int __init iblock_module_init(void)
return transport_subsystem_register(&iblock_template);
}
-static void iblock_module_exit(void)
+static void __exit iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 2bcfd79cf59..82e78d72fdb 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -840,14 +840,14 @@ static void pscsi_bi_endio(struct bio *bio, int error)
bio_put(bio);
}
-static inline struct bio *pscsi_get_bio(int sg_num)
+static inline struct bio *pscsi_get_bio(int nr_vecs)
{
struct bio *bio;
/*
* Use bio_kmalloc() following the comment for bio -> struct request
* in block/blk-core.c:blk_make_request()
*/
- bio = bio_kmalloc(GFP_KERNEL, sg_num);
+ bio = bio_kmalloc(GFP_KERNEL, nr_vecs);
if (!bio) {
pr_err("PSCSI: bio_kmalloc() failed\n");
return NULL;
@@ -940,7 +940,6 @@ pscsi_map_sg(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
bio = NULL;
}
- page++;
len -= bytes;
data_len -= bytes;
off = 0;
@@ -952,7 +951,6 @@ fail:
while (*hbio) {
bio = *hbio;
*hbio = (*hbio)->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1092,7 +1090,6 @@ fail_free_bio:
while (hbio) {
struct bio *bio = hbio;
hbio = hbio->bi_next;
- bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -1178,7 +1175,7 @@ static int __init pscsi_module_init(void)
return transport_subsystem_register(&pscsi_template);
}
-static void pscsi_module_exit(void)
+static void __exit pscsi_module_exit(void)
{
transport_subsystem_release(&pscsi_template);
}
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index a0162cbf055..cf9210db9fa 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -729,19 +729,19 @@ config SERIAL_SH_SCI_DMA
config SERIAL_PNX8XXX
bool "Enable PNX8XXX SoCs' UART Support"
- depends on SOC_PNX8550 || SOC_PNX833X
+ depends on SOC_PNX833X
select SERIAL_CORE
help
- If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
- and you want to use serial ports, say Y. Otherwise, say N.
+ If you have a MIPS-based Philips SoC such as PNX8330 and you want
+ to use serial ports, say Y. Otherwise, say N.
config SERIAL_PNX8XXX_CONSOLE
bool "Enable PNX8XX0 serial console"
depends on SERIAL_PNX8XXX
select SERIAL_CORE_CONSOLE
help
- If you have a MIPS-based Philips SoC such as PNX8550 or PNX8330
- and you want to use serial console, say Y. Otherwise, say N.
+ If you have a MIPS-based Philips SoC such as PNX8330 and you want
+ to use serial console, say Y. Otherwise, say N.
config SERIAL_HS_LPC32XX
tristate "LPC32XX high speed serial port support"
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 230bd2aad4f..9bd16255dd9 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -383,8 +383,10 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
goto done;
}
} else {
- if (!simple_empty(dentry))
+ if (!simple_empty(dentry)) {
+ spin_unlock(&sbi->fs_lock);
goto done;
+ }
}
ino->flags |= AUTOFS_INF_PENDING;
spin_unlock(&sbi->fs_lock);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 03bc1d347d8..3db70dae40d 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -42,10 +42,8 @@ void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
while (wq) {
nwq = wq->next;
wq->status = -ENOENT; /* Magic is gone - report failure */
- if (wq->name.name) {
- kfree(wq->name.name);
- wq->name.name = NULL;
- }
+ kfree(wq->name.name);
+ wq->name.name = NULL;
wq->wait_ctr--;
wake_up_interruptible(&wq->queue);
wq = nwq;
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index ccd25ba7a9a..9a8622a5b86 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -5,6 +5,9 @@ config BTRFS_FS
select ZLIB_DEFLATE
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select RAID6_PQ
+ select XOR_BLOCKS
+
help
Btrfs is a new filesystem with extents, writable snapshotting,
support for multiple devices and many more features.
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 7df3e0f0ee5..3932224f99e 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -8,7 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
- reada.o backref.o ulist.o qgroup.o send.o dev-replace.o
+ reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 04edf69be87..bd605c87adf 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -352,11 +352,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
err = __resolve_indirect_ref(fs_info, search_commit_root,
time_seq, ref, parents,
extent_item_pos);
- if (err) {
- if (ret == 0)
- ret = err;
+ if (err)
continue;
- }
/* we put the first parent into the ref at hand */
ULIST_ITER_INIT(&uiter);
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index d61feca7945..310a7f6d09b 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -19,7 +19,7 @@
#ifndef __BTRFS_BACKREF__
#define __BTRFS_BACKREF__
-#include "ioctl.h"
+#include <linux/btrfs.h>
#include "ulist.h"
#include "extent_io.h"
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 2a8c242bc4f..d9b97d4960e 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -40,6 +40,8 @@
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
#define BTRFS_INODE_NEEDS_FULL_SYNC 7
#define BTRFS_INODE_COPY_EVERYTHING 8
+#define BTRFS_INODE_IN_DELALLOC_LIST 9
+#define BTRFS_INODE_READDIO_NEED_LOCK 10
/* in memory btrfs inode */
struct btrfs_inode {
@@ -216,4 +218,22 @@ static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
return 0;
}
+/*
+ * Disable the DIO read nolock optimization, so new DIO readers will be
+ * forced to grab i_mutex. This is used to avoid an endless truncate
+ * caused by a non-locked DIO read.
+ */
+static inline void btrfs_inode_block_unlocked_dio(struct inode *inode)
+{
+ set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags);
+ smp_mb();
+}
+
+static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+ &BTRFS_I(inode)->runtime_flags);
+}
+
#endif
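The pairing above depends on the smp_mb() after set_bit() being matched by the barrier before clear_bit(), so that a truncate can fence out new lock-free DIO readers and then drain the ones already in flight. A condensed sketch of the intended call pattern, with the real call sites simplified and the surrounding locking omitted:

	/* truncate side (sketch): stop new unlocked DIO readers, then drain */
	btrfs_inode_block_unlocked_dio(inode);
	inode_dio_wait(inode);
	/* ... shrink the file ... */
	btrfs_inode_resume_unlocked_dio(inode);

	/* DIO read side (sketch): honour the flag before skipping i_mutex */
	if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
		     &BTRFS_I(inode)->runtime_flags)) {
		mutex_lock(&inode->i_mutex);
		/* locked read path */
		mutex_unlock(&inode->i_mutex);
	} else {
		/* lock-free read path */
	}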
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 11d47bfb62b..18af6f48781 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -813,8 +813,7 @@ static int btrfsic_process_superblock_dev_mirror(
(bh->b_data + (dev_bytenr & 4095));
if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
- strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
- sizeof(super_tmp->magic)) ||
+ super_tmp->magic != cpu_to_le64(BTRFS_MAGIC) ||
memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
btrfs_super_nodesize(super_tmp) != state->metablock_size ||
btrfs_super_leafsize(super_tmp) != state->metablock_size ||
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 94ab2f80e7e..15b94089abc 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -372,7 +372,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_size)
- ret = io_tree->ops->merge_bio_hook(page, 0,
+ ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
PAGE_CACHE_SIZE,
bio, 0);
else
@@ -655,7 +655,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->index = em_start >> PAGE_CACHE_SHIFT;
if (comp_bio->bi_size)
- ret = tree->ops->merge_bio_hook(page, 0,
+ ret = tree->ops->merge_bio_hook(READ, page, 0,
PAGE_CACHE_SIZE,
comp_bio, 0);
else
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index eea5da7a2b9..ecd25a1b4e5 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1138,6 +1138,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
switch (tm->op) {
case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
BUG_ON(tm->slot < n);
+ /* Fallthrough */
case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
case MOD_LOG_KEY_REMOVE:
btrfs_set_node_key(eb, &tm->key, tm->slot);
@@ -1222,7 +1223,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
__tree_mod_log_rewind(eb_rewin, time_seq, tm);
WARN_ON(btrfs_header_nritems(eb_rewin) >
- BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
+ BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
return eb_rewin;
}
@@ -1441,7 +1442,7 @@ int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
*/
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, int cache_only, u64 *last_ret,
+ int start_slot, u64 *last_ret,
struct btrfs_key *progress)
{
struct extent_buffer *cur;
@@ -1461,8 +1462,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_disk_key disk_key;
parent_level = btrfs_header_level(parent);
- if (cache_only && parent_level != 1)
- return 0;
WARN_ON(trans->transaction != root->fs_info->running_transaction);
WARN_ON(trans->transid != root->fs_info->generation);
@@ -1508,10 +1507,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
else
uptodate = 0;
if (!cur || !uptodate) {
- if (cache_only) {
- free_extent_buffer(cur);
- continue;
- }
if (!cur) {
cur = read_tree_block(root, blocknr,
blocksize, gen);
@@ -4825,8 +4820,8 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
/*
* A helper function to walk down the tree starting at min_key, and looking
- * for nodes or leaves that are either in cache or have a minimum
- * transaction id. This is used by the btree defrag code, and tree logging
+ * for nodes or leaves that have a minimum transaction id.
+ * This is used by the btree defrag code, and tree logging
*
* This does not cow, but it does stuff the starting key it finds back
* into min_key, so you can call btrfs_search_slot with cow=1 on the
@@ -4847,7 +4842,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
*/
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_key *max_key,
- struct btrfs_path *path, int cache_only,
+ struct btrfs_path *path,
u64 min_trans)
{
struct extent_buffer *cur;
@@ -4887,15 +4882,12 @@ again:
if (sret && slot > 0)
slot--;
/*
- * check this node pointer against the cache_only and
- * min_trans parameters. If it isn't in cache or is too
- * old, skip to the next one.
+ * check this node pointer against the min_trans parameters.
+ * If it is too old, skip to the next one.
*/
while (slot < nritems) {
u64 blockptr;
u64 gen;
- struct extent_buffer *tmp;
- struct btrfs_disk_key disk_key;
blockptr = btrfs_node_blockptr(cur, slot);
gen = btrfs_node_ptr_generation(cur, slot);
@@ -4903,27 +4895,7 @@ again:
slot++;
continue;
}
- if (!cache_only)
- break;
-
- if (max_key) {
- btrfs_node_key(cur, &disk_key, slot);
- if (comp_keys(&disk_key, max_key) >= 0) {
- ret = 1;
- goto out;
- }
- }
-
- tmp = btrfs_find_tree_block(root, blockptr,
- btrfs_level_size(root, level - 1));
-
- if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
- free_extent_buffer(tmp);
- break;
- }
- if (tmp)
- free_extent_buffer(tmp);
- slot++;
+ break;
}
find_next_key:
/*
@@ -4934,7 +4906,7 @@ find_next_key:
path->slots[level] = slot;
btrfs_set_path_blocking(path);
sret = btrfs_find_next_key(root, path, min_key, level,
- cache_only, min_trans);
+ min_trans);
if (sret == 0) {
btrfs_release_path(path);
goto again;
@@ -5399,8 +5371,7 @@ out:
/*
* this is similar to btrfs_next_leaf, but does not try to preserve
* and fixup the path. It looks for and returns the next key in the
- * tree based on the current path and the cache_only and min_trans
- * parameters.
+ * tree based on the current path and the min_trans parameters.
*
* 0 is returned if another key is found, < 0 if there are any errors
* and 1 is returned if there are no higher keys in the tree
@@ -5409,8 +5380,7 @@ out:
* calling this function.
*/
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key, int level,
- int cache_only, u64 min_trans)
+ struct btrfs_key *key, int level, u64 min_trans)
{
int slot;
struct extent_buffer *c;
@@ -5461,22 +5431,8 @@ next:
if (level == 0)
btrfs_item_key_to_cpu(c, key, slot);
else {
- u64 blockptr = btrfs_node_blockptr(c, slot);
u64 gen = btrfs_node_ptr_generation(c, slot);
- if (cache_only) {
- struct extent_buffer *cur;
- cur = btrfs_find_tree_block(root, blockptr,
- btrfs_level_size(root, level - 1));
- if (!cur ||
- btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
- slot++;
- if (cur)
- free_extent_buffer(cur);
- goto next;
- }
- free_extent_buffer(cur);
- }
if (gen < min_trans) {
slot++;
goto next;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 547b7b05727..0d82922179d 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -31,10 +31,10 @@
#include <trace/events/btrfs.h>
#include <asm/kmap_types.h>
#include <linux/pagemap.h>
+#include <linux/btrfs.h>
#include "extent_io.h"
#include "extent_map.h"
#include "async-thread.h"
-#include "ioctl.h"
struct btrfs_trans_handle;
struct btrfs_transaction;
@@ -46,7 +46,7 @@ extern struct kmem_cache *btrfs_path_cachep;
extern struct kmem_cache *btrfs_free_space_cachep;
struct btrfs_ordered_sum;
-#define BTRFS_MAGIC "_BHRfS_M"
+#define BTRFS_MAGIC 0x4D5F53665248425FULL /* ascii _BHRfS_M, no null */
#define BTRFS_MAX_MIRRORS 3
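The string compare against "_BHRfS_M" becomes a single integer compare because the eight ASCII bytes of the magic, read as a little-endian 64-bit value (btrfs stores superblock fields little-endian on disk), are exactly the constant defined above:

	/*
	 * "_BHRfS_M" on disk:  5F 42 48 52 66 53 5F 4D   (lowest address first)
	 * read as a le64:      0x4D5F53665248425F        == BTRFS_MAGIC
	 */
	if (super->magic != cpu_to_le64(BTRFS_MAGIC))
		return -EINVAL;		/* not a btrfs superblock (sketch) */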
@@ -191,6 +191,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
/* ioprio of readahead is set to idle */
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
+#define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024)
+
/*
* The key defines the order in the tree, and so it also defines (optimal)
* block layout.
@@ -336,7 +338,10 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
/*
* File system states
*/
+#define BTRFS_FS_STATE_ERROR 0
+#define BTRFS_FS_STATE_REMOUNTING 1
+/* Super block flags */
/* Errors detected */
#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2)
@@ -502,6 +507,7 @@ struct btrfs_super_block {
#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6)
+#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7)
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
@@ -511,6 +517,7 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
+ BTRFS_FEATURE_INCOMPAT_RAID56 | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
/*
@@ -952,8 +959,20 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4)
#define BTRFS_BLOCK_GROUP_DUP (1ULL << 5)
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7)
+#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
#define BTRFS_BLOCK_GROUP_RESERVED BTRFS_AVAIL_ALLOC_BIT_SINGLE
-#define BTRFS_NR_RAID_TYPES 5
+
+enum btrfs_raid_types {
+ BTRFS_RAID_RAID10,
+ BTRFS_RAID_RAID1,
+ BTRFS_RAID_DUP,
+ BTRFS_RAID_RAID0,
+ BTRFS_RAID_SINGLE,
+ BTRFS_RAID_RAID5,
+ BTRFS_RAID_RAID6,
+ BTRFS_NR_RAID_TYPES
+};
#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \
BTRFS_BLOCK_GROUP_SYSTEM | \
@@ -961,6 +980,8 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID5 | \
+ BTRFS_BLOCK_GROUP_RAID6 | \
BTRFS_BLOCK_GROUP_DUP | \
BTRFS_BLOCK_GROUP_RAID10)
/*
@@ -1185,6 +1206,10 @@ struct btrfs_block_group_cache {
u64 flags;
u64 sectorsize;
u64 cache_generation;
+
+ /* for raid56, this is a full stripe, without parity */
+ unsigned long full_stripe_len;
+
unsigned int ro:1;
unsigned int dirty:1;
unsigned int iref:1;
@@ -1225,6 +1250,28 @@ struct seq_list {
u64 seq;
};
+enum btrfs_orphan_cleanup_state {
+ ORPHAN_CLEANUP_STARTED = 1,
+ ORPHAN_CLEANUP_DONE = 2,
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash {
+ struct list_head hash_list;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+};
+
+/* used by the raid56 code to lock stripes for read/modify/write */
+struct btrfs_stripe_hash_table {
+ struct list_head stripe_cache;
+ spinlock_t cache_lock;
+ int cache_size;
+ struct btrfs_stripe_hash table[];
+};
+
+#define BTRFS_STRIPE_HASH_TABLE_BITS 11
+
/* fs_info */
struct reloc_control;
struct btrfs_device;
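The table above gives the new RAID5/6 code one bucket per hash value, each with its own lock and wait queue, keyed by the logical start of a full stripe; BTRFS_STRIPE_HASH_TABLE_BITS sets the bucket count to 2^11. A bucket lookup reduces to something like the sketch below (the helper name is an illustrative assumption, the real raid56 code may differ in detail):

	#include <linux/hash.h>

	/* sketch: map a full-stripe logical start to its hash bucket */
	static struct btrfs_stripe_hash *
	stripe_hash_bucket(struct btrfs_stripe_hash_table *table, u64 full_stripe_start)
	{
		u32 bucket = hash_64(full_stripe_start, BTRFS_STRIPE_HASH_TABLE_BITS);

		return &table->table[bucket];
	}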
@@ -1250,6 +1297,7 @@ struct btrfs_fs_info {
/* block group cache stuff */
spinlock_t block_group_cache_lock;
+ u64 first_logical_byte;
struct rb_root block_group_cache_tree;
/* keep track of unallocated space */
@@ -1288,7 +1336,23 @@ struct btrfs_fs_info {
u64 last_trans_log_full_commit;
unsigned long mount_opt;
unsigned long compress_type:4;
+ /*
+ * This is only a hint; the read side is safe even if it sees a stale
+ * value, because the data will simply be written out as a regular
+ * extent. The write side (mount/remount) is under the ->s_umount lock,
+ * so it is also safe.
+ */
u64 max_inline;
+ /*
+ * Protected by ->chunk_mutex and sb->s_umount.
+ *
+ * Two locks are used because only mount and remount can change it and
+ * both of those run under sb->s_umount, while the read side (chunk
+ * allocation) cannot take sb->s_umount without risking a deadlock.
+ * So the write side must hold both locks, and the read side only
+ * needs to hold one of them.
+ */
u64 alloc_start;
struct btrfs_transaction *running_transaction;
wait_queue_head_t transaction_throttle;
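In other words, under the rule documented for alloc_start above, a writer (mount or remount, already under sb->s_umount) also takes chunk_mutex, while a reader in the chunk allocator takes chunk_mutex alone. Condensed into a sketch, with the real call sites and values (new_alloc_start, start) as placeholders:

	/* writer (remount path, sb->s_umount already held): both locks */
	mutex_lock(&fs_info->chunk_mutex);
	fs_info->alloc_start = new_alloc_start;
	mutex_unlock(&fs_info->chunk_mutex);

	/* reader (chunk allocation): chunk_mutex alone is enough */
	mutex_lock(&fs_info->chunk_mutex);
	start = fs_info->alloc_start;
	mutex_unlock(&fs_info->chunk_mutex);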
@@ -1307,6 +1371,13 @@ struct btrfs_fs_info {
struct mutex cleaner_mutex;
struct mutex chunk_mutex;
struct mutex volume_mutex;
+
+ /* this is used during read/modify/write to make sure
+ * no two ios are trying to mod the same stripe at the same
+ * time
+ */
+ struct btrfs_stripe_hash_table *stripe_hash_table;
+
/*
* this protects the ordered operations list only while we are
* processing all of the entries on it. This way we make
@@ -1365,6 +1436,7 @@ struct btrfs_fs_info {
*/
struct list_head ordered_extents;
+ spinlock_t delalloc_lock;
/*
* all of the inodes that have delalloc bytes. It is possible for
* this list to be empty even when there is still dirty data=ordered
@@ -1373,13 +1445,6 @@ struct btrfs_fs_info {
struct list_head delalloc_inodes;
/*
- * special rename and truncate targets that must be on disk before
- * we're allowed to commit. This is basically the ext3 style
- * data=ordered list.
- */
- struct list_head ordered_operations;
-
- /*
* there is a pool of worker threads for checksumming during writes
* and a pool for checksumming after reads. This is because readers
* can run with FS locks held, and the writers may be waiting for
@@ -1395,6 +1460,8 @@ struct btrfs_fs_info {
struct btrfs_workers flush_workers;
struct btrfs_workers endio_workers;
struct btrfs_workers endio_meta_workers;
+ struct btrfs_workers endio_raid56_workers;
+ struct btrfs_workers rmw_workers;
struct btrfs_workers endio_meta_write_workers;
struct btrfs_workers endio_write_workers;
struct btrfs_workers endio_freespace_worker;
@@ -1423,10 +1490,12 @@ struct btrfs_fs_info {
u64 total_pinned;
- /* protected by the delalloc lock, used to keep from writing
- * metadata until there is a nice batch
- */
- u64 dirty_metadata_bytes;
+ /* used to keep from writing metadata until there is a nice batch */
+ struct percpu_counter dirty_metadata_bytes;
+ struct percpu_counter delalloc_bytes;
+ s32 dirty_metadata_batch;
+ s32 delalloc_batch;
+
struct list_head dirty_cowonly_roots;
struct btrfs_fs_devices *fs_devices;
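Turning dirty_metadata_bytes and delalloc_bytes into per-cpu counters with an explicit batch keeps the hot-path accounting CPU-local and only folds the deltas into the shared count once the batch is exceeded. The usual shape of that API is sketched below; the batch formula is one plausible choice and the threshold comparison is illustrative:

	/* setup (sketch), e.g. while initialising fs_info */
	percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
	fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * (1 + ilog2(nr_cpu_ids));

	/* hot path: cheap, CPU-local update */
	__percpu_counter_add(&fs_info->dirty_metadata_bytes, len,
			     fs_info->dirty_metadata_batch);

	/* decision point: approximate compare against the writeback threshold */
	if (percpu_counter_compare(&fs_info->dirty_metadata_bytes,
				   BTRFS_DIRTY_METADATA_THRESH) > 0)
		/* kick off metadata writeback */;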
@@ -1442,9 +1511,6 @@ struct btrfs_fs_info {
struct reloc_control *reloc_ctl;
- spinlock_t delalloc_lock;
- u64 delalloc_bytes;
-
/* data_alloc_cluster is only used in ssd mode */
struct btrfs_free_cluster data_alloc_cluster;
@@ -1456,6 +1522,8 @@ struct btrfs_fs_info {
struct rb_root defrag_inodes;
atomic_t defrag_running;
+ /* Used to protect avail_{data, metadata, system}_alloc_bits */
+ seqlock_t profiles_lock;
/*
* these three are in extended format (availability of single
* chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -1520,7 +1588,7 @@ struct btrfs_fs_info {
u64 qgroup_seq;
/* filesystem state */
- u64 fs_state;
+ unsigned long fs_state;
struct btrfs_delayed_root *delayed_root;
@@ -1623,6 +1691,9 @@ struct btrfs_root {
struct list_head root_list;
+ spinlock_t log_extents_lock[2];
+ struct list_head logged_list[2];
+
spinlock_t orphan_lock;
atomic_t orphan_inodes;
struct btrfs_block_rsv *orphan_block_rsv;
@@ -1832,6 +1903,7 @@ struct btrfs_ioctl_defrag_range_args {
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
+#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
#define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
BTRFS_MOUNT_##opt)
/*
@@ -2936,8 +3008,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
u64 num_bytes, u64 *refs, u64 *flags);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num, int reserved);
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
u64 bytenr, u64 num_bytes);
int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -3035,8 +3106,13 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode);
void btrfs_orphan_release_metadata(struct inode *inode);
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_pending_snapshot *pending);
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int nitems,
+ u64 *qgroup_reserved);
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ u64 qgroup_reserved);
int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
@@ -3092,10 +3168,10 @@ struct extent_buffer *btrfs_root_node(struct btrfs_root *root);
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root);
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
struct btrfs_key *key, int lowest_level,
- int cache_only, u64 min_trans);
+ u64 min_trans);
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
struct btrfs_key *max_key,
- struct btrfs_path *path, int cache_only,
+ struct btrfs_path *path,
u64 min_trans);
enum btrfs_compare_tree_result {
BTRFS_COMPARE_TREE_NEW,
@@ -3148,7 +3224,7 @@ int btrfs_search_slot_for_read(struct btrfs_root *root,
int find_higher, int return_any);
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *parent,
- int start_slot, int cache_only, u64 *last_ret,
+ int start_slot, u64 *last_ret,
struct btrfs_key *progress);
void btrfs_release_path(struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
@@ -3459,9 +3535,9 @@ int btrfs_writepages(struct address_space *mapping,
struct writeback_control *wbc);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root, u64 new_dirid);
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
- size_t size, struct bio *bio, unsigned long bio_flags);
-
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+ size_t size, struct bio *bio,
+ unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
@@ -3543,7 +3619,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
/* tree-defrag.c */
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int cache_only);
+ struct btrfs_root *root);
/* sysfs.c */
int btrfs_init_sysfs(void);
@@ -3620,11 +3696,14 @@ __printf(5, 6)
void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
unsigned int line, int errno, const char *fmt, ...);
+/*
+ * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic
+ * will panic(). Otherwise we BUG() here.
+ */
#define btrfs_panic(fs_info, errno, fmt, args...) \
do { \
- struct btrfs_fs_info *_i = (fs_info); \
- __btrfs_panic(_i, __func__, __LINE__, errno, fmt, ##args); \
- BUG_ON(!(_i->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)); \
+ __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \
+ BUG(); \
} while (0)
/* acl.c */
@@ -3745,4 +3824,11 @@ static inline int is_fstree(u64 rootid)
return 1;
return 0;
}
+
+static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
+{
+ return signal_pending(current);
+}
+
+
#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 34836036f01..0b278b117cb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -875,7 +875,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
struct btrfs_delayed_item *delayed_item)
{
struct extent_buffer *leaf;
- struct btrfs_item *item;
char *ptr;
int ret;
@@ -886,7 +885,6 @@ static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
- item = btrfs_item_nr(leaf, path->slots[0]);
ptr = btrfs_item_ptr(leaf, path->slots[0], char);
write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
@@ -1065,32 +1063,25 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
}
}
-static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_delayed_node *node)
+static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
{
struct btrfs_key key;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
int ret;
- mutex_lock(&node->mutex);
- if (!node->inode_dirty) {
- mutex_unlock(&node->mutex);
- return 0;
- }
-
key.objectid = node->inode_id;
btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
key.offset = 0;
+
ret = btrfs_lookup_inode(trans, root, path, &key, 1);
if (ret > 0) {
btrfs_release_path(path);
- mutex_unlock(&node->mutex);
return -ENOENT;
} else if (ret < 0) {
- mutex_unlock(&node->mutex);
return ret;
}
@@ -1105,11 +1096,47 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
btrfs_delayed_inode_release_metadata(root, node);
btrfs_release_delayed_inode(node);
- mutex_unlock(&node->mutex);
return 0;
}
+static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
+{
+ int ret;
+
+ mutex_lock(&node->mutex);
+ if (!node->inode_dirty) {
+ mutex_unlock(&node->mutex);
+ return 0;
+ }
+
+ ret = __btrfs_update_delayed_inode(trans, root, path, node);
+ mutex_unlock(&node->mutex);
+ return ret;
+}
+
+static inline int
+__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct btrfs_path *path,
+ struct btrfs_delayed_node *node)
+{
+ int ret;
+
+ ret = btrfs_insert_delayed_items(trans, path, node->root, node);
+ if (ret)
+ return ret;
+
+ ret = btrfs_delete_delayed_items(trans, path, node->root, node);
+ if (ret)
+ return ret;
+
+ ret = btrfs_update_delayed_inode(trans, node->root, path, node);
+ return ret;
+}
+
/*
* Called when committing the transaction.
* Returns 0 on success.
@@ -1119,7 +1146,6 @@ static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int nr)
{
- struct btrfs_root *curr_root = root;
struct btrfs_delayed_root *delayed_root;
struct btrfs_delayed_node *curr_node, *prev_node;
struct btrfs_path *path;
@@ -1142,15 +1168,8 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
curr_node = btrfs_first_delayed_node(delayed_root);
while (curr_node && (!count || (count && nr--))) {
- curr_root = curr_node->root;
- ret = btrfs_insert_delayed_items(trans, path, curr_root,
- curr_node);
- if (!ret)
- ret = btrfs_delete_delayed_items(trans, path,
- curr_root, curr_node);
- if (!ret)
- ret = btrfs_update_delayed_inode(trans, curr_root,
- path, curr_node);
+ ret = __btrfs_commit_inode_delayed_items(trans, path,
+ curr_node);
if (ret) {
btrfs_release_delayed_node(curr_node);
curr_node = NULL;
@@ -1183,51 +1202,93 @@ int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
return __btrfs_run_delayed_items(trans, root, nr);
}
-static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
- struct btrfs_delayed_node *node)
+int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
+ struct inode *inode)
{
+ struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
struct btrfs_path *path;
struct btrfs_block_rsv *block_rsv;
int ret;
+ if (!delayed_node)
+ return 0;
+
+ mutex_lock(&delayed_node->mutex);
+ if (!delayed_node->count) {
+ mutex_unlock(&delayed_node->mutex);
+ btrfs_release_delayed_node(delayed_node);
+ return 0;
+ }
+ mutex_unlock(&delayed_node->mutex);
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
block_rsv = trans->block_rsv;
- trans->block_rsv = &node->root->fs_info->delayed_block_rsv;
+ trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
- ret = btrfs_insert_delayed_items(trans, path, node->root, node);
- if (!ret)
- ret = btrfs_delete_delayed_items(trans, path, node->root, node);
- if (!ret)
- ret = btrfs_update_delayed_inode(trans, node->root, path, node);
- btrfs_free_path(path);
+ ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
+ btrfs_release_delayed_node(delayed_node);
+ btrfs_free_path(path);
trans->block_rsv = block_rsv;
+
return ret;
}
-int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
- struct inode *inode)
+int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
+ struct btrfs_trans_handle *trans;
struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
+ struct btrfs_path *path;
+ struct btrfs_block_rsv *block_rsv;
int ret;
if (!delayed_node)
return 0;
mutex_lock(&delayed_node->mutex);
- if (!delayed_node->count) {
+ if (!delayed_node->inode_dirty) {
mutex_unlock(&delayed_node->mutex);
btrfs_release_delayed_node(delayed_node);
return 0;
}
mutex_unlock(&delayed_node->mutex);
- ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
+ trans = btrfs_join_transaction(delayed_node->root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+
+ path = btrfs_alloc_path();
+ if (!path) {
+ ret = -ENOMEM;
+ goto trans_out;
+ }
+ path->leave_spinning = 1;
+
+ block_rsv = trans->block_rsv;
+ trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;
+
+ mutex_lock(&delayed_node->mutex);
+ if (delayed_node->inode_dirty)
+ ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
+ path, delayed_node);
+ else
+ ret = 0;
+ mutex_unlock(&delayed_node->mutex);
+
+ btrfs_free_path(path);
+ trans->block_rsv = block_rsv;
+trans_out:
+ btrfs_end_transaction(trans, delayed_node->root);
+ btrfs_btree_balance_dirty(delayed_node->root);
+out:
btrfs_release_delayed_node(delayed_node);
+
return ret;
}
@@ -1258,7 +1319,6 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
struct btrfs_root *root;
struct btrfs_block_rsv *block_rsv;
int need_requeue = 0;
- int ret;
async_node = container_of(work, struct btrfs_async_delayed_node, work);
@@ -1277,14 +1337,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
block_rsv = trans->block_rsv;
trans->block_rsv = &root->fs_info->delayed_block_rsv;
- ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
- if (!ret)
- ret = btrfs_delete_delayed_items(trans, path, root,
- delayed_node);
-
- if (!ret)
- btrfs_update_delayed_inode(trans, root, path, delayed_node);
-
+ __btrfs_commit_inode_delayed_items(trans, path, delayed_node);
/*
* Maybe new delayed items have been inserted, so we need requeue
* the work. Besides that, we must dequeue the empty delayed nodes
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 4f808e1baee..78b6ad0fc66 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -117,6 +117,7 @@ int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
/* Used for evicting the inode. */
void btrfs_remove_delayed_node(struct inode *inode);
void btrfs_kill_delayed_inode_items(struct inode *inode);
+int btrfs_commit_inode_delayed_inode(struct inode *inode);
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ae941177339..b7a0641ead7 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -23,6 +23,10 @@
#include "delayed-ref.h"
#include "transaction.h"
+struct kmem_cache *btrfs_delayed_ref_head_cachep;
+struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+struct kmem_cache *btrfs_delayed_data_ref_cachep;
+struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
* delayed back reference update tracking. For subvolume trees
* we queue up extent allocations and backref maintenance for
@@ -422,6 +426,14 @@ again:
return 1;
}
+void btrfs_release_ref_cluster(struct list_head *cluster)
+{
+ struct list_head *pos, *q;
+
+ list_for_each_safe(pos, q, cluster)
+ list_del_init(pos);
+}
+
/*
* helper function to update an extent delayed ref in the
* rbtree. existing and update must both have the same
@@ -511,7 +523,7 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
ref->extent_op->flags_to_set;
existing_ref->extent_op->update_flags = 1;
}
- kfree(ref->extent_op);
+ btrfs_free_delayed_extent_op(ref->extent_op);
}
}
/*
@@ -592,7 +604,7 @@ static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(head_ref);
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
} else {
delayed_refs->num_heads++;
delayed_refs->num_heads_ready++;
@@ -653,7 +665,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(full_ref);
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
} else {
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
@@ -714,7 +726,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
* we've updated the existing ref, free the newly
* allocated ref
*/
- kfree(full_ref);
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
} else {
delayed_refs->num_entries++;
trans->delayed_ref_updates++;
@@ -738,13 +750,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs;
BUG_ON(extent_op && extent_op->is_data);
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
return -ENOMEM;
}
@@ -786,13 +798,13 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs;
BUG_ON(extent_op && !extent_op->is_data);
- ref = kmalloc(sizeof(*ref), GFP_NOFS);
+ ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
if (!ref)
return -ENOMEM;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref) {
- kfree(ref);
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
return -ENOMEM;
}
@@ -826,7 +838,7 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head_ref;
struct btrfs_delayed_ref_root *delayed_refs;
- head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
+ head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
if (!head_ref)
return -ENOMEM;
@@ -860,3 +872,51 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
return btrfs_delayed_node_to_head(ref);
return NULL;
}
+
+void btrfs_delayed_ref_exit(void)
+{
+ if (btrfs_delayed_ref_head_cachep)
+ kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
+ if (btrfs_delayed_tree_ref_cachep)
+ kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
+ if (btrfs_delayed_data_ref_cachep)
+ kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
+ if (btrfs_delayed_extent_op_cachep)
+ kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
+}
+
+int btrfs_delayed_ref_init(void)
+{
+ btrfs_delayed_ref_head_cachep = kmem_cache_create(
+ "btrfs_delayed_ref_head",
+ sizeof(struct btrfs_delayed_ref_head), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_ref_head_cachep)
+ goto fail;
+
+ btrfs_delayed_tree_ref_cachep = kmem_cache_create(
+ "btrfs_delayed_tree_ref",
+ sizeof(struct btrfs_delayed_tree_ref), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_tree_ref_cachep)
+ goto fail;
+
+ btrfs_delayed_data_ref_cachep = kmem_cache_create(
+ "btrfs_delayed_data_ref",
+ sizeof(struct btrfs_delayed_data_ref), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_data_ref_cachep)
+ goto fail;
+
+ btrfs_delayed_extent_op_cachep = kmem_cache_create(
+ "btrfs_delayed_extent_op",
+ sizeof(struct btrfs_delayed_extent_op), 0,
+ SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ if (!btrfs_delayed_extent_op_cachep)
+ goto fail;
+
+ return 0;
+fail:
+ btrfs_delayed_ref_exit();
+ return -ENOMEM;
+}
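The init/exit pair above follows the usual slab-cache setup pattern: create every cache up front and, on any failure, tear down whatever was already built. As a rough sketch of how a caller is expected to wire this up (the real call sites live in the btrfs module init/exit paths, which this hunk does not show; the example_ names below are hypothetical):

static int __init example_btrfs_module_init(void)
{
	int err;

	err = btrfs_delayed_ref_init();	/* creates the four caches above */
	if (err)
		return err;		/* _init() already freed anything it had created */

	/* ... create the remaining btrfs caches, register the filesystem ... */
	return 0;
}

static void __exit example_btrfs_module_exit(void)
{
	btrfs_delayed_ref_exit();	/* kmem_cache_destroy() on each cache */
}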
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index c9d703693df..f75fcaf79ae 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -132,6 +132,15 @@ struct btrfs_delayed_ref_root {
unsigned long num_heads_ready;
/*
+ * bumped when someone is making progress on the delayed
+ * refs, so that other procs know they are just adding to
+ * contention instead of helping
+ */
+ atomic_t procs_running_refs;
+ atomic_t ref_seq;
+ wait_queue_head_t wait;
+
+ /*
* set when the tree is flushing before a transaction commit,
* used by the throttling code to decide if new updates need
* to be run right away
@@ -141,12 +150,47 @@ struct btrfs_delayed_ref_root {
u64 run_delayed_start;
};
+extern struct kmem_cache *btrfs_delayed_ref_head_cachep;
+extern struct kmem_cache *btrfs_delayed_tree_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_data_ref_cachep;
+extern struct kmem_cache *btrfs_delayed_extent_op_cachep;
+
+int btrfs_delayed_ref_init(void);
+void btrfs_delayed_ref_exit(void);
+
+static inline struct btrfs_delayed_extent_op *
+btrfs_alloc_delayed_extent_op(void)
+{
+ return kmem_cache_alloc(btrfs_delayed_extent_op_cachep, GFP_NOFS);
+}
+
+static inline void
+btrfs_free_delayed_extent_op(struct btrfs_delayed_extent_op *op)
+{
+ if (op)
+ kmem_cache_free(btrfs_delayed_extent_op_cachep, op);
+}
+
static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
{
WARN_ON(atomic_read(&ref->refs) == 0);
if (atomic_dec_and_test(&ref->refs)) {
WARN_ON(ref->in_tree);
- kfree(ref);
+ switch (ref->type) {
+ case BTRFS_TREE_BLOCK_REF_KEY:
+ case BTRFS_SHARED_BLOCK_REF_KEY:
+ kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
+ break;
+ case BTRFS_EXTENT_DATA_REF_KEY:
+ case BTRFS_SHARED_DATA_REF_KEY:
+ kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
+ break;
+ case 0:
+ kmem_cache_free(btrfs_delayed_ref_head_cachep, ref);
+ break;
+ default:
+ BUG();
+ }
}
}
@@ -176,8 +220,14 @@ struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr);
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
struct btrfs_delayed_ref_head *head);
+static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)
+{
+ mutex_unlock(&head->mutex);
+}
+
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
struct list_head *cluster, u64 search_start);
+void btrfs_release_ref_cluster(struct list_head *cluster);
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_root *delayed_refs,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 66dbc8dbddf..7ba7b3900cb 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -465,7 +465,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
* flush all outstanding I/O and inode extent mappings before the
* copy operation is declared as being finished
*/
- btrfs_start_delalloc_inodes(root, 0);
+ ret = btrfs_start_delalloc_inodes(root, 0);
+ if (ret) {
+ mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
+ return ret;
+ }
btrfs_wait_ordered_extents(root, 0);
trans = btrfs_start_transaction(root, 0);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a8f652dc940..02369a3c162 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -46,6 +46,7 @@
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
+#include "raid56.h"
#ifdef CONFIG_X86
#include <asm/cpufeature.h>
@@ -56,7 +57,8 @@ static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
int read_only);
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+ struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
struct btrfs_root *root);
@@ -420,7 +422,7 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root,
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
struct extent_io_tree *tree;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 found_start;
struct extent_buffer *eb;
@@ -639,8 +641,15 @@ err:
btree_readahead_hook(root, eb, eb->start, ret);
}
- if (ret)
+ if (ret) {
+ /*
+ * our io error hook is going to dec the io pages
+ * again, we have to make sure it has something
+ * to decrement
+ */
+ atomic_inc(&eb->io_pages);
clear_extent_buffer_uptodate(eb);
+ }
free_extent_buffer(eb);
out:
return ret;
@@ -654,6 +663,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror)
eb = (struct extent_buffer *)page->private;
set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
eb->read_mirror = failed_mirror;
+ atomic_dec(&eb->io_pages);
if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
btree_readahead_hook(root, eb, eb->start, -EIO);
return -EIO; /* we fixed nothing */
@@ -670,17 +680,23 @@ static void end_workqueue_bio(struct bio *bio, int err)
end_io_wq->work.flags = 0;
if (bio->bi_rw & REQ_WRITE) {
- if (end_io_wq->metadata == 1)
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA)
btrfs_queue_worker(&fs_info->endio_meta_write_workers,
&end_io_wq->work);
- else if (end_io_wq->metadata == 2)
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE)
btrfs_queue_worker(&fs_info->endio_freespace_worker,
&end_io_wq->work);
+ else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+ btrfs_queue_worker(&fs_info->endio_raid56_workers,
+ &end_io_wq->work);
else
btrfs_queue_worker(&fs_info->endio_write_workers,
&end_io_wq->work);
} else {
- if (end_io_wq->metadata)
+ if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56)
+ btrfs_queue_worker(&fs_info->endio_raid56_workers,
+ &end_io_wq->work);
+ else if (end_io_wq->metadata)
btrfs_queue_worker(&fs_info->endio_meta_workers,
&end_io_wq->work);
else
@@ -695,6 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err)
* 0 - if data
* 1 - if normal metadata
* 2 - if writing to the free space cache area
+ * 3 - raid parity work
*/
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
int metadata)
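The end-io dispatch above keys off small integers; this patch adds named constants for them (see the BTRFS_WQ_ENDIO_* enum added to disk-io.h further down). A submitter of RAID5/6 parity I/O would, roughly, do the following (sketch only; the actual RAID5/6 submit paths live in raid56.c, not shown here):

static int example_submit_raid56_bio(struct btrfs_fs_info *fs_info,
				     struct bio *bio)
{
	/* "3 - raid parity work" above corresponds to BTRFS_WQ_ENDIO_RAID56 */
	return btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
}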
@@ -946,18 +963,20 @@ static int btree_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct extent_io_tree *tree;
+ struct btrfs_fs_info *fs_info;
+ int ret;
+
tree = &BTRFS_I(mapping->host)->io_tree;
if (wbc->sync_mode == WB_SYNC_NONE) {
- struct btrfs_root *root = BTRFS_I(mapping->host)->root;
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
if (wbc->for_kupdate)
return 0;
+ fs_info = BTRFS_I(mapping->host)->root->fs_info;
/* this is a bit racy, but that's ok */
- num_dirty = root->fs_info->dirty_metadata_bytes;
- if (num_dirty < thresh)
+ ret = percpu_counter_compare(&fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH);
+ if (ret < 0)
return 0;
}
return btree_write_cache_pages(mapping, wbc);
@@ -1125,24 +1144,16 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct extent_buffer *buf)
{
+ struct btrfs_fs_info *fs_info = root->fs_info;
+
if (btrfs_header_generation(buf) ==
- root->fs_info->running_transaction->transid) {
+ fs_info->running_transaction->transid) {
btrfs_assert_tree_locked(buf);
if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
- spin_lock(&root->fs_info->delalloc_lock);
- if (root->fs_info->dirty_metadata_bytes >= buf->len)
- root->fs_info->dirty_metadata_bytes -= buf->len;
- else {
- spin_unlock(&root->fs_info->delalloc_lock);
- btrfs_panic(root->fs_info, -EOVERFLOW,
- "Can't clear %lu bytes from "
- " dirty_mdatadata_bytes (%llu)",
- buf->len,
- root->fs_info->dirty_metadata_bytes);
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
+ __percpu_counter_add(&fs_info->dirty_metadata_bytes,
+ -buf->len,
+ fs_info->dirty_metadata_batch);
/* ugh, clear_extent_buffer_dirty needs to lock the page */
btrfs_set_lock_blocking(buf);
clear_extent_buffer_dirty(buf);
@@ -1178,9 +1189,13 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
INIT_LIST_HEAD(&root->dirty_list);
INIT_LIST_HEAD(&root->root_list);
+ INIT_LIST_HEAD(&root->logged_list[0]);
+ INIT_LIST_HEAD(&root->logged_list[1]);
spin_lock_init(&root->orphan_lock);
spin_lock_init(&root->inode_lock);
spin_lock_init(&root->accounting_lock);
+ spin_lock_init(&root->log_extents_lock[0]);
+ spin_lock_init(&root->log_extents_lock[1]);
mutex_init(&root->objectid_mutex);
mutex_init(&root->log_mutex);
init_waitqueue_head(&root->log_writer_wait);
@@ -2004,10 +2019,24 @@ int open_ctree(struct super_block *sb,
goto fail_srcu;
}
+ ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0);
+ if (ret) {
+ err = ret;
+ goto fail_bdi;
+ }
+ fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE *
+ (1 + ilog2(nr_cpu_ids));
+
+ ret = percpu_counter_init(&fs_info->delalloc_bytes, 0);
+ if (ret) {
+ err = ret;
+ goto fail_dirty_metadata_bytes;
+ }
+
fs_info->btree_inode = new_inode(sb);
if (!fs_info->btree_inode) {
err = -ENOMEM;
- goto fail_bdi;
+ goto fail_delalloc_bytes;
}
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
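dirty_metadata_bytes (and, just below, delalloc_bytes) move from a spinlock-protected u64 to a percpu_counter so hot-path updates stay per-CPU. Condensed to a sketch, the lifecycle this patch relies on looks like this (constants illustrative; the real batch is scaled by the CPU count as above):

#include <linux/percpu_counter.h>

static int example_counter_lifecycle(void)
{
	struct percpu_counter ctr;
	s64 batch = 64 * 1024;			/* illustrative; see dirty_metadata_batch */
	int ret;

	ret = percpu_counter_init(&ctr, 0);	/* can fail: allocates per-cpu storage */
	if (ret)
		return ret;

	/* hot path: per-cpu add, folded into the shared count in batch-sized steps */
	__percpu_counter_add(&ctr, 16384, batch);

	/* cheap, approximate threshold check; > 0 means we are over the threshold */
	if (percpu_counter_compare(&ctr, 32 * 1024 * 1024) > 0)
		;	/* e.g. balance dirty pages */

	pr_info("exact value at teardown: %lld\n", percpu_counter_sum(&ctr));
	percpu_counter_destroy(&ctr);
	return 0;
}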
@@ -2017,7 +2046,6 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->dead_roots);
INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->delalloc_inodes);
- INIT_LIST_HEAD(&fs_info->ordered_operations);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
spin_lock_init(&fs_info->delalloc_lock);
spin_lock_init(&fs_info->trans_lock);
@@ -2028,6 +2056,7 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->tree_mod_seq_lock);
rwlock_init(&fs_info->tree_mod_log_lock);
mutex_init(&fs_info->reloc_mutex);
+ seqlock_init(&fs_info->profiles_lock);
init_completion(&fs_info->kobj_unregister);
INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
@@ -2126,6 +2155,7 @@ int open_ctree(struct super_block *sb,
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
+ fs_info->first_logical_byte = (u64)-1;
extent_io_tree_init(&fs_info->freed_extents[0],
fs_info->btree_inode->i_mapping);
@@ -2165,6 +2195,12 @@ int open_ctree(struct super_block *sb,
init_waitqueue_head(&fs_info->transaction_blocked_wait);
init_waitqueue_head(&fs_info->async_submit_wait);
+ ret = btrfs_alloc_stripe_hash_table(fs_info);
+ if (ret) {
+ err = ret;
+ goto fail_alloc;
+ }
+
__setup_root(4096, 4096, 4096, 4096, tree_root,
fs_info, BTRFS_ROOT_TREE_OBJECTID);
@@ -2187,7 +2223,8 @@ int open_ctree(struct super_block *sb,
goto fail_alloc;
/* check FS state, whether FS is broken. */
- fs_info->fs_state |= btrfs_super_flags(disk_super);
+ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR)
+ set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY);
if (ret) {
@@ -2261,6 +2298,8 @@ int open_ctree(struct super_block *sb,
leafsize = btrfs_super_leafsize(disk_super);
sectorsize = btrfs_super_sectorsize(disk_super);
stripesize = btrfs_super_stripesize(disk_super);
+ fs_info->dirty_metadata_batch = leafsize * (1 + ilog2(nr_cpu_ids));
+ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
/*
* mixed block groups end up with duplicate but slightly offset
@@ -2332,6 +2371,12 @@ int open_ctree(struct super_block *sb,
btrfs_init_workers(&fs_info->endio_meta_write_workers,
"endio-meta-write", fs_info->thread_pool_size,
&fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->endio_raid56_workers,
+ "endio-raid56", fs_info->thread_pool_size,
+ &fs_info->generic_worker);
+ btrfs_init_workers(&fs_info->rmw_workers,
+ "rmw", fs_info->thread_pool_size,
+ &fs_info->generic_worker);
btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
fs_info->thread_pool_size,
&fs_info->generic_worker);
@@ -2350,6 +2395,8 @@ int open_ctree(struct super_block *sb,
*/
fs_info->endio_workers.idle_thresh = 4;
fs_info->endio_meta_workers.idle_thresh = 4;
+ fs_info->endio_raid56_workers.idle_thresh = 4;
+ fs_info->rmw_workers.idle_thresh = 2;
fs_info->endio_write_workers.idle_thresh = 2;
fs_info->endio_meta_write_workers.idle_thresh = 2;
@@ -2366,6 +2413,8 @@ int open_ctree(struct super_block *sb,
ret |= btrfs_start_workers(&fs_info->fixup_workers);
ret |= btrfs_start_workers(&fs_info->endio_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+ ret |= btrfs_start_workers(&fs_info->rmw_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_raid56_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
ret |= btrfs_start_workers(&fs_info->endio_write_workers);
ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
@@ -2390,8 +2439,7 @@ int open_ctree(struct super_block *sb,
sb->s_blocksize = sectorsize;
sb->s_blocksize_bits = blksize_bits(sectorsize);
- if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
- sizeof(disk_super->magic))) {
+ if (disk_super->magic != cpu_to_le64(BTRFS_MAGIC)) {
printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
goto fail_sb_buffer;
}
@@ -2694,13 +2742,13 @@ fail_cleaner:
* kthreads
*/
filemap_write_and_wait(fs_info->btree_inode->i_mapping);
- invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_block_groups:
btrfs_free_block_groups(fs_info);
fail_tree_roots:
free_root_pointers(fs_info, 1);
+ invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
fail_sb_buffer:
btrfs_stop_workers(&fs_info->generic_worker);
@@ -2710,6 +2758,8 @@ fail_sb_buffer:
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_meta_workers);
+ btrfs_stop_workers(&fs_info->endio_raid56_workers);
+ btrfs_stop_workers(&fs_info->rmw_workers);
btrfs_stop_workers(&fs_info->endio_meta_write_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -2721,13 +2771,17 @@ fail_alloc:
fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
- invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
iput(fs_info->btree_inode);
+fail_delalloc_bytes:
+ percpu_counter_destroy(&fs_info->delalloc_bytes);
+fail_dirty_metadata_bytes:
+ percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
fail_bdi:
bdi_destroy(&fs_info->bdi);
fail_srcu:
cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
+ btrfs_free_stripe_hash_table(fs_info);
btrfs_close_devices(fs_info->fs_devices);
return err;
@@ -2795,8 +2849,7 @@ struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
super = (struct btrfs_super_block *)bh->b_data;
if (btrfs_super_bytenr(super) != bytenr ||
- strncmp((char *)(&super->magic), BTRFS_MAGIC,
- sizeof(super->magic))) {
+ super->magic != cpu_to_le64(BTRFS_MAGIC)) {
brelse(bh);
continue;
}
@@ -3076,11 +3129,16 @@ int btrfs_calc_num_tolerated_disk_barrier_failures(
((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK)
== 0)))
num_tolerated_disk_barrier_failures = 0;
- else if (num_tolerated_disk_barrier_failures > 1
- &&
- (flags & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10)))
- num_tolerated_disk_barrier_failures = 1;
+ else if (num_tolerated_disk_barrier_failures > 1) {
+ if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID10)) {
+ num_tolerated_disk_barrier_failures = 1;
+ } else if (flags &
+ BTRFS_BLOCK_GROUP_RAID6) {
+ num_tolerated_disk_barrier_failures = 2;
+ }
+ }
}
}
up_read(&sinfo->groups_sem);
@@ -3195,6 +3253,11 @@ void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
if (btrfs_root_refs(&root->root_item) == 0)
synchronize_srcu(&fs_info->subvol_srcu);
+ if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ btrfs_free_log(NULL, root);
+ btrfs_free_log_root_tree(NULL, fs_info);
+ }
+
__btrfs_remove_free_space_cache(root->free_ino_pinned);
__btrfs_remove_free_space_cache(root->free_ino_ctl);
free_fs_root(root);
@@ -3339,7 +3402,7 @@ int close_ctree(struct btrfs_root *root)
printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
}
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
btrfs_error_commit_super(root);
btrfs_put_block_group_cache(fs_info);
@@ -3352,9 +3415,9 @@ int close_ctree(struct btrfs_root *root)
btrfs_free_qgroup_config(root->fs_info);
- if (fs_info->delalloc_bytes) {
- printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
- (unsigned long long)fs_info->delalloc_bytes);
+ if (percpu_counter_sum(&fs_info->delalloc_bytes)) {
+ printk(KERN_INFO "btrfs: at unmount delalloc count %lld\n",
+ percpu_counter_sum(&fs_info->delalloc_bytes));
}
free_extent_buffer(fs_info->extent_root->node);
@@ -3384,6 +3447,8 @@ int close_ctree(struct btrfs_root *root)
btrfs_stop_workers(&fs_info->workers);
btrfs_stop_workers(&fs_info->endio_workers);
btrfs_stop_workers(&fs_info->endio_meta_workers);
+ btrfs_stop_workers(&fs_info->endio_raid56_workers);
+ btrfs_stop_workers(&fs_info->rmw_workers);
btrfs_stop_workers(&fs_info->endio_meta_write_workers);
btrfs_stop_workers(&fs_info->endio_write_workers);
btrfs_stop_workers(&fs_info->endio_freespace_worker);
@@ -3401,9 +3466,13 @@ int close_ctree(struct btrfs_root *root)
btrfs_close_devices(fs_info->fs_devices);
btrfs_mapping_tree_free(&fs_info->mapping_tree);
+ percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
+ percpu_counter_destroy(&fs_info->delalloc_bytes);
bdi_destroy(&fs_info->bdi);
cleanup_srcu_struct(&fs_info->subvol_srcu);
+ btrfs_free_stripe_hash_table(fs_info);
+
return 0;
}
@@ -3443,11 +3512,10 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
(unsigned long long)transid,
(unsigned long long)root->fs_info->generation);
was_dirty = set_extent_buffer_dirty(buf);
- if (!was_dirty) {
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->dirty_metadata_bytes += buf->len;
- spin_unlock(&root->fs_info->delalloc_lock);
- }
+ if (!was_dirty)
+ __percpu_counter_add(&root->fs_info->dirty_metadata_bytes,
+ buf->len,
+ root->fs_info->dirty_metadata_batch);
}
static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
@@ -3457,8 +3525,7 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
* looks as though older kernels can get into trouble with
* this code, they end up stuck in balance_dirty_pages forever
*/
- u64 num_dirty;
- unsigned long thresh = 32 * 1024 * 1024;
+ int ret;
if (current->flags & PF_MEMALLOC)
return;
@@ -3466,9 +3533,9 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
if (flush_delayed)
btrfs_balance_delayed_items(root);
- num_dirty = root->fs_info->dirty_metadata_bytes;
-
- if (num_dirty > thresh) {
+ ret = percpu_counter_compare(&root->fs_info->dirty_metadata_bytes,
+ BTRFS_DIRTY_METADATA_THRESH);
+ if (ret > 0) {
balance_dirty_pages_ratelimited(
root->fs_info->btree_inode->i_mapping);
}
@@ -3518,7 +3585,8 @@ void btrfs_error_commit_super(struct btrfs_root *root)
btrfs_cleanup_transaction(root);
}
-static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
+static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
+ struct btrfs_root *root)
{
struct btrfs_inode *btrfs_inode;
struct list_head splice;
@@ -3528,7 +3596,7 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
- list_splice_init(&root->fs_info->ordered_operations, &splice);
+ list_splice_init(&t->ordered_operations, &splice);
while (!list_empty(&splice)) {
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
ordered_operations);
@@ -3544,35 +3612,16 @@ static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
- struct list_head splice;
struct btrfs_ordered_extent *ordered;
- struct inode *inode;
-
- INIT_LIST_HEAD(&splice);
spin_lock(&root->fs_info->ordered_extent_lock);
-
- list_splice_init(&root->fs_info->ordered_extents, &splice);
- while (!list_empty(&splice)) {
- ordered = list_entry(splice.next, struct btrfs_ordered_extent,
- root_extent_list);
-
- list_del_init(&ordered->root_extent_list);
- atomic_inc(&ordered->refs);
-
- /* the inode may be getting freed (in sys_unlink path). */
- inode = igrab(ordered->inode);
-
- spin_unlock(&root->fs_info->ordered_extent_lock);
- if (inode)
- iput(inode);
-
- atomic_set(&ordered->refs, 1);
- btrfs_put_ordered_extent(ordered);
-
- spin_lock(&root->fs_info->ordered_extent_lock);
- }
-
+ /*
+ * This will just short circuit the ordered completion stuff which will
+ * make sure the ordered extent gets properly cleaned up.
+ */
+ list_for_each_entry(ordered, &root->fs_info->ordered_extents,
+ root_extent_list)
+ set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
spin_unlock(&root->fs_info->ordered_extent_lock);
}
@@ -3594,11 +3643,11 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
}
while ((node = rb_first(&delayed_refs->root)) != NULL) {
- ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
+ struct btrfs_delayed_ref_head *head = NULL;
+ ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
atomic_set(&ref->refs, 1);
if (btrfs_delayed_ref_is_head(ref)) {
- struct btrfs_delayed_ref_head *head;
head = btrfs_delayed_node_to_head(ref);
if (!mutex_trylock(&head->mutex)) {
@@ -3614,16 +3663,18 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
continue;
}
- kfree(head->extent_op);
+ btrfs_free_delayed_extent_op(head->extent_op);
delayed_refs->num_heads--;
if (list_empty(&head->cluster))
delayed_refs->num_heads_ready--;
list_del_init(&head->cluster);
}
+
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
-
+ if (head)
+ mutex_unlock(&head->mutex);
spin_unlock(&delayed_refs->lock);
btrfs_put_delayed_ref(ref);
@@ -3671,6 +3722,8 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
delalloc_inodes);
list_del_init(&btrfs_inode->delalloc_inodes);
+ clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &btrfs_inode->runtime_flags);
btrfs_invalidate_inodes(btrfs_inode->root);
}
@@ -3823,10 +3876,8 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
while (!list_empty(&list)) {
t = list_entry(list.next, struct btrfs_transaction, list);
- if (!t)
- break;
- btrfs_destroy_ordered_operations(root);
+ btrfs_destroy_ordered_operations(t, root);
btrfs_destroy_ordered_extents(root);
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index 305c33efb0e..034d7dc552b 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -25,6 +25,13 @@
#define BTRFS_SUPER_MIRROR_MAX 3
#define BTRFS_SUPER_MIRROR_SHIFT 12
+enum {
+ BTRFS_WQ_ENDIO_DATA = 0,
+ BTRFS_WQ_ENDIO_METADATA = 1,
+ BTRFS_WQ_ENDIO_FREE_SPACE = 2,
+ BTRFS_WQ_ENDIO_RAID56 = 3,
+};
+
static inline u64 btrfs_sb_offset(int mirror)
{
u64 start = 16 * 1024;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index cf54bdfee33..3e074dab2d5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -31,6 +31,7 @@
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
+#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "math.h"
@@ -72,8 +73,7 @@ enum {
RESERVE_ALLOC_NO_ACCOUNT = 2,
};
-static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -103,6 +103,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
u64 num_bytes, int reserve);
+static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
+ u64 num_bytes);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -162,6 +164,10 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
rb_link_node(&block_group->cache_node, parent, p);
rb_insert_color(&block_group->cache_node,
&info->block_group_cache_tree);
+
+ if (info->first_logical_byte > block_group->key.objectid)
+ info->first_logical_byte = block_group->key.objectid;
+
spin_unlock(&info->block_group_cache_lock);
return 0;
@@ -203,8 +209,11 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
break;
}
}
- if (ret)
+ if (ret) {
btrfs_get_block_group(ret);
+ if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+ info->first_logical_byte = ret->key.objectid;
+ }
spin_unlock(&info->block_group_cache_lock);
return ret;
@@ -468,8 +477,6 @@ out:
}
static int cache_block_group(struct btrfs_block_group_cache *cache,
- struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
int load_cache_only)
{
DEFINE_WAIT(wait);
@@ -527,12 +534,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->cached = BTRFS_CACHE_FAST;
spin_unlock(&cache->lock);
- /*
- * We can't do the read from on-disk cache during a commit since we need
- * to have the normal tree locking. Also if we are currently trying to
- * allocate blocks for the tree root we can't do the fast caching since
- * we likely hold important locks.
- */
if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
ret = load_free_space_cache(fs_info, cache);
@@ -1852,6 +1853,8 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
*actual_bytes = discarded_bytes;
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
return ret;
}
@@ -2143,7 +2146,6 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
node->num_bytes);
}
}
- mutex_unlock(&head->mutex);
return ret;
}
@@ -2258,7 +2260,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
* process of being added. Don't run this ref yet.
*/
list_del_init(&locked_ref->cluster);
- mutex_unlock(&locked_ref->mutex);
+ btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
@@ -2285,7 +2287,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ref = &locked_ref->node;
if (extent_op && must_insert_reserved) {
- kfree(extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
extent_op = NULL;
}
@@ -2294,28 +2296,25 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_delayed_extent_op(trans, root,
ref, extent_op);
- kfree(extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- list_del_init(&locked_ref->cluster);
- mutex_unlock(&locked_ref->mutex);
-
- printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret);
+ printk(KERN_DEBUG
+ "btrfs: run_delayed_extent_op "
+ "returned %d\n", ret);
spin_lock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(locked_ref);
return ret;
}
goto next;
}
-
- list_del_init(&locked_ref->cluster);
- locked_ref = NULL;
}
ref->in_tree = 0;
rb_erase(&ref->rb_node, &delayed_refs->root);
delayed_refs->num_entries--;
- if (locked_ref) {
+ if (!btrfs_delayed_ref_is_head(ref)) {
/*
* when we play the delayed ref, also correct the
* ref_mod on head
@@ -2337,20 +2336,29 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
ret = run_one_delayed_ref(trans, root, ref, extent_op,
must_insert_reserved);
- btrfs_put_delayed_ref(ref);
- kfree(extent_op);
- count++;
-
+ btrfs_free_delayed_extent_op(extent_op);
if (ret) {
- if (locked_ref) {
- list_del_init(&locked_ref->cluster);
- mutex_unlock(&locked_ref->mutex);
- }
- printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret);
+ btrfs_delayed_ref_unlock(locked_ref);
+ btrfs_put_delayed_ref(ref);
+ printk(KERN_DEBUG
+ "btrfs: run_one_delayed_ref returned %d\n", ret);
spin_lock(&delayed_refs->lock);
return ret;
}
+ /*
+ * If this node is a head, that means all the refs in this head
+ * have been dealt with, and we will pick the next head to deal
+ * with, so we must unlock the head and drop it from the cluster
+ * list before we release it.
+ */
+ if (btrfs_delayed_ref_is_head(ref)) {
+ list_del_init(&locked_ref->cluster);
+ btrfs_delayed_ref_unlock(locked_ref);
+ locked_ref = NULL;
+ }
+ btrfs_put_delayed_ref(ref);
+ count++;
next:
cond_resched();
spin_lock(&delayed_refs->lock);
@@ -2435,6 +2443,16 @@ int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
return ret;
}
+static int refs_newer(struct btrfs_delayed_ref_root *delayed_refs, int seq,
+ int count)
+{
+ int val = atomic_read(&delayed_refs->ref_seq);
+
+ if (val < seq || val >= seq + count)
+ return 1;
+ return 0;
+}
+
/*
* this starts processing the delayed reference count updates and
* extent insertions we have queued up so far. count can be
@@ -2469,6 +2487,44 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
INIT_LIST_HEAD(&cluster);
+ if (count == 0) {
+ count = delayed_refs->num_entries * 2;
+ run_most = 1;
+ }
+
+ if (!run_all && !run_most) {
+ int old;
+ int seq = atomic_read(&delayed_refs->ref_seq);
+
+progress:
+ old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+ if (old) {
+ DEFINE_WAIT(__wait);
+ if (delayed_refs->num_entries < 16348)
+ return 0;
+
+ prepare_to_wait(&delayed_refs->wait, &__wait,
+ TASK_UNINTERRUPTIBLE);
+
+ old = atomic_cmpxchg(&delayed_refs->procs_running_refs, 0, 1);
+ if (old) {
+ schedule();
+ finish_wait(&delayed_refs->wait, &__wait);
+
+ if (!refs_newer(delayed_refs, seq, 256))
+ goto progress;
+ else
+ return 0;
+ } else {
+ finish_wait(&delayed_refs->wait, &__wait);
+ goto again;
+ }
+ }
+
+ } else {
+ atomic_inc(&delayed_refs->procs_running_refs);
+ }
+
again:
loops = 0;
spin_lock(&delayed_refs->lock);
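The cmpxchg/waitqueue logic added above is a "one runner at a time" throttle: the first caller flips procs_running_refs from 0 to 1 and does the work, later callers sleep on delayed_refs->wait and bail out once ref_seq shows enough progress was made on their behalf. Stripped to a skeleton (illustrative only; the return convention is made up):

static int example_run_or_wait(struct btrfs_delayed_ref_root *dr, int seq)
{
	DEFINE_WAIT(wait);

	/* try to become the single running thread: 0 -> 1 */
	if (atomic_cmpxchg(&dr->procs_running_refs, 0, 1) == 0)
		return 1;			/* we are the runner */

	prepare_to_wait(&dr->wait, &wait, TASK_UNINTERRUPTIBLE);
	if (atomic_cmpxchg(&dr->procs_running_refs, 0, 1) == 0) {
		finish_wait(&dr->wait, &wait);
		return 1;			/* runner slot freed up; take it */
	}
	schedule();				/* wait for the runner to wake dr->wait */
	finish_wait(&dr->wait, &wait);

	/* if the runner advanced ref_seq far enough, there is nothing left for us */
	return refs_newer(dr, seq, 256) ? 0 : -EAGAIN;	/* -EAGAIN: caller retries */
}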
@@ -2477,10 +2533,6 @@ again:
delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
#endif
- if (count == 0) {
- count = delayed_refs->num_entries * 2;
- run_most = 1;
- }
while (1) {
if (!(run_all || run_most) &&
delayed_refs->num_heads_ready < 64)
@@ -2500,11 +2552,15 @@ again:
ret = run_clustered_refs(trans, root, &cluster);
if (ret < 0) {
+ btrfs_release_ref_cluster(&cluster);
spin_unlock(&delayed_refs->lock);
btrfs_abort_transaction(trans, root, ret);
+ atomic_dec(&delayed_refs->procs_running_refs);
return ret;
}
+ atomic_add(ret, &delayed_refs->ref_seq);
+
count -= min_t(unsigned long, ret, count);
if (count == 0)
@@ -2573,6 +2629,11 @@ again:
goto again;
}
out:
+ atomic_dec(&delayed_refs->procs_running_refs);
+ smp_mb();
+ if (waitqueue_active(&delayed_refs->wait))
+ wake_up(&delayed_refs->wait);
+
spin_unlock(&delayed_refs->lock);
assert_qgroups_uptodate(trans);
return 0;
@@ -2586,7 +2647,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
struct btrfs_delayed_extent_op *extent_op;
int ret;
- extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+ extent_op = btrfs_alloc_delayed_extent_op();
if (!extent_op)
return -ENOMEM;
@@ -2598,7 +2659,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
num_bytes, extent_op);
if (ret)
- kfree(extent_op);
+ btrfs_free_delayed_extent_op(extent_op);
return ret;
}
@@ -3223,12 +3284,14 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
u64 extra_flags = chunk_to_extended(flags) &
BTRFS_EXTENDED_PROFILE_MASK;
+ write_seqlock(&fs_info->profiles_lock);
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits |= extra_flags;
if (flags & BTRFS_BLOCK_GROUP_METADATA)
fs_info->avail_metadata_alloc_bits |= extra_flags;
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
fs_info->avail_system_alloc_bits |= extra_flags;
+ write_sequnlock(&fs_info->profiles_lock);
}
/*
@@ -3276,6 +3339,7 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
u64 num_devices = root->fs_info->fs_devices->rw_devices +
root->fs_info->fs_devices->missing_devices;
u64 target;
+ u64 tmp;
/*
* see if restripe for this chunk_type is in progress, if so
@@ -3292,40 +3356,48 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
}
spin_unlock(&root->fs_info->balance_lock);
+ /* First, mask out the RAID levels which aren't possible */
if (num_devices == 1)
- flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
+ flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID5);
+ if (num_devices < 3)
+ flags &= ~BTRFS_BLOCK_GROUP_RAID6;
if (num_devices < 4)
flags &= ~BTRFS_BLOCK_GROUP_RAID10;
- if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
- (flags & (BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10))) {
- flags &= ~BTRFS_BLOCK_GROUP_DUP;
- }
-
- if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
- (flags & BTRFS_BLOCK_GROUP_RAID10)) {
- flags &= ~BTRFS_BLOCK_GROUP_RAID1;
- }
+ tmp = flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6 | BTRFS_BLOCK_GROUP_RAID10);
+ flags &= ~tmp;
- if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
- ((flags & BTRFS_BLOCK_GROUP_RAID1) |
- (flags & BTRFS_BLOCK_GROUP_RAID10) |
- (flags & BTRFS_BLOCK_GROUP_DUP))) {
- flags &= ~BTRFS_BLOCK_GROUP_RAID0;
- }
+ if (tmp & BTRFS_BLOCK_GROUP_RAID6)
+ tmp = BTRFS_BLOCK_GROUP_RAID6;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID5)
+ tmp = BTRFS_BLOCK_GROUP_RAID5;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID10)
+ tmp = BTRFS_BLOCK_GROUP_RAID10;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID1)
+ tmp = BTRFS_BLOCK_GROUP_RAID1;
+ else if (tmp & BTRFS_BLOCK_GROUP_RAID0)
+ tmp = BTRFS_BLOCK_GROUP_RAID0;
- return extended_to_chunk(flags);
+ return extended_to_chunk(flags | tmp);
}
static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
{
- if (flags & BTRFS_BLOCK_GROUP_DATA)
- flags |= root->fs_info->avail_data_alloc_bits;
- else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
- flags |= root->fs_info->avail_system_alloc_bits;
- else if (flags & BTRFS_BLOCK_GROUP_METADATA)
- flags |= root->fs_info->avail_metadata_alloc_bits;
+ unsigned seq;
+
+ do {
+ seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+ if (flags & BTRFS_BLOCK_GROUP_DATA)
+ flags |= root->fs_info->avail_data_alloc_bits;
+ else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+ flags |= root->fs_info->avail_system_alloc_bits;
+ else if (flags & BTRFS_BLOCK_GROUP_METADATA)
+ flags |= root->fs_info->avail_metadata_alloc_bits;
+ } while (read_seqretry(&root->fs_info->profiles_lock, seq));
return btrfs_reduce_alloc_profile(root, flags);
}
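profiles_lock is a seqlock: writers (the set/clear of the avail_*_alloc_bits words) are serialized, while readers simply retry if a write raced with them, so get_alloc_profile() no longer sees a torn mix of old and new bits. Reduced to its read/write pairing (sketch only):

static u64 example_read_data_bits(struct btrfs_fs_info *fs_info)
{
	u64 bits;
	unsigned int seq;

	do {
		seq = read_seqbegin(&fs_info->profiles_lock);
		bits = fs_info->avail_data_alloc_bits;	/* lockless read */
	} while (read_seqretry(&fs_info->profiles_lock, seq));	/* retry on writer race */

	return bits;
}

static void example_add_data_bits(struct btrfs_fs_info *fs_info, u64 extra)
{
	write_seqlock(&fs_info->profiles_lock);		/* writers serialize here */
	fs_info->avail_data_alloc_bits |= extra;
	write_sequnlock(&fs_info->profiles_lock);
}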
@@ -3333,6 +3405,7 @@ static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
{
u64 flags;
+ u64 ret;
if (data)
flags = BTRFS_BLOCK_GROUP_DATA;
@@ -3341,7 +3414,8 @@ u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
else
flags = BTRFS_BLOCK_GROUP_METADATA;
- return get_alloc_profile(root, flags);
+ ret = get_alloc_profile(root, flags);
+ return ret;
}
/*
@@ -3357,7 +3431,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
int ret = 0, committed = 0, alloc_chunk = 1;
/* make sure bytes are sectorsize aligned */
- bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+ bytes = ALIGN(bytes, root->sectorsize);
if (root == root->fs_info->tree_root ||
BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) {
@@ -3452,7 +3526,7 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
struct btrfs_space_info *data_sinfo;
/* make sure bytes are sectorsize aligned */
- bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+ bytes = ALIGN(bytes, root->sectorsize);
data_sinfo = root->fs_info->data_sinfo;
spin_lock(&data_sinfo->lock);
@@ -3516,8 +3590,10 @@ static u64 get_system_chunk_thresh(struct btrfs_root *root, u64 type)
{
u64 num_dev;
- if (type & BTRFS_BLOCK_GROUP_RAID10 ||
- type & BTRFS_BLOCK_GROUP_RAID0)
+ if (type & (BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6))
num_dev = root->fs_info->fs_devices->rw_devices;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_dev = 2;
@@ -3564,6 +3640,10 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
int wait_for_alloc = 0;
int ret = 0;
+ /* Don't re-enter if we're already allocating a chunk */
+ if (trans->allocating_chunk)
+ return -ENOSPC;
+
space_info = __find_space_info(extent_root->fs_info, flags);
if (!space_info) {
ret = update_space_info(extent_root->fs_info, flags,
@@ -3606,6 +3686,8 @@ again:
goto again;
}
+ trans->allocating_chunk = true;
+
/*
* If we have mixed data/metadata chunks we want to make sure we keep
* allocating mixed chunks instead of individual chunks.
@@ -3632,19 +3714,20 @@ again:
check_system_chunk(trans, extent_root, flags);
ret = btrfs_alloc_chunk(trans, extent_root, flags);
- if (ret < 0 && ret != -ENOSPC)
- goto out;
+ trans->allocating_chunk = false;
spin_lock(&space_info->lock);
+ if (ret < 0 && ret != -ENOSPC)
+ goto out;
if (ret)
space_info->full = 1;
else
ret = 1;
space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
+out:
space_info->chunk_alloc = 0;
spin_unlock(&space_info->lock);
-out:
mutex_unlock(&fs_info->chunk_mutex);
return ret;
}
@@ -3653,13 +3736,31 @@ static int can_overcommit(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
+ struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
u64 profile = btrfs_get_alloc_profile(root, 0);
+ u64 rsv_size = 0;
u64 avail;
u64 used;
+ u64 to_add;
used = space_info->bytes_used + space_info->bytes_reserved +
- space_info->bytes_pinned + space_info->bytes_readonly +
- space_info->bytes_may_use;
+ space_info->bytes_pinned + space_info->bytes_readonly;
+
+ spin_lock(&global_rsv->lock);
+ rsv_size = global_rsv->size;
+ spin_unlock(&global_rsv->lock);
+
+ /*
+ * We only want to allow over committing if we have lots of actual space
+ * free, but if we don't have enough space to handle the global reserve
+ * space then we could end up having a real enospc problem when trying
+ * to allocate a chunk or some other such important allocation.
+ */
+ rsv_size <<= 1;
+ if (used + rsv_size >= space_info->total_bytes)
+ return 0;
+
+ used += space_info->bytes_may_use;
spin_lock(&root->fs_info->free_chunk_lock);
avail = root->fs_info->free_chunk_space;
@@ -3667,28 +3768,60 @@ static int can_overcommit(struct btrfs_root *root,
/*
* If we have dup, raid1 or raid10 then only half of the free
- * space is actually useable.
+ * space is actually useable. For raid56, the space info used
+ * doesn't include the parity drive, so we don't have to
+ * change the math
*/
if (profile & (BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10))
avail >>= 1;
+ to_add = space_info->total_bytes;
+
/*
* If we aren't flushing all things, let us overcommit up to
* 1/2th of the space. If we can flush, don't let us overcommit
* too much, let it overcommit up to 1/8 of the space.
*/
if (flush == BTRFS_RESERVE_FLUSH_ALL)
- avail >>= 3;
+ to_add >>= 3;
else
- avail >>= 1;
+ to_add >>= 1;
- if (used + bytes < space_info->total_bytes + avail)
+ /*
+ * Limit the overcommit to the amount of free space we could possibly
+ * allocate for chunks.
+ */
+ to_add = min(avail, to_add);
+
+ if (used + bytes < space_info->total_bytes + to_add)
return 1;
return 0;
}
+void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
+ unsigned long nr_pages)
+{
+ struct super_block *sb = root->fs_info->sb;
+ int started;
+
+ /* If we cannot start writeback, just sync all the delalloc files. */
+ started = try_to_writeback_inodes_sb_nr(sb, nr_pages,
+ WB_REASON_FS_FREE_SPACE);
+ if (!started) {
+ /*
+ * We needn't worry about the filesystem going from r/w to r/o even
+ * though we don't acquire the ->s_umount mutex, because the
+ * filesystem should guarantee that the delalloc inodes list is
+ * empty after the filesystem becomes read-only (all dirty pages
+ * are written to the disk).
+ */
+ btrfs_start_delalloc_inodes(root, 0);
+ btrfs_wait_ordered_extents(root, 0);
+ }
+}
+
/*
* shrink metadata reservation for delalloc
*/
@@ -3710,7 +3843,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
space_info = block_rsv->space_info;
smp_mb();
- delalloc_bytes = root->fs_info->delalloc_bytes;
+ delalloc_bytes = percpu_counter_sum_positive(
+ &root->fs_info->delalloc_bytes);
if (delalloc_bytes == 0) {
if (trans)
return;
@@ -3721,10 +3855,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
while (delalloc_bytes && loops < 3) {
max_reclaim = min(delalloc_bytes, to_reclaim);
nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
- try_to_writeback_inodes_sb_nr(root->fs_info->sb,
- nr_pages,
- WB_REASON_FS_FREE_SPACE);
-
+ btrfs_writeback_inodes_sb_nr(root, nr_pages);
/*
* We need to wait for the async pages to actually start before
* we do anything.
@@ -3752,7 +3883,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
break;
}
smp_mb();
- delalloc_bytes = root->fs_info->delalloc_bytes;
+ delalloc_bytes = percpu_counter_sum_positive(
+ &root->fs_info->delalloc_bytes);
}
}
@@ -4016,6 +4148,15 @@ again:
goto again;
out:
+ if (ret == -ENOSPC &&
+ unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
+ struct btrfs_block_rsv *global_rsv =
+ &root->fs_info->global_block_rsv;
+
+ if (block_rsv != global_rsv &&
+ !block_rsv_use_bytes(global_rsv, orig_bytes))
+ ret = 0;
+ }
if (flushing) {
spin_lock(&space_info->lock);
space_info->flush = 0;
@@ -4402,19 +4543,60 @@ void btrfs_orphan_release_metadata(struct inode *inode)
btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
}
-int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
- struct btrfs_pending_snapshot *pending)
+/*
+ * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
+ * root: the root of the parent directory
+ * rsv: block reservation
+ * items: the number of items that we need to reserve space for
+ * qgroup_reserved: used to return the reserved size in qgroup
+ *
+ * This function is used to reserve the space for snapshot/subvolume
+ * creation and deletion. Those operations are different from the
+ * common file/directory operations: they change two fs/file trees
+ * and the root tree, and the number of items that the qgroup
+ * reserves differs from the free space reservation. So we cannot
+ * use the space reservation mechanism in start_transaction().
+ */
+int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ int items,
+ u64 *qgroup_reserved)
{
- struct btrfs_root *root = pending->root;
- struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
- struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
- /*
- * two for root back/forward refs, two for directory entries,
- * one for root of the snapshot and one for parent inode.
- */
- u64 num_bytes = btrfs_calc_trans_metadata_size(root, 6);
- dst_rsv->space_info = src_rsv->space_info;
- return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
+ u64 num_bytes;
+ int ret;
+
+ if (root->fs_info->quota_enabled) {
+ /* One for parent inode, two for dir entries */
+ num_bytes = 3 * root->leafsize;
+ ret = btrfs_qgroup_reserve(root, num_bytes);
+ if (ret)
+ return ret;
+ } else {
+ num_bytes = 0;
+ }
+
+ *qgroup_reserved = num_bytes;
+
+ num_bytes = btrfs_calc_trans_metadata_size(root, items);
+ rsv->space_info = __find_space_info(root->fs_info,
+ BTRFS_BLOCK_GROUP_METADATA);
+ ret = btrfs_block_rsv_add(root, rsv, num_bytes,
+ BTRFS_RESERVE_FLUSH_ALL);
+ if (ret) {
+ if (*qgroup_reserved)
+ btrfs_qgroup_free(root, *qgroup_reserved);
+ }
+
+ return ret;
+}
+
+void btrfs_subvolume_release_metadata(struct btrfs_root *root,
+ struct btrfs_block_rsv *rsv,
+ u64 qgroup_reserved)
+{
+ btrfs_block_rsv_release(root, rsv, (u64)-1);
+ if (qgroup_reserved)
+ btrfs_qgroup_free(root, qgroup_reserved);
}
/**
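btrfs_subvolume_reserve_metadata()/..._release_metadata() are meant to bracket a snapshot or subvolume operation; a hypothetical caller might look like this (item count and names are illustrative; the real users are the snapshot/subvolume ioctls, not shown here):

static int example_create_subvol(struct btrfs_root *root,
				 struct btrfs_block_rsv *rsv)
{
	u64 qgroup_reserved = 0;
	int ret;

	/* 8 items: dir entries/indexes, root item and refs, parent inode, ... */
	ret = btrfs_subvolume_reserve_metadata(root, rsv, 8, &qgroup_reserved);
	if (ret)
		return ret;

	/* ... start a transaction, insert the new root, update the parent dir ... */

	btrfs_subvolume_release_metadata(root, rsv, qgroup_reserved);
	return 0;
}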
@@ -4522,6 +4704,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
bool delalloc_lock = true;
+ u64 to_free = 0;
+ unsigned dropped;
/* If we are a free space inode we need to not flush since we will be in
* the middle of a transaction commit. We also don't need the delalloc
@@ -4565,54 +4749,19 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
- if (root->fs_info->quota_enabled)
+ if (root->fs_info->quota_enabled) {
ret = btrfs_qgroup_reserve(root, num_bytes +
nr_extents * root->leafsize);
+ if (ret)
+ goto out_fail;
+ }
- /*
- * ret != 0 here means the qgroup reservation failed, we go straight to
- * the shared error handling then.
- */
- if (ret == 0)
- ret = reserve_metadata_bytes(root, block_rsv,
- to_reserve, flush);
-
- if (ret) {
- u64 to_free = 0;
- unsigned dropped;
-
- spin_lock(&BTRFS_I(inode)->lock);
- dropped = drop_outstanding_extent(inode);
- /*
- * If the inodes csum_bytes is the same as the original
- * csum_bytes then we know we haven't raced with any free()ers
- * so we can just reduce our inodes csum bytes and carry on.
- * Otherwise we have to do the normal free thing to account for
- * the case that the free side didn't free up its reserve
- * because of this outstanding reservation.
- */
- if (BTRFS_I(inode)->csum_bytes == csum_bytes)
- calc_csum_metadata_size(inode, num_bytes, 0);
- else
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
- spin_unlock(&BTRFS_I(inode)->lock);
- if (dropped)
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
- if (to_free) {
- btrfs_block_rsv_release(root, block_rsv, to_free);
- trace_btrfs_space_reservation(root->fs_info,
- "delalloc",
- btrfs_ino(inode),
- to_free, 0);
- }
- if (root->fs_info->quota_enabled) {
+ ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+ if (unlikely(ret)) {
+ if (root->fs_info->quota_enabled)
btrfs_qgroup_free(root, num_bytes +
nr_extents * root->leafsize);
- }
- if (delalloc_lock)
- mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
- return ret;
+ goto out_fail;
}
spin_lock(&BTRFS_I(inode)->lock);
@@ -4633,6 +4782,34 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
block_rsv_add_bytes(block_rsv, to_reserve, 1);
return 0;
+
+out_fail:
+ spin_lock(&BTRFS_I(inode)->lock);
+ dropped = drop_outstanding_extent(inode);
+ /*
+ * If the inodes csum_bytes is the same as the original
+ * csum_bytes then we know we haven't raced with any free()ers
+ * so we can just reduce our inodes csum bytes and carry on.
+ * Otherwise we have to do the normal free thing to account for
+ * the case that the free side didn't free up its reserve
+ * because of this outstanding reservation.
+ */
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ else
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (dropped)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
+ if (to_free) {
+ btrfs_block_rsv_release(root, block_rsv, to_free);
+ trace_btrfs_space_reservation(root->fs_info, "delalloc",
+ btrfs_ino(inode), to_free, 0);
+ }
+ if (delalloc_lock)
+ mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
+ return ret;
}
/**
@@ -4654,7 +4831,8 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ if (num_bytes)
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
if (dropped > 0)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
@@ -4721,8 +4899,7 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
btrfs_free_reserved_data_space(inode, num_bytes);
}
-static int update_block_group(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+static int update_block_group(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int alloc)
{
struct btrfs_block_group_cache *cache = NULL;
@@ -4759,7 +4936,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
* space back to the block group, otherwise we will leak space.
*/
if (!alloc && cache->cached == BTRFS_CACHE_NO)
- cache_block_group(cache, trans, NULL, 1);
+ cache_block_group(cache, 1);
byte_in_group = bytenr - cache->key.objectid;
WARN_ON(byte_in_group > cache->key.offset);
@@ -4809,6 +4986,13 @@ static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
struct btrfs_block_group_cache *cache;
u64 bytenr;
+ spin_lock(&root->fs_info->block_group_cache_lock);
+ bytenr = root->fs_info->first_logical_byte;
+ spin_unlock(&root->fs_info->block_group_cache_lock);
+
+ if (bytenr < (u64)-1)
+ return bytenr;
+
cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
if (!cache)
return 0;
@@ -4859,8 +5043,7 @@ int btrfs_pin_extent(struct btrfs_root *root,
/*
* this function must be called within transaction
*/
-int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
- struct btrfs_root *root,
+int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
u64 bytenr, u64 num_bytes)
{
struct btrfs_block_group_cache *cache;
@@ -4874,7 +5057,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_trans_handle *trans,
* to one because the slow code to read in the free extents does check
* the pinned extents.
*/
- cache_block_group(cache, trans, root, 1);
+ cache_block_group(cache, 1);
pin_down_extent(root, cache, bytenr, num_bytes, 0);
@@ -5271,7 +5454,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
}
}
- ret = update_block_group(trans, root, bytenr, num_bytes, 0);
+ ret = update_block_group(root, bytenr, num_bytes, 0);
if (ret) {
btrfs_abort_transaction(trans, extent_root, ret);
goto out;
@@ -5316,7 +5499,7 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
if (head->extent_op) {
if (!head->must_insert_reserved)
goto out;
- kfree(head->extent_op);
+ btrfs_free_delayed_extent_op(head->extent_op);
head->extent_op = NULL;
}
@@ -5439,10 +5622,11 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
return ret;
}
-static u64 stripe_align(struct btrfs_root *root, u64 val)
+static u64 stripe_align(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache,
+ u64 val, u64 num_bytes)
{
- u64 mask = ((u64)root->stripesize - 1);
- u64 ret = (val + mask) & ~mask;
+ u64 ret = ALIGN(val, root->stripesize);
return ret;
}
@@ -5462,7 +5646,6 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
u64 num_bytes)
{
struct btrfs_caching_control *caching_ctl;
- DEFINE_WAIT(wait);
caching_ctl = get_caching_control(cache);
if (!caching_ctl)
@@ -5479,7 +5662,6 @@ static noinline int
wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
{
struct btrfs_caching_control *caching_ctl;
- DEFINE_WAIT(wait);
caching_ctl = get_caching_control(cache);
if (!caching_ctl)
@@ -5493,20 +5675,20 @@ wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
int __get_raid_index(u64 flags)
{
- int index;
-
if (flags & BTRFS_BLOCK_GROUP_RAID10)
- index = 0;
+ return BTRFS_RAID_RAID10;
else if (flags & BTRFS_BLOCK_GROUP_RAID1)
- index = 1;
+ return BTRFS_RAID_RAID1;
else if (flags & BTRFS_BLOCK_GROUP_DUP)
- index = 2;
+ return BTRFS_RAID_DUP;
else if (flags & BTRFS_BLOCK_GROUP_RAID0)
- index = 3;
- else
- index = 4;
+ return BTRFS_RAID_RAID0;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID5)
+ return BTRFS_RAID_RAID5;
+ else if (flags & BTRFS_BLOCK_GROUP_RAID6)
+ return BTRFS_RAID_RAID6;
- return index;
+ return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
}
static int get_block_group_index(struct btrfs_block_group_cache *cache)
@@ -5649,6 +5831,8 @@ search:
if (!block_group_bits(block_group, data)) {
u64 extra = BTRFS_BLOCK_GROUP_DUP |
BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6 |
BTRFS_BLOCK_GROUP_RAID10;
/*
@@ -5664,8 +5848,7 @@ have_block_group:
cached = block_group_cache_done(block_group);
if (unlikely(!cached)) {
found_uncached_bg = true;
- ret = cache_block_group(block_group, trans,
- orig_root, 0);
+ ret = cache_block_group(block_group, 0);
BUG_ON(ret < 0);
ret = 0;
}
@@ -5678,6 +5861,7 @@ have_block_group:
* lets look there
*/
if (last_ptr) {
+ unsigned long aligned_cluster;
/*
* the refill lock keeps out other
* people trying to start a new cluster
@@ -5744,11 +5928,15 @@ refill_cluster:
goto unclustered_alloc;
}
+ aligned_cluster = max_t(unsigned long,
+ empty_cluster + empty_size,
+ block_group->full_stripe_len);
+
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
search_start, num_bytes,
- empty_cluster + empty_size);
+ aligned_cluster);
if (ret == 0) {
/*
* now pull our allocation out of this
@@ -5819,7 +6007,8 @@ unclustered_alloc:
goto loop;
}
checks:
- search_start = stripe_align(root, offset);
+ search_start = stripe_align(root, used_block_group,
+ offset, num_bytes);
/* move on to the next group */
if (search_start + num_bytes >
@@ -5970,7 +6159,7 @@ again:
if (ret == -ENOSPC) {
if (!final_tried) {
num_bytes = num_bytes >> 1;
- num_bytes = num_bytes & ~(root->sectorsize - 1);
+ num_bytes = round_down(num_bytes, root->sectorsize);
num_bytes = max(num_bytes, min_alloc_size);
if (num_bytes == min_alloc_size)
final_tried = true;
@@ -6094,7 +6283,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(path->nodes[0]);
btrfs_free_path(path);
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+ ret = update_block_group(root, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
printk(KERN_ERR "btrfs update block group failed for %llu "
"%llu\n", (unsigned long long)ins->objectid,
@@ -6158,7 +6347,7 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_free_path(path);
- ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
+ ret = update_block_group(root, ins->objectid, ins->offset, 1);
if (ret) { /* -ENOENT, logic error */
printk(KERN_ERR "btrfs update block group failed for %llu "
"%llu\n", (unsigned long long)ins->objectid,
@@ -6201,7 +6390,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
u64 num_bytes = ins->offset;
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
- cache_block_group(block_group, trans, NULL, 0);
+ cache_block_group(block_group, 0);
caching_ctl = get_caching_control(block_group);
if (!caching_ctl) {
@@ -6315,12 +6504,14 @@ use_block_rsv(struct btrfs_trans_handle *trans,
if (!ret)
return block_rsv;
if (ret && !block_rsv->failfast) {
- static DEFINE_RATELIMIT_STATE(_rs,
- DEFAULT_RATELIMIT_INTERVAL,
- /*DEFAULT_RATELIMIT_BURST*/ 2);
- if (__ratelimit(&_rs))
- WARN(1, KERN_DEBUG "btrfs: block rsv returned %d\n",
- ret);
+ if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ static DEFINE_RATELIMIT_STATE(_rs,
+ DEFAULT_RATELIMIT_INTERVAL * 10,
+ /*DEFAULT_RATELIMIT_BURST*/ 1);
+ if (__ratelimit(&_rs))
+ WARN(1, KERN_DEBUG
+ "btrfs: block rsv returned %d\n", ret);
+ }
ret = reserve_metadata_bytes(root, block_rsv, blocksize,
BTRFS_RESERVE_NO_FLUSH);
if (!ret) {
@@ -6386,7 +6577,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
struct btrfs_delayed_extent_op *extent_op;
- extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
+ extent_op = btrfs_alloc_delayed_extent_op();
BUG_ON(!extent_op); /* -ENOMEM */
if (key)
memcpy(&extent_op->key, key, sizeof(extent_op->key));
@@ -7189,6 +7380,7 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
root->fs_info->fs_devices->missing_devices;
stripped = BTRFS_BLOCK_GROUP_RAID0 |
+ BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
if (num_devices == 1) {
@@ -7467,16 +7659,16 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
index = get_block_group_index(block_group);
}
- if (index == 0) {
+ if (index == BTRFS_RAID_RAID10) {
dev_min = 4;
/* Divide by 2 */
min_free >>= 1;
- } else if (index == 1) {
+ } else if (index == BTRFS_RAID_RAID1) {
dev_min = 2;
- } else if (index == 2) {
+ } else if (index == BTRFS_RAID_DUP) {
/* Multiply by 2 */
min_free <<= 1;
- } else if (index == 3) {
+ } else if (index == BTRFS_RAID_RAID0) {
dev_min = fs_devices->rw_devices;
do_div(min_free, dev_min);
}
@@ -7637,11 +7829,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
space_info = list_entry(info->space_info.next,
struct btrfs_space_info,
list);
- if (space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
- space_info->bytes_may_use > 0) {
- WARN_ON(1);
- dump_space_info(space_info, 0, 0);
+ if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
+ if (space_info->bytes_pinned > 0 ||
+ space_info->bytes_reserved > 0 ||
+ space_info->bytes_may_use > 0) {
+ WARN_ON(1);
+ dump_space_info(space_info, 0, 0);
+ }
}
list_del(&space_info->list);
kfree(space_info);
@@ -7740,7 +7934,9 @@ int btrfs_read_block_groups(struct btrfs_root *root)
btrfs_release_path(path);
cache->flags = btrfs_block_group_flags(&cache->item);
cache->sectorsize = root->sectorsize;
-
+ cache->full_stripe_len = btrfs_full_stripe_len(root,
+ &root->fs_info->mapping_tree,
+ found_key.objectid);
btrfs_init_free_space_ctl(cache);
/*
@@ -7794,6 +7990,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
if (!(get_alloc_profile(root, space_info->flags) &
(BTRFS_BLOCK_GROUP_RAID10 |
BTRFS_BLOCK_GROUP_RAID1 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6 |
BTRFS_BLOCK_GROUP_DUP)))
continue;
/*
@@ -7869,6 +8067,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
cache->sectorsize = root->sectorsize;
cache->fs_info = root->fs_info;
+ cache->full_stripe_len = btrfs_full_stripe_len(root,
+ &root->fs_info->mapping_tree,
+ chunk_offset);
atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
@@ -7918,12 +8119,14 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
u64 extra_flags = chunk_to_extended(flags) &
BTRFS_EXTENDED_PROFILE_MASK;
+ write_seqlock(&fs_info->profiles_lock);
if (flags & BTRFS_BLOCK_GROUP_DATA)
fs_info->avail_data_alloc_bits &= ~extra_flags;
if (flags & BTRFS_BLOCK_GROUP_METADATA)
fs_info->avail_metadata_alloc_bits &= ~extra_flags;
if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
fs_info->avail_system_alloc_bits &= ~extra_flags;
+ write_sequnlock(&fs_info->profiles_lock);
}
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
@@ -8022,6 +8225,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->block_group_cache_lock);
rb_erase(&block_group->cache_node,
&root->fs_info->block_group_cache_tree);
+
+ if (root->fs_info->first_logical_byte == block_group->key.objectid)
+ root->fs_info->first_logical_byte = (u64)-1;
spin_unlock(&root->fs_info->block_group_cache_lock);
down_write(&block_group->space_info->groups_sem);
@@ -8144,7 +8350,7 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
if (end - start >= range->minlen) {
if (!block_group_cache_done(cache)) {
- ret = cache_block_group(cache, NULL, root, 0);
+ ret = cache_block_group(cache, 0);
if (!ret)
wait_block_group_cache_done(cache);
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1b319df29ee..f173c5af646 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4,7 +4,6 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
@@ -1834,7 +1833,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
*/
static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
SetPageUptodate(page);
@@ -1846,7 +1845,7 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
*/
static void check_page_locked(struct extent_io_tree *tree, struct page *page)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
unlock_page(page);
@@ -1895,13 +1894,11 @@ static int free_io_failure(struct inode *inode, struct io_failure_record *rec,
if (ret)
err = ret;
- if (did_repair) {
- ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
- rec->start + rec->len - 1,
- EXTENT_DAMAGED, GFP_NOFS);
- if (ret && !err)
- err = ret;
- }
+ ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start,
+ rec->start + rec->len - 1,
+ EXTENT_DAMAGED, GFP_NOFS);
+ if (ret && !err)
+ err = ret;
kfree(rec);
return err;
@@ -1932,10 +1929,15 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
u64 map_length = 0;
u64 sector;
struct btrfs_bio *bbio = NULL;
+ struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree;
int ret;
BUG_ON(!mirror_num);
+ /* we can't repair anything in raid56 yet */
+ if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
+ return 0;
+
bio = bio_alloc(GFP_NOFS, 1);
if (!bio)
return -EIO;
@@ -1960,7 +1962,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
return -EIO;
}
bio->bi_bdev = dev->bdev;
- bio_add_page(bio, page, length, start-page_offset(page));
+ bio_add_page(bio, page, length, start - page_offset(page));
btrfsic_submit_bio(WRITE_SYNC, bio);
wait_for_completion(&compl);
@@ -2052,6 +2054,7 @@ static int clean_io_failure(u64 start, struct page *page)
failrec->failed_mirror);
did_repair = !ret;
}
+ ret = 0;
}
out:
@@ -2293,8 +2296,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
struct page *page = bvec->bv_page;
tree = &BTRFS_I(page->mapping->host)->io_tree;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) +
- bvec->bv_offset;
+ start = page_offset(page) + bvec->bv_offset;
end = start + bvec->bv_len - 1;
if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2353,8 +2355,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
(long int)bio->bi_bdev);
tree = &BTRFS_I(page->mapping->host)->io_tree;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) +
- bvec->bv_offset;
+ start = page_offset(page) + bvec->bv_offset;
end = start + bvec->bv_len - 1;
if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
@@ -2471,7 +2472,7 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
struct extent_io_tree *tree = bio->bi_private;
u64 start;
- start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;
+ start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
@@ -2489,13 +2490,13 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
return ret;
}
-static int merge_bio(struct extent_io_tree *tree, struct page *page,
+static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
unsigned long offset, size_t size, struct bio *bio,
unsigned long bio_flags)
{
int ret = 0;
if (tree->ops && tree->ops->merge_bio_hook)
- ret = tree->ops->merge_bio_hook(page, offset, size, bio,
+ ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
bio_flags);
BUG_ON(ret < 0);
return ret;
@@ -2530,7 +2531,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
sector;
if (prev_bio_flags != bio_flags || !contig ||
- merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
+ merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
bio_add_page(bio, page, page_size, offset) < page_size) {
ret = submit_one_bio(rw, bio, mirror_num,
prev_bio_flags);
@@ -2595,7 +2596,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
unsigned long *bio_flags)
{
struct inode *inode = page->mapping->host;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 page_end = start + PAGE_CACHE_SIZE - 1;
u64 end;
u64 cur = start;
@@ -2648,6 +2649,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
}
}
while (cur <= end) {
+ unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
+
if (cur >= last_byte) {
char *userpage;
struct extent_state *cached = NULL;
@@ -2682,7 +2685,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
iosize = min(extent_map_end(em) - cur, end - cur + 1);
cur_end = min(extent_map_end(em) - 1, end);
- iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+ iosize = ALIGN(iosize, blocksize);
if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
disk_io_size = em->block_len;
sector = em->block_start >> 9;
@@ -2735,26 +2738,17 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
continue;
}
- ret = 0;
- if (tree->ops && tree->ops->readpage_io_hook) {
- ret = tree->ops->readpage_io_hook(page, cur,
- cur + iosize - 1);
- }
- if (!ret) {
- unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
- pnr -= page->index;
- ret = submit_extent_page(READ, tree, page,
+ pnr -= page->index;
+ ret = submit_extent_page(READ, tree, page,
sector, disk_io_size, pg_offset,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
this_bio_flag);
- if (!ret) {
- nr++;
- *bio_flags = this_bio_flag;
- }
- }
- if (ret) {
+ if (!ret) {
+ nr++;
+ *bio_flags = this_bio_flag;
+ } else {
SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1);
}
@@ -2806,7 +2800,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
struct inode *inode = page->mapping->host;
struct extent_page_data *epd = data;
struct extent_io_tree *tree = epd->tree;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 delalloc_start;
u64 page_end = start + PAGE_CACHE_SIZE - 1;
u64 end;
@@ -2982,7 +2976,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
BUG_ON(extent_map_end(em) <= cur);
BUG_ON(end < cur);
iosize = min(extent_map_end(em) - cur, end - cur + 1);
- iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
+ iosize = ALIGN(iosize, blocksize);
sector = (em->block_start + extent_offset) >> 9;
bdev = em->bdev;
block_start = em->block_start;
@@ -3124,12 +3118,9 @@ static int lock_extent_buffer_for_io(struct extent_buffer *eb,
set_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
spin_unlock(&eb->refs_lock);
btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
- spin_lock(&fs_info->delalloc_lock);
- if (fs_info->dirty_metadata_bytes >= eb->len)
- fs_info->dirty_metadata_bytes -= eb->len;
- else
- WARN_ON(1);
- spin_unlock(&fs_info->delalloc_lock);
+ __percpu_counter_add(&fs_info->dirty_metadata_bytes,
+ -eb->len,
+ fs_info->dirty_metadata_batch);
ret = 1;
} else {
spin_unlock(&eb->refs_lock);
@@ -3446,15 +3437,9 @@ retry:
* swizzled back from swapper_space to tmpfs file
* mapping
*/
- if (tree->ops &&
- tree->ops->write_cache_pages_lock_hook) {
- tree->ops->write_cache_pages_lock_hook(page,
- data, flush_fn);
- } else {
- if (!trylock_page(page)) {
- flush_fn(data);
- lock_page(page);
- }
+ if (!trylock_page(page)) {
+ flush_fn(data);
+ lock_page(page);
}
if (unlikely(page->mapping != mapping)) {
@@ -3674,11 +3659,11 @@ int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset)
{
struct extent_state *cached_state = NULL;
- u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
size_t blocksize = page->mapping->host->i_sb->s_blocksize;
- start += (offset + blocksize - 1) & ~(blocksize - 1);
+ start += ALIGN(offset, blocksize);
if (start > end)
return 0;
@@ -3700,7 +3685,7 @@ int try_release_extent_state(struct extent_map_tree *map,
struct extent_io_tree *tree, struct page *page,
gfp_t mask)
{
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
int ret = 1;
@@ -3739,7 +3724,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
gfp_t mask)
{
struct extent_map *em;
- u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
+ u64 start = page_offset(page);
u64 end = start + PAGE_CACHE_SIZE - 1;
if ((mask & __GFP_WAIT) &&
@@ -3797,7 +3782,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode,
len = last - offset;
if (len == 0)
break;
- len = (len + sectorsize - 1) & ~(sectorsize - 1);
+ len = ALIGN(len, sectorsize);
em = get_extent(inode, NULL, 0, offset, len, 0);
if (IS_ERR_OR_NULL(em))
return em;
@@ -3995,8 +3980,6 @@ static void __free_extent_buffer(struct extent_buffer *eb)
list_del(&eb->leak_list);
spin_unlock_irqrestore(&leak_lock, flags);
#endif
- if (eb->pages && eb->pages != eb->inline_pages)
- kfree(eb->pages);
kmem_cache_free(extent_buffer_cache, eb);
}
@@ -4037,19 +4020,12 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
atomic_set(&eb->refs, 1);
atomic_set(&eb->io_pages, 0);
- if (len > MAX_INLINE_EXTENT_BUFFER_SIZE) {
- struct page **pages;
- int num_pages = (len + PAGE_CACHE_SIZE - 1) >>
- PAGE_CACHE_SHIFT;
- pages = kzalloc(num_pages, mask);
- if (!pages) {
- __free_extent_buffer(eb);
- return NULL;
- }
- eb->pages = pages;
- } else {
- eb->pages = eb->inline_pages;
- }
+ /*
+ * Sanity checks, currently the maximum is 64k covered by 16x 4k pages
+ */
+ BUILD_BUG_ON(BTRFS_MAX_METADATA_BLOCKSIZE
+ > MAX_INLINE_EXTENT_BUFFER_SIZE);
+ BUG_ON(len > MAX_INLINE_EXTENT_BUFFER_SIZE);
return eb;
}
@@ -4180,6 +4156,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
static void check_buffer_tree_ref(struct extent_buffer *eb)
{
+ int refs;
/* the ref bit is tricky. We have to make sure it is set
* if we have the buffer dirty. Otherwise the
* code to free a buffer can end up dropping a dirty
@@ -4200,6 +4177,10 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
* So bump the ref count first, then set the bit. If someone
* beat us to it, drop the ref we added.
*/
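+ /*
+ * Lockless fast path: if the tree ref bit is already set and someone
+ * else also holds a reference, there is nothing left to do here.
+ */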
+ refs = atomic_read(&eb->refs);
+ if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
+ return;
+
spin_lock(&eb->refs_lock);
if (!test_and_set_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
atomic_inc(&eb->refs);
@@ -4401,9 +4382,20 @@ static int release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
void free_extent_buffer(struct extent_buffer *eb)
{
+ int refs;
+ int old;
if (!eb)
return;
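+ /*
+ * Fast path: while the ref count is clearly above anything the release
+ * checks below care about, drop our reference with a plain cmpxchg and
+ * avoid taking refs_lock.
+ */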
+ while (1) {
+ refs = atomic_read(&eb->refs);
+ if (refs <= 3)
+ break;
+ old = atomic_cmpxchg(&eb->refs, refs, refs - 1);
+ if (old == refs)
+ return;
+ }
+
spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) == 2 &&
test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 2eacfabd326..6068a198556 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -72,10 +72,9 @@ struct extent_io_ops {
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
extent_submit_bio_hook_t *submit_bio_hook;
- int (*merge_bio_hook)(struct page *page, unsigned long offset,
+ int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror);
@@ -90,8 +89,6 @@ struct extent_io_ops {
struct extent_state *other);
void (*split_extent_hook)(struct inode *inode,
struct extent_state *orig, u64 split);
- int (*write_cache_pages_lock_hook)(struct page *page, void *data,
- void (*flush_fn)(void *));
};
struct extent_io_tree {
@@ -161,8 +158,7 @@ struct extent_buffer {
*/
wait_queue_head_t read_lock_wq;
wait_queue_head_t lock_wq;
- struct page *inline_pages[INLINE_EXTENT_BUFFER_PAGES];
- struct page **pages;
+ struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
};
static inline void extent_set_compress_type(unsigned long *bio_flags,
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index fdb7a8db3b5..2834ca5768e 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -1,6 +1,5 @@
#include <linux/err.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "ctree.h"
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 94aa53b3872..ec160202be3 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -684,6 +684,24 @@ out:
return ret;
}
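+/*
+ * Starting at @sector_sum, count how many bytes of checksums cover
+ * disk-contiguous sectors, bounded by what is still left to insert in
+ * @sums (sums->len - total_bytes).
+ */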
+static u64 btrfs_sector_sum_left(struct btrfs_ordered_sum *sums,
+ struct btrfs_sector_sum *sector_sum,
+ u64 total_bytes, u64 sectorsize)
+{
+ u64 tmp = sectorsize;
+ u64 next_sector = sector_sum->bytenr;
+ struct btrfs_sector_sum *next = sector_sum + 1;
+
+ while ((tmp + total_bytes) < sums->len) {
+ if (next_sector + sectorsize != next->bytenr)
+ break;
+ tmp += sectorsize;
+ next_sector = next->bytenr;
+ next++;
+ }
+ return tmp;
+}
+
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_ordered_sum *sums)
@@ -789,20 +807,32 @@ again:
goto insert;
}
- if (csum_offset >= btrfs_item_size_nr(leaf, path->slots[0]) /
+ if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
csum_size) {
- u32 diff = (csum_offset + 1) * csum_size;
+ int extend_nr;
+ u64 tmp;
+ u32 diff;
+ u32 free_space;
- /*
- * is the item big enough already? we dropped our lock
- * before and need to recheck
- */
- if (diff < btrfs_item_size_nr(leaf, path->slots[0]))
- goto csum;
+ if (btrfs_leaf_free_space(root, leaf) <
+ sizeof(struct btrfs_item) + csum_size * 2)
+ goto insert;
+
+ free_space = btrfs_leaf_free_space(root, leaf) -
+ sizeof(struct btrfs_item) - csum_size;
+ tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+ root->sectorsize);
+ tmp >>= root->fs_info->sb->s_blocksize_bits;
+ WARN_ON(tmp < 1);
+
+ extend_nr = max_t(int, 1, (int)tmp);
+ diff = (csum_offset + extend_nr) * csum_size;
+ diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);
diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
- if (diff != csum_size)
- goto insert;
+ diff = min(free_space, diff);
+ diff /= csum_size;
+ diff *= csum_size;
btrfs_extend_item(trans, root, path, diff);
goto csum;
@@ -812,19 +842,14 @@ insert:
btrfs_release_path(path);
csum_offset = 0;
if (found_next) {
- u64 tmp = total_bytes + root->sectorsize;
- u64 next_sector = sector_sum->bytenr;
- struct btrfs_sector_sum *next = sector_sum + 1;
+ u64 tmp;
- while (tmp < sums->len) {
- if (next_sector + root->sectorsize != next->bytenr)
- break;
- tmp += root->sectorsize;
- next_sector = next->bytenr;
- next++;
- }
- tmp = min(tmp, next_offset - file_key.offset);
+ tmp = btrfs_sector_sum_left(sums, sector_sum, total_bytes,
+ root->sectorsize);
tmp >>= root->fs_info->sb->s_blocksize_bits;
+ tmp = min(tmp, (next_offset - file_key.offset) >>
+ root->fs_info->sb->s_blocksize_bits);
+
tmp = max((u64)1, tmp);
tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
ins_size = csum_size * tmp;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 4b241fe9d2f..af1d0605a5c 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -30,11 +30,11 @@
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
+#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
@@ -374,6 +374,11 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
atomic_inc(&fs_info->defrag_running);
while(1) {
+ /* Pause the auto defragger. */
+ if (test_bit(BTRFS_FS_STATE_REMOUNTING,
+ &fs_info->fs_state))
+ break;
+
if (!__need_auto_defrag(fs_info->tree_root))
break;
@@ -505,8 +510,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
loff_t isize = i_size_read(inode);
start_pos = pos & ~((u64)root->sectorsize - 1);
- num_bytes = (write_bytes + pos - start_pos +
- root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
+ num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
@@ -1544,7 +1548,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
* although we have opened a file as writable, we have
* to stop this write operation to ensure FS consistency.
*/
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
mutex_unlock(&inode->i_mutex);
err = -EROFS;
goto out;
@@ -1627,7 +1631,20 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
*/
if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
&BTRFS_I(inode)->runtime_flags)) {
- btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
+ struct btrfs_trans_handle *trans;
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+
+ /*
+ * We need to block on a committing transaction to keep us from
+ * throwing an ordered operation onto the list and causing
+ * something like sync to deadlock trying to flush out this
+ * inode.
+ */
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+ btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
+ btrfs_end_transaction(trans, root);
if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
filemap_flush(inode->i_mapping);
}
@@ -1654,16 +1671,21 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
struct btrfs_trans_handle *trans;
+ bool full_sync = 0;
trace_btrfs_sync_file(file, datasync);
/*
* We write the dirty pages in the range and wait until they complete
* out of the ->i_mutex. If so, we can flush the dirty pages by
- * multi-task, and make the performance up.
+ * multiple tasks, which improves performance. See
+ * btrfs_wait_ordered_range for an explanation of the ASYNC check.
*/
atomic_inc(&BTRFS_I(inode)->sync_writers);
- ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
+ if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+ &BTRFS_I(inode)->runtime_flags))
+ ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
atomic_dec(&BTRFS_I(inode)->sync_writers);
if (ret)
return ret;
@@ -1675,7 +1697,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* range being left.
*/
atomic_inc(&root->log_batch);
- btrfs_wait_ordered_range(inode, start, end - start + 1);
+ full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &BTRFS_I(inode)->runtime_flags);
+ if (full_sync)
+ btrfs_wait_ordered_range(inode, start, end - start + 1);
atomic_inc(&root->log_batch);
/*
@@ -1742,13 +1767,25 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret != BTRFS_NO_LOG_SYNC) {
if (ret > 0) {
+ /*
+ * If we didn't already wait for ordered extents we need
+ * to do that now.
+ */
+ if (!full_sync)
+ btrfs_wait_ordered_range(inode, start,
+ end - start + 1);
ret = btrfs_commit_transaction(trans, root);
} else {
ret = btrfs_sync_log(trans, root);
- if (ret == 0)
+ if (ret == 0) {
ret = btrfs_end_transaction(trans, root);
- else
+ } else {
+ if (!full_sync)
+ btrfs_wait_ordered_range(inode, start,
+ end -
+ start + 1);
ret = btrfs_commit_transaction(trans, root);
+ }
}
} else {
ret = btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 0be7a8742a4..1f84fc09c1a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -1356,6 +1356,8 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit;
int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);
+ max_bitmaps = max(max_bitmaps, 1);
+
BUG_ON(ctl->total_bitmaps > max_bitmaps);
/*
@@ -1463,10 +1465,14 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
}
static struct btrfs_free_space *
-find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
+find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes,
+ unsigned long align)
{
struct btrfs_free_space *entry;
struct rb_node *node;
+ u64 ctl_off;
+ u64 tmp;
+ u64 align_off;
int ret;
if (!ctl->free_space_offset.rb_node)
@@ -1481,15 +1487,34 @@ find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
if (entry->bytes < *bytes)
continue;
+ /* make sure the space returned is big enough
+ * to match our requested alignment
+ */
+ if (*bytes >= align) {
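+ /*
+ * Round entry->offset up to the next @align boundary, measured
+ * from ctl->start, and record the skipped bytes in align_off.
+ */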
+ ctl_off = entry->offset - ctl->start;
+ tmp = ctl_off + align - 1;
+ do_div(tmp, align);
+ tmp = tmp * align + ctl->start;
+ align_off = tmp - entry->offset;
+ } else {
+ align_off = 0;
+ tmp = entry->offset;
+ }
+
+ if (entry->bytes < *bytes + align_off)
+ continue;
+
if (entry->bitmap) {
- ret = search_bitmap(ctl, entry, offset, bytes);
- if (!ret)
+ ret = search_bitmap(ctl, entry, &tmp, bytes);
+ if (!ret) {
+ *offset = tmp;
return entry;
+ }
continue;
}
- *offset = entry->offset;
- *bytes = entry->bytes;
+ *offset = tmp;
+ *bytes = entry->bytes - align_off;
return entry;
}
@@ -1636,10 +1661,14 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
}
/*
- * some block groups are so tiny they can't be enveloped by a bitmap, so
- * don't even bother to create a bitmap for this
+ * The original block groups from mkfs can be really small, like 8
+ * megabytes, so don't bother with a bitmap for those entries. However
+ * some block groups can be smaller than what a bitmap would cover but
+ * are still large enough that they could overflow the 32k memory limit,
+ * so still allow those block groups to have a bitmap entry.
*/
- if (BITS_PER_BITMAP * ctl->unit > block_group->key.offset)
+ if (((BITS_PER_BITMAP * ctl->unit) >> 1) > block_group->key.offset)
return false;
return true;
@@ -2095,9 +2124,12 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *entry = NULL;
u64 bytes_search = bytes + empty_size;
u64 ret = 0;
+ u64 align_gap = 0;
+ u64 align_gap_len = 0;
spin_lock(&ctl->tree_lock);
- entry = find_free_space(ctl, &offset, &bytes_search);
+ entry = find_free_space(ctl, &offset, &bytes_search,
+ block_group->full_stripe_len);
if (!entry)
goto out;
@@ -2107,9 +2139,15 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
if (!entry->bytes)
free_bitmap(ctl, entry);
} else {
+
unlink_free_space(ctl, entry);
- entry->offset += bytes;
- entry->bytes -= bytes;
+ align_gap_len = offset - entry->offset;
+ align_gap = entry->offset;
+
+ entry->offset = offset + bytes;
+ WARN_ON(entry->bytes < bytes + align_gap_len);
+
+ entry->bytes -= bytes + align_gap_len;
if (!entry->bytes)
kmem_cache_free(btrfs_free_space_cachep, entry);
else
@@ -2119,6 +2157,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
out:
spin_unlock(&ctl->tree_lock);
+ if (align_gap_len)
+ __btrfs_add_free_space(ctl, align_gap, align_gap_len);
return ret;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 55c07b65037..c226daefd65 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -39,12 +39,13 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
+#include <linux/btrfs.h>
+#include <linux/blkdev.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "xattr.h"
@@ -54,6 +55,7 @@
#include "locking.h"
#include "free-space-cache.h"
#include "inode-map.h"
+#include "backref.h"
struct btrfs_iget_args {
u64 ino;
@@ -231,8 +233,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
u64 isize = i_size_read(inode);
u64 actual_end = min(end + 1, isize);
u64 inline_len = actual_end - start;
- u64 aligned_end = (end + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
+ u64 aligned_end = ALIGN(end, root->sectorsize);
u64 data_len = inline_len;
int ret;
@@ -265,6 +266,7 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
return 1;
}
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
btrfs_delalloc_release_metadata(inode, end + 1 - start);
btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
return 0;
@@ -389,7 +391,7 @@ again:
* a compressed extent to 128k.
*/
total_compressed = min(total_compressed, max_uncompressed);
- num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+ num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
total_in = 0;
ret = 0;
@@ -488,15 +490,13 @@ cont:
* up to a block size boundary so the allocator does sane
* things
*/
- total_compressed = (total_compressed + blocksize - 1) &
- ~(blocksize - 1);
+ total_compressed = ALIGN(total_compressed, blocksize);
/*
* one last check to make sure the compression is really a
* win, compare the page count read with the blocks on disk
*/
- total_in = (total_in + PAGE_CACHE_SIZE - 1) &
- ~(PAGE_CACHE_SIZE - 1);
+ total_in = ALIGN(total_in, PAGE_CACHE_SIZE);
if (total_compressed >= total_in) {
will_compress = 0;
} else {
@@ -608,7 +608,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
if (list_empty(&async_cow->extents))
return 0;
-
+again:
while (!list_empty(&async_cow->extents)) {
async_extent = list_entry(async_cow->extents.next,
struct async_extent, list);
@@ -648,6 +648,8 @@ retry:
async_extent->ram_size - 1,
btrfs_get_extent,
WB_SYNC_ALL);
+ else if (ret)
+ unlock_page(async_cow->locked_page);
kfree(async_extent);
cond_resched();
continue;
@@ -672,6 +674,7 @@ retry:
if (ret) {
int i;
+
for (i = 0; i < async_extent->nr_pages; i++) {
WARN_ON(async_extent->pages[i]->mapping);
page_cache_release(async_extent->pages[i]);
@@ -679,12 +682,10 @@ retry:
kfree(async_extent->pages);
async_extent->nr_pages = 0;
async_extent->pages = NULL;
- unlock_extent(io_tree, async_extent->start,
- async_extent->start +
- async_extent->ram_size - 1);
+
if (ret == -ENOSPC)
goto retry;
- goto out_free; /* JDM: Requeue? */
+ goto out_free;
}
/*
@@ -696,10 +697,13 @@ retry:
async_extent->ram_size - 1, 0);
em = alloc_extent_map();
- BUG_ON(!em); /* -ENOMEM */
+ if (!em)
+ goto out_free_reserve;
em->start = async_extent->start;
em->len = async_extent->ram_size;
em->orig_start = em->start;
+ em->mod_start = em->start;
+ em->mod_len = em->len;
em->block_start = ins.objectid;
em->block_len = ins.offset;
@@ -726,6 +730,9 @@ retry:
async_extent->ram_size - 1, 0);
}
+ if (ret)
+ goto out_free_reserve;
+
ret = btrfs_add_ordered_extent_compress(inode,
async_extent->start,
ins.objectid,
@@ -733,7 +740,8 @@ retry:
ins.offset,
BTRFS_ORDERED_COMPRESSED,
async_extent->compress_type);
- BUG_ON(ret); /* -ENOMEM */
+ if (ret)
+ goto out_free_reserve;
/*
* clear dirty, set writeback and unlock the pages.
@@ -754,18 +762,30 @@ retry:
ins.objectid,
ins.offset, async_extent->pages,
async_extent->nr_pages);
-
- BUG_ON(ret); /* -ENOMEM */
alloc_hint = ins.objectid + ins.offset;
kfree(async_extent);
+ if (ret)
+ goto out;
cond_resched();
}
ret = 0;
out:
return ret;
+out_free_reserve:
+ btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
out_free:
+ extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
+ async_extent->start,
+ async_extent->start +
+ async_extent->ram_size - 1,
+ NULL, EXTENT_CLEAR_UNLOCK_PAGE |
+ EXTENT_CLEAR_UNLOCK |
+ EXTENT_CLEAR_DELALLOC |
+ EXTENT_CLEAR_DIRTY |
+ EXTENT_SET_WRITEBACK |
+ EXTENT_END_WRITEBACK);
kfree(async_extent);
- goto out;
+ goto again;
}
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
@@ -834,7 +854,7 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
BUG_ON(btrfs_is_free_space_inode(inode));
- num_bytes = (end - start + blocksize) & ~(blocksize - 1);
+ num_bytes = ALIGN(end - start + 1, blocksize);
num_bytes = max(blocksize, num_bytes);
disk_num_bytes = num_bytes;
@@ -892,6 +912,8 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
em->orig_start = em->start;
ram_size = ins.offset;
em->len = ins.offset;
+ em->mod_start = em->start;
+ em->mod_len = em->len;
em->block_start = ins.objectid;
em->block_len = ins.offset;
@@ -1338,6 +1360,8 @@ out_check:
em->block_start = disk_bytenr;
em->orig_block_len = disk_num_bytes;
em->bdev = root->fs_info->fs_devices->latest_bdev;
+ em->mod_start = em->start;
+ em->mod_len = em->len;
set_bit(EXTENT_FLAG_PINNED, &em->flags);
set_bit(EXTENT_FLAG_FILLING, &em->flags);
em->generation = -1;
@@ -1508,14 +1532,22 @@ static void btrfs_set_bit_hook(struct inode *inode,
spin_unlock(&BTRFS_I(inode)->lock);
}
- spin_lock(&root->fs_info->delalloc_lock);
+ __percpu_counter_add(&root->fs_info->delalloc_bytes, len,
+ root->fs_info->delalloc_batch);
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes += len;
- root->fs_info->delalloc_bytes += len;
- if (do_list && list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
- list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
- &root->fs_info->delalloc_inodes);
+ if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags)) {
+ spin_lock(&root->fs_info->delalloc_lock);
+ if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+ list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
+ &root->fs_info->delalloc_inodes);
+ set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+ spin_unlock(&root->fs_info->delalloc_lock);
}
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock(&BTRFS_I(inode)->lock);
}
}
@@ -1550,15 +1582,22 @@ static void btrfs_clear_bit_hook(struct inode *inode,
&& do_list)
btrfs_free_reserved_data_space(inode, len);
- spin_lock(&root->fs_info->delalloc_lock);
- root->fs_info->delalloc_bytes -= len;
+ __percpu_counter_add(&root->fs_info->delalloc_bytes, -len,
+ root->fs_info->delalloc_batch);
+ spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->delalloc_bytes -= len;
-
if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 &&
- !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
- list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+ test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags)) {
+ spin_lock(&root->fs_info->delalloc_lock);
+ if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
+ list_del_init(&BTRFS_I(inode)->delalloc_inodes);
+ clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &BTRFS_I(inode)->runtime_flags);
+ }
+ spin_unlock(&root->fs_info->delalloc_lock);
}
- spin_unlock(&root->fs_info->delalloc_lock);
+ spin_unlock(&BTRFS_I(inode)->lock);
}
}
@@ -1566,7 +1605,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
*/
-int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
@@ -1581,7 +1620,7 @@ int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
length = bio->bi_size;
map_length = length;
- ret = btrfs_map_block(root->fs_info, READ, logical,
+ ret = btrfs_map_block(root->fs_info, rw, logical,
&map_length, NULL, 0);
/* Will always return 0 with map_multi == NULL */
BUG_ON(ret < 0);
@@ -1892,6 +1931,640 @@ out:
return ret;
}
+/* snapshot-aware defrag */
+struct sa_defrag_extent_backref {
+ struct rb_node node;
+ struct old_sa_defrag_extent *old;
+ u64 root_id;
+ u64 inum;
+ u64 file_pos;
+ u64 extent_offset;
+ u64 num_bytes;
+ u64 generation;
+};
+
+struct old_sa_defrag_extent {
+ struct list_head list;
+ struct new_sa_defrag_extent *new;
+
+ u64 extent_offset;
+ u64 bytenr;
+ u64 offset;
+ u64 len;
+ int count;
+};
+
+struct new_sa_defrag_extent {
+ struct rb_root root;
+ struct list_head head;
+ struct btrfs_path *path;
+ struct inode *inode;
+ u64 file_pos;
+ u64 len;
+ u64 bytenr;
+ u64 disk_len;
+ u8 compress_type;
+};
+
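+/*
+ * Compare two backrefs by (root_id, inum, file_pos); this keeps the
+ * per-extent rb-tree of backrefs sorted.
+ */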
+static int backref_comp(struct sa_defrag_extent_backref *b1,
+ struct sa_defrag_extent_backref *b2)
+{
+ if (b1->root_id < b2->root_id)
+ return -1;
+ else if (b1->root_id > b2->root_id)
+ return 1;
+
+ if (b1->inum < b2->inum)
+ return -1;
+ else if (b1->inum > b2->inum)
+ return 1;
+
+ if (b1->file_pos < b2->file_pos)
+ return -1;
+ else if (b1->file_pos > b2->file_pos)
+ return 1;
+
+ /*
+ * [------------------------------] ===> (a range of space)
+ * |<--->| |<---->| =============> (fs/file tree A)
+ * |<---------------------------->| ===> (fs/file tree B)
+ *
+ * A range of space can refer to two file extents in one tree while
+ * referring to only one file extent in another tree.
+ *
+ * So we may process a disk offset more than once (two extents in A),
+ * land on the same extent (one extent in B), and then insert two
+ * identical backrefs (both referring to the extent in B).
+ */
+ return 0;
+}
+
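+/*
+ * Insert @backref into @root, ordered by backref_comp().  Entries that
+ * compare equal are kept; see the comment in backref_comp().
+ */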
+static void backref_insert(struct rb_root *root,
+ struct sa_defrag_extent_backref *backref)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct sa_defrag_extent_backref *entry;
+ int ret;
+
+ while (*p) {
+ parent = *p;
+ entry = rb_entry(parent, struct sa_defrag_extent_backref, node);
+
+ ret = backref_comp(backref, entry);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&backref->node, parent, p);
+ rb_insert_color(&backref->node, root);
+}
+
+/*
+ * Note the backref might have changed, and in this case we just return 0.
+ */
+static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
+ void *ctx)
+{
+ struct btrfs_file_extent_item *extent;
+ struct btrfs_fs_info *fs_info;
+ struct old_sa_defrag_extent *old = ctx;
+ struct new_sa_defrag_extent *new = old->new;
+ struct btrfs_path *path = new->path;
+ struct btrfs_key key;
+ struct btrfs_root *root;
+ struct sa_defrag_extent_backref *backref;
+ struct extent_buffer *leaf;
+ struct inode *inode = new->inode;
+ int slot;
+ int ret;
+ u64 extent_offset;
+ u64 num_bytes;
+
+ if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
+ inum == btrfs_ino(inode))
+ return 0;
+
+ key.objectid = root_id;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ fs_info = BTRFS_I(inode)->root->fs_info;
+ root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(root)) {
+ if (PTR_ERR(root) == -ENOENT)
+ return 0;
+ WARN_ON(1);
+ pr_debug("inum=%llu, offset=%llu, root_id=%llu\n",
+ inum, offset, root_id);
+ return PTR_ERR(root);
+ }
+
+ key.objectid = inum;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ if (offset > (u64)-1 << 32)
+ key.offset = 0;
+ else
+ key.offset = offset;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ WARN_ON(1);
+ return ret;
+ }
+
+ while (1) {
+ cond_resched();
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0) {
+ goto out;
+ } else if (ret > 0) {
+ ret = 0;
+ goto out;
+ }
+ continue;
+ }
+
+ path->slots[0]++;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+
+ if (key.objectid > inum)
+ goto out;
+
+ if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
+ continue;
+
+ extent = btrfs_item_ptr(leaf, slot,
+ struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
+ continue;
+
+ extent_offset = btrfs_file_extent_offset(leaf, extent);
+ if (key.offset - extent_offset != offset)
+ continue;
+
+ num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+ if (extent_offset >= old->extent_offset + old->offset +
+ old->len || extent_offset + num_bytes <=
+ old->extent_offset + old->offset)
+ continue;
+
+ break;
+ }
+
+ backref = kmalloc(sizeof(*backref), GFP_NOFS);
+ if (!backref) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ backref->root_id = root_id;
+ backref->inum = inum;
+ backref->file_pos = offset + extent_offset;
+ backref->num_bytes = num_bytes;
+ backref->extent_offset = extent_offset;
+ backref->generation = btrfs_file_extent_generation(leaf, extent);
+ backref->old = old;
+ backref_insert(&new->root, backref);
+ old->count++;
+out:
+ btrfs_release_path(path);
+ WARN_ON(ret);
+ return ret;
+}
+
+static noinline bool record_extent_backrefs(struct btrfs_path *path,
+ struct new_sa_defrag_extent *new)
+{
+ struct btrfs_fs_info *fs_info = BTRFS_I(new->inode)->root->fs_info;
+ struct old_sa_defrag_extent *old, *tmp;
+ int ret;
+
+ new->path = path;
+
+ list_for_each_entry_safe(old, tmp, &new->head, list) {
+ ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+ path, record_one_backref,
+ old);
+ BUG_ON(ret < 0 && ret != -ENOENT);
+
+ /* no backref to be processed for this extent */
+ if (!old->count) {
+ list_del(&old->list);
+ kfree(old);
+ }
+ }
+
+ if (list_empty(&new->head))
+ return false;
+
+ return true;
+}
+
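+/*
+ * Return 1 if @fi is a plain regular extent (no compression, encryption
+ * or other encoding) that points at @disk_bytenr, so a relinked neighbour
+ * can simply be merged into it.
+ */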
+static int relink_is_mergable(struct extent_buffer *leaf,
+ struct btrfs_file_extent_item *fi,
+ u64 disk_bytenr)
+{
+ if (btrfs_file_extent_disk_bytenr(leaf, fi) != disk_bytenr)
+ return 0;
+
+ if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
+ return 0;
+
+ if (btrfs_file_extent_compression(leaf, fi) ||
+ btrfs_file_extent_encryption(leaf, fi) ||
+ btrfs_file_extent_other_encoding(leaf, fi))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * Note the backref might have changed, and in this case we just return 0.
+ */
+static noinline int relink_extent_backref(struct btrfs_path *path,
+ struct sa_defrag_extent_backref *prev,
+ struct sa_defrag_extent_backref *backref)
+{
+ struct btrfs_file_extent_item *extent;
+ struct btrfs_file_extent_item *item;
+ struct btrfs_ordered_extent *ordered;
+ struct btrfs_trans_handle *trans;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_root *root;
+ struct btrfs_key key;
+ struct extent_buffer *leaf;
+ struct old_sa_defrag_extent *old = backref->old;
+ struct new_sa_defrag_extent *new = old->new;
+ struct inode *src_inode = new->inode;
+ struct inode *inode;
+ struct extent_state *cached = NULL;
+ int ret = 0;
+ u64 start;
+ u64 len;
+ u64 lock_start;
+ u64 lock_end;
+ bool merge = false;
+ int index;
+
+ if (prev && prev->root_id == backref->root_id &&
+ prev->inum == backref->inum &&
+ prev->file_pos + prev->num_bytes == backref->file_pos)
+ merge = true;
+
+ /* step 1: get root */
+ key.objectid = backref->root_id;
+ key.type = BTRFS_ROOT_ITEM_KEY;
+ key.offset = (u64)-1;
+
+ fs_info = BTRFS_I(src_inode)->root->fs_info;
+ index = srcu_read_lock(&fs_info->subvol_srcu);
+
+ root = btrfs_read_fs_root_no_name(fs_info, &key);
+ if (IS_ERR(root)) {
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ if (PTR_ERR(root) == -ENOENT)
+ return 0;
+ return PTR_ERR(root);
+ }
+ if (btrfs_root_refs(&root->root_item) == 0) {
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ /* treat it like ENOENT and return 0 */
+ return 0;
+ }
+
+ /* step 2: get inode */
+ key.objectid = backref->inum;
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ inode = btrfs_iget(fs_info->sb, &key, root, NULL);
+ if (IS_ERR(inode)) {
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+ return 0;
+ }
+
+ srcu_read_unlock(&fs_info->subvol_srcu, index);
+
+ /* step 3: relink backref */
+ lock_start = backref->file_pos;
+ lock_end = backref->file_pos + backref->num_bytes - 1;
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
+ 0, &cached);
+
+ ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
+ if (ordered) {
+ btrfs_put_ordered_extent(ordered);
+ goto out_unlock;
+ }
+
+ trans = btrfs_join_transaction(root);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_unlock;
+ }
+
+ key.objectid = backref->inum;
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = backref->file_pos;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0) {
+ goto out_free_path;
+ } else if (ret > 0) {
+ ret = 0;
+ goto out_free_path;
+ }
+
+ extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_generation(path->nodes[0], extent) !=
+ backref->generation)
+ goto out_free_path;
+
+ btrfs_release_path(path);
+
+ start = backref->file_pos;
+ if (backref->extent_offset < old->extent_offset + old->offset)
+ start += old->extent_offset + old->offset -
+ backref->extent_offset;
+
+ len = min(backref->extent_offset + backref->num_bytes,
+ old->extent_offset + old->offset + old->len);
+ len -= max(backref->extent_offset, old->extent_offset + old->offset);
+
+ ret = btrfs_drop_extents(trans, root, inode, start,
+ start + len, 1);
+ if (ret)
+ goto out_free_path;
+again:
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = start;
+
+ if (merge) {
+ struct btrfs_file_extent_item *fi;
+ u64 extent_len;
+ struct btrfs_key found_key;
+
+ ret = btrfs_search_slot(trans, root, &key, path, 1, 1);
+ if (ret < 0)
+ goto out_free_path;
+
+ path->slots[0]--;
+ leaf = path->nodes[0];
+ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ extent_len = btrfs_file_extent_num_bytes(leaf, fi);
+
+ if (relink_is_mergable(leaf, fi, new->bytenr) &&
+ extent_len + found_key.offset == start) {
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_len + len);
+ btrfs_mark_buffer_dirty(leaf);
+ inode_add_bytes(inode, len);
+
+ ret = 1;
+ goto out_free_path;
+ } else {
+ merge = false;
+ btrfs_release_path(path);
+ goto again;
+ }
+ }
+
+ ret = btrfs_insert_empty_item(trans, root, path, &key,
+ sizeof(*extent));
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_free_path;
+ }
+
+ leaf = path->nodes[0];
+ item = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
+ btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
+ btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
+ btrfs_set_file_extent_num_bytes(leaf, item, len);
+ btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
+ btrfs_set_file_extent_generation(leaf, item, trans->transid);
+ btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_compression(leaf, item, new->compress_type);
+ btrfs_set_file_extent_encryption(leaf, item, 0);
+ btrfs_set_file_extent_other_encoding(leaf, item, 0);
+
+ btrfs_mark_buffer_dirty(leaf);
+ inode_add_bytes(inode, len);
+
+ ret = btrfs_inc_extent_ref(trans, root, new->bytenr,
+ new->disk_len, 0,
+ backref->root_id, backref->inum,
+ new->file_pos, 0); /* start - extent_offset */
+ if (ret) {
+ btrfs_abort_transaction(trans, root, ret);
+ goto out_free_path;
+ }
+
+ ret = 1;
+out_free_path:
+ btrfs_release_path(path);
+ btrfs_end_transaction(trans, root);
+out_unlock:
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
+ &cached, GFP_NOFS);
+ iput(inode);
+ return ret;
+}
+
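+/*
+ * Walk the recorded backrefs in sorted order, relink each one to the
+ * newly written extent, then free the old extent records and let any
+ * waiters know this defrag pass is done.
+ */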
+static void relink_file_extents(struct new_sa_defrag_extent *new)
+{
+ struct btrfs_path *path;
+ struct old_sa_defrag_extent *old, *tmp;
+ struct sa_defrag_extent_backref *backref;
+ struct sa_defrag_extent_backref *prev = NULL;
+ struct inode *inode;
+ struct btrfs_root *root;
+ struct rb_node *node;
+ int ret;
+
+ inode = new->inode;
+ root = BTRFS_I(inode)->root;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return;
+
+ if (!record_extent_backrefs(path, new)) {
+ btrfs_free_path(path);
+ goto out;
+ }
+ btrfs_release_path(path);
+
+ while (1) {
+ node = rb_first(&new->root);
+ if (!node)
+ break;
+ rb_erase(node, &new->root);
+
+ backref = rb_entry(node, struct sa_defrag_extent_backref, node);
+
+ ret = relink_extent_backref(path, prev, backref);
+ WARN_ON(ret < 0);
+
+ kfree(prev);
+
+ if (ret == 1)
+ prev = backref;
+ else
+ prev = NULL;
+ cond_resched();
+ }
+ kfree(prev);
+
+ btrfs_free_path(path);
+
+ list_for_each_entry_safe(old, tmp, &new->head, list) {
+ list_del(&old->list);
+ kfree(old);
+ }
+out:
+ atomic_dec(&root->fs_info->defrag_running);
+ wake_up(&root->fs_info->transaction_wait);
+
+ kfree(new);
+}
+
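+/*
+ * Record the old file extents that back the range covered by @ordered;
+ * relink_file_extents() later re-points references that still use them
+ * at the newly written extent.
+ */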
+static struct new_sa_defrag_extent *
+record_old_file_extents(struct inode *inode,
+ struct btrfs_ordered_extent *ordered)
+{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_path *path;
+ struct btrfs_key key;
+ struct old_sa_defrag_extent *old, *tmp;
+ struct new_sa_defrag_extent *new;
+ int ret;
+
+ new = kmalloc(sizeof(*new), GFP_NOFS);
+ if (!new)
+ return NULL;
+
+ new->inode = inode;
+ new->file_pos = ordered->file_offset;
+ new->len = ordered->len;
+ new->bytenr = ordered->start;
+ new->disk_len = ordered->disk_len;
+ new->compress_type = ordered->compress_type;
+ new->root = RB_ROOT;
+ INIT_LIST_HEAD(&new->head);
+
+ path = btrfs_alloc_path();
+ if (!path)
+ goto out_kfree;
+
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = new->file_pos;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out_free_path;
+ if (ret > 0 && path->slots[0] > 0)
+ path->slots[0]--;
+
+ /* find out all the old extents for the file range */
+ while (1) {
+ struct btrfs_file_extent_item *extent;
+ struct extent_buffer *l;
+ int slot;
+ u64 num_bytes;
+ u64 offset;
+ u64 end;
+ u64 disk_bytenr;
+ u64 extent_offset;
+
+ l = path->nodes[0];
+ slot = path->slots[0];
+
+ if (slot >= btrfs_header_nritems(l)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out_free_list;
+ else if (ret > 0)
+ break;
+ continue;
+ }
+
+ btrfs_item_key_to_cpu(l, &key, slot);
+
+ if (key.objectid != btrfs_ino(inode))
+ break;
+ if (key.type != BTRFS_EXTENT_DATA_KEY)
+ break;
+ if (key.offset >= new->file_pos + new->len)
+ break;
+
+ extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);
+
+ num_bytes = btrfs_file_extent_num_bytes(l, extent);
+ if (key.offset + num_bytes < new->file_pos)
+ goto next;
+
+ disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
+ if (!disk_bytenr)
+ goto next;
+
+ extent_offset = btrfs_file_extent_offset(l, extent);
+
+ old = kmalloc(sizeof(*old), GFP_NOFS);
+ if (!old)
+ goto out_free_list;
+
+ offset = max(new->file_pos, key.offset);
+ end = min(new->file_pos + new->len, key.offset + num_bytes);
+
+ old->bytenr = disk_bytenr;
+ old->extent_offset = extent_offset;
+ old->offset = offset - key.offset;
+ old->len = end - offset;
+ old->new = new;
+ old->count = 0;
+ list_add_tail(&old->list, &new->head);
+next:
+ path->slots[0]++;
+ cond_resched();
+ }
+
+ btrfs_free_path(path);
+ atomic_inc(&root->fs_info->defrag_running);
+
+ return new;
+
+out_free_list:
+ list_for_each_entry_safe(old, tmp, &new->head, list) {
+ list_del(&old->list);
+ kfree(old);
+ }
+out_free_path:
+ btrfs_free_path(path);
+out_kfree:
+ kfree(new);
+ return NULL;
+}
+
/*
* helper function for btrfs_finish_ordered_io, this
* just reads in some of the csum leaves to prime them into ram
@@ -1909,6 +2582,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
struct btrfs_trans_handle *trans = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
struct extent_state *cached_state = NULL;
+ struct new_sa_defrag_extent *new = NULL;
int compress_type = 0;
int ret;
bool nolock;
@@ -1943,6 +2617,20 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->file_offset + ordered_extent->len - 1,
0, &cached_state);
+ ret = test_range_bit(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset + ordered_extent->len - 1,
+ EXTENT_DEFRAG, 1, cached_state);
+ if (ret) {
+ u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
+ if (last_snapshot >= BTRFS_I(inode)->generation)
+ /* the inode is shared */
+ new = record_old_file_extents(inode, ordered_extent);
+
+ clear_extent_bit(io_tree, ordered_extent->file_offset,
+ ordered_extent->file_offset + ordered_extent->len - 1,
+ EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
+ }
+
if (nolock)
trans = btrfs_join_transaction_nolock(root);
else
@@ -2001,17 +2689,33 @@ out:
if (trans)
btrfs_end_transaction(trans, root);
- if (ret)
+ if (ret) {
clear_extent_uptodate(io_tree, ordered_extent->file_offset,
ordered_extent->file_offset +
ordered_extent->len - 1, NULL, GFP_NOFS);
+ /*
+ * If the ordered extent had an IOERR or something else went
+ * wrong we need to return the space for this ordered extent
+ * back to the allocator.
+ */
+ if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
+ !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
+ btrfs_free_reserved_extent(root, ordered_extent->start,
+ ordered_extent->disk_len);
+ }
+
/*
* This needs to be done to make sure anybody waiting knows we are done
* updating everything for this ordered extent.
*/
btrfs_remove_ordered_extent(inode, ordered_extent);
+ /* for snapshot-aware defrag */
+ if (new)
+ relink_file_extents(new);
+
/* once for us */
btrfs_put_ordered_extent(ordered_extent);
/* once for the tree */
@@ -2062,7 +2766,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state, int mirror)
{
- size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
+ size_t offset = start - page_offset(page);
struct inode *inode = page->mapping->host;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
char *kaddr;
@@ -2167,11 +2871,6 @@ void btrfs_run_delayed_iputs(struct btrfs_root *root)
}
}
-enum btrfs_orphan_cleanup_state {
- ORPHAN_CLEANUP_STARTED = 1,
- ORPHAN_CLEANUP_DONE = 2,
-};
-
/*
* This is called in transaction commit time. If there are no orphan
* files in the subvolume, it removes orphan item and frees block_rsv
@@ -2469,6 +3168,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
*/
set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
+ atomic_inc(&root->orphan_inodes);
/* if we have links, this was a truncate, lets do that */
if (inode->i_nlink) {
@@ -2491,6 +3191,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
goto out;
ret = btrfs_truncate(inode);
+ if (ret)
+ btrfs_orphan_del(NULL, inode);
} else {
nr_unlink++;
}
@@ -2709,34 +3411,41 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
struct btrfs_inode_item *item,
struct inode *inode)
{
- btrfs_set_inode_uid(leaf, item, i_uid_read(inode));
- btrfs_set_inode_gid(leaf, item, i_gid_read(inode));
- btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
- btrfs_set_inode_mode(leaf, item, inode->i_mode);
- btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
+ struct btrfs_map_token token;
+
+ btrfs_init_map_token(&token);
+
+ btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
+ btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
+ btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
+ &token);
+ btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
+ btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
- btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
- inode->i_atime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
- inode->i_atime.tv_nsec);
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+ inode->i_atime.tv_nsec, &token);
- btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
- inode->i_mtime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
- inode->i_mtime.tv_nsec);
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+ inode->i_mtime.tv_nsec, &token);
- btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
- inode->i_ctime.tv_sec);
- btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
- inode->i_ctime.tv_nsec);
+ btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_sec, &token);
+ btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+ inode->i_ctime.tv_nsec, &token);
- btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
- btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
- btrfs_set_inode_sequence(leaf, item, inode->i_version);
- btrfs_set_inode_transid(leaf, item, trans->transid);
- btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
- btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
- btrfs_set_inode_block_group(leaf, item, 0);
+ btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
+ &token);
+ btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
+ &token);
+ btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
+ btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
+ btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
+ btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
+ btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
/*
@@ -3304,7 +4013,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u64 extent_num_bytes = 0;
u64 extent_offset = 0;
u64 item_end = 0;
- u64 mask = root->sectorsize - 1;
u32 found_type = (u8)-1;
int found_extent;
int del_item;
@@ -3328,7 +4036,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
* extent just the way it is.
*/
if (root->ref_cows || root == root->fs_info->tree_root)
- btrfs_drop_extent_cache(inode, (new_size + mask) & (~mask), (u64)-1, 0);
+ btrfs_drop_extent_cache(inode, ALIGN(new_size,
+ root->sectorsize), (u64)-1, 0);
/*
* This function is also used to drop the items in the log tree before
@@ -3407,10 +4116,9 @@ search_again:
if (!del_item) {
u64 orig_num_bytes =
btrfs_file_extent_num_bytes(leaf, fi);
- extent_num_bytes = new_size -
- found_key.offset + root->sectorsize - 1;
- extent_num_bytes = extent_num_bytes &
- ~((u64)root->sectorsize - 1);
+ extent_num_bytes = ALIGN(new_size -
+ found_key.offset,
+ root->sectorsize);
btrfs_set_file_extent_num_bytes(leaf, fi,
extent_num_bytes);
num_dec = (orig_num_bytes -
@@ -3646,9 +4354,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
- u64 mask = root->sectorsize - 1;
- u64 hole_start = (oldsize + mask) & ~mask;
- u64 block_end = (size + mask) & ~mask;
+ u64 hole_start = ALIGN(oldsize, root->sectorsize);
+ u64 block_end = ALIGN(size, root->sectorsize);
u64 last_byte;
u64 cur_offset;
u64 hole_size;
@@ -3681,7 +4388,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
break;
}
last_byte = min(extent_map_end(em), block_end);
- last_byte = (last_byte + mask) & ~mask;
+ last_byte = ALIGN(last_byte, root->sectorsize);
if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
struct extent_map *hole_em;
hole_size = last_byte - cur_offset;
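
All of the ALIGN() conversions in the hunks above rely on the usual power-of-two round-up identity; a small standalone sketch (the ALIGN_UP macro below mirrors the kernel's ALIGN() for power-of-two alignments, and the sector size is just an example value):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* same shape as the kernel's ALIGN() for power-of-two 'a' */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t sectorsize = 4096;	/* example sector size */
	uint64_t sizes[] = { 0, 1, 4095, 4096, 4097, 123456 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* the open-coded form used by the removed lines */
		uint64_t mask = sectorsize - 1;
		uint64_t old_way = (sizes[i] + mask) & ~mask;

		assert(old_way == ALIGN_UP(sizes[i], sectorsize));
		printf("%llu -> %llu\n", (unsigned long long)sizes[i],
		       (unsigned long long)old_way);
	}
	return 0;
}
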
@@ -3832,6 +4539,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
/* we don't support swapfiles, so vmtruncate shouldn't fail */
truncate_setsize(inode, newsize);
+
+ /* Disable nonlocked read DIO to avoid the endless truncate */
+ btrfs_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ btrfs_inode_resume_unlocked_dio(inode);
+
ret = btrfs_truncate(inode);
if (ret && inode->i_nlink)
btrfs_orphan_del(NULL, inode);
@@ -3904,6 +4617,12 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
+ ret = btrfs_commit_inode_delayed_inode(inode);
+ if (ret) {
+ btrfs_orphan_del(NULL, inode);
+ goto no_delete;
+ }
+
rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
if (!rsv) {
btrfs_orphan_del(NULL, inode);
@@ -3941,7 +4660,7 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
- trans = btrfs_start_transaction_lflush(root, 1);
+ trans = btrfs_join_transaction(root);
if (IS_ERR(trans)) {
btrfs_orphan_del(NULL, inode);
btrfs_free_block_rsv(root, rsv);
@@ -3955,9 +4674,6 @@ void btrfs_evict_inode(struct inode *inode)
break;
trans->block_rsv = &root->fs_info->trans_block_rsv;
- ret = btrfs_update_inode(trans, root, inode);
- BUG_ON(ret);
-
btrfs_end_transaction(trans, root);
trans = NULL;
btrfs_btree_balance_dirty(root);
@@ -4854,7 +5570,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
if (btrfs_test_opt(root, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
if (btrfs_test_opt(root, NODATACOW))
- BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
+ BTRFS_INODE_NODATASUM;
}
insert_inode_hash(inode);
@@ -5006,12 +5723,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}
- err = btrfs_update_inode(trans, root, inode);
- if (err) {
- drop_inode = 1;
- goto out_unlock;
- }
-
/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
@@ -5396,8 +6107,7 @@ again:
} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size_t size;
size = btrfs_file_extent_inline_len(leaf, item);
- extent_end = (extent_start + size + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
+ extent_end = ALIGN(extent_start + size, root->sectorsize);
}
if (start >= extent_end) {
@@ -5469,8 +6179,7 @@ again:
copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
size - extent_offset);
em->start = extent_start + extent_offset;
- em->len = (copy_size + root->sectorsize - 1) &
- ~((u64)root->sectorsize - 1);
+ em->len = ALIGN(copy_size, root->sectorsize);
em->orig_block_len = em->len;
em->orig_start = em->start;
if (compress_type) {
@@ -5949,6 +6658,8 @@ static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
em->start = start;
em->orig_start = orig_start;
+ em->mod_start = start;
+ em->mod_len = len;
em->len = len;
em->block_len = block_len;
em->block_start = block_start;
@@ -5990,16 +6701,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
u64 len = bh_result->b_size;
struct btrfs_trans_handle *trans;
int unlock_bits = EXTENT_LOCKED;
- int ret;
+ int ret = 0;
- if (create) {
- ret = btrfs_delalloc_reserve_space(inode, len);
- if (ret)
- return ret;
+ if (create)
unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
- } else {
+ else
len = min_t(u64, len, root->sectorsize);
- }
lockstart = start;
lockend = start + len - 1;
@@ -6011,14 +6718,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create))
return -ENOTBLK;
- if (create) {
- ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, EXTENT_DELALLOC, NULL,
- &cached_state, GFP_NOFS);
- if (ret)
- goto unlock_err;
- }
-
em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
@@ -6050,7 +6749,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
if (!create && (em->block_start == EXTENT_MAP_HOLE ||
test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
free_extent_map(em);
- ret = 0;
goto unlock_err;
}
@@ -6148,6 +6846,15 @@ unlock:
*/
if (start + len > i_size_read(inode))
i_size_write(inode, start + len);
+
+ spin_lock(&BTRFS_I(inode)->lock);
+ BTRFS_I(inode)->outstanding_extents++;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
+ ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockstart + len - 1, EXTENT_DELALLOC, NULL,
+ &cached_state, GFP_NOFS);
+ BUG_ON(ret);
}
/*
@@ -6156,24 +6863,9 @@ unlock:
* aren't using if there is any left over space.
*/
if (lockstart < lockend) {
- if (create && len < lockend - lockstart) {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockstart + len - 1,
- unlock_bits | EXTENT_DEFRAG, 1, 0,
- &cached_state, GFP_NOFS);
- /*
- * Beside unlock, we also need to cleanup reserved space
- * for the left range by attaching EXTENT_DO_ACCOUNTING.
- */
- clear_extent_bit(&BTRFS_I(inode)->io_tree,
- lockstart + len, lockend,
- unlock_bits | EXTENT_DO_ACCOUNTING |
- EXTENT_DEFRAG, 1, 0, NULL, GFP_NOFS);
- } else {
- clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
- lockend, unlock_bits, 1, 0,
- &cached_state, GFP_NOFS);
- }
+ clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
+ lockend, unlock_bits, 1, 0,
+ &cached_state, GFP_NOFS);
} else {
free_extent_state(cached_state);
}
@@ -6183,9 +6875,6 @@ unlock:
return 0;
unlock_err:
- if (create)
- unlock_bits |= EXTENT_DO_ACCOUNTING;
-
clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
unlock_bits, 1, 0, &cached_state, GFP_NOFS);
return ret;
@@ -6426,19 +7115,24 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int async_submit = 0;
map_length = orig_bio->bi_size;
- ret = btrfs_map_block(root->fs_info, READ, start_sector << 9,
+ ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
&map_length, NULL, 0);
if (ret) {
bio_put(orig_bio);
return -EIO;
}
-
if (map_length >= orig_bio->bi_size) {
bio = orig_bio;
goto submit;
}
- async_submit = 1;
+ /* async crcs make it difficult to collect full stripe writes. */
+ if (btrfs_get_alloc_profile(root, 1) &
+ (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+ async_submit = 0;
+ else
+ async_submit = 1;
+
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
if (!bio)
return -ENOMEM;
@@ -6480,7 +7174,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
bio->bi_end_io = btrfs_end_dio_bio;
map_length = orig_bio->bi_size;
- ret = btrfs_map_block(root->fs_info, READ,
+ ret = btrfs_map_block(root->fs_info, rw,
start_sector << 9,
&map_length, NULL, 0);
if (ret) {
@@ -6623,15 +7317,60 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ size_t count = 0;
+ int flags = 0;
+ bool wakeup = true;
+ bool relock = false;
+ ssize_t ret;
if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov,
offset, nr_segs))
return 0;
- return __blockdev_direct_IO(rw, iocb, inode,
- BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
- iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
- btrfs_submit_direct, 0);
+ atomic_inc(&inode->i_dio_count);
+ smp_mb__after_atomic_inc();
+
+ if (rw & WRITE) {
+ count = iov_length(iov, nr_segs);
+ /*
+ * If the write DIO is beyond the EOF, we need to update
+ * the isize, but it is protected by i_mutex. So we can
+ * not unlock the i_mutex in this case.
+ */
+ if (offset + count <= inode->i_size) {
+ mutex_unlock(&inode->i_mutex);
+ relock = true;
+ }
+ ret = btrfs_delalloc_reserve_space(inode, count);
+ if (ret)
+ goto out;
+ } else if (unlikely(test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
+ &BTRFS_I(inode)->runtime_flags))) {
+ inode_dio_done(inode);
+ flags = DIO_LOCKING | DIO_SKIP_HOLES;
+ wakeup = false;
+ }
+
+ ret = __blockdev_direct_IO(rw, iocb, inode,
+ BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev,
+ iov, offset, nr_segs, btrfs_get_blocks_direct, NULL,
+ btrfs_submit_direct, flags);
+ if (rw & WRITE) {
+ if (ret < 0 && ret != -EIOCBQUEUED)
+ btrfs_delalloc_release_space(inode, count);
+ else if (ret >= 0 && (size_t)ret < count)
+ btrfs_delalloc_release_space(inode,
+ count - (size_t)ret);
+ else
+ btrfs_delalloc_release_metadata(inode, 0);
+ }
+out:
+ if (wakeup)
+ inode_dio_done(inode);
+ if (relock)
+ mutex_lock(&inode->i_mutex);
+
+ return ret;
}
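
The write branch above reserves delalloc space for the whole iov up front and hands back whatever __blockdev_direct_IO did not consume; a tiny standalone sketch of that bookkeeping (the helper name and the EIOCBQUEUED stand-in value are illustrative only, and note that the fully-written/queued case still drops its metadata reservation separately, as the else branch shows):

#include <stdio.h>

#define EIOCBQUEUED_SKETCH 529	/* stand-in for the kernel-internal EIOCBQUEUED */

/* bytes of the up-front data reservation that must be handed back */
static long space_to_release(long count, long ret)
{
	if (ret < 0 && ret != -EIOCBQUEUED_SKETCH)
		return count;		/* the DIO failed outright: release everything */
	if (ret >= 0 && ret < count)
		return count - ret;	/* short write: release the unused tail */
	return 0;			/* fully consumed, or queued for async completion */
}

int main(void)
{
	printf("%ld\n", space_to_release(1 << 20, -5));		/* 1048576 */
	printf("%ld\n", space_to_release(1 << 20, 4096));	/* 1044480 */
	printf("%ld\n", space_to_release(1 << 20, 1 << 20));	/* 0 */
	return 0;
}
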
#define BTRFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC)
@@ -6735,8 +7474,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
return;
}
lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
- ordered = btrfs_lookup_ordered_extent(inode,
- page_offset(page));
+ ordered = btrfs_lookup_ordered_extent(inode, page_offset(page));
if (ordered) {
/*
* IO on this page will never be started, so we need
@@ -7216,8 +7954,9 @@ int btrfs_drop_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
+ /* the snap/subvol tree is being deleted */
if (btrfs_root_refs(&root->root_item) == 0 &&
- !btrfs_is_free_space_inode(inode))
+ root != root->fs_info->tree_root)
return 1;
else
return generic_drop_inode(inode);
@@ -7299,40 +8038,22 @@ fail:
static int btrfs_getattr(struct vfsmount *mnt,
struct dentry *dentry, struct kstat *stat)
{
+ u64 delalloc_bytes;
struct inode *inode = dentry->d_inode;
u32 blocksize = inode->i_sb->s_blocksize;
generic_fillattr(inode, stat);
stat->dev = BTRFS_I(inode)->root->anon_dev;
stat->blksize = PAGE_CACHE_SIZE;
+
+ spin_lock(&BTRFS_I(inode)->lock);
+ delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
+ spin_unlock(&BTRFS_I(inode)->lock);
stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
- ALIGN(BTRFS_I(inode)->delalloc_bytes, blocksize)) >> 9;
+ ALIGN(delalloc_bytes, blocksize)) >> 9;
return 0;
}
-/*
- * If a file is moved, it will inherit the cow and compression flags of the new
- * directory.
- */
-static void fixup_inode_flags(struct inode *dir, struct inode *inode)
-{
- struct btrfs_inode *b_dir = BTRFS_I(dir);
- struct btrfs_inode *b_inode = BTRFS_I(inode);
-
- if (b_dir->flags & BTRFS_INODE_NODATACOW)
- b_inode->flags |= BTRFS_INODE_NODATACOW;
- else
- b_inode->flags &= ~BTRFS_INODE_NODATACOW;
-
- if (b_dir->flags & BTRFS_INODE_COMPRESS) {
- b_inode->flags |= BTRFS_INODE_COMPRESS;
- b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
- } else {
- b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
- BTRFS_INODE_NOCOMPRESS);
- }
-}
-
static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
@@ -7498,8 +8219,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
- fixup_inode_flags(new_dir, old_inode);
-
ret = btrfs_add_link(trans, new_dir, old_inode,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
@@ -7583,7 +8302,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
INIT_LIST_HEAD(&works);
INIT_LIST_HEAD(&splice);
-again:
+
spin_lock(&root->fs_info->delalloc_lock);
list_splice_init(&root->fs_info->delalloc_inodes, &splice);
while (!list_empty(&splice)) {
@@ -7593,8 +8312,11 @@ again:
list_del_init(&binode->delalloc_inodes);
inode = igrab(&binode->vfs_inode);
- if (!inode)
+ if (!inode) {
+ clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
+ &binode->runtime_flags);
continue;
+ }
list_add_tail(&binode->delalloc_inodes,
&root->fs_info->delalloc_inodes);
@@ -7619,13 +8341,6 @@ again:
btrfs_wait_and_free_delalloc_work(work);
}
- spin_lock(&root->fs_info->delalloc_lock);
- if (!list_empty(&root->fs_info->delalloc_inodes)) {
- spin_unlock(&root->fs_info->delalloc_lock);
- goto again;
- }
- spin_unlock(&root->fs_info->delalloc_lock);
-
/* the filemap_flush will queue IO into the worker threads, but
* we have to make sure the IO is actually started and that
* ordered extents get created before we return
@@ -7801,8 +8516,9 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
}
}
- ret = btrfs_reserve_extent(trans, root, num_bytes, min_size,
- 0, *alloc_hint, &ins, 1);
+ ret = btrfs_reserve_extent(trans, root,
+ min(num_bytes, 256ULL * 1024 * 1024),
+ min_size, 0, *alloc_hint, &ins, 1);
if (ret) {
if (own_trans)
btrfs_end_transaction(trans, root);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c3f09f71bed..c83086fdda0 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -42,12 +42,12 @@
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
+#include <linux/btrfs.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "volumes.h"
#include "locking.h"
@@ -363,46 +363,52 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
return 0;
}
-static noinline int create_subvol(struct btrfs_root *root,
+static noinline int create_subvol(struct inode *dir,
struct dentry *dentry,
char *name, int namelen,
u64 *async_transid,
- struct btrfs_qgroup_inherit **inherit)
+ struct btrfs_qgroup_inherit *inherit)
{
struct btrfs_trans_handle *trans;
struct btrfs_key key;
struct btrfs_root_item root_item;
struct btrfs_inode_item *inode_item;
struct extent_buffer *leaf;
+ struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *new_root;
- struct dentry *parent = dentry->d_parent;
- struct inode *dir;
+ struct btrfs_block_rsv block_rsv;
struct timespec cur_time = CURRENT_TIME;
int ret;
int err;
u64 objectid;
u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
u64 index = 0;
+ u64 qgroup_reserved;
uuid_le new_uuid;
ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid);
if (ret)
return ret;
- dir = parent->d_inode;
-
+ btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
/*
- * 1 - inode item
- * 2 - refs
- * 1 - root item
- * 2 - dir items
+ * Same as for snapshot creation; see the comment
+ * of create_snapshot().
*/
- trans = btrfs_start_transaction(root, 6);
- if (IS_ERR(trans))
- return PTR_ERR(trans);
+ ret = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+ 7, &qgroup_reserved);
+ if (ret)
+ return ret;
+
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out;
+ }
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
- ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid,
- inherit ? *inherit : NULL);
+ ret = btrfs_qgroup_inherit(trans, root->fs_info, 0, objectid, inherit);
if (ret)
goto fail;
@@ -516,6 +522,8 @@ static noinline int create_subvol(struct btrfs_root *root,
BUG_ON(ret);
fail:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
if (async_transid) {
*async_transid = trans->transid;
err = btrfs_commit_transaction_async(trans, root, 1);
@@ -527,13 +535,15 @@ fail:
if (!ret)
d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
-
+out:
+ btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
return ret;
}
-static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
- char *name, int namelen, u64 *async_transid,
- bool readonly, struct btrfs_qgroup_inherit **inherit)
+static int create_snapshot(struct btrfs_root *root, struct inode *dir,
+ struct dentry *dentry, char *name, int namelen,
+ u64 *async_transid, bool readonly,
+ struct btrfs_qgroup_inherit *inherit)
{
struct inode *inode;
struct btrfs_pending_snapshot *pending_snapshot;
@@ -549,23 +559,31 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
btrfs_init_block_rsv(&pending_snapshot->block_rsv,
BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * 1 - parent dir inode
+ * 2 - dir entries
+ * 1 - root item
+ * 2 - root ref/backref
+ * 1 - root of snapshot
+ */
+ ret = btrfs_subvolume_reserve_metadata(BTRFS_I(dir)->root,
+ &pending_snapshot->block_rsv, 7,
+ &pending_snapshot->qgroup_reserved);
+ if (ret)
+ goto out;
+
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
- if (inherit) {
- pending_snapshot->inherit = *inherit;
- *inherit = NULL; /* take responsibility to free it */
- }
+ pending_snapshot->dir = dir;
+ pending_snapshot->inherit = inherit;
- trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
+ trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto fail;
}
- ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
- BUG_ON(ret);
-
spin_lock(&root->fs_info->trans_lock);
list_add(&pending_snapshot->list,
&trans->transaction->pending_snapshots);
@@ -602,6 +620,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
d_instantiate(dentry, inode);
ret = 0;
fail:
+ btrfs_subvolume_release_metadata(BTRFS_I(dir)->root,
+ &pending_snapshot->block_rsv,
+ pending_snapshot->qgroup_reserved);
+out:
kfree(pending_snapshot);
return ret;
}
@@ -695,7 +717,7 @@ static noinline int btrfs_mksubvol(struct path *parent,
char *name, int namelen,
struct btrfs_root *snap_src,
u64 *async_transid, bool readonly,
- struct btrfs_qgroup_inherit **inherit)
+ struct btrfs_qgroup_inherit *inherit)
{
struct inode *dir = parent->dentry->d_inode;
struct dentry *dentry;
@@ -732,11 +754,11 @@ static noinline int btrfs_mksubvol(struct path *parent,
goto out_up_read;
if (snap_src) {
- error = create_snapshot(snap_src, dentry, name, namelen,
+ error = create_snapshot(snap_src, dir, dentry, name, namelen,
async_transid, readonly, inherit);
} else {
- error = create_subvol(BTRFS_I(dir)->root, dentry,
- name, namelen, async_transid, inherit);
+ error = create_subvol(dir, dentry, name, namelen,
+ async_transid, inherit);
}
if (!error)
fsnotify_mkdir(dir, dentry);
@@ -818,7 +840,7 @@ static int find_new_extents(struct btrfs_root *root,
while(1) {
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, newer_than);
+ path, newer_than);
if (ret != 0)
goto none;
if (min_key.objectid != ino)
@@ -1206,6 +1228,12 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!(inode->i_sb->s_flags & MS_ACTIVE))
break;
+ if (btrfs_defrag_cancelled(root->fs_info)) {
+ printk(KERN_DEBUG "btrfs: defrag_file cancelled\n");
+ ret = -EAGAIN;
+ break;
+ }
+
if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
extent_thresh, &last_len, &skip,
&defrag_end, range->flags &
@@ -1329,9 +1357,6 @@ static noinline int btrfs_ioctl_resize(struct file *file,
int ret = 0;
int mod = 0;
- if (root->fs_info->sb->s_flags & MS_RDONLY)
- return -EROFS;
-
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1363,6 +1388,10 @@ static noinline int btrfs_ioctl_resize(struct file *file,
*devstr = '\0';
devstr = vol_args->name;
devid = simple_strtoull(devstr, &end, 10);
+ if (!devid) {
+ ret = -EINVAL;
+ goto out_free;
+ }
printk(KERN_INFO "btrfs: resizing devid %llu\n",
(unsigned long long)devid);
}
@@ -1371,7 +1400,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
if (!device) {
printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
(unsigned long long)devid);
- ret = -EINVAL;
+ ret = -ENODEV;
goto out_free;
}
@@ -1379,7 +1408,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
printk(KERN_INFO "btrfs: resizer unable to apply on "
"readonly device %llu\n",
(unsigned long long)devid);
- ret = -EINVAL;
+ ret = -EPERM;
goto out_free;
}
@@ -1401,7 +1430,7 @@ static noinline int btrfs_ioctl_resize(struct file *file,
}
if (device->is_tgtdev_for_dev_replace) {
- ret = -EINVAL;
+ ret = -EPERM;
goto out_free;
}
@@ -1457,7 +1486,7 @@ out:
static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
char *name, unsigned long fd, int subvol,
u64 *transid, bool readonly,
- struct btrfs_qgroup_inherit **inherit)
+ struct btrfs_qgroup_inherit *inherit)
{
int namelen;
int ret = 0;
@@ -1566,7 +1595,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
ret = btrfs_ioctl_snap_create_transid(file, vol_args->name,
vol_args->fd, subvol, ptr,
- readonly, &inherit);
+ readonly, inherit);
if (ret == 0 && ptr &&
copy_to_user(arg +
@@ -1863,7 +1892,7 @@ static noinline int search_ioctl(struct inode *inode,
path->keep_locks = 1;
while(1) {
- ret = btrfs_search_forward(root, &key, &max_key, path, 0,
+ ret = btrfs_search_forward(root, &key, &max_key, path,
sk->min_transid);
if (ret != 0) {
if (ret > 0)
@@ -2035,6 +2064,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
struct btrfs_root *dest = NULL;
struct btrfs_ioctl_vol_args *vol_args;
struct btrfs_trans_handle *trans;
+ struct btrfs_block_rsv block_rsv;
+ u64 qgroup_reserved;
int namelen;
int ret;
int err = 0;
@@ -2124,12 +2155,23 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
if (err)
goto out_up_write;
+ btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
+ /*
+ * One for dir inode, two for dir entries, two for root
+ * ref/backref.
+ */
+ err = btrfs_subvolume_reserve_metadata(root, &block_rsv,
+ 5, &qgroup_reserved);
+ if (err)
+ goto out_up_write;
+
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
- goto out_up_write;
+ goto out_release;
}
- trans->block_rsv = &root->fs_info->global_block_rsv;
+ trans->block_rsv = &block_rsv;
+ trans->bytes_reserved = block_rsv.size;
ret = btrfs_unlink_subvol(trans, root, dir,
dest->root_key.objectid,
@@ -2159,10 +2201,14 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
}
}
out_end_trans:
+ trans->block_rsv = NULL;
+ trans->bytes_reserved = 0;
ret = btrfs_end_transaction(trans, root);
if (ret && !err)
err = ret;
inode->i_flags |= S_DEAD;
+out_release:
+ btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved);
out_up_write:
up_write(&root->fs_info->subvol_sem);
out_unlock:
@@ -2171,6 +2217,12 @@ out_unlock:
shrink_dcache_sb(root->fs_info->sb);
btrfs_invalidate_inodes(dest);
d_delete(dentry);
+
+ /* the last ref */
+ if (dest->cache_inode) {
+ iput(dest->cache_inode);
+ dest->cache_inode = NULL;
+ }
}
out_dput:
dput(dentry);
@@ -2211,10 +2263,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
ret = -EPERM;
goto out;
}
- ret = btrfs_defrag_root(root, 0);
+ ret = btrfs_defrag_root(root);
if (ret)
goto out;
- ret = btrfs_defrag_root(root->fs_info->extent_root, 0);
+ ret = btrfs_defrag_root(root->fs_info->extent_root);
break;
case S_IFREG:
if (!(file->f_mode & FMODE_WRITE)) {
@@ -3111,7 +3163,7 @@ static noinline long btrfs_ioctl_start_sync(struct btrfs_root *root,
u64 transid;
int ret;
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT)
return PTR_ERR(trans);
@@ -3289,7 +3341,7 @@ static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
struct inode_fs_paths *ipath = NULL;
struct btrfs_path *path;
- if (!capable(CAP_SYS_ADMIN))
+ if (!capable(CAP_DAC_READ_SEARCH))
return -EPERM;
path = btrfs_alloc_path();
@@ -3914,6 +3966,65 @@ out:
return ret;
}
+static int btrfs_ioctl_get_fslabel(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ const char *label = root->fs_info->super_copy->label;
+ size_t len = strnlen(label, BTRFS_LABEL_SIZE);
+ int ret;
+
+ if (len == BTRFS_LABEL_SIZE) {
+ pr_warn("btrfs: label is too long, return the first %zu bytes\n",
+ --len);
+ }
+
+ mutex_lock(&root->fs_info->volume_mutex);
+ ret = copy_to_user(arg, label, len);
+ mutex_unlock(&root->fs_info->volume_mutex);
+
+ return ret ? -EFAULT : 0;
+}
+
+static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
+{
+ struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
+ struct btrfs_super_block *super_block = root->fs_info->super_copy;
+ struct btrfs_trans_handle *trans;
+ char label[BTRFS_LABEL_SIZE];
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (copy_from_user(label, arg, sizeof(label)))
+ return -EFAULT;
+
+ if (strnlen(label, BTRFS_LABEL_SIZE) == BTRFS_LABEL_SIZE) {
+ pr_err("btrfs: unable to set label with more than %d bytes\n",
+ BTRFS_LABEL_SIZE - 1);
+ return -EINVAL;
+ }
+
+ ret = mnt_want_write_file(file);
+ if (ret)
+ return ret;
+
+ mutex_lock(&root->fs_info->volume_mutex);
+ trans = btrfs_start_transaction(root, 0);
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ goto out_unlock;
+ }
+
+ strcpy(super_block->label, label);
+ ret = btrfs_end_transaction(trans, root);
+
+out_unlock:
+ mutex_unlock(&root->fs_info->volume_mutex);
+ mnt_drop_write_file(file);
+ return ret;
+}
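
A hypothetical userspace caller of the two new ioctls, assuming the uapi linux/btrfs.h on the running kernel exports BTRFS_IOC_GET_FSLABEL, BTRFS_IOC_SET_FSLABEL and BTRFS_LABEL_SIZE:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>	/* assumed to export the FSLABEL ioctls */

int main(int argc, char **argv)
{
	char label[BTRFS_LABEL_SIZE] = { 0 };
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <path-on-btrfs> [new-label]\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);	/* any file or dir on the filesystem */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BTRFS_IOC_GET_FSLABEL, label) == 0)
		printf("label: %s\n", label);
	if (argc > 2) {
		memset(label, 0, sizeof(label));
		strncpy(label, argv[2], BTRFS_LABEL_SIZE - 1);
		if (ioctl(fd, BTRFS_IOC_SET_FSLABEL, label) != 0)
			perror("BTRFS_IOC_SET_FSLABEL");
	}
	close(fd);
	return 0;
}
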
+
long btrfs_ioctl(struct file *file, unsigned int
cmd, unsigned long arg)
{
@@ -4014,6 +4125,10 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_qgroup_limit(file, argp);
case BTRFS_IOC_DEV_REPLACE:
return btrfs_ioctl_dev_replace(root, argp);
+ case BTRFS_IOC_GET_FSLABEL:
+ return btrfs_ioctl_get_fslabel(file, argp);
+ case BTRFS_IOC_SET_FSLABEL:
+ return btrfs_ioctl_set_fslabel(file, argp);
}
return -ENOTTY;
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 2a1762c6604..e95df435d89 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -113,11 +113,10 @@ again:
read_unlock(&eb->lock);
return;
}
- read_unlock(&eb->lock);
- wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
- read_lock(&eb->lock);
if (atomic_read(&eb->blocking_writers)) {
read_unlock(&eb->lock);
+ wait_event(eb->write_lock_wq,
+ atomic_read(&eb->blocking_writers) == 0);
goto again;
}
atomic_inc(&eb->read_locks);
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e5ed5672960..dc08d77b717 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -196,6 +196,9 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
entry->file_offset = file_offset;
entry->start = start;
entry->len = len;
+ if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) &&
+ !(type == BTRFS_ORDERED_NOCOW))
+ entry->csum_bytes_left = disk_len;
entry->disk_len = disk_len;
entry->bytes_left = len;
entry->inode = igrab(inode);
@@ -213,6 +216,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
INIT_LIST_HEAD(&entry->root_extent_list);
INIT_LIST_HEAD(&entry->work_list);
init_completion(&entry->completion);
+ INIT_LIST_HEAD(&entry->log_list);
trace_btrfs_ordered_extent_add(inode, entry);
@@ -270,6 +274,10 @@ void btrfs_add_ordered_sum(struct inode *inode,
tree = &BTRFS_I(inode)->ordered_tree;
spin_lock_irq(&tree->lock);
list_add_tail(&sum->list, &entry->list);
+ WARN_ON(entry->csum_bytes_left < sum->len);
+ entry->csum_bytes_left -= sum->len;
+ if (entry->csum_bytes_left == 0)
+ wake_up(&entry->wait);
spin_unlock_irq(&tree->lock);
}
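
The csum_bytes_left counter updated above turns the ordered extent into a countdown that other code can sleep on until every byte has had its checksum attached; a generic kernel-style sketch of that countdown-and-wake pattern (the structure and helper names here are hypothetical, not btrfs API):

#include <linux/bug.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct csum_countdown {
	spinlock_t		lock;
	u64			bytes_left;
	wait_queue_head_t	wait;
};

/* producer side: called as each batch of checksums is attached */
static void csum_countdown_sub(struct csum_countdown *c, u64 len)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	WARN_ON(c->bytes_left < len);
	c->bytes_left -= len;
	if (c->bytes_left == 0)
		wake_up(&c->wait);
	spin_unlock_irqrestore(&c->lock, flags);
}

/* consumer side: sleep until every byte has been checksummed */
static void csum_countdown_wait(struct csum_countdown *c)
{
	wait_event(c->wait, c->bytes_left == 0);
}
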
@@ -405,6 +413,66 @@ out:
return ret == 0;
}
+/* Needs to be called under either a log transaction or the log_mutex */
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode)
+{
+ struct btrfs_ordered_inode_tree *tree;
+ struct btrfs_ordered_extent *ordered;
+ struct rb_node *n;
+ int index = log->log_transid % 2;
+
+ tree = &BTRFS_I(inode)->ordered_tree;
+ spin_lock_irq(&tree->lock);
+ for (n = rb_first(&tree->tree); n; n = rb_next(n)) {
+ ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
+ spin_lock(&log->log_extents_lock[index]);
+ if (list_empty(&ordered->log_list)) {
+ list_add_tail(&ordered->log_list, &log->logged_list[index]);
+ atomic_inc(&ordered->refs);
+ }
+ spin_unlock(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&tree->lock);
+}
+
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
+{
+ struct btrfs_ordered_extent *ordered;
+ int index = transid % 2;
+
+ spin_lock_irq(&log->log_extents_lock[index]);
+ while (!list_empty(&log->logged_list[index])) {
+ ordered = list_first_entry(&log->logged_list[index],
+ struct btrfs_ordered_extent,
+ log_list);
+ list_del_init(&ordered->log_list);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
+ &ordered->flags));
+ btrfs_put_ordered_extent(ordered);
+ spin_lock_irq(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid)
+{
+ struct btrfs_ordered_extent *ordered;
+ int index = transid % 2;
+
+ spin_lock_irq(&log->log_extents_lock[index]);
+ while (!list_empty(&log->logged_list[index])) {
+ ordered = list_first_entry(&log->logged_list[index],
+ struct btrfs_ordered_extent,
+ log_list);
+ list_del_init(&ordered->log_list);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ btrfs_put_ordered_extent(ordered);
+ spin_lock_irq(&log->log_extents_lock[index]);
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+}
+
/*
* used to drop a reference on an ordered extent. This will free
* the extent if the last reference is dropped
@@ -544,10 +612,12 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
* extra check to make sure the ordered operation list really is empty
* before we return
*/
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int wait)
{
struct btrfs_inode *btrfs_inode;
struct inode *inode;
+ struct btrfs_transaction *cur_trans = trans->transaction;
struct list_head splice;
struct list_head works;
struct btrfs_delalloc_work *work, *next;
@@ -558,14 +628,10 @@ int btrfs_run_ordered_operations(struct btrfs_root *root, int wait)
mutex_lock(&root->fs_info->ordered_operations_mutex);
spin_lock(&root->fs_info->ordered_extent_lock);
-again:
- list_splice_init(&root->fs_info->ordered_operations, &splice);
-
+ list_splice_init(&cur_trans->ordered_operations, &splice);
while (!list_empty(&splice)) {
-
btrfs_inode = list_entry(splice.next, struct btrfs_inode,
ordered_operations);
-
inode = &btrfs_inode->vfs_inode;
list_del_init(&btrfs_inode->ordered_operations);
@@ -574,24 +640,22 @@ again:
* the inode may be getting freed (in sys_unlink path).
*/
inode = igrab(inode);
-
- if (!wait && inode) {
- list_add_tail(&BTRFS_I(inode)->ordered_operations,
- &root->fs_info->ordered_operations);
- }
-
if (!inode)
continue;
+
+ if (!wait)
+ list_add_tail(&BTRFS_I(inode)->ordered_operations,
+ &cur_trans->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
work = btrfs_alloc_delalloc_work(inode, wait, 1);
if (!work) {
+ spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations))
list_add_tail(&btrfs_inode->ordered_operations,
&splice);
- spin_lock(&root->fs_info->ordered_extent_lock);
list_splice_tail(&splice,
- &root->fs_info->ordered_operations);
+ &cur_trans->ordered_operations);
spin_unlock(&root->fs_info->ordered_extent_lock);
ret = -ENOMEM;
goto out;
@@ -603,9 +667,6 @@ again:
cond_resched();
spin_lock(&root->fs_info->ordered_extent_lock);
}
- if (wait && !list_empty(&root->fs_info->ordered_operations))
- goto again;
-
spin_unlock(&root->fs_info->ordered_extent_lock);
out:
list_for_each_entry_safe(work, next, &works, list) {
@@ -974,6 +1035,7 @@ out:
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode)
{
+ struct btrfs_transaction *cur_trans = trans->transaction;
u64 last_mod;
last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
@@ -988,7 +1050,7 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->ordered_extent_lock);
if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
list_add_tail(&BTRFS_I(inode)->ordered_operations,
- &root->fs_info->ordered_operations);
+ &cur_trans->ordered_operations);
}
spin_unlock(&root->fs_info->ordered_extent_lock);
}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index f29d4bf5fbe..8eadfe406cd 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -79,6 +79,8 @@ struct btrfs_ordered_sum {
#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
* has done its due diligence in updating
* the isize. */
+#define BTRFS_ORDERED_LOGGED_CSUM 8 /* We've logged the csums on this
+ ordered extent */
struct btrfs_ordered_extent {
/* logical offset in the file */
@@ -96,6 +98,9 @@ struct btrfs_ordered_extent {
/* number of bytes that still need writing */
u64 bytes_left;
+ /* number of bytes that still need csumming */
+ u64 csum_bytes_left;
+
/*
* the end of the ordered extent which is behind it but
* didn't update disk_i_size. Please see the comment of
@@ -118,6 +123,9 @@ struct btrfs_ordered_extent {
/* list of checksums for insertion when the extent io is done */
struct list_head list;
+ /* If we need to wait on this to be done */
+ struct list_head log_list;
+
/* used to wait for the BTRFS_ORDERED_COMPLETE bit */
wait_queue_head_t wait;
@@ -189,11 +197,15 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
struct btrfs_ordered_extent *ordered);
int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum);
-int btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
+int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root, int wait);
void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode);
void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
+void btrfs_get_logged_extents(struct btrfs_root *log, struct inode *inode);
+void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid);
+void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
int __init ordered_data_init(void);
void ordered_data_exit(void);
#endif
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 50d95fd190a..920957ecb27 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -294,6 +294,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
btrfs_dev_extent_chunk_offset(l, dev_extent),
(unsigned long long)
btrfs_dev_extent_length(l, dev_extent));
+ break;
case BTRFS_DEV_STATS_KEY:
printk(KERN_INFO "\t\tdevice stats\n");
break;
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index a5c85623432..aee4b1cc3d9 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -23,13 +23,13 @@
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
+#include <linux/btrfs.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "ulist.h"
-#include "ioctl.h"
#include "backref.h"
/* TODO XXX FIXME
@@ -620,7 +620,9 @@ static int update_qgroup_limit_item(struct btrfs_trans_handle *trans,
key.offset = qgroupid;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -661,7 +663,9 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
key.offset = qgroup->qgroupid;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -702,7 +706,9 @@ static int update_qgroup_status_item(struct btrfs_trans_handle *trans,
key.offset = 0;
path = btrfs_alloc_path();
- BUG_ON(!path);
+ if (!path)
+ return -ENOMEM;
+
ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
if (ret > 0)
ret = -ENOENT;
@@ -732,33 +738,38 @@ static int btrfs_clean_quota_tree(struct btrfs_trans_handle *trans,
{
struct btrfs_path *path;
struct btrfs_key key;
+ struct extent_buffer *leaf = NULL;
int ret;
-
- if (!root)
- return -EINVAL;
+ int nr = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
- while (1) {
- key.objectid = 0;
- key.offset = 0;
- key.type = 0;
+ path->leave_spinning = 1;
- path->leave_spinning = 1;
+ key.objectid = 0;
+ key.offset = 0;
+ key.type = 0;
+
+ while (1) {
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret > 0) {
- if (path->slots[0] == 0)
- break;
- path->slots[0]--;
- } else if (ret < 0) {
+ if (ret < 0)
+ goto out;
+ leaf = path->nodes[0];
+ nr = btrfs_header_nritems(leaf);
+ if (!nr)
break;
- }
-
- ret = btrfs_del_item(trans, root, path);
+ /*
+ * delete one leaf at a time
+ * since the whole tree is going
+ * to be deleted.
+ */
+ path->slots[0] = 0;
+ ret = btrfs_del_items(trans, root, path, 0, nr);
if (ret)
goto out;
+
btrfs_release_path(path);
}
ret = 0;
@@ -847,6 +858,10 @@ int btrfs_quota_disable(struct btrfs_trans_handle *trans,
int ret = 0;
spin_lock(&fs_info->qgroup_lock);
+ if (!fs_info->quota_root) {
+ spin_unlock(&fs_info->qgroup_lock);
+ return 0;
+ }
fs_info->quota_enabled = 0;
fs_info->pending_quota_state = 0;
quota_root = fs_info->quota_root;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
new file mode 100644
index 00000000000..07222053c7d
--- /dev/null
+++ b/fs/btrfs/raid56.c
@@ -0,0 +1,2099 @@
+/*
+ * Copyright (C) 2012 Fusion-io All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/bio.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/random.h>
+#include <linux/iocontext.h>
+#include <linux/capability.h>
+#include <linux/ratelimit.h>
+#include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <linux/hash.h>
+#include <linux/list_sort.h>
+#include <linux/raid/xor.h>
+#include <asm/div64.h>
+#include "compat.h"
+#include "ctree.h"
+#include "extent_map.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "print-tree.h"
+#include "volumes.h"
+#include "raid56.h"
+#include "async-thread.h"
+#include "check-integrity.h"
+#include "rcu-string.h"
+
+/* set when additional merges to this rbio are not allowed */
+#define RBIO_RMW_LOCKED_BIT 1
+
+/*
+ * set when this rbio is sitting in the hash, but it is just a cache
+ * of past RMW
+ */
+#define RBIO_CACHE_BIT 2
+
+/*
+ * set when it is safe to trust the stripe_pages for caching
+ */
+#define RBIO_CACHE_READY_BIT 3
+
+
+#define RBIO_CACHE_SIZE 1024
+
+struct btrfs_raid_bio {
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_bio *bbio;
+
+ /*
+ * logical block numbers for the start of each stripe
+ * The last one or two are p/q. These are sorted,
+ * so raid_map[0] is the start of our full stripe
+ */
+ u64 *raid_map;
+
+ /* while we're doing rmw on a stripe
+ * we put it into a hash table so we can
+ * lock the stripe and merge more rbios
+ * into it.
+ */
+ struct list_head hash_list;
+
+ /*
+ * LRU list for the stripe cache
+ */
+ struct list_head stripe_cache;
+
+ /*
+ * for scheduling work in the helper threads
+ */
+ struct btrfs_work work;
+
+ /*
+ * bio list and bio_list_lock are used
+ * to add more bios into the stripe
+ * in hopes of avoiding the full rmw
+ */
+ struct bio_list bio_list;
+ spinlock_t bio_list_lock;
+
+ /* also protected by the bio_list_lock, the
+ * plug list is used by the plugging code
+ * to collect partial bios while plugged. The
+ * stripe locking code also uses it to hand off
+ * the stripe lock to the next pending IO
+ */
+ struct list_head plug_list;
+
+ /*
+ * flags that tell us if it is safe to
+ * merge with this bio
+ */
+ unsigned long flags;
+
+ /* size of each individual stripe on disk */
+ int stripe_len;
+
+ /* number of data stripes (no p/q) */
+ int nr_data;
+
+ /*
+ * set if we're doing a parity rebuild
+ * for a read from higher up, which is handled
+ * differently from a parity rebuild as part of
+ * rmw
+ */
+ int read_rebuild;
+
+ /* first bad stripe */
+ int faila;
+
+ /* second bad stripe (for raid6 use) */
+ int failb;
+
+ /*
+ * number of pages needed to represent the full
+ * stripe
+ */
+ int nr_pages;
+
+ /*
+ * size of all the bios in the bio_list. This
+ * helps us decide if the rbio maps to a full
+ * stripe or not
+ */
+ int bio_list_bytes;
+
+ atomic_t refs;
+
+ /*
+ * these are two arrays of pointers. We allocate the
+ * rbio big enough to hold them both and setup their
+ * locations when the rbio is allocated
+ */
+
+ /* pointers to pages that we allocated for
+ * reading/writing stripes directly from the disk (including P/Q)
+ */
+ struct page **stripe_pages;
+
+ /*
+ * pointers to the pages in the bio_list. Stored
+ * here for faster lookup
+ */
+ struct page **bio_pages;
+};
+
+static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
+static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
+static void rmw_work(struct btrfs_work *work);
+static void read_rebuild_work(struct btrfs_work *work);
+static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
+static void async_read_rebuild(struct btrfs_raid_bio *rbio);
+static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
+static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
+static void __free_raid_bio(struct btrfs_raid_bio *rbio);
+static void index_rbio_pages(struct btrfs_raid_bio *rbio);
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
+
+/*
+ * the stripe hash table is used for locking, and to collect
+ * bios in hopes of making a full stripe
+ */
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
+{
+ struct btrfs_stripe_hash_table *table;
+ struct btrfs_stripe_hash_table *x;
+ struct btrfs_stripe_hash *cur;
+ struct btrfs_stripe_hash *h;
+ int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
+ int i;
+ int table_size;
+
+ if (info->stripe_hash_table)
+ return 0;
+
+ /*
+ * The table is large: it starts at order 4 and can go as high as
+ * order 7 when lock debugging is turned on.
+ *
+ * Try harder to allocate and fallback to vmalloc to lower the chance
+ * of a failing mount.
+ */
+ table_size = sizeof(*table) + sizeof(*h) * num_entries;
+ table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+ if (!table) {
+ table = vzalloc(table_size);
+ if (!table)
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&table->cache_lock);
+ INIT_LIST_HEAD(&table->stripe_cache);
+
+ h = table->table;
+
+ for (i = 0; i < num_entries; i++) {
+ cur = h + i;
+ INIT_LIST_HEAD(&cur->hash_list);
+ spin_lock_init(&cur->lock);
+ init_waitqueue_head(&cur->wait);
+ }
+
+ x = cmpxchg(&info->stripe_hash_table, NULL, table);
+ if (x) {
+ if (is_vmalloc_addr(x))
+ vfree(x);
+ else
+ kfree(x);
+ }
+ return 0;
+}
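
Because the allocation above may fall back to vzalloc(), every later free has to ask which allocator the pointer came from; a hypothetical helper expressing the rule that this patch open-codes at its call sites:

#include <linux/mm.h>		/* is_vmalloc_addr() */
#include <linux/slab.h>		/* kfree()           */
#include <linux/vmalloc.h>	/* vfree()           */

/* free a buffer that may have come from kzalloc() or the vzalloc() fallback */
static void free_table_mem(void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
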
+
+/*
+ * caching an rbio means to copy anything from the
+ * bio_pages array into the stripe_pages array. We
+ * use the page uptodate bit in the stripe cache array
+ * to indicate if it has valid data
+ *
+ * once the caching is done, we set the cache ready
+ * bit.
+ */
+static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ char *s;
+ char *d;
+ int ret;
+
+ ret = alloc_rbio_pages(rbio);
+ if (ret)
+ return;
+
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (!rbio->bio_pages[i])
+ continue;
+
+ s = kmap(rbio->bio_pages[i]);
+ d = kmap(rbio->stripe_pages[i]);
+
+ memcpy(d, s, PAGE_CACHE_SIZE);
+
+ kunmap(rbio->bio_pages[i]);
+ kunmap(rbio->stripe_pages[i]);
+ SetPageUptodate(rbio->stripe_pages[i]);
+ }
+ set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+}
+
+/*
+ * we hash on the first logical address of the stripe
+ */
+static int rbio_bucket(struct btrfs_raid_bio *rbio)
+{
+ u64 num = rbio->raid_map[0];
+
+ /*
+ * we shift down quite a bit. We're using byte
+ * addressing, and most of the lower bits are zeros.
+ * This tends to upset hash_64, and it consistently
+ * returns just one or two different values.
+ *
+ * shifting off the lower bits fixes things.
+ */
+ return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
+}
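
To see why the >> 16 matters, here is a small standalone illustration with made-up numbers; the multiplicative hash below is only a stand-in for the kernel's hash_64(), and TABLE_BITS stands in for BTRFS_STRIPE_HASH_TABLE_BITS:

#include <stdint.h>
#include <stdio.h>

#define TABLE_BITS 8	/* stand-in for BTRFS_STRIPE_HASH_TABLE_BITS */

/* simple multiplicative hash, standing in for hash_64() */
static unsigned int bucket_of(uint64_t logical)
{
	return (unsigned int)(((logical >> 16) * 0x9E3779B97F4A7C15ULL)
			      >> (64 - TABLE_BITS));
}

int main(void)
{
	uint64_t full_stripe = 3 * 64 * 1024;	/* e.g. three 64K data stripes */
	int i;

	/* neighbouring full-stripe starts should spread across buckets */
	for (i = 0; i < 8; i++)
		printf("stripe @%llu -> bucket %u\n",
		       (unsigned long long)(i * full_stripe),
		       bucket_of(i * full_stripe));
	return 0;
}
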
+
+/*
+ * stealing an rbio means taking all the uptodate pages from the stripe
+ * array in the source rbio and putting them into the destination rbio
+ */
+static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
+{
+ int i;
+ struct page *s;
+ struct page *d;
+
+ if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
+ return;
+
+ for (i = 0; i < dest->nr_pages; i++) {
+ s = src->stripe_pages[i];
+ if (!s || !PageUptodate(s)) {
+ continue;
+ }
+
+ d = dest->stripe_pages[i];
+ if (d)
+ __free_page(d);
+
+ dest->stripe_pages[i] = s;
+ src->stripe_pages[i] = NULL;
+ }
+}
+
+/*
+ * merging means we take the bio_list from the victim and
+ * splice it into the destination. The victim should
+ * be discarded afterwards.
+ *
+ * must be called with dest->bio_list_lock held
+ */
+static void merge_rbio(struct btrfs_raid_bio *dest,
+ struct btrfs_raid_bio *victim)
+{
+ bio_list_merge(&dest->bio_list, &victim->bio_list);
+ dest->bio_list_bytes += victim->bio_list_bytes;
+ bio_list_init(&victim->bio_list);
+}
+
+/*
+ * used to prune items that are in the cache. The caller
+ * must hold the hash table lock.
+ */
+static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
+{
+ int bucket = rbio_bucket(rbio);
+ struct btrfs_stripe_hash_table *table;
+ struct btrfs_stripe_hash *h;
+ int freeit = 0;
+
+ /*
+ * check the bit again under the hash table lock.
+ */
+ if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
+ return;
+
+ table = rbio->fs_info->stripe_hash_table;
+ h = table->table + bucket;
+
+ /* hold the lock for the bucket because we may be
+ * removing it from the hash table
+ */
+ spin_lock(&h->lock);
+
+ /*
+ * hold the lock for the bio list because we need
+ * to make sure the bio list is empty
+ */
+ spin_lock(&rbio->bio_list_lock);
+
+ if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
+ list_del_init(&rbio->stripe_cache);
+ table->cache_size -= 1;
+ freeit = 1;
+
+ /* if the bio list isn't empty, this rbio is
+ * still involved in an IO. We take it out
+ * of the cache list, and drop the ref that
+ * was held for the list.
+ *
+ * If the bio_list was empty, we also remove
+ * the rbio from the hash_table, and drop
+ * the corresponding ref
+ */
+ if (bio_list_empty(&rbio->bio_list)) {
+ if (!list_empty(&rbio->hash_list)) {
+ list_del_init(&rbio->hash_list);
+ atomic_dec(&rbio->refs);
+ BUG_ON(!list_empty(&rbio->plug_list));
+ }
+ }
+ }
+
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock(&h->lock);
+
+ if (freeit)
+ __free_raid_bio(rbio);
+}
+
+/*
+ * prune a given rbio from the cache
+ */
+static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_stripe_hash_table *table;
+ unsigned long flags;
+
+ if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
+ return;
+
+ table = rbio->fs_info->stripe_hash_table;
+
+ spin_lock_irqsave(&table->cache_lock, flags);
+ __remove_rbio_from_cache(rbio);
+ spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * remove everything in the cache
+ */
+void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
+{
+ struct btrfs_stripe_hash_table *table;
+ unsigned long flags;
+ struct btrfs_raid_bio *rbio;
+
+ table = info->stripe_hash_table;
+
+ spin_lock_irqsave(&table->cache_lock, flags);
+ while (!list_empty(&table->stripe_cache)) {
+ rbio = list_entry(table->stripe_cache.next,
+ struct btrfs_raid_bio,
+ stripe_cache);
+ __remove_rbio_from_cache(rbio);
+ }
+ spin_unlock_irqrestore(&table->cache_lock, flags);
+}
+
+/*
+ * remove all cached entries and free the hash table
+ * used by unmount
+ */
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
+{
+ if (!info->stripe_hash_table)
+ return;
+ btrfs_clear_rbio_cache(info);
+ if (is_vmalloc_addr(info->stripe_hash_table))
+ vfree(info->stripe_hash_table);
+ else
+ kfree(info->stripe_hash_table);
+ info->stripe_hash_table = NULL;
+}
+
+/*
+ * insert an rbio into the stripe cache. It
+ * must have already been prepared by calling
+ * cache_rbio_pages
+ *
+ * If this rbio was already cached, it gets
+ * moved to the front of the lru.
+ *
+ * If the size of the rbio cache is too big, we
+ * prune an item.
+ */
+static void cache_rbio(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_stripe_hash_table *table;
+ unsigned long flags;
+
+ if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
+ return;
+
+ table = rbio->fs_info->stripe_hash_table;
+
+ spin_lock_irqsave(&table->cache_lock, flags);
+ spin_lock(&rbio->bio_list_lock);
+
+ /* bump our ref if we were not in the list before */
+ if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
+ atomic_inc(&rbio->refs);
+
+ if (!list_empty(&rbio->stripe_cache)){
+ list_move(&rbio->stripe_cache, &table->stripe_cache);
+ } else {
+ list_add(&rbio->stripe_cache, &table->stripe_cache);
+ table->cache_size += 1;
+ }
+
+ spin_unlock(&rbio->bio_list_lock);
+
+ if (table->cache_size > RBIO_CACHE_SIZE) {
+ struct btrfs_raid_bio *found;
+
+ found = list_entry(table->stripe_cache.prev,
+ struct btrfs_raid_bio,
+ stripe_cache);
+
+ if (found != rbio)
+ __remove_rbio_from_cache(found);
+ }
+
+ spin_unlock_irqrestore(&table->cache_lock, flags);
+ return;
+}
+
+/*
+ * helper function to run the xor_blocks api. It is only
+ * able to do MAX_XOR_BLOCKS at a time, so we need to
+ * loop through.
+ */
+static void run_xor(void **pages, int src_cnt, ssize_t len)
+{
+ int src_off = 0;
+ int xor_src_cnt = 0;
+ void *dest = pages[src_cnt];
+
+ while(src_cnt > 0) {
+ xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
+ xor_blocks(xor_src_cnt, len, dest, pages + src_off);
+
+ src_cnt -= xor_src_cnt;
+ src_off += xor_src_cnt;
+ }
+}
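
run_xor() exists because xor_blocks() accepts at most MAX_XOR_BLOCKS sources per call; a plain userspace sketch of the same chunked loop, with the xor done by hand instead of the kernel's xor_blocks():

#include <stddef.h>
#include <stdint.h>

#define MAX_SRCS_PER_CALL 4	/* stand-in for the kernel's MAX_XOR_BLOCKS */

/* xor a batch of sources into dest, byte by byte (illustrative only) */
static void xor_batch(int nsrcs, size_t len, uint8_t *dest, uint8_t **srcs)
{
	for (size_t i = 0; i < len; i++)
		for (int s = 0; s < nsrcs; s++)
			dest[i] ^= srcs[s][i];
}

/* same shape as run_xor(): walk the source array in bounded chunks */
static void run_xor_sketch(uint8_t **pages, int src_cnt, size_t len)
{
	uint8_t *dest = pages[src_cnt];	/* destination sits after the sources */
	int src_off = 0;

	while (src_cnt > 0) {
		int n = src_cnt < MAX_SRCS_PER_CALL ? src_cnt : MAX_SRCS_PER_CALL;

		xor_batch(n, len, dest, pages + src_off);
		src_cnt -= n;
		src_off += n;
	}
}
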
+
+/*
+ * returns true if the bio list inside this rbio
+ * covers an entire stripe (no rmw required).
+ * Must be called with the bio list lock held, or
+ * at a time when you know it is impossible to add
+ * new bios into the list
+ */
+static int __rbio_is_full(struct btrfs_raid_bio *rbio)
+{
+ unsigned long size = rbio->bio_list_bytes;
+ int ret = 1;
+
+ if (size != rbio->nr_data * rbio->stripe_len)
+ ret = 0;
+
+ BUG_ON(size > rbio->nr_data * rbio->stripe_len);
+ return ret;
+}
+
+static int rbio_is_full(struct btrfs_raid_bio *rbio)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&rbio->bio_list_lock, flags);
+ ret = __rbio_is_full(rbio);
+ spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+ return ret;
+}
+
+/*
+ * returns 1 if it is safe to merge two rbios together.
+ * The merging is safe if the two rbios correspond to
+ * the same stripe and if they are both going in the same
+ * direction (read vs write), and if neither one is
+ * locked for final IO
+ *
+ * The caller is responsible for locking such that
+ * rmw_locked is safe to test
+ */
+static int rbio_can_merge(struct btrfs_raid_bio *last,
+ struct btrfs_raid_bio *cur)
+{
+ if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
+ test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
+ return 0;
+
+ /*
+ * we can't merge with cached rbios, since the
+ * idea is that when we merge the destination
+ * rbio is going to run our IO for us. We can
+ * steal from cached rbios, though; other functions
+ * handle that.
+ */
+ if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
+ test_bit(RBIO_CACHE_BIT, &cur->flags))
+ return 0;
+
+ if (last->raid_map[0] !=
+ cur->raid_map[0])
+ return 0;
+
+ /* reads can't merge with writes */
+ if (last->read_rebuild !=
+ cur->read_rebuild) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/*
+ * helper to index into the pstripe
+ */
+static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
+{
+ index += (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+ return rbio->stripe_pages[index];
+}
+
+/*
+ * helper to index into the qstripe, returns null
+ * if there is no qstripe
+ */
+static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
+{
+ if (rbio->nr_data + 1 == rbio->bbio->num_stripes)
+ return NULL;
+
+ index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
+ PAGE_CACHE_SHIFT;
+ return rbio->stripe_pages[index];
+}
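
With example numbers the index arithmetic above works out as follows (64K stripes and 4K pages are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	int stripe_len = 64 * 1024;			/* bytes per on-disk stripe */
	int page_shift = 12;				/* 4K pages                 */
	int pages_per_stripe = stripe_len >> page_shift;/* 16                       */
	int nr_data = 3;				/* data stripes before P/Q  */

	/* same arithmetic as rbio_pstripe_page()/rbio_qstripe_page() */
	printf("P pages start at index %d\n", nr_data * pages_per_stripe);
	printf("Q pages start at index %d\n", (nr_data + 1) * pages_per_stripe);
	return 0;
}
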
+
+/*
+ * The first stripe in the table for a logical address
+ * has the lock. rbios are added in one of three ways:
+ *
+ * 1) Nobody has the stripe locked yet. The rbio is given
+ * the lock and 0 is returned. The caller must start the IO
+ * themselves.
+ *
+ * 2) Someone has the stripe locked, but we're able to merge
+ * with the lock owner. The rbio is freed and the IO will
+ * start automatically along with the existing rbio. 1 is returned.
+ *
+ * 3) Someone has the stripe locked, but we're not able to merge.
+ * The rbio is added to the lock owner's plug list, or merged into
+ * an rbio already on the plug list. When the lock owner unlocks,
+ * the next rbio on the list is run and the IO is started automatically.
+ * 1 is returned
+ *
+ * If we return 0, the caller still owns the rbio and must continue with
+ * IO submission. If we return 1, the caller must assume the rbio has
+ * already been freed.
+ */
+static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
+{
+ int bucket = rbio_bucket(rbio);
+ struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
+ struct btrfs_raid_bio *cur;
+ struct btrfs_raid_bio *pending;
+ unsigned long flags;
+ DEFINE_WAIT(wait);
+ struct btrfs_raid_bio *freeit = NULL;
+ struct btrfs_raid_bio *cache_drop = NULL;
+ int ret = 0;
+ int walk = 0;
+
+ spin_lock_irqsave(&h->lock, flags);
+ list_for_each_entry(cur, &h->hash_list, hash_list) {
+ walk++;
+ if (cur->raid_map[0] == rbio->raid_map[0]) {
+ spin_lock(&cur->bio_list_lock);
+
+ /* can we steal this cached rbio's pages? */
+ if (bio_list_empty(&cur->bio_list) &&
+ list_empty(&cur->plug_list) &&
+ test_bit(RBIO_CACHE_BIT, &cur->flags) &&
+ !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
+ list_del_init(&cur->hash_list);
+ atomic_dec(&cur->refs);
+
+ steal_rbio(cur, rbio);
+ cache_drop = cur;
+ spin_unlock(&cur->bio_list_lock);
+
+ goto lockit;
+ }
+
+ /* can we merge into the lock owner? */
+ if (rbio_can_merge(cur, rbio)) {
+ merge_rbio(cur, rbio);
+ spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
+ ret = 1;
+ goto out;
+ }
+
+
+ /*
+ * we couldn't merge with the running
+ * rbio, see if we can merge with the
+ * pending ones. We don't have to
+ * check for rmw_locked because there
+ * is no way they are inside finish_rmw
+ * right now
+ */
+ list_for_each_entry(pending, &cur->plug_list,
+ plug_list) {
+ if (rbio_can_merge(pending, rbio)) {
+ merge_rbio(pending, rbio);
+ spin_unlock(&cur->bio_list_lock);
+ freeit = rbio;
+ ret = 1;
+ goto out;
+ }
+ }
+
+ /* no merging, put us on the tail of the plug list,
+ * our rbio will be started when the currently
+ * running rbio unlocks
+ */
+ list_add_tail(&rbio->plug_list, &cur->plug_list);
+ spin_unlock(&cur->bio_list_lock);
+ ret = 1;
+ goto out;
+ }
+ }
+lockit:
+ atomic_inc(&rbio->refs);
+ list_add(&rbio->hash_list, &h->hash_list);
+out:
+ spin_unlock_irqrestore(&h->lock, flags);
+ if (cache_drop)
+ remove_rbio_from_cache(cache_drop);
+ if (freeit)
+ __free_raid_bio(freeit);
+ return ret;
+}
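+
+/*
+ * A minimal sketch of the calling convention described above, matching
+ * what full_stripe_write() below does (partial_stripe_write() queues
+ * async work instead):
+ *
+ *   ret = lock_stripe_add(rbio);
+ *   if (ret == 0)
+ *           finish_rmw(rbio);
+ *
+ * A return of 1 means the rbio was merged or queued on the plug list and
+ * the caller must not touch it again.
+ */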
+
+/*
+ * called when an rmw or parity rebuild has completed. If the plug list has more
+ * rbios waiting for this stripe, the next one on the list will be started
+ */
+static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
+{
+ int bucket;
+ struct btrfs_stripe_hash *h;
+ unsigned long flags;
+ int keep_cache = 0;
+
+ bucket = rbio_bucket(rbio);
+ h = rbio->fs_info->stripe_hash_table->table + bucket;
+
+ if (list_empty(&rbio->plug_list))
+ cache_rbio(rbio);
+
+ spin_lock_irqsave(&h->lock, flags);
+ spin_lock(&rbio->bio_list_lock);
+
+ if (!list_empty(&rbio->hash_list)) {
+ /*
+ * if we're still cached and there is no other IO
+ * to perform, just leave this rbio here for others
+ * to steal from later
+ */
+ if (list_empty(&rbio->plug_list) &&
+ test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
+ keep_cache = 1;
+ clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ BUG_ON(!bio_list_empty(&rbio->bio_list));
+ goto done;
+ }
+
+ list_del_init(&rbio->hash_list);
+ atomic_dec(&rbio->refs);
+
+ /*
+ * we use the plug list to hold all the rbios
+ * waiting for the chance to lock this stripe.
+ * hand the lock over to one of them.
+ */
+ if (!list_empty(&rbio->plug_list)) {
+ struct btrfs_raid_bio *next;
+ struct list_head *head = rbio->plug_list.next;
+
+ next = list_entry(head, struct btrfs_raid_bio,
+ plug_list);
+
+ list_del_init(&rbio->plug_list);
+
+ list_add(&next->hash_list, &h->hash_list);
+ atomic_inc(&next->refs);
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+ if (next->read_rebuild)
+ async_read_rebuild(next);
+ else {
+ steal_rbio(rbio, next);
+ async_rmw_stripe(next);
+ }
+
+ goto done_nolock;
+ } else if (waitqueue_active(&h->wait)) {
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock_irqrestore(&h->lock, flags);
+ wake_up(&h->wait);
+ goto done_nolock;
+ }
+ }
+done:
+ spin_unlock(&rbio->bio_list_lock);
+ spin_unlock_irqrestore(&h->lock, flags);
+
+done_nolock:
+ if (!keep_cache)
+ remove_rbio_from_cache(rbio);
+}
+
+static void __free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+ int i;
+
+ WARN_ON(atomic_read(&rbio->refs) < 0);
+ if (!atomic_dec_and_test(&rbio->refs))
+ return;
+
+ WARN_ON(!list_empty(&rbio->stripe_cache));
+ WARN_ON(!list_empty(&rbio->hash_list));
+ WARN_ON(!bio_list_empty(&rbio->bio_list));
+
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i]) {
+ __free_page(rbio->stripe_pages[i]);
+ rbio->stripe_pages[i] = NULL;
+ }
+ }
+ kfree(rbio->raid_map);
+ kfree(rbio->bbio);
+ kfree(rbio);
+}
+
+static void free_raid_bio(struct btrfs_raid_bio *rbio)
+{
+ unlock_stripe(rbio);
+ __free_raid_bio(rbio);
+}
+
+/*
+ * this frees the rbio and runs through all the bios in the
+ * bio_list and calls end_io on them
+ */
+static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
+{
+ struct bio *cur = bio_list_get(&rbio->bio_list);
+ struct bio *next;
+ free_raid_bio(rbio);
+
+ while (cur) {
+ next = cur->bi_next;
+ cur->bi_next = NULL;
+ if (uptodate)
+ set_bit(BIO_UPTODATE, &cur->bi_flags);
+ bio_endio(cur, err);
+ cur = next;
+ }
+}
+
+/*
+ * end io function used by finish_rmw. When we finally
+ * get here, we've written a full stripe
+ */
+static void raid_write_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ return;
+
+ err = 0;
+
+ /* OK, we have finished writing all the stripes we need to. */
+ if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ err = -EIO;
+
+ rbio_orig_end_io(rbio, err, 0);
+ return;
+}
+
+/*
+ * the read/modify/write code wants to use the original bio for
+ * any pages it included, and then use the rbio for everything
+ * else. This function decides if a given index (stripe number)
+ * and page number in that stripe fall inside the original bio
+ * or the rbio.
+ *
+ * if you set bio_list_only, you'll get a NULL back for any ranges
+ * that are outside the bio_list
+ *
+ * This doesn't take any refs on anything, you get a bare page pointer
+ * and the caller must bump refs as required.
+ *
+ * You must call index_rbio_pages once before you can trust
+ * the answers from this function.
+ */
+static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
+ int index, int pagenr, int bio_list_only)
+{
+ int chunk_page;
+ struct page *p = NULL;
+
+ chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
+
+ spin_lock_irq(&rbio->bio_list_lock);
+ p = rbio->bio_pages[chunk_page];
+ spin_unlock_irq(&rbio->bio_list_lock);
+
+ if (p || bio_list_only)
+ return p;
+
+ return rbio->stripe_pages[chunk_page];
+}
+
+/*
+ * number of pages we need for the entire stripe across all the
+ * drives
+ */
+static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
+{
+ unsigned long nr = stripe_len * nr_stripes;
+ return (nr + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
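+
+/*
+ * For example, assuming 4K pages, a 64K stripe_len across 6 stripes gives
+ * nr = 384K, which rounds to 96 pages for the whole rbio.
+ */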
+
+/*
+ * allocation and initial setup for the btrfs_raid_bio. Note
+ * that this does not allocate any pages for rbio->stripe_pages.
+ */
+static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len)
+{
+ struct btrfs_raid_bio *rbio;
+ int nr_data = 0;
+ int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
+ void *p;
+
+ rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
+ GFP_NOFS);
+ if (!rbio) {
+ kfree(raid_map);
+ kfree(bbio);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ bio_list_init(&rbio->bio_list);
+ INIT_LIST_HEAD(&rbio->plug_list);
+ spin_lock_init(&rbio->bio_list_lock);
+ INIT_LIST_HEAD(&rbio->stripe_cache);
+ INIT_LIST_HEAD(&rbio->hash_list);
+ rbio->bbio = bbio;
+ rbio->raid_map = raid_map;
+ rbio->fs_info = root->fs_info;
+ rbio->stripe_len = stripe_len;
+ rbio->nr_pages = num_pages;
+ rbio->faila = -1;
+ rbio->failb = -1;
+ atomic_set(&rbio->refs, 1);
+
+ /*
+ * the stripe_pages and bio_pages arrays point to the extra
+ * memory we allocated past the end of the rbio
+ */
+ p = rbio + 1;
+ rbio->stripe_pages = p;
+ rbio->bio_pages = p + sizeof(struct page *) * num_pages;
+
+ if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
+ nr_data = bbio->num_stripes - 2;
+ else
+ nr_data = bbio->num_stripes - 1;
+
+ rbio->nr_data = nr_data;
+ return rbio;
+}
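+
+/*
+ * The single allocation in alloc_rbio() above ends up laid out as:
+ *
+ *   [ struct btrfs_raid_bio | stripe_pages[num_pages] | bio_pages[num_pages] ]
+ *
+ * which is why the kzalloc adds num_pages * sizeof(struct page *) * 2 and
+ * why stripe_pages and bio_pages are pointed just past the end of the rbio.
+ */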
+
+/* allocate pages for all the stripes in the bio, including parity */
+static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ struct page *page;
+
+ for (i = 0; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i])
+ continue;
+ page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[i] = page;
+ ClearPageUptodate(page);
+ }
+ return 0;
+}
+
+/* allocate pages for just the p/q stripes */
+static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ struct page *page;
+
+ i = (rbio->nr_data * rbio->stripe_len) >> PAGE_CACHE_SHIFT;
+
+ for (; i < rbio->nr_pages; i++) {
+ if (rbio->stripe_pages[i])
+ continue;
+ page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[i] = page;
+ }
+ return 0;
+}
+
+/*
+ * add a single page from a specific stripe into our list of bios for IO.
+ * This will try to merge into existing bios if possible, and returns
+ * zero if all went well.
+ */
+int rbio_add_io_page(struct btrfs_raid_bio *rbio,
+ struct bio_list *bio_list,
+ struct page *page,
+ int stripe_nr,
+ unsigned long page_index,
+ unsigned long bio_max_len)
+{
+ struct bio *last = bio_list->tail;
+ u64 last_end = 0;
+ int ret;
+ struct bio *bio;
+ struct btrfs_bio_stripe *stripe;
+ u64 disk_start;
+
+ stripe = &rbio->bbio->stripes[stripe_nr];
+ disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT);
+
+ /* if the device is missing, just fail this stripe */
+ if (!stripe->dev->bdev)
+ return fail_rbio_index(rbio, stripe_nr);
+
+ /* see if we can add this page onto our existing bio */
+ if (last) {
+ last_end = (u64)last->bi_sector << 9;
+ last_end += last->bi_size;
+
+ /*
+ * we can't merge these if they are from different
+ * devices or if they are not contiguous
+ */
+ if (last_end == disk_start && stripe->dev->bdev &&
+ test_bit(BIO_UPTODATE, &last->bi_flags) &&
+ last->bi_bdev == stripe->dev->bdev) {
+ ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0);
+ if (ret == PAGE_CACHE_SIZE)
+ return 0;
+ }
+ }
+
+ /* put a new bio on the list */
+ bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_size = 0;
+ bio->bi_bdev = stripe->dev->bdev;
+ bio->bi_sector = disk_start >> 9;
+ set_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
+ bio_list_add(bio_list, bio);
+ return 0;
+}
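+
+/*
+ * For example, assuming 4K pages: if the previous call added page_index N
+ * for this stripe, then page_index N + 1 maps to a disk_start exactly 4K
+ * past the end of the last bio, so it takes the bio_add_page() fast path
+ * above instead of allocating a new bio.
+ */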
+
+/*
+ * while we're doing the read/modify/write cycle, we could
+ * have errors in reading pages off the disk. This checks
+ * for errors and if we're not able to read the page it'll
+ * trigger parity reconstruction. The rmw will be finished
+ * after we've reconstructed the failed stripes
+ */
+static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
+{
+ if (rbio->faila >= 0 || rbio->failb >= 0) {
+ BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1);
+ __raid56_parity_recover(rbio);
+ } else {
+ finish_rmw(rbio);
+ }
+}
+
+/*
+ * these are just the pages from the rbio array, not from anything
+ * the FS sent down to us
+ */
+static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe, int page)
+{
+ int index;
+ index = stripe * (rbio->stripe_len >> PAGE_CACHE_SHIFT);
+ index += page;
+ return rbio->stripe_pages[index];
+}
+
+/*
+ * helper function to walk our bio list and populate the bio_pages array with
+ * the result. This seems expensive, but it is faster than constantly
+ * searching through the bio list as we set up the IO in finish_rmw or stripe
+ * reconstruction.
+ *
+ * This must be called before you trust the answers from page_in_rbio
+ */
+static void index_rbio_pages(struct btrfs_raid_bio *rbio)
+{
+ struct bio *bio;
+ u64 start;
+ unsigned long stripe_offset;
+ unsigned long page_index;
+ struct page *p;
+ int i;
+
+ spin_lock_irq(&rbio->bio_list_lock);
+ bio_list_for_each(bio, &rbio->bio_list) {
+ start = (u64)bio->bi_sector << 9;
+ stripe_offset = start - rbio->raid_map[0];
+ page_index = stripe_offset >> PAGE_CACHE_SHIFT;
+
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ p = bio->bi_io_vec[i].bv_page;
+ rbio->bio_pages[page_index + i] = p;
+ }
+ }
+ spin_unlock_irq(&rbio->bio_list_lock);
+}
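+
+/*
+ * For example, assuming 4K pages, a bio that starts 8K past raid_map[0]
+ * and carries two pages fills bio_pages[2] and bio_pages[3]; page_in_rbio
+ * then prefers those over the private stripe_pages copies.
+ */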
+
+/*
+ * this is called in one of two situations. We either
+ * have a full stripe from the higher layers, or we've read all
+ * the missing bits off disk.
+ *
+ * This will calculate the parity and then send down any
+ * changed blocks.
+ */
+static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
+{
+ struct btrfs_bio *bbio = rbio->bbio;
+ void *pointers[bbio->num_stripes];
+ int stripe_len = rbio->stripe_len;
+ int nr_data = rbio->nr_data;
+ int stripe;
+ int pagenr;
+ int p_stripe = -1;
+ int q_stripe = -1;
+ struct bio_list bio_list;
+ struct bio *bio;
+ int pages_per_stripe = stripe_len >> PAGE_CACHE_SHIFT;
+ int ret;
+
+ bio_list_init(&bio_list);
+
+ if (bbio->num_stripes - rbio->nr_data == 1) {
+ p_stripe = bbio->num_stripes - 1;
+ } else if (bbio->num_stripes - rbio->nr_data == 2) {
+ p_stripe = bbio->num_stripes - 2;
+ q_stripe = bbio->num_stripes - 1;
+ } else {
+ BUG();
+ }
+
+ /* at this point we either have a full stripe,
+ * or we've read the full stripe from the drive.
+ * recalculate the parity and write the new results.
+ *
+ * We're not allowed to add any new bios to the
+ * bio list here, anyone else that wants to
+ * change this stripe needs to do their own rmw.
+ */
+ spin_lock_irq(&rbio->bio_list_lock);
+ set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ spin_unlock_irq(&rbio->bio_list_lock);
+
+ atomic_set(&rbio->bbio->error, 0);
+
+ /*
+ * now that we've set rmw_locked, run through the
+ * bio list one last time and map the page pointers
+ *
+ * We don't cache full rbios because we're assuming
+ * the higher layers are unlikely to use this area of
+ * the disk again soon. If they do use it again,
+ * hopefully they will send another full bio.
+ */
+ index_rbio_pages(rbio);
+ if (!rbio_is_full(rbio))
+ cache_rbio_pages(rbio);
+ else
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+ for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+ struct page *p;
+ /* first collect one page from each data stripe */
+ for (stripe = 0; stripe < nr_data; stripe++) {
+ p = page_in_rbio(rbio, stripe, pagenr, 0);
+ pointers[stripe] = kmap(p);
+ }
+
+ /* then add the parity stripe */
+ p = rbio_pstripe_page(rbio, pagenr);
+ SetPageUptodate(p);
+ pointers[stripe++] = kmap(p);
+
+ if (q_stripe != -1) {
+
+ /*
+ * raid6, add the qstripe and call the
+ * library function to fill in our p/q
+ */
+ p = rbio_qstripe_page(rbio, pagenr);
+ SetPageUptodate(p);
+ pointers[stripe++] = kmap(p);
+
+ raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+ pointers);
+ } else {
+ /* raid5 */
+ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+ }
+
+
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+ kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ }
+
+ /*
+ * time to start writing. Make bios for everything from the
+ * higher layers (the bio_list in our rbio) and our p/q. Ignore
+ * everything else.
+ */
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
+ struct page *page;
+ if (stripe < rbio->nr_data) {
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (!page)
+ continue;
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+
+ ret = rbio_add_io_page(rbio, &bio_list,
+ page, stripe, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+ atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list));
+ BUG_ON(atomic_read(&bbio->stripes_pending) == 0);
+
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_write_end_io;
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(WRITE, bio);
+ }
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
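+
+/*
+ * For reference, the parity math used above: in the raid5 case the parity
+ * page is the xor of the data pages at the same offset,
+ * P = D0 ^ D1 ^ ... ^ Dn-1 (the memcpy plus run_xor pair), while the raid6
+ * case hands the same pointers to raid6_call.gen_syndrome(), which fills in
+ * both P and the Reed-Solomon Q syndrome.
+ */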
+
+/*
+ * helper to find the stripe number for a given bio. Used to figure out which
+ * stripe has failed. This expects the bio to correspond to a physical disk,
+ * so it looks up based on physical sector numbers.
+ */
+static int find_bio_stripe(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
+{
+ u64 physical = bio->bi_sector;
+ u64 stripe_start;
+ int i;
+ struct btrfs_bio_stripe *stripe;
+
+ physical <<= 9;
+
+ for (i = 0; i < rbio->bbio->num_stripes; i++) {
+ stripe = &rbio->bbio->stripes[i];
+ stripe_start = stripe->physical;
+ if (physical >= stripe_start &&
+ physical < stripe_start + rbio->stripe_len) {
+ return i;
+ }
+ }
+ return -1;
+}
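+
+/*
+ * e.g. with a 64K stripe_len, a bio whose physical start falls anywhere in
+ * [stripe->physical, stripe->physical + 64K) is attributed to that slot of
+ * bbio->stripes[].
+ */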
+
+/*
+ * helper to find the stripe number for a given
+ * bio (before mapping). Used to figure out which stripe has
+ * failed. This looks up based on logical block numbers.
+ */
+static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
+{
+ u64 logical = bio->bi_sector;
+ u64 stripe_start;
+ int i;
+
+ logical <<= 9;
+
+ for (i = 0; i < rbio->nr_data; i++) {
+ stripe_start = rbio->raid_map[i];
+ if (logical >= stripe_start &&
+ logical < stripe_start + rbio->stripe_len) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+/*
+ * returns -EIO if we had too many failures
+ */
+static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&rbio->bio_list_lock, flags);
+
+ /* we already know this stripe is bad, move on */
+ if (rbio->faila == failed || rbio->failb == failed)
+ goto out;
+
+ if (rbio->faila == -1) {
+ /* first failure on this rbio */
+ rbio->faila = failed;
+ atomic_inc(&rbio->bbio->error);
+ } else if (rbio->failb == -1) {
+ /* second failure on this rbio */
+ rbio->failb = failed;
+ atomic_inc(&rbio->bbio->error);
+ } else {
+ ret = -EIO;
+ }
+out:
+ spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
+
+ return ret;
+}
+
+/*
+ * helper to fail a stripe based on a physical disk
+ * bio.
+ */
+static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
+ struct bio *bio)
+{
+ int failed = find_bio_stripe(rbio, bio);
+
+ if (failed < 0)
+ return -EIO;
+
+ return fail_rbio_index(rbio, failed);
+}
+
+/*
+ * this sets each page in the bio uptodate. It should only be used on private
+ * rbio pages, nothing that comes in from the higher layers
+ */
+static void set_bio_pages_uptodate(struct bio *bio)
+{
+ int i;
+ struct page *p;
+
+ for (i = 0; i < bio->bi_vcnt; i++) {
+ p = bio->bi_io_vec[i].bv_page;
+ SetPageUptodate(p);
+ }
+}
+
+/*
+ * end io for the read phase of the rmw cycle. All the bios here are physical
+ * stripe bios we've read from the disk so we can recalculate the parity of the
+ * stripe.
+ *
+ * This will usually kick off finish_rmw once all the bios are read in, but it
+ * may trigger parity reconstruction if we had any errors along the way
+ */
+static void raid_rmw_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+ else
+ set_bio_pages_uptodate(bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ return;
+
+ err = 0;
+ if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ goto cleanup;
+
+ /*
+ * this will normally call finish_rmw to start our write
+ * but if there are any failed stripes we'll reconstruct
+ * from parity first
+ */
+ validate_rbio_for_rmw(rbio);
+ return;
+
+cleanup:
+
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
+
+static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
+{
+ rbio->work.flags = 0;
+ rbio->work.func = rmw_work;
+
+ btrfs_queue_worker(&rbio->fs_info->rmw_workers,
+ &rbio->work);
+}
+
+static void async_read_rebuild(struct btrfs_raid_bio *rbio)
+{
+ rbio->work.flags = 0;
+ rbio->work.func = read_rebuild_work;
+
+ btrfs_queue_worker(&rbio->fs_info->rmw_workers,
+ &rbio->work);
+}
+
+/*
+ * the stripe must be locked by the caller. It will
+ * unlock after all the writes are done
+ */
+static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
+{
+ int bios_to_read = 0;
+ struct btrfs_bio *bbio = rbio->bbio;
+ struct bio_list bio_list;
+ int ret;
+ int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int pagenr;
+ int stripe;
+ struct bio *bio;
+
+ bio_list_init(&bio_list);
+
+ ret = alloc_rbio_pages(rbio);
+ if (ret)
+ goto cleanup;
+
+ index_rbio_pages(rbio);
+
+ atomic_set(&rbio->bbio->error, 0);
+ /*
+ * build a list of bios to read all the missing parts of this
+ * stripe
+ */
+ for (stripe = 0; stripe < rbio->nr_data; stripe++) {
+ for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ struct page *page;
+ /*
+ * we want to find all the pages missing from
+ * the rbio and read them from the disk. If
+ * page_in_rbio finds a page in the bio list
+ * we don't need to read it off the stripe.
+ */
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (page)
+ continue;
+
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ /*
+ * the bio cache may have handed us an uptodate
+ * page. If so, be happy and use it
+ */
+ if (PageUptodate(page))
+ continue;
+
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ stripe, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+ bios_to_read = bio_list_size(&bio_list);
+ if (!bios_to_read) {
+ /*
+ * this can happen if others have merged with
+ * us; it means there is nothing left to read.
+ * But if there are missing devices it may not be
+ * safe to do the full stripe write yet.
+ */
+ goto finish;
+ }
+
+ /*
+ * the bbio may be freed once we submit the last bio. Make sure
+ * not to touch it after that
+ */
+ atomic_set(&bbio->stripes_pending, bios_to_read);
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_rmw_end_io;
+
+ btrfs_bio_wq_end_io(rbio->fs_info, bio,
+ BTRFS_WQ_ENDIO_RAID56);
+
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(READ, bio);
+ }
+ /* the actual write will happen once the reads are done */
+ return 0;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return -EIO;
+
+finish:
+ validate_rbio_for_rmw(rbio);
+ return 0;
+}
+
+/*
+ * if the upper layers pass in a full stripe, we thank them by only allocating
+ * enough pages to hold the parity, and sending it all down quickly.
+ */
+static int full_stripe_write(struct btrfs_raid_bio *rbio)
+{
+ int ret;
+
+ ret = alloc_rbio_parity_pages(rbio);
+ if (ret)
+ return ret;
+
+ ret = lock_stripe_add(rbio);
+ if (ret == 0)
+ finish_rmw(rbio);
+ return 0;
+}
+
+/*
+ * partial stripe writes get handed over to async helpers.
+ * We're really hoping to merge a few more writes into this
+ * rbio before calculating new parity
+ */
+static int partial_stripe_write(struct btrfs_raid_bio *rbio)
+{
+ int ret;
+
+ ret = lock_stripe_add(rbio);
+ if (ret == 0)
+ async_rmw_stripe(rbio);
+ return 0;
+}
+
+/*
+ * sometimes while we're reading from the drive to
+ * recalculate parity, enough new bios come in to create
+ * a full stripe. So we do a check here to see if we can
+ * go directly to finish_rmw
+ */
+static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
+{
+ /* head off into rmw land if we don't have a full stripe */
+ if (!rbio_is_full(rbio))
+ return partial_stripe_write(rbio);
+ return full_stripe_write(rbio);
+}
+
+/*
+ * We use plugging callbacks to collect full stripes.
+ * Any time we get a partial stripe write while plugged
+ * we collect it into a list. When the unplug comes down,
+ * we sort the list by logical block number and merge
+ * everything we can into the same rbios
+ */
+struct btrfs_plug_cb {
+ struct blk_plug_cb cb;
+ struct btrfs_fs_info *info;
+ struct list_head rbio_list;
+ struct btrfs_work work;
+};
+
+/*
+ * rbios on the plug list are sorted for easier merging.
+ */
+static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
+ plug_list);
+ struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
+ plug_list);
+ u64 a_sector = ra->bio_list.head->bi_sector;
+ u64 b_sector = rb->bio_list.head->bi_sector;
+
+ if (a_sector < b_sector)
+ return -1;
+ if (a_sector > b_sector)
+ return 1;
+ return 0;
+}
+
+static void run_plug(struct btrfs_plug_cb *plug)
+{
+ struct btrfs_raid_bio *cur;
+ struct btrfs_raid_bio *last = NULL;
+
+ /*
+ * sort our plug list then try to merge
+ * everything we can in hopes of creating full
+ * stripes.
+ */
+ list_sort(NULL, &plug->rbio_list, plug_cmp);
+ while (!list_empty(&plug->rbio_list)) {
+ cur = list_entry(plug->rbio_list.next,
+ struct btrfs_raid_bio, plug_list);
+ list_del_init(&cur->plug_list);
+
+ if (rbio_is_full(cur)) {
+ /* we have a full stripe, send it down */
+ full_stripe_write(cur);
+ continue;
+ }
+ if (last) {
+ if (rbio_can_merge(last, cur)) {
+ merge_rbio(last, cur);
+ __free_raid_bio(cur);
+ continue;
+
+ }
+ __raid56_parity_write(last);
+ }
+ last = cur;
+ }
+ if (last) {
+ __raid56_parity_write(last);
+ }
+ kfree(plug);
+}
+
+/*
+ * if the unplug comes from schedule, we have to push the
+ * work off to a helper thread
+ */
+static void unplug_work(struct btrfs_work *work)
+{
+ struct btrfs_plug_cb *plug;
+ plug = container_of(work, struct btrfs_plug_cb, work);
+ run_plug(plug);
+}
+
+static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct btrfs_plug_cb *plug;
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+
+ if (from_schedule) {
+ plug->work.flags = 0;
+ plug->work.func = unplug_work;
+ btrfs_queue_worker(&plug->info->rmw_workers,
+ &plug->work);
+ return;
+ }
+ run_plug(plug);
+}
+
+/*
+ * our main entry point for writes from the rest of the FS.
+ */
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len)
+{
+ struct btrfs_raid_bio *rbio;
+ struct btrfs_plug_cb *plug = NULL;
+ struct blk_plug_cb *cb;
+
+ rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+ if (IS_ERR(rbio)) {
+ kfree(raid_map);
+ kfree(bbio);
+ return PTR_ERR(rbio);
+ }
+ bio_list_add(&rbio->bio_list, bio);
+ rbio->bio_list_bytes = bio->bi_size;
+
+ /*
+ * don't plug on full rbios, just get them out the door
+ * as quickly as we can
+ */
+ if (rbio_is_full(rbio))
+ return full_stripe_write(rbio);
+
+ cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
+ sizeof(*plug));
+ if (cb) {
+ plug = container_of(cb, struct btrfs_plug_cb, cb);
+ if (!plug->info) {
+ plug->info = root->fs_info;
+ INIT_LIST_HEAD(&plug->rbio_list);
+ }
+ list_add_tail(&rbio->plug_list, &plug->rbio_list);
+ } else {
+ return __raid56_parity_write(rbio);
+ }
+ return 0;
+}
+
+/*
+ * all parity reconstruction happens here. We've read in everything
+ * we can find from the drives and this does the heavy lifting of
+ * sorting the good from the bad.
+ */
+static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
+{
+ int pagenr, stripe;
+ void **pointers;
+ int faila = -1, failb = -1;
+ int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ struct page *page;
+ int err;
+ int i;
+
+ pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *),
+ GFP_NOFS);
+ if (!pointers) {
+ err = -ENOMEM;
+ goto cleanup_io;
+ }
+
+ faila = rbio->faila;
+ failb = rbio->failb;
+
+ if (rbio->read_rebuild) {
+ spin_lock_irq(&rbio->bio_list_lock);
+ set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
+ spin_unlock_irq(&rbio->bio_list_lock);
+ }
+
+ index_rbio_pages(rbio);
+
+ for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ /* setup our array of pointers with pages
+ * from each stripe
+ */
+ for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+ /*
+ * if we're rebuilding a read, we have to use
+ * pages from the bio list
+ */
+ if (rbio->read_rebuild &&
+ (stripe == faila || stripe == failb)) {
+ page = page_in_rbio(rbio, stripe, pagenr, 0);
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+ pointers[stripe] = kmap(page);
+ }
+
+ /* all raid6 handling here */
+ if (rbio->raid_map[rbio->bbio->num_stripes - 1] ==
+ RAID6_Q_STRIPE) {
+
+ /*
+ * single failure, rebuild from parity raid5
+ * style
+ */
+ if (failb < 0) {
+ if (faila == rbio->nr_data) {
+ /*
+ * Just the P stripe has failed, without
+ * a bad data or Q stripe.
+ * TODO, we should redo the xor here.
+ */
+ err = -EIO;
+ goto cleanup;
+ }
+ /*
+ * a single failure in raid6 is rebuilt
+ * in the pstripe code below
+ */
+ goto pstripe;
+ }
+
+ /* make sure our ps and qs are in order */
+ if (faila > failb) {
+ int tmp = failb;
+ failb = faila;
+ faila = tmp;
+ }
+
+ /* if the q stripe has failed, do a pstripe reconstruction
+ * from the xors.
+ * If both the q stripe and the P stripe have failed, we're
+ * here due to a crc mismatch and we can't give them the
+ * data they want
+ */
+ if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
+ if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
+ err = -EIO;
+ goto cleanup;
+ }
+ /*
+ * otherwise we have one bad data stripe and
+ * a good P stripe. raid5!
+ */
+ goto pstripe;
+ }
+
+ if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
+ raid6_datap_recov(rbio->bbio->num_stripes,
+ PAGE_SIZE, faila, pointers);
+ } else {
+ raid6_2data_recov(rbio->bbio->num_stripes,
+ PAGE_SIZE, faila, failb,
+ pointers);
+ }
+ } else {
+ void *p;
+
+ /* rebuild from P stripe here (raid5 or raid6) */
+ BUG_ON(failb != -1);
+pstripe:
+ /* Copy parity block into failed block to start with */
+ memcpy(pointers[faila],
+ pointers[rbio->nr_data],
+ PAGE_CACHE_SIZE);
+
+ /* rearrange the pointer array */
+ p = pointers[faila];
+ for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
+ pointers[stripe] = pointers[stripe + 1];
+ pointers[rbio->nr_data - 1] = p;
+
+ /* xor in the rest */
+ run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE);
+ }
+ /* if we're doing this rebuild as part of an rmw, go through
+ * and set all of our private rbio pages in the
+ * failed stripes as uptodate. This way finish_rmw will
+ * know they can be trusted. If this was a read reconstruction,
+ * other endio functions will fiddle the uptodate bits
+ */
+ if (!rbio->read_rebuild) {
+ for (i = 0; i < nr_pages; i++) {
+ if (faila != -1) {
+ page = rbio_stripe_page(rbio, faila, i);
+ SetPageUptodate(page);
+ }
+ if (failb != -1) {
+ page = rbio_stripe_page(rbio, failb, i);
+ SetPageUptodate(page);
+ }
+ }
+ }
+ for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) {
+ /*
+ * if we're rebuilding a read, we have to use
+ * pages from the bio list
+ */
+ if (rbio->read_rebuild &&
+ (stripe == faila || stripe == failb)) {
+ page = page_in_rbio(rbio, stripe, pagenr, 0);
+ } else {
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ }
+ kunmap(page);
+ }
+ }
+
+ err = 0;
+cleanup:
+ kfree(pointers);
+
+cleanup_io:
+
+ if (rbio->read_rebuild) {
+ if (err == 0)
+ cache_rbio_pages(rbio);
+ else
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+ rbio_orig_end_io(rbio, err, err == 0);
+ } else if (err == 0) {
+ rbio->faila = -1;
+ rbio->failb = -1;
+ finish_rmw(rbio);
+ } else {
+ rbio_orig_end_io(rbio, err, 0);
+ }
+}
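+
+/*
+ * Summary of the rebuild paths above: a single failed data stripe (with or
+ * without a failed Q) is rebuilt by copying P and xoring the surviving data
+ * stripes; a failed data stripe plus a failed P uses raid6_datap_recov();
+ * two failed data stripes use raid6_2data_recov(); the remaining cases
+ * (P alone, or P and Q together) end in -EIO.
+ */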
+
+/*
+ * This is called only for stripes we've read from disk to
+ * reconstruct the parity.
+ */
+static void raid_recover_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ /*
+ * we only read stripe pages off the disk, set them
+ * up to date if there were no errors
+ */
+ if (err)
+ fail_bio_stripe(rbio, bio);
+ else
+ set_bio_pages_uptodate(bio);
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->bbio->stripes_pending))
+ return;
+
+ if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors)
+ rbio_orig_end_io(rbio, -EIO, 0);
+ else
+ __raid_recover_end_io(rbio);
+}
+
+/*
+ * reads everything we need off the disk to reconstruct
+ * the parity. endio handlers trigger final reconstruction
+ * when the IO is done.
+ *
+ * This is used both for reads from the higher layers and for
+ * parity construction required to finish an rmw cycle.
+ */
+static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
+{
+ int bios_to_read = 0;
+ struct btrfs_bio *bbio = rbio->bbio;
+ struct bio_list bio_list;
+ int ret;
+ int nr_pages = (rbio->stripe_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+ int pagenr;
+ int stripe;
+ struct bio *bio;
+
+ bio_list_init(&bio_list);
+
+ ret = alloc_rbio_pages(rbio);
+ if (ret)
+ goto cleanup;
+
+ atomic_set(&rbio->bbio->error, 0);
+
+ /*
+ * read everything that hasn't failed. Thanks to the
+ * stripe cache, it is possible that some or all of these
+ * pages are going to be uptodate.
+ */
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ if (rbio->faila == stripe ||
+ rbio->failb == stripe)
+ continue;
+
+ for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ struct page *p;
+
+ /*
+ * the rmw code may have already read this
+ * page in
+ */
+ p = rbio_stripe_page(rbio, stripe, pagenr);
+ if (PageUptodate(p))
+ continue;
+
+ ret = rbio_add_io_page(rbio, &bio_list,
+ rbio_stripe_page(rbio, stripe, pagenr),
+ stripe, pagenr, rbio->stripe_len);
+ if (ret < 0)
+ goto cleanup;
+ }
+ }
+
+ bios_to_read = bio_list_size(&bio_list);
+ if (!bios_to_read) {
+ /*
+ * we might have no bios to read just because the pages
+ * were up to date, or we might have no bios to read because
+ * the devices were gone.
+ */
+ if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) {
+ __raid_recover_end_io(rbio);
+ goto out;
+ } else {
+ goto cleanup;
+ }
+ }
+
+ /*
+ * the bbio may be freed once we submit the last bio. Make sure
+ * not to touch it after that
+ */
+ atomic_set(&bbio->stripes_pending, bios_to_read);
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_recover_end_io;
+
+ btrfs_bio_wq_end_io(rbio->fs_info, bio,
+ BTRFS_WQ_ENDIO_RAID56);
+
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(READ, bio);
+ }
+out:
+ return 0;
+
+cleanup:
+ if (rbio->read_rebuild)
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return -EIO;
+}
+
+/*
+ * the main entry point for reads from the higher layers. This
+ * is really only called when the normal read path had a failure,
+ * so we assume the bio they send down corresponds to a failed part
+ * of the drive.
+ */
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, int mirror_num)
+{
+ struct btrfs_raid_bio *rbio;
+ int ret;
+
+ rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+ if (IS_ERR(rbio)) {
+ return PTR_ERR(rbio);
+ }
+
+ rbio->read_rebuild = 1;
+ bio_list_add(&rbio->bio_list, bio);
+ rbio->bio_list_bytes = bio->bi_size;
+
+ rbio->faila = find_logical_bio_stripe(rbio, bio);
+ if (rbio->faila == -1) {
+ BUG();
+ kfree(rbio);
+ return -EIO;
+ }
+
+ /*
+ * reconstruct from the q stripe if they are
+ * asking for mirror 3
+ */
+ if (mirror_num == 3)
+ rbio->failb = bbio->num_stripes - 2;
+
+ ret = lock_stripe_add(rbio);
+
+ /*
+ * __raid56_parity_recover will end the bio with
+ * any errors it hits. We don't want to return
+ * its error value up the stack because our caller
+ * will end up calling bio_endio with any nonzero
+ * return
+ */
+ if (ret == 0)
+ __raid56_parity_recover(rbio);
+ /*
+ * our rbio has been added to the list of
+ * rbios that will be handled after the
+ * current lock owner is done
+ */
+ return 0;
+
+}
+
+static void rmw_work(struct btrfs_work *work)
+{
+ struct btrfs_raid_bio *rbio;
+
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ raid56_rmw_stripe(rbio);
+}
+
+static void read_rebuild_work(struct btrfs_work *work)
+{
+ struct btrfs_raid_bio *rbio;
+
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ __raid56_parity_recover(rbio);
+}
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
new file mode 100644
index 00000000000..ea5d73bfdfb
--- /dev/null
+++ b/fs/btrfs/raid56.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2012 Fusion-io All rights reserved.
+ * Copyright (C) 2012 Intel Corp. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#ifndef __BTRFS_RAID56__
+#define __BTRFS_RAID56__
+static inline int nr_parity_stripes(struct map_lookup *map)
+{
+ if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+ return 1;
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ return 2;
+ else
+ return 0;
+}
+
+static inline int nr_data_stripes(struct map_lookup *map)
+{
+ return map->num_stripes - nr_parity_stripes(map);
+}
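+
+/*
+ * e.g. a 6 device raid6 chunk has nr_parity_stripes() == 2 and
+ * nr_data_stripes() == 4; a 3 device raid5 chunk has 1 and 2.
+ */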
+#define RAID5_P_STRIPE ((u64)-2)
+#define RAID6_Q_STRIPE ((u64)-1)
+
+#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
+ ((x) == RAID6_Q_STRIPE))
+
+int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, int mirror_num);
+int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len);
+
+int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
+void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
+#endif
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 17c306bf177..50695dc5e2a 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3017,7 +3017,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
}
}
- page_start = (u64)page->index << PAGE_CACHE_SHIFT;
+ page_start = page_offset(page);
page_end = page_start + PAGE_CACHE_SIZE - 1;
lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 67783e03d12..53c3501fa4c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -28,6 +28,7 @@
#include "dev-replace.h"
#include "check-integrity.h"
#include "rcu-string.h"
+#include "raid56.h"
/*
* This is only the first step towards a full-features scrub. It reads all
@@ -2254,6 +2255,13 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
struct btrfs_device *extent_dev;
int extent_mirror_num;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ if (num >= nr_data_stripes(map)) {
+ return 0;
+ }
+ }
+
nstripes = length;
offset = 0;
do_div(nstripes, map->stripe_len);
@@ -2708,7 +2716,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
int ret;
struct btrfs_root *root = sctx->dev_root;
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return -EIO;
gen = root->fs_info->last_trans_committed;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index f4ab7a9260e..f7a8b861058 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -85,6 +85,7 @@ struct send_ctx {
u32 send_max_size;
u64 total_send_size;
u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
+ u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
struct vfsmount *mnt;
@@ -3709,6 +3710,39 @@ out:
return ret;
}
+/*
+ * Send an update extent command to user space.
+ */
+static int send_update_extent(struct send_ctx *sctx,
+ u64 offset, u32 len)
+{
+ int ret = 0;
+ struct fs_path *p;
+
+ p = fs_path_alloc(sctx);
+ if (!p)
+ return -ENOMEM;
+
+ ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
+ if (ret < 0)
+ goto out;
+
+ ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
+ if (ret < 0)
+ goto out;
+
+ TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
+ TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
+
+ ret = send_cmd(sctx);
+
+tlv_put_failure:
+out:
+ fs_path_free(sctx, p);
+ return ret;
+}
+
static int send_write_or_clone(struct send_ctx *sctx,
struct btrfs_path *path,
struct btrfs_key *key,
@@ -3744,7 +3778,11 @@ static int send_write_or_clone(struct send_ctx *sctx,
goto out;
}
- if (!clone_root) {
+ if (clone_root) {
+ ret = send_clone(sctx, offset, len, clone_root);
+ } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
+ ret = send_update_extent(sctx, offset, len);
+ } else {
while (pos < len) {
l = len - pos;
if (l > BTRFS_SEND_READ_SIZE)
@@ -3757,10 +3795,7 @@ static int send_write_or_clone(struct send_ctx *sctx,
pos += ret;
}
ret = 0;
- } else {
- ret = send_clone(sctx, offset, len, clone_root);
}
-
out:
return ret;
}
@@ -4536,7 +4571,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
struct btrfs_fs_info *fs_info;
struct btrfs_ioctl_send_args *arg = NULL;
struct btrfs_key key;
- struct file *filp = NULL;
struct send_ctx *sctx = NULL;
u32 i;
u64 *clone_sources_tmp = NULL;
@@ -4561,6 +4595,11 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
goto out;
}
+ if (arg->flags & ~BTRFS_SEND_FLAG_NO_FILE_DATA) {
+ ret = -EINVAL;
+ goto out;
+ }
+
sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
if (!sctx) {
ret = -ENOMEM;
@@ -4572,6 +4611,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
INIT_LIST_HEAD(&sctx->name_cache_list);
+ sctx->flags = arg->flags;
+
sctx->send_filp = fget(arg->send_fd);
if (IS_ERR(sctx->send_filp)) {
ret = PTR_ERR(sctx->send_filp);
@@ -4673,8 +4714,6 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
goto out;
out:
- if (filp)
- fput(filp);
kfree(arg);
vfree(clone_sources_tmp);
diff --git a/fs/btrfs/send.h b/fs/btrfs/send.h
index 1bf4f32fd4e..8bb18f7ccaa 100644
--- a/fs/btrfs/send.h
+++ b/fs/btrfs/send.h
@@ -86,6 +86,7 @@ enum btrfs_send_cmd {
BTRFS_SEND_C_UTIMES,
BTRFS_SEND_C_END,
+ BTRFS_SEND_C_UPDATE_EXTENT,
__BTRFS_SEND_C_MAX,
};
#define BTRFS_SEND_C_MAX (__BTRFS_SEND_C_MAX - 1)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d8982e9601d..68a29a1ea06 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -41,13 +41,13 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/ratelimit.h>
+#include <linux/btrfs.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
-#include "ioctl.h"
#include "print-tree.h"
#include "xattr.h"
#include "volumes.h"
@@ -63,8 +63,7 @@
static const struct super_operations btrfs_super_ops;
static struct file_system_type btrfs_fs_type;
-static const char *btrfs_decode_error(struct btrfs_fs_info *fs_info, int errno,
- char nbuf[16])
+static const char *btrfs_decode_error(int errno, char nbuf[16])
{
char *errstr = NULL;
@@ -98,7 +97,7 @@ static void __save_error_info(struct btrfs_fs_info *fs_info)
* today we only save the error info into ram. Long term we'll
* also send it down to the disk
*/
- fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR;
+ set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
}
static void save_error_info(struct btrfs_fs_info *fs_info)
@@ -114,7 +113,7 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info)
if (sb->s_flags & MS_RDONLY)
return;
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
sb->s_flags |= MS_RDONLY;
printk(KERN_INFO "btrfs is forced readonly\n");
/*
@@ -142,8 +141,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
struct super_block *sb = fs_info->sb;
char nbuf[16];
const char *errstr;
- va_list args;
- va_start(args, fmt);
/*
* Special case: if the error is EROFS, and we're already
@@ -152,15 +149,18 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
if (errno == -EROFS && (sb->s_flags & MS_RDONLY))
return;
- errstr = btrfs_decode_error(fs_info, errno, nbuf);
+ errstr = btrfs_decode_error(errno, nbuf);
if (fmt) {
- struct va_format vaf = {
- .fmt = fmt,
- .va = &args,
- };
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s (%pV)\n",
sb->s_id, function, line, errstr, &vaf);
+ va_end(args);
} else {
printk(KERN_CRIT "BTRFS error (device %s) in %s:%d: %s\n",
sb->s_id, function, line, errstr);
@@ -171,7 +171,6 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
save_error_info(fs_info);
btrfs_handle_error(fs_info);
}
- va_end(args);
}
static const char * const logtypes[] = {
@@ -261,7 +260,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
char nbuf[16];
const char *errstr;
- errstr = btrfs_decode_error(root->fs_info, errno, nbuf);
+ errstr = btrfs_decode_error(errno, nbuf);
btrfs_printk(root->fs_info,
"%s:%d: Aborting unused transaction(%s).\n",
function, line, errstr);
@@ -289,8 +288,8 @@ void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function,
va_start(args, fmt);
vaf.va = &args;
- errstr = btrfs_decode_error(fs_info, errno, nbuf);
- if (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR)
+ errstr = btrfs_decode_error(errno, nbuf);
+ if (fs_info && (fs_info->mount_opt & BTRFS_MOUNT_PANIC_ON_FATAL_ERROR))
panic(KERN_CRIT "BTRFS panic (device %s) in %s:%d: %pV (%s)\n",
s_id, function, line, &vaf, errstr);
@@ -438,6 +437,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_compress_force:
case Opt_compress_force_type:
compress_force = true;
+ /* Fallthrough */
case Opt_compress:
case Opt_compress_type:
if (token == Opt_compress ||
@@ -519,7 +519,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_alloc_start:
num = match_strdup(&args[0]);
if (num) {
+ mutex_lock(&info->chunk_mutex);
info->alloc_start = memparse(num, NULL);
+ mutex_unlock(&info->chunk_mutex);
kfree(num);
printk(KERN_INFO
"btrfs: allocations start at %llu\n",
@@ -876,7 +878,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
btrfs_wait_ordered_extents(root, 0);
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
/* no transaction, don't bother */
if (PTR_ERR(trans) == -ENOENT)
@@ -1200,6 +1202,38 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
new_pool_size);
}
+static inline void btrfs_remount_prepare(struct btrfs_fs_info *fs_info,
+ unsigned long old_opts, int flags)
+{
+ set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+
+ if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
+ (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
+ (flags & MS_RDONLY))) {
+ /* wait for any defraggers to finish */
+ wait_event(fs_info->transaction_wait,
+ (atomic_read(&fs_info->defrag_running) == 0));
+ if (flags & MS_RDONLY)
+ sync_filesystem(fs_info->sb);
+ }
+}
+
+static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info,
+ unsigned long old_opts)
+{
+ /*
+ * We need to clean up all defraggable inodes if autodefrag is
+ * turned off or the fs is remounted R/O.
+ */
+ if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
+ (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
+ (fs_info->sb->s_flags & MS_RDONLY))) {
+ btrfs_cleanup_defrag_inodes(fs_info);
+ }
+
+ clear_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
static int btrfs_remount(struct super_block *sb, int *flags, char *data)
{
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1213,6 +1247,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
unsigned int old_metadata_ratio = fs_info->metadata_ratio;
int ret;
+ btrfs_remount_prepare(fs_info, old_opts, *flags);
+
ret = btrfs_parse_options(root, data);
if (ret) {
ret = -EINVAL;
@@ -1223,7 +1259,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
fs_info->thread_pool_size, old_thread_pool_size);
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
- return 0;
+ goto out;
if (*flags & MS_RDONLY) {
/*
@@ -1278,7 +1314,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
sb->s_flags &= ~MS_RDONLY;
}
-
+out:
+ btrfs_remount_cleanup(fs_info, old_opts);
return 0;
restore:
@@ -1289,10 +1326,13 @@ restore:
fs_info->mount_opt = old_opts;
fs_info->compress_type = old_compress_type;
fs_info->max_inline = old_max_inline;
+ mutex_lock(&fs_info->chunk_mutex);
fs_info->alloc_start = old_alloc_start;
+ mutex_unlock(&fs_info->chunk_mutex);
btrfs_resize_thread_pool(fs_info,
old_thread_pool_size, fs_info->thread_pool_size);
fs_info->metadata_ratio = old_metadata_ratio;
+ btrfs_remount_cleanup(fs_info, old_opts);
return ret;
}
@@ -1559,7 +1599,7 @@ static int btrfs_freeze(struct super_block *sb)
struct btrfs_trans_handle *trans;
struct btrfs_root *root = btrfs_sb(sb)->tree_root;
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_attach_transaction_barrier(root);
if (IS_ERR(trans)) {
/* no transaction, don't bother */
if (PTR_ERR(trans) == -ENOENT)
@@ -1684,10 +1724,14 @@ static int __init init_btrfs_fs(void)
if (err)
goto free_delayed_inode;
- err = btrfs_interface_init();
+ err = btrfs_delayed_ref_init();
if (err)
goto free_auto_defrag;
+ err = btrfs_interface_init();
+ if (err)
+ goto free_delayed_ref;
+
err = register_filesystem(&btrfs_fs_type);
if (err)
goto unregister_ioctl;
@@ -1699,6 +1743,8 @@ static int __init init_btrfs_fs(void)
unregister_ioctl:
btrfs_interface_exit();
+free_delayed_ref:
+ btrfs_delayed_ref_exit();
free_auto_defrag:
btrfs_auto_defrag_exit();
free_delayed_inode:
@@ -1720,6 +1766,7 @@ free_compress:
static void __exit exit_btrfs_fs(void)
{
btrfs_destroy_cachep();
+ btrfs_delayed_ref_exit();
btrfs_auto_defrag_exit();
btrfs_delayed_inode_exit();
ordered_data_exit();
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index daac9ae6d73..5b326cd60a4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -21,7 +21,6 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
-#include <linux/module.h>
#include <linux/kobject.h>
#include "ctree.h"
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4c0067c4f76..e52da6fb116 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -40,7 +40,6 @@ void put_transaction(struct btrfs_transaction *transaction)
if (atomic_dec_and_test(&transaction->use_count)) {
BUG_ON(!list_empty(&transaction->list));
WARN_ON(transaction->delayed_refs.root.rb_node);
- memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
}
}
@@ -51,6 +50,14 @@ static noinline void switch_commit_root(struct btrfs_root *root)
root->commit_root = btrfs_root_node(root);
}
+static inline int can_join_transaction(struct btrfs_transaction *trans,
+ int type)
+{
+ return !(trans->in_commit &&
+ type != TRANS_JOIN &&
+ type != TRANS_JOIN_NOLOCK);
+}
+
/*
* either allocate a new transaction or hop into the existing one
*/
@@ -62,7 +69,7 @@ static noinline int join_transaction(struct btrfs_root *root, int type)
spin_lock(&fs_info->trans_lock);
loop:
/* The file system has been taken offline. No new transactions. */
- if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
spin_unlock(&fs_info->trans_lock);
return -EROFS;
}
@@ -86,6 +93,10 @@ loop:
spin_unlock(&fs_info->trans_lock);
return cur_trans->aborted;
}
+ if (!can_join_transaction(cur_trans, type)) {
+ spin_unlock(&fs_info->trans_lock);
+ return -EBUSY;
+ }
atomic_inc(&cur_trans->use_count);
atomic_inc(&cur_trans->num_writers);
cur_trans->num_joined++;
@@ -113,7 +124,7 @@ loop:
*/
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
goto loop;
- } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
spin_unlock(&fs_info->trans_lock);
kmem_cache_free(btrfs_transaction_cachep, cur_trans);
return -EROFS;
@@ -155,8 +166,12 @@ loop:
spin_lock_init(&cur_trans->commit_lock);
spin_lock_init(&cur_trans->delayed_refs.lock);
+ atomic_set(&cur_trans->delayed_refs.procs_running_refs, 0);
+ atomic_set(&cur_trans->delayed_refs.ref_seq, 0);
+ init_waitqueue_head(&cur_trans->delayed_refs.wait);
INIT_LIST_HEAD(&cur_trans->pending_snapshots);
+ INIT_LIST_HEAD(&cur_trans->ordered_operations);
list_add_tail(&cur_trans->list, &fs_info->trans_list);
extent_io_tree_init(&cur_trans->dirty_pages,
fs_info->btree_inode->i_mapping);
@@ -301,7 +316,7 @@ start_transaction(struct btrfs_root *root, u64 num_items, int type,
int ret;
u64 qgroup_reserved = 0;
- if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
+ if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
return ERR_PTR(-EROFS);
if (current->journal_info) {
@@ -359,8 +374,11 @@ again:
do {
ret = join_transaction(root, type);
- if (ret == -EBUSY)
+ if (ret == -EBUSY) {
wait_current_trans(root);
+ if (unlikely(type == TRANS_ATTACH))
+ ret = -ENOENT;
+ }
} while (ret == -EBUSY);
if (ret < 0) {
@@ -382,9 +400,10 @@ again:
h->block_rsv = NULL;
h->orig_rsv = NULL;
h->aborted = 0;
- h->qgroup_reserved = qgroup_reserved;
+ h->qgroup_reserved = 0;
h->delayed_ref_elem.seq = 0;
h->type = type;
+ h->allocating_chunk = false;
INIT_LIST_HEAD(&h->qgroup_ref_list);
INIT_LIST_HEAD(&h->new_bgs);
@@ -400,6 +419,7 @@ again:
h->block_rsv = &root->fs_info->trans_block_rsv;
h->bytes_reserved = num_bytes;
}
+ h->qgroup_reserved = qgroup_reserved;
got_it:
btrfs_record_root_in_trans(h, root);
@@ -451,11 +471,43 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root
return start_transaction(root, 0, TRANS_USERSPACE, 0);
}
+/*
+ * btrfs_attach_transaction() - catch the running transaction
+ *
+ * It is used when we want to commit the current transaction, but
+ * don't want to start a new one.
+ *
+ * Note: If this function returns -ENOENT, it just means there is no
+ * running transaction. But it is possible that the inactive transaction
+ * is still in memory, not fully on disk. If you want to be sure there is
+ * no inactive transaction in the fs when -ENOENT is returned, you should
+ * invoke
+ * btrfs_attach_transaction_barrier()
+ */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
return start_transaction(root, 0, TRANS_ATTACH, 0);
}
+/*
+ * btrfs_attach_transaction_barrier() - catch the running transaction
+ *
+ * It is similar to the above function, the difference is that this one
+ * will wait for all the inactive transactions until they fully
+ * complete.
+ */
+struct btrfs_trans_handle *
+btrfs_attach_transaction_barrier(struct btrfs_root *root)
+{
+ struct btrfs_trans_handle *trans;
+
+ trans = start_transaction(root, 0, TRANS_ATTACH, 0);
+ if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
+ btrfs_wait_for_commit(root, 0);
+
+ return trans;
+}
+
/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
struct btrfs_transaction *commit)
@@ -587,7 +639,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
if (!list_empty(&trans->new_bgs))
btrfs_create_pending_block_groups(trans, root);
- while (count < 2) {
+ while (count < 1) {
unsigned long cur = trans->delayed_ref_updates;
trans->delayed_ref_updates = 0;
if (cur &&
@@ -599,6 +651,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
}
count++;
}
+
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
@@ -644,12 +697,10 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
btrfs_run_delayed_iputs(root);
if (trans->aborted ||
- root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
+ test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
err = -EIO;
- }
assert_qgroups_uptodate(trans);
- memset(trans, 0, sizeof(*trans));
kmem_cache_free(btrfs_trans_handle_cachep, trans);
return err;
}
@@ -696,7 +747,9 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
struct extent_state *cached_state = NULL;
u64 start = 0;
u64 end;
+ struct blk_plug plug;
+ blk_start_plug(&plug);
while (!find_first_extent_bit(dirty_pages, start, &start, &end,
mark, &cached_state)) {
convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
@@ -710,6 +763,7 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
}
if (err)
werr = err;
+ blk_finish_plug(&plug);
return werr;
}
@@ -960,10 +1014,10 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
}
/*
- * defrag a given btree. If cacheonly == 1, this won't read from the disk,
- * otherwise every leaf in the btree is read and defragged.
+ * defrag a given btree.
+ * Every leaf in the btree is read and defragged.
*/
-int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
+int btrfs_defrag_root(struct btrfs_root *root)
{
struct btrfs_fs_info *info = root->fs_info;
struct btrfs_trans_handle *trans;
@@ -977,7 +1031,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
if (IS_ERR(trans))
return PTR_ERR(trans);
- ret = btrfs_defrag_leaves(trans, root, cacheonly);
+ ret = btrfs_defrag_leaves(trans, root);
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(info->tree_root);
@@ -985,6 +1039,12 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
break;
+
+ if (btrfs_defrag_cancelled(root->fs_info)) {
+ printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
+ ret = -EAGAIN;
+ break;
+ }
}
root->defrag_running = 0;
return ret;
@@ -1007,7 +1067,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
struct inode *parent_inode;
struct btrfs_path *path;
struct btrfs_dir_item *dir_item;
- struct dentry *parent;
struct dentry *dentry;
struct extent_buffer *tmp;
struct extent_buffer *old;
@@ -1022,7 +1081,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path) {
ret = pending->error = -ENOMEM;
- goto path_alloc_fail;
+ return ret;
}
new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
@@ -1062,10 +1121,10 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
rsv = trans->block_rsv;
trans->block_rsv = &pending->block_rsv;
+ trans->bytes_reserved = trans->block_rsv->reserved;
dentry = pending->dentry;
- parent = dget_parent(dentry);
- parent_inode = parent->d_inode;
+ parent_inode = pending->dir;
parent_root = BTRFS_I(parent_inode)->root;
record_root_in_trans(trans, parent_root);
@@ -1213,14 +1272,12 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret)
btrfs_abort_transaction(trans, root, ret);
fail:
- dput(parent);
trans->block_rsv = rsv;
+ trans->bytes_reserved = 0;
no_free_objectid:
kfree(new_root_item);
root_item_alloc_fail:
btrfs_free_path(path);
-path_alloc_fail:
- btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
return ret;
}
@@ -1306,13 +1363,13 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
struct btrfs_async_commit {
struct btrfs_trans_handle *newtrans;
struct btrfs_root *root;
- struct delayed_work work;
+ struct work_struct work;
};
static void do_async_commit(struct work_struct *work)
{
struct btrfs_async_commit *ac =
- container_of(work, struct btrfs_async_commit, work.work);
+ container_of(work, struct btrfs_async_commit, work);
/*
* We've got freeze protection passed with the transaction.
@@ -1340,7 +1397,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
if (!ac)
return -ENOMEM;
- INIT_DELAYED_WORK(&ac->work, do_async_commit);
+ INIT_WORK(&ac->work, do_async_commit);
ac->root = root;
ac->newtrans = btrfs_join_transaction(root);
if (IS_ERR(ac->newtrans)) {
@@ -1364,7 +1421,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1, _THIS_IP_);
- schedule_delayed_work(&ac->work, 0);
+ schedule_work(&ac->work);
/* wait for transaction to start and unblock */
if (wait_for_unblock)
@@ -1384,6 +1441,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root, int err)
{
struct btrfs_transaction *cur_trans = trans->transaction;
+ DEFINE_WAIT(wait);
WARN_ON(trans->use_count > 1);
@@ -1392,8 +1450,13 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
spin_lock(&root->fs_info->trans_lock);
list_del_init(&cur_trans->list);
if (cur_trans == root->fs_info->running_transaction) {
+ root->fs_info->trans_no_join = 1;
+ spin_unlock(&root->fs_info->trans_lock);
+ wait_event(cur_trans->writer_wait,
+ atomic_read(&cur_trans->num_writers) == 1);
+
+ spin_lock(&root->fs_info->trans_lock);
root->fs_info->running_transaction = NULL;
- root->fs_info->trans_no_join = 0;
}
spin_unlock(&root->fs_info->trans_lock);
@@ -1427,7 +1490,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
}
if (flush_on_commit || snap_pending) {
- btrfs_start_delalloc_inodes(root, 1);
+ ret = btrfs_start_delalloc_inodes(root, 1);
+ if (ret)
+ return ret;
btrfs_wait_ordered_extents(root, 1);
}
@@ -1449,9 +1514,9 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
* it here and know for sure that nothing new will be added
* to the list
*/
- btrfs_run_ordered_operations(root, 1);
+ ret = btrfs_run_ordered_operations(trans, root, 1);
- return 0;
+ return ret;
}
/*
@@ -1472,27 +1537,35 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
int should_grow = 0;
unsigned long now = get_seconds();
- ret = btrfs_run_ordered_operations(root, 0);
+ ret = btrfs_run_ordered_operations(trans, root, 0);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
- goto cleanup_transaction;
+ btrfs_end_transaction(trans, root);
+ return ret;
}
/* Stop the commit early if ->aborted is set */
if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
ret = cur_trans->aborted;
- goto cleanup_transaction;
+ btrfs_end_transaction(trans, root);
+ return ret;
}
/* make a pass through all the delayed refs we have so far
* any runnings procs may add more while we are here
*/
ret = btrfs_run_delayed_refs(trans, root, 0);
- if (ret)
- goto cleanup_transaction;
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ if (trans->qgroup_reserved) {
+ btrfs_qgroup_free(root, trans->qgroup_reserved);
+ trans->qgroup_reserved = 0;
+ }
cur_trans = trans->transaction;
@@ -1506,8 +1579,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
btrfs_create_pending_block_groups(trans, root);
ret = btrfs_run_delayed_refs(trans, root, 0);
- if (ret)
- goto cleanup_transaction;
+ if (ret) {
+ btrfs_end_transaction(trans, root);
+ return ret;
+ }
spin_lock(&cur_trans->commit_lock);
if (cur_trans->in_commit) {
@@ -1771,6 +1846,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
cleanup_transaction:
btrfs_trans_release_metadata(trans, root);
trans->block_rsv = NULL;
+ if (trans->qgroup_reserved) {
+ btrfs_qgroup_free(root, trans->qgroup_reserved);
+ trans->qgroup_reserved = 0;
+ }
btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
// WARN_ON(1);
if (current->journal_info == trans)
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 0e8aa1e6c28..3c8e0d25c8e 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -43,6 +43,7 @@ struct btrfs_transaction {
wait_queue_head_t writer_wait;
wait_queue_head_t commit_wait;
struct list_head pending_snapshots;
+ struct list_head ordered_operations;
struct btrfs_delayed_ref_root delayed_refs;
int aborted;
};
@@ -68,6 +69,7 @@ struct btrfs_trans_handle {
struct btrfs_block_rsv *orig_rsv;
short aborted;
short adding_csums;
+ bool allocating_chunk;
enum btrfs_trans_type type;
/*
* this root is only needed to validate that the root passed to
@@ -82,11 +84,13 @@ struct btrfs_trans_handle {
struct btrfs_pending_snapshot {
struct dentry *dentry;
+ struct inode *dir;
struct btrfs_root *root;
struct btrfs_root *snap;
struct btrfs_qgroup_inherit *inherit;
/* block reservation for the operation */
struct btrfs_block_rsv block_rsv;
+ u64 qgroup_reserved;
/* extra metadata reseration for relocation */
int error;
bool readonly;
@@ -110,13 +114,15 @@ struct btrfs_trans_handle *btrfs_start_transaction_lflush(
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
+ struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btrfs_add_dead_root(struct btrfs_root *root);
-int btrfs_defrag_root(struct btrfs_root *root, int cacheonly);
+int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_old_snapshots(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c
index 3b580ee8ab1..94e05c1f118 100644
--- a/fs/btrfs/tree-defrag.c
+++ b/fs/btrfs/tree-defrag.c
@@ -23,13 +23,14 @@
#include "transaction.h"
#include "locking.h"
-/* defrag all the leaves in a given btree. If cache_only == 1, don't read
- * things from disk, otherwise read all the leaves and try to get key order to
+/*
+ * Defrag all the leaves in a given btree.
+ * Read all the leaves and try to get key order to
* better reflect disk order
*/
int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, int cache_only)
+ struct btrfs_root *root)
{
struct btrfs_path *path = NULL;
struct btrfs_key key;
@@ -41,9 +42,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
u64 last_ret = 0;
u64 min_trans = 0;
- if (cache_only)
- goto out;
-
if (root->fs_info->extent_root == root) {
/*
* there's recursion here right now in the tree locking,
@@ -86,11 +84,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
}
path->keep_locks = 1;
- if (cache_only)
- min_trans = root->defrag_trans_start;
- ret = btrfs_search_forward(root, &key, NULL, path,
- cache_only, min_trans);
+ ret = btrfs_search_forward(root, &key, NULL, path, min_trans);
if (ret < 0)
goto out;
if (ret > 0) {
@@ -109,11 +104,11 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
goto out;
}
path->slots[1] = btrfs_header_nritems(path->nodes[1]);
- next_key_ret = btrfs_find_next_key(root, path, &key, 1, cache_only,
+ next_key_ret = btrfs_find_next_key(root, path, &key, 1,
min_trans);
ret = btrfs_realloc_node(trans, root,
path->nodes[1], 0,
- cache_only, &last_ret,
+ &last_ret,
&root->defrag_progress);
if (ret) {
WARN_ON(ret == -EAGAIN);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 9027bb1e746..c7ef569eb22 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -278,8 +278,7 @@ static int process_one_buffer(struct btrfs_root *log,
struct walk_control *wc, u64 gen)
{
if (wc->pin)
- btrfs_pin_extent_for_log_replay(wc->trans,
- log->fs_info->extent_root,
+ btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
eb->start, eb->len);
if (btrfs_buffer_uptodate(eb, gen, 0)) {
@@ -485,7 +484,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_key *key)
{
int found_type;
- u64 mask = root->sectorsize - 1;
u64 extent_end;
u64 start = key->offset;
u64 saved_nbytes;
@@ -502,7 +500,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
extent_end = start + btrfs_file_extent_num_bytes(eb, item);
else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
size = btrfs_file_extent_inline_len(eb, item);
- extent_end = (start + size + mask) & ~mask;
+ extent_end = ALIGN(start + size, root->sectorsize);
} else {
ret = 0;
goto out;
@@ -2281,6 +2279,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
unsigned long log_transid = 0;
mutex_lock(&root->log_mutex);
+ log_transid = root->log_transid;
index1 = root->log_transid % 2;
if (atomic_read(&root->log_commit[index1])) {
wait_log_commit(trans, root, root->log_transid);
@@ -2308,11 +2307,11 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
/* bail out if we need to do a full commit */
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
ret = -EAGAIN;
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
- log_transid = root->log_transid;
if (log_transid % 2 == 0)
mark = EXTENT_DIRTY;
else
@@ -2324,6 +2323,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&root->log_mutex);
goto out;
}
@@ -2363,6 +2363,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
}
root->fs_info->last_trans_log_full_commit = trans->transid;
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out;
@@ -2373,6 +2374,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = 0;
goto out;
@@ -2392,6 +2394,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
*/
if (root->fs_info->last_trans_log_full_commit == trans->transid) {
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = -EAGAIN;
goto out_wake_log_root;
@@ -2402,10 +2405,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW);
if (ret) {
btrfs_abort_transaction(trans, root, ret);
+ btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
}
btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+ btrfs_wait_logged_extents(log, log_transid);
btrfs_set_super_log_root(root->fs_info->super_for_commit,
log_root_tree->node->start);
@@ -2461,8 +2466,10 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
.process_func = process_one_buffer
};
- ret = walk_log_tree(trans, log, &wc);
- BUG_ON(ret);
+ if (trans) {
+ ret = walk_log_tree(trans, log, &wc);
+ BUG_ON(ret);
+ }
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -2475,6 +2482,14 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
}
+ /*
+ * We may have short-circuited the log tree with the full commit logic
+ * and left ordered extents on our list, so clear these out to keep us
+ * from leaking inodes and memory.
+ */
+ btrfs_free_logged_extents(log, 0);
+ btrfs_free_logged_extents(log, 1);
+
free_extent_buffer(log->node);
kfree(log);
}
@@ -2724,7 +2739,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
path->keep_locks = 1;
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, trans->transid);
+ path, trans->transid);
/*
* we didn't find anything from this transaction, see if there
@@ -3271,16 +3286,21 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *log = root->log_root;
struct btrfs_file_extent_item *fi;
struct extent_buffer *leaf;
+ struct btrfs_ordered_extent *ordered;
struct list_head ordered_sums;
struct btrfs_map_token token;
struct btrfs_key key;
- u64 csum_offset = em->mod_start - em->start;
- u64 csum_len = em->mod_len;
+ u64 mod_start = em->mod_start;
+ u64 mod_len = em->mod_len;
+ u64 csum_offset;
+ u64 csum_len;
u64 extent_offset = em->start - em->orig_start;
u64 block_len;
int ret;
+ int index = log->log_transid % 2;
bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+insert:
INIT_LIST_HEAD(&ordered_sums);
btrfs_init_map_token(&token);
key.objectid = btrfs_ino(inode);
@@ -3296,6 +3316,23 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
leaf = path->nodes[0];
fi = btrfs_item_ptr(leaf, path->slots[0],
struct btrfs_file_extent_item);
+
+ /*
+ * If we are overwriting an inline extent with a real one then we need
+ * to just delete the inline extent as it may not be large enough to
+ * have the entire file_extent_item.
+ */
+ if (ret && btrfs_token_file_extent_type(leaf, fi, &token) ==
+ BTRFS_FILE_EXTENT_INLINE) {
+ ret = btrfs_del_item(trans, log, path);
+ btrfs_release_path(path);
+ if (ret) {
+ path->really_keep_locks = 0;
+ return ret;
+ }
+ goto insert;
+ }
+
btrfs_set_token_file_extent_generation(leaf, fi, em->generation,
&token);
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
@@ -3362,6 +3399,92 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
csum_len = block_len;
}
+ /*
+ * First check and see if our csums are on our outstanding ordered
+ * extents.
+ */
+again:
+ spin_lock_irq(&log->log_extents_lock[index]);
+ list_for_each_entry(ordered, &log->logged_list[index], log_list) {
+ struct btrfs_ordered_sum *sum;
+
+ if (!mod_len)
+ break;
+
+ if (ordered->inode != inode)
+ continue;
+
+ if (ordered->file_offset + ordered->len <= mod_start ||
+ mod_start + mod_len <= ordered->file_offset)
+ continue;
+
+ /*
+ * We are going to copy all the csums on this ordered extent, so
+ * go ahead and adjust mod_start and mod_len in case this
+ * ordered extent has already been logged.
+ */
+ if (ordered->file_offset > mod_start) {
+ if (ordered->file_offset + ordered->len >=
+ mod_start + mod_len)
+ mod_len = ordered->file_offset - mod_start;
+ /*
+ * If we have this case
+ *
+ * |--------- logged extent ---------|
+ * |----- ordered extent ----|
+ *
+ * Just don't mess with mod_start and mod_len, we'll
+ * just end up logging more csums than we need and it
+ * will be ok.
+ */
+ } else {
+ if (ordered->file_offset + ordered->len <
+ mod_start + mod_len) {
+ mod_len = (mod_start + mod_len) -
+ (ordered->file_offset + ordered->len);
+ mod_start = ordered->file_offset +
+ ordered->len;
+ } else {
+ mod_len = 0;
+ }
+ }
+
+ /*
+ * To keep us from looping for the above case of an ordered
+ * extent that falls inside of the logged extent.
+ */
+ if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
+ &ordered->flags))
+ continue;
+ atomic_inc(&ordered->refs);
+ spin_unlock_irq(&log->log_extents_lock[index]);
+ /*
+ * we've dropped the lock, we must either break or
+ * start over after this.
+ */
+
+ wait_event(ordered->wait, ordered->csum_bytes_left == 0);
+
+ list_for_each_entry(sum, &ordered->list, list) {
+ ret = btrfs_csum_file_blocks(trans, log, sum);
+ if (ret) {
+ btrfs_put_ordered_extent(ordered);
+ goto unlocked;
+ }
+ }
+ btrfs_put_ordered_extent(ordered);
+ goto again;
+
+ }
+ spin_unlock_irq(&log->log_extents_lock[index]);
+unlocked:
+
+ if (!mod_len || ret)
+ return ret;
+
+ csum_offset = mod_start - em->start;
+ csum_len = mod_len;
+
/* block start is already adjusted for the file extent offset. */
ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
em->block_start + csum_offset,
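The mod_start/mod_len bookkeeping above is easier to follow with the clamping pulled out on its own. The helper below is a hypothetical, simplified restatement of the same logic (plain integers, no locking or refcounting), not code from this patch:

/* Hypothetical sketch of the range clamping done above, for illustration only. */
static void clamp_logged_range(u64 *mod_start, u64 *mod_len,
			       u64 ord_start, u64 ord_len)
{
	u64 mod_end = *mod_start + *mod_len;
	u64 ord_end = ord_start + ord_len;

	if (ord_start > *mod_start) {
		/* ordered extent overlaps the tail of the logged range */
		if (ord_end >= mod_end)
			*mod_len = ord_start - *mod_start;
		/*
		 * otherwise the ordered extent sits wholly inside the logged
		 * range: leave it alone and accept logging some extra csums
		 */
	} else if (ord_end < mod_end) {
		/* ordered extent overlaps the head of the logged range */
		*mod_len = mod_end - ord_end;
		*mod_start = ord_end;
	} else {
		/* ordered extent covers everything that was left */
		*mod_len = 0;
	}
}

For example, a logged range [0, 64K) with an ordered extent over [48K, 80K) shrinks to [0, 48K), while an ordered extent over [0, 16K) shrinks it to [16K, 64K).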
@@ -3393,6 +3516,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
u64 test_gen;
int ret = 0;
+ int num = 0;
INIT_LIST_HEAD(&extents);
@@ -3401,16 +3525,31 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
list_del_init(&em->list);
+
+ /*
+ * 32768 is just an arbitrary cut-off: logging gets really CPU
+ * intensive once we have a lot of extents, and past a certain
+ * point it is simply faster to give up and commit the whole
+ * transaction instead.
+ */
+ if (++num > 32768) {
+ list_del_init(&tree->modified_extents);
+ ret = -EFBIG;
+ goto process;
+ }
+
if (em->generation <= test_gen)
continue;
/* Need a ref to keep it from getting evicted from cache */
atomic_inc(&em->refs);
set_bit(EXTENT_FLAG_LOGGING, &em->flags);
list_add_tail(&em->list, &extents);
+ num++;
}
list_sort(NULL, &extents, extent_cmp);
+process:
while (!list_empty(&extents)) {
em = list_entry(extents.next, struct extent_map, list);
@@ -3513,6 +3652,8 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
mutex_lock(&BTRFS_I(inode)->log_mutex);
+ btrfs_get_logged_extents(log, inode);
+
/*
* a brute force approach to making sure we get the most uptodate
* copies of everything.
@@ -3558,7 +3699,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
while (1) {
ins_nr = 0;
ret = btrfs_search_forward(root, &min_key, &max_key,
- path, 0, trans->transid);
+ path, trans->transid);
if (ret != 0)
break;
again:
@@ -3656,6 +3797,8 @@ log_extents:
BTRFS_I(inode)->logged_trans = trans->transid;
BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
out_unlock:
+ if (err)
+ btrfs_free_logged_extents(log, log->log_transid);
mutex_unlock(&BTRFS_I(inode)->log_mutex);
btrfs_free_path(path);
@@ -3822,7 +3965,6 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
end_trans:
dput(old_parent);
if (ret < 0) {
- WARN_ON(ret != -ENOSPC);
root->fs_info->last_trans_log_full_commit = trans->transid;
ret = 1;
}
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 99be4c138db..ddc61cad008 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -5,7 +5,7 @@
*/
#include <linux/slab.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include "ulist.h"
/*
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5cbb7f4b167..35bb2d4ed29 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -25,6 +25,8 @@
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
+#include <linux/raid/pq.h>
+#include <asm/div64.h>
#include "compat.h"
#include "ctree.h"
#include "extent_map.h"
@@ -32,6 +34,7 @@
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
+#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
@@ -647,6 +650,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
new_device->writeable = 0;
new_device->in_fs_metadata = 0;
new_device->can_discard = 0;
+ spin_lock_init(&new_device->io_lock);
list_replace_rcu(&device->dev_list, &new_device->dev_list);
call_rcu(&device->rcu, free_device);
@@ -792,26 +796,75 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
return ret;
}
+/*
+ * Look for a btrfs signature on a device. This may be called outside of the
+ * mount path and we are not allowed to call set_blocksize during the scan.
+ * The superblock is read via the pagecache.
+ */
int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
struct btrfs_fs_devices **fs_devices_ret)
{
struct btrfs_super_block *disk_super;
struct block_device *bdev;
- struct buffer_head *bh;
- int ret;
+ struct page *page;
+ void *p;
+ int ret = -EINVAL;
u64 devid;
u64 transid;
u64 total_devices;
+ u64 bytenr;
+ pgoff_t index;
+ /*
+ * we would like to check all the supers, but that would make
+ * a btrfs mount succeed after a mkfs from a different FS.
+ * So, we need to add a special mount option to scan for
+ * later supers, using BTRFS_SUPER_MIRROR_MAX instead
+ */
+ bytenr = btrfs_sb_offset(0);
flags |= FMODE_EXCL;
mutex_lock(&uuid_mutex);
- ret = btrfs_get_bdev_and_sb(path, flags, holder, 0, &bdev, &bh);
- if (ret)
+
+ bdev = blkdev_get_by_path(path, flags, holder);
+
+ if (IS_ERR(bdev)) {
+ ret = PTR_ERR(bdev);
goto error;
- disk_super = (struct btrfs_super_block *)bh->b_data;
+ }
+
+ /* make sure our super fits in the device */
+ if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode))
+ goto error_bdev_put;
+
+ /* make sure our super fits in the page */
+ if (sizeof(*disk_super) > PAGE_CACHE_SIZE)
+ goto error_bdev_put;
+
+ /* make sure our super doesn't straddle pages on disk */
+ index = bytenr >> PAGE_CACHE_SHIFT;
+ if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index)
+ goto error_bdev_put;
+
+ /* pull in the page with our super */
+ page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
+ index, GFP_NOFS);
+
+ if (IS_ERR_OR_NULL(page))
+ goto error_bdev_put;
+
+ p = kmap(page);
+
+ /* align our pointer to the offset of the super block */
+ disk_super = p + (bytenr & ~PAGE_CACHE_MASK);
+
+ if (btrfs_super_bytenr(disk_super) != bytenr ||
+ disk_super->magic != cpu_to_le64(BTRFS_MAGIC))
+ goto error_unmap;
+
devid = btrfs_stack_device_id(&disk_super->dev_item);
transid = btrfs_super_generation(disk_super);
total_devices = btrfs_super_num_devices(disk_super);
+
if (disk_super->label[0]) {
if (disk_super->label[BTRFS_LABEL_SIZE - 1])
disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0';
@@ -819,12 +872,19 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
} else {
printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
}
+
printk(KERN_CONT "devid %llu transid %llu %s\n",
(unsigned long long)devid, (unsigned long long)transid, path);
+
ret = device_list_add(path, disk_super, devid, fs_devices_ret);
if (!ret && fs_devices_ret)
(*fs_devices_ret)->total_devices = total_devices;
- brelse(bh);
+
+error_unmap:
+ kunmap(page);
+ page_cache_release(page);
+
+error_bdev_put:
blkdev_put(bdev, flags);
error:
mutex_unlock(&uuid_mutex);
@@ -1372,14 +1432,19 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
u64 devid;
u64 num_devices;
u8 *dev_uuid;
+ unsigned seq;
int ret = 0;
bool clear_super = false;
mutex_lock(&uuid_mutex);
- all_avail = root->fs_info->avail_data_alloc_bits |
- root->fs_info->avail_system_alloc_bits |
- root->fs_info->avail_metadata_alloc_bits;
+ do {
+ seq = read_seqbegin(&root->fs_info->profiles_lock);
+
+ all_avail = root->fs_info->avail_data_alloc_bits |
+ root->fs_info->avail_system_alloc_bits |
+ root->fs_info->avail_metadata_alloc_bits;
+ } while (read_seqretry(&root->fs_info->profiles_lock, seq));
num_devices = root->fs_info->fs_devices->num_devices;
btrfs_dev_replace_lock(&root->fs_info->dev_replace);
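The read_seqbegin()/read_seqretry() loop above is the lockless-reader half of a seqlock around the avail_*_alloc_bits fields. The matching writer side is assumed to live in the chunk allocation path and is not part of the hunks shown here; it would follow the usual pattern, roughly:

/* Hypothetical writer-side helper, for illustration only. */
static void example_update_avail_data_bits(struct btrfs_fs_info *fs_info,
					   u64 new_bits)
{
	write_seqlock(&fs_info->profiles_lock);
	fs_info->avail_data_alloc_bits |= new_bits;
	write_sequnlock(&fs_info->profiles_lock);
}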
@@ -1403,6 +1468,21 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
goto out;
}
+ if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) &&
+ root->fs_info->fs_devices->rw_devices <= 2) {
+ printk(KERN_ERR "btrfs: unable to go below two "
+ "devices on raid5\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) &&
+ root->fs_info->fs_devices->rw_devices <= 3) {
+ printk(KERN_ERR "btrfs: unable to go below three "
+ "devices on raid6\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
if (strcmp(device_path, "missing") == 0) {
struct list_head *devices;
struct btrfs_device *tmp;
@@ -2616,7 +2696,7 @@ static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
chunk_used = btrfs_block_group_used(&cache->item);
if (bargs->usage == 0)
- user_thresh = 0;
+ user_thresh = 1;
else if (bargs->usage > 100)
user_thresh = cache->key.offset;
else
@@ -2664,11 +2744,15 @@ static int chunk_drange_filter(struct extent_buffer *leaf,
return 0;
if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
- BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
- factor = 2;
- else
- factor = 1;
- factor = num_stripes / factor;
+ BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10)) {
+ factor = num_stripes / 2;
+ } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID5) {
+ factor = num_stripes - 1;
+ } else if (btrfs_chunk_type(leaf, chunk) & BTRFS_BLOCK_GROUP_RAID6) {
+ factor = num_stripes - 2;
+ } else {
+ factor = num_stripes;
+ }
for (i = 0; i < num_stripes; i++) {
stripe = btrfs_stripe_nr(chunk, i);
@@ -2985,6 +3069,7 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
int mixed = 0;
int ret;
u64 num_devices;
+ unsigned seq;
if (btrfs_fs_closing(fs_info) ||
atomic_read(&fs_info->balance_pause_req) ||
@@ -3027,7 +3112,9 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
else
allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10);
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6);
if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
(!alloc_profile_is_valid(bctl->data.target, 1) ||
@@ -3067,23 +3154,29 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
/* allow to reduce meta or sys integrity only if force set */
allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
- BTRFS_BLOCK_GROUP_RAID10;
- if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (fs_info->avail_system_alloc_bits & allowed) &&
- !(bctl->sys.target & allowed)) ||
- ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
- (fs_info->avail_metadata_alloc_bits & allowed) &&
- !(bctl->meta.target & allowed))) {
- if (bctl->flags & BTRFS_BALANCE_FORCE) {
- printk(KERN_INFO "btrfs: force reducing metadata "
- "integrity\n");
- } else {
- printk(KERN_ERR "btrfs: balance will reduce metadata "
- "integrity, use force if you want this\n");
- ret = -EINVAL;
- goto out;
+ BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6;
+ do {
+ seq = read_seqbegin(&fs_info->profiles_lock);
+
+ if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (fs_info->avail_system_alloc_bits & allowed) &&
+ !(bctl->sys.target & allowed)) ||
+ ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+ (fs_info->avail_metadata_alloc_bits & allowed) &&
+ !(bctl->meta.target & allowed))) {
+ if (bctl->flags & BTRFS_BALANCE_FORCE) {
+ printk(KERN_INFO "btrfs: force reducing metadata "
+ "integrity\n");
+ } else {
+ printk(KERN_ERR "btrfs: balance will reduce metadata "
+ "integrity, use force if you want this\n");
+ ret = -EINVAL;
+ goto out;
+ }
}
- }
+ } while (read_seqretry(&fs_info->profiles_lock, seq));
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
int num_tolerated_disk_barrier_failures;
@@ -3127,21 +3220,16 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
mutex_lock(&fs_info->balance_mutex);
atomic_dec(&fs_info->balance_running);
- if (bargs) {
- memset(bargs, 0, sizeof(*bargs));
- update_ioctl_balance_args(fs_info, 0, bargs);
- }
-
- if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
- balance_need_close(fs_info)) {
- __cancel_balance(fs_info);
- }
-
if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
fs_info->num_tolerated_disk_barrier_failures =
btrfs_calc_num_tolerated_disk_barrier_failures(fs_info);
}
+ if (bargs) {
+ memset(bargs, 0, sizeof(*bargs));
+ update_ioctl_balance_args(fs_info, 0, bargs);
+ }
+
wake_up(&fs_info->balance_wait_q);
return ret;
@@ -3504,13 +3592,86 @@ static int btrfs_cmp_device_info(const void *a, const void *b)
}
struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
- { 2, 1, 0, 4, 2, 2 /* raid10 */ },
- { 1, 1, 2, 2, 2, 2 /* raid1 */ },
- { 1, 2, 1, 1, 1, 2 /* dup */ },
- { 1, 1, 0, 2, 1, 1 /* raid0 */ },
- { 1, 1, 1, 1, 1, 1 /* single */ },
+ [BTRFS_RAID_RAID10] = {
+ .sub_stripes = 2,
+ .dev_stripes = 1,
+ .devs_max = 0, /* 0 == as many as possible */
+ .devs_min = 4,
+ .devs_increment = 2,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_RAID1] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 2,
+ .devs_min = 2,
+ .devs_increment = 2,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_DUP] = {
+ .sub_stripes = 1,
+ .dev_stripes = 2,
+ .devs_max = 1,
+ .devs_min = 1,
+ .devs_increment = 1,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_RAID0] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 2,
+ .devs_increment = 1,
+ .ncopies = 1,
+ },
+ [BTRFS_RAID_SINGLE] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 1,
+ .devs_min = 1,
+ .devs_increment = 1,
+ .ncopies = 1,
+ },
+ [BTRFS_RAID_RAID5] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 2,
+ .devs_increment = 1,
+ .ncopies = 2,
+ },
+ [BTRFS_RAID_RAID6] = {
+ .sub_stripes = 1,
+ .dev_stripes = 1,
+ .devs_max = 0,
+ .devs_min = 3,
+ .devs_increment = 1,
+ .ncopies = 3,
+ },
};
+static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
+{
+ /* TODO allow them to set a preferred stripe size */
+ return 64 * 1024;
+}
+
+static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
+{
+ u64 features;
+
+ if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+ return;
+
+ features = btrfs_super_incompat_flags(info->super_copy);
+ if (features & BTRFS_FEATURE_INCOMPAT_RAID56)
+ return;
+
+ features |= BTRFS_FEATURE_INCOMPAT_RAID56;
+ btrfs_set_super_incompat_flags(info->super_copy, features);
+ printk(KERN_INFO "btrfs: setting RAID5/6 feature flag\n");
+}
+
static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct map_lookup **map_ret,
@@ -3526,6 +3687,8 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_device_info *devices_info = NULL;
u64 total_avail;
int num_stripes; /* total number of stripes to allocate */
+ int data_stripes; /* number of stripes that count for
+ block group size */
int sub_stripes; /* sub_stripes info for map */
int dev_stripes; /* stripes per dev */
int devs_max; /* max devs to use */
@@ -3537,6 +3700,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
u64 max_chunk_size;
u64 stripe_size;
u64 num_bytes;
+ u64 raid_stripe_len = BTRFS_STRIPE_LEN;
int ndevs;
int i;
int j;
@@ -3631,12 +3795,16 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
if (max_avail < BTRFS_STRIPE_LEN * dev_stripes)
continue;
+ if (ndevs == fs_devices->rw_devices) {
+ WARN(1, "%s: found more than %llu devices\n",
+ __func__, fs_devices->rw_devices);
+ break;
+ }
devices_info[ndevs].dev_offset = dev_offset;
devices_info[ndevs].max_avail = max_avail;
devices_info[ndevs].total_avail = total_avail;
devices_info[ndevs].dev = device;
++ndevs;
- WARN_ON(ndevs > fs_devices->rw_devices);
}
/*
@@ -3662,16 +3830,48 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
stripe_size = devices_info[ndevs-1].max_avail;
num_stripes = ndevs * dev_stripes;
- if (stripe_size * ndevs > max_chunk_size * ncopies) {
- stripe_size = max_chunk_size * ncopies;
- do_div(stripe_size, ndevs);
+ /*
+ * this will have to be fixed for RAID1 and RAID10 over
+ * more drives
+ */
+ data_stripes = num_stripes / ncopies;
+
+ if (type & BTRFS_BLOCK_GROUP_RAID5) {
+ raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
+ btrfs_super_stripesize(info->super_copy));
+ data_stripes = num_stripes - 1;
+ }
+ if (type & BTRFS_BLOCK_GROUP_RAID6) {
+ raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
+ btrfs_super_stripesize(info->super_copy));
+ data_stripes = num_stripes - 2;
+ }
+
+ /*
+ * Use the number of data stripes to figure out how big this chunk
+ * is really going to be in terms of logical address space,
+ * and compare that answer with the max chunk size
+ */
+ if (stripe_size * data_stripes > max_chunk_size) {
+ u64 mask = (1ULL << 24) - 1;
+ stripe_size = max_chunk_size;
+ do_div(stripe_size, data_stripes);
+
+ /* bump the answer up to a 16MB boundary */
+ stripe_size = (stripe_size + mask) & ~mask;
+
+ /* but don't go higher than the limits we found
+ * while searching for free extents
+ */
+ if (stripe_size > devices_info[ndevs-1].max_avail)
+ stripe_size = devices_info[ndevs-1].max_avail;
}
do_div(stripe_size, dev_stripes);
/* align to BTRFS_STRIPE_LEN */
- do_div(stripe_size, BTRFS_STRIPE_LEN);
- stripe_size *= BTRFS_STRIPE_LEN;
+ do_div(stripe_size, raid_stripe_len);
+ stripe_size *= raid_stripe_len;
map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
if (!map) {
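To put rough numbers on the sizing logic above (illustrative figures only, not from the patch): for a RAID5 chunk over four devices, num_stripes is 4 and data_stripes is 3. If each device could contribute 5 GiB of stripe but max_chunk_size is 10 GiB, then stripe_size * data_stripes = 15 GiB exceeds the limit, so stripe_size is recomputed as 10 GiB / 3 (about 3413 MiB), bumped up to the next 16 MiB boundary (3424 MiB), clamped back to the per-device free space if the rounding overshot it, and finally aligned down to a multiple of raid_stripe_len. The chunk's logical size, num_bytes, then becomes stripe_size * data_stripes rather than stripe_size * num_stripes.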
@@ -3689,14 +3889,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
}
}
map->sector_size = extent_root->sectorsize;
- map->stripe_len = BTRFS_STRIPE_LEN;
- map->io_align = BTRFS_STRIPE_LEN;
- map->io_width = BTRFS_STRIPE_LEN;
+ map->stripe_len = raid_stripe_len;
+ map->io_align = raid_stripe_len;
+ map->io_width = raid_stripe_len;
map->type = type;
map->sub_stripes = sub_stripes;
*map_ret = map;
- num_bytes = stripe_size * (num_stripes / ncopies);
+ num_bytes = stripe_size * data_stripes;
*stripe_size_out = stripe_size;
*num_bytes_out = num_bytes;
@@ -3718,15 +3918,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
write_lock(&em_tree->lock);
ret = add_extent_mapping(em_tree, em);
write_unlock(&em_tree->lock);
- free_extent_map(em);
- if (ret)
- goto error;
-
- ret = btrfs_make_block_group(trans, extent_root, 0, type,
- BTRFS_FIRST_CHUNK_TREE_OBJECTID,
- start, num_bytes);
- if (ret)
+ if (ret) {
+ free_extent_map(em);
goto error;
+ }
for (i = 0; i < map->num_stripes; ++i) {
struct btrfs_device *device;
@@ -3739,15 +3934,44 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
info->chunk_root->root_key.objectid,
BTRFS_FIRST_CHUNK_TREE_OBJECTID,
start, dev_offset, stripe_size);
- if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
- goto error;
- }
+ if (ret)
+ goto error_dev_extent;
+ }
+
+ ret = btrfs_make_block_group(trans, extent_root, 0, type,
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+ start, num_bytes);
+ if (ret) {
+ i = map->num_stripes - 1;
+ goto error_dev_extent;
}
+ free_extent_map(em);
+ check_raid56_incompat_flag(extent_root->fs_info, type);
+
kfree(devices_info);
return 0;
+error_dev_extent:
+ for (; i >= 0; i--) {
+ struct btrfs_device *device;
+ int err;
+
+ device = map->stripes[i].dev;
+ err = btrfs_free_dev_extent(trans, device, start);
+ if (err) {
+ btrfs_abort_transaction(trans, extent_root, err);
+ break;
+ }
+ }
+ write_lock(&em_tree->lock);
+ remove_extent_mapping(em_tree, em);
+ write_unlock(&em_tree->lock);
+
+ /* One for our allocation */
+ free_extent_map(em);
+ /* One for the tree reference */
+ free_extent_map(em);
error:
kfree(map);
kfree(devices_info);
@@ -3887,10 +4111,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
if (ret)
return ret;
- alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
- fs_info->avail_metadata_alloc_bits;
- alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+ alloc_profile = btrfs_get_alloc_profile(extent_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
&stripe_size, chunk_offset, alloc_profile);
if (ret)
@@ -3898,10 +4119,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
sys_chunk_offset = chunk_offset + chunk_size;
- alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
- fs_info->avail_system_alloc_bits;
- alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
-
+ alloc_profile = btrfs_get_alloc_profile(fs_info->chunk_root, 0);
ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
&sys_chunk_size, &sys_stripe_size,
sys_chunk_offset, alloc_profile);
@@ -4014,6 +4232,10 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
ret = map->num_stripes;
else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
ret = map->sub_stripes;
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
+ ret = 2;
+ else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ ret = 3;
else
ret = 1;
free_extent_map(em);
@@ -4026,6 +4248,52 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
return ret;
}
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+ struct btrfs_mapping_tree *map_tree,
+ u64 logical)
+{
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct extent_map_tree *em_tree = &map_tree->map_tree;
+ unsigned long len = root->sectorsize;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, logical, len);
+ read_unlock(&em_tree->lock);
+ BUG_ON(!em);
+
+ BUG_ON(em->start > logical || em->start + em->len < logical);
+ map = (struct map_lookup *)em->bdev;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ len = map->stripe_len * nr_data_stripes(map);
+ }
+ free_extent_map(em);
+ return len;
+}
+
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+ u64 logical, u64 len, int mirror_num)
+{
+ struct extent_map *em;
+ struct map_lookup *map;
+ struct extent_map_tree *em_tree = &map_tree->map_tree;
+ int ret = 0;
+
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, logical, len);
+ read_unlock(&em_tree->lock);
+ BUG_ON(!em);
+
+ BUG_ON(em->start > logical || em->start + em->len < logical);
+ map = (struct map_lookup *)em->bdev;
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6))
+ ret = 1;
+ free_extent_map(em);
+ return ret;
+}
+
static int find_live_mirror(struct btrfs_fs_info *fs_info,
struct map_lookup *map, int first, int num,
int optimal, int dev_replace_is_ongoing)
@@ -4063,10 +4331,39 @@ static int find_live_mirror(struct btrfs_fs_info *fs_info,
return optimal;
}
+static inline int parity_smaller(u64 a, u64 b)
+{
+ return a > b;
+}
+
+/* Bubble-sort the stripe set to put the parity/syndrome stripes last */
+static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+{
+ struct btrfs_bio_stripe s;
+ int i;
+ u64 l;
+ int again = 1;
+
+ while (again) {
+ again = 0;
+ for (i = 0; i < bbio->num_stripes - 1; i++) {
+ if (parity_smaller(raid_map[i], raid_map[i+1])) {
+ s = bbio->stripes[i];
+ l = raid_map[i];
+ bbio->stripes[i] = bbio->stripes[i+1];
+ raid_map[i] = raid_map[i+1];
+ bbio->stripes[i+1] = s;
+ raid_map[i+1] = l;
+ again = 1;
+ }
+ }
+ }
+}
+
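A note on why a plain value comparison is enough for this sort: RAID5_P_STRIPE and RAID6_Q_STRIPE are defined elsewhere in this series as the two largest u64 sentinel values, so ordering raid_map entries numerically naturally pushes the parity and syndrome slots behind every real logical address.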
static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
- int mirror_num)
+ int mirror_num, u64 **raid_map_ret)
{
struct extent_map *em;
struct map_lookup *map;
@@ -4078,6 +4375,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 stripe_nr;
u64 stripe_nr_orig;
u64 stripe_nr_end;
+ u64 stripe_len;
+ u64 *raid_map = NULL;
int stripe_index;
int i;
int ret = 0;
@@ -4089,6 +4388,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
int num_alloc_stripes;
int patch_the_first_stripe_for_dev_replace = 0;
u64 physical_to_patch_in_first_stripe = 0;
+ u64 raid56_full_stripe_start = (u64)-1;
read_lock(&em_tree->lock);
em = lookup_extent_mapping(em_tree, logical, *length);
@@ -4105,29 +4405,63 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
map = (struct map_lookup *)em->bdev;
offset = logical - em->start;
+ if (mirror_num > map->num_stripes)
+ mirror_num = 0;
+
+ stripe_len = map->stripe_len;
stripe_nr = offset;
/*
* stripe_nr counts the total number of stripes we have to stride
* to get to this block
*/
- do_div(stripe_nr, map->stripe_len);
+ do_div(stripe_nr, stripe_len);
- stripe_offset = stripe_nr * map->stripe_len;
+ stripe_offset = stripe_nr * stripe_len;
BUG_ON(offset < stripe_offset);
/* stripe_offset is the offset of this block in its stripe*/
stripe_offset = offset - stripe_offset;
- if (rw & REQ_DISCARD)
+ /* if we're here for raid56, we need to know the stripe aligned start */
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+ unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
+ raid56_full_stripe_start = offset;
+
+ /* allow a write of a full stripe, but make sure we don't
+ * allow straddling of stripes
+ */
+ do_div(raid56_full_stripe_start, full_stripe_len);
+ raid56_full_stripe_start *= full_stripe_len;
+ }
+
+ if (rw & REQ_DISCARD) {
+ /* we don't discard raid56 yet */
+ if (map->type &
+ (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
*length = min_t(u64, em->len - offset, *length);
- else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
- /* we limit the length of each bio to what fits in a stripe */
- *length = min_t(u64, em->len - offset,
- map->stripe_len - stripe_offset);
+ } else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+ u64 max_len;
+ /* For writes to RAID[56], allow a full stripeset across all disks.
+ For other RAID types and for RAID[56] reads, just allow a single
+ stripe (on a single disk). */
+ if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+ (rw & REQ_WRITE)) {
+ max_len = stripe_len * nr_data_stripes(map) -
+ (offset - raid56_full_stripe_start);
+ } else {
+ /* we limit the length of each bio to what fits in a stripe */
+ max_len = stripe_len - stripe_offset;
+ }
+ *length = min_t(u64, em->len - offset, max_len);
} else {
*length = em->len - offset;
}
+ /* This is for when we're called from btrfs_merge_bio_hook() and all
+ it cares about is the length */
if (!bbio_ret)
goto out;
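As a concrete illustration of the RAID5/6 length handling above (numbers chosen for this example only): with stripe_len = 64 KiB and three data stripes, full_stripe_len is 192 KiB. A logical offset 300 KiB into the chunk rounds down to raid56_full_stripe_start = 192 KiB, so a RAID5/6 write there may cover at most 192 KiB - (300 KiB - 192 KiB) = 84 KiB before it would straddle into the next full stripe, while a read at the same offset is still capped at the single-stripe remainder, stripe_len - stripe_offset = 64 KiB - 44 KiB = 20 KiB.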
@@ -4160,7 +4494,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
u64 physical_of_found = 0;
ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
- logical, &tmp_length, &tmp_bbio, 0);
+ logical, &tmp_length, &tmp_bbio, 0, NULL);
if (ret) {
WARN_ON(tmp_bbio != NULL);
goto out;
@@ -4221,11 +4555,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
num_stripes = 1;
stripe_index = 0;
stripe_nr_orig = stripe_nr;
- stripe_nr_end = (offset + *length + map->stripe_len - 1) &
- (~(map->stripe_len - 1));
+ stripe_nr_end = ALIGN(offset + *length, map->stripe_len);
do_div(stripe_nr_end, map->stripe_len);
stripe_end_offset = stripe_nr_end * map->stripe_len -
(offset + *length);
+
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
if (rw & REQ_DISCARD)
num_stripes = min_t(u64, map->num_stripes,
@@ -4276,6 +4610,65 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
dev_replace_is_ongoing);
mirror_num = stripe_index - old_stripe_index + 1;
}
+
+ } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ u64 tmp;
+
+ if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1)
+ && raid_map_ret) {
+ int i, rot;
+
+ /* push stripe_nr back to the start of the full stripe */
+ stripe_nr = raid56_full_stripe_start;
+ do_div(stripe_nr, stripe_len);
+
+ stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+
+ /* RAID[56] write or recovery. Return all stripes */
+ num_stripes = map->num_stripes;
+ max_errors = nr_parity_stripes(map);
+
+ raid_map = kmalloc(sizeof(u64) * num_stripes,
+ GFP_NOFS);
+ if (!raid_map) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Work out the disk rotation on this stripe-set */
+ tmp = stripe_nr;
+ rot = do_div(tmp, num_stripes);
+
+ /* Fill in the logical address of each stripe */
+ tmp = stripe_nr * nr_data_stripes(map);
+ for (i = 0; i < nr_data_stripes(map); i++)
+ raid_map[(i+rot) % num_stripes] =
+ em->start + (tmp + i) * map->stripe_len;
+
+ raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+ if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+ raid_map[(i+rot+1) % num_stripes] =
+ RAID6_Q_STRIPE;
+
+ *length = map->stripe_len;
+ stripe_index = 0;
+ stripe_offset = 0;
+ } else {
+ /*
+ * Mirror #0 or #1 means the original data block.
+ * Mirror #2 is RAID5 parity block.
+ * Mirror #3 is RAID6 Q block.
+ */
+ stripe_index = do_div(stripe_nr, nr_data_stripes(map));
+ if (mirror_num > 1)
+ stripe_index = nr_data_stripes(map) +
+ mirror_num - 2;
+
+ /* We distribute the parity blocks across stripes */
+ tmp = stripe_nr + stripe_index;
+ stripe_index = do_div(tmp, map->num_stripes);
+ }
} else {
/*
* after this do_div call, stripe_nr is the number of stripes
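To make the rotation arithmetic above concrete (illustrative numbers only): on a three-device RAID5 chunk (two data stripes plus parity), full-stripe number stripe_nr = 4 gives rot = 4 % 3 = 1 and tmp = stripe_nr * nr_data_stripes(map) = 8. The data stripes i = 0 and i = 1 land in raid_map slots (0+1) % 3 = 1 and (1+1) % 3 = 2, holding logical addresses em->start + 8 * stripe_len and em->start + 9 * stripe_len, and the loop exits with i = 2, so RAID5_P_STRIPE is written to slot (2+1) % 3 = 0; the parity has rotated onto the first device for this stripe-set.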
@@ -4384,8 +4777,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) {
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
+ BTRFS_BLOCK_GROUP_RAID5 |
BTRFS_BLOCK_GROUP_DUP)) {
max_errors = 1;
+ } else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
+ max_errors = 2;
}
}
@@ -4486,6 +4882,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
bbio->mirror_num = map->num_stripes + 1;
}
+ if (raid_map) {
+ sort_parity_stripes(bbio, raid_map);
+ *raid_map_ret = raid_map;
+ }
out:
if (dev_replace_is_ongoing)
btrfs_dev_replace_unlock(dev_replace);
@@ -4498,7 +4898,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
struct btrfs_bio **bbio_ret, int mirror_num)
{
return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
- mirror_num);
+ mirror_num, NULL);
}
int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -4512,6 +4912,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
u64 bytenr;
u64 length;
u64 stripe_nr;
+ u64 rmap_len;
int i, j, nr = 0;
read_lock(&em_tree->lock);
@@ -4522,10 +4923,17 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
map = (struct map_lookup *)em->bdev;
length = em->len;
+ rmap_len = map->stripe_len;
+
if (map->type & BTRFS_BLOCK_GROUP_RAID10)
do_div(length, map->num_stripes / map->sub_stripes);
else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
do_div(length, map->num_stripes);
+ else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
+ BTRFS_BLOCK_GROUP_RAID6)) {
+ do_div(length, nr_data_stripes(map));
+ rmap_len = map->stripe_len * nr_data_stripes(map);
+ }
buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
BUG_ON(!buf); /* -ENOMEM */
@@ -4545,8 +4953,11 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
do_div(stripe_nr, map->sub_stripes);
} else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
stripe_nr = stripe_nr * map->num_stripes + i;
- }
- bytenr = chunk_start + stripe_nr * map->stripe_len;
+ } /* else if RAID[56], multiply by nr_data_stripes().
+ * Alternatively, just use rmap_len below instead of
+ * map->stripe_len */
+
+ bytenr = chunk_start + stripe_nr * rmap_len;
WARN_ON(nr >= map->num_stripes);
for (j = 0; j < nr; j++) {
if (buf[j] == bytenr)
@@ -4560,7 +4971,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
*logical = buf;
*naddrs = nr;
- *stripe_len = map->stripe_len;
+ *stripe_len = rmap_len;
free_extent_map(em);
return 0;
@@ -4634,7 +5045,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
bio->bi_bdev = (struct block_device *)
(unsigned long)bbio->mirror_num;
/* only send an error to the higher layers if it is
- * beyond the tolerance of the multi-bio
+ * beyond the tolerance of the btrfs bio
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
@@ -4668,13 +5079,18 @@ struct async_sched {
* This will add one bio to the pending list for a device and make sure
* the work struct is scheduled.
*/
-static noinline void schedule_bio(struct btrfs_root *root,
+noinline void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
int rw, struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
+ if (device->missing || !device->bdev) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
/* don't bother with additional async steps for reads, right now */
if (!(rw & REQ_WRITE)) {
bio_get(bio);
@@ -4772,7 +5188,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
#endif
bio->bi_bdev = dev->bdev;
if (async)
- schedule_bio(root, dev, rw, bio);
+ btrfs_schedule_bio(root, dev, rw, bio);
else
btrfsic_submit_bio(rw, bio);
}
@@ -4831,6 +5247,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
u64 logical = (u64)bio->bi_sector << 9;
u64 length = 0;
u64 map_length;
+ u64 *raid_map = NULL;
int ret;
int dev_nr = 0;
int total_devs = 1;
@@ -4839,12 +5256,30 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
length = bio->bi_size;
map_length = length;
- ret = btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
- mirror_num);
- if (ret)
+ ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
+ mirror_num, &raid_map);
+ if (ret) /* -ENOMEM */
return ret;
total_devs = bbio->num_stripes;
+ bbio->orig_bio = first_bio;
+ bbio->private = first_bio->bi_private;
+ bbio->end_io = first_bio->bi_end_io;
+ atomic_set(&bbio->stripes_pending, bbio->num_stripes);
+
+ if (raid_map) {
+ /* In this case, map_length has been set to the length of
+ a single stripe; not the whole write */
+ if (rw & WRITE) {
+ return raid56_parity_write(root, bio, bbio,
+ raid_map, map_length);
+ } else {
+ return raid56_parity_recover(root, bio, bbio,
+ raid_map, map_length,
+ mirror_num);
+ }
+ }
+
if (map_length < length) {
printk(KERN_CRIT "btrfs: mapping failed logical %llu bio len %llu "
"len %llu\n", (unsigned long long)logical,
@@ -4853,11 +5288,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
BUG();
}
- bbio->orig_bio = first_bio;
- bbio->private = first_bio->bi_private;
- bbio->end_io = first_bio->bi_end_io;
- atomic_set(&bbio->stripes_pending, bbio->num_stripes);
-
while (dev_nr < total_devs) {
dev = bbio->stripes[dev_nr].dev;
if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index d3c3939ac75..062d8604d35 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -21,8 +21,8 @@
#include <linux/bio.h>
#include <linux/sort.h>
+#include <linux/btrfs.h>
#include "async-thread.h"
-#include "ioctl.h"
#define BTRFS_STRIPE_LEN (64 * 1024)
@@ -321,7 +321,14 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
struct btrfs_device *tgtdev);
int btrfs_scratch_superblock(struct btrfs_device *device);
-
+void btrfs_schedule_bio(struct btrfs_root *root,
+ struct btrfs_device *device,
+ int rw, struct bio *bio);
+int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
+ u64 logical, u64 len, int mirror_num);
+unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
+ struct btrfs_mapping_tree *map_tree,
+ u64 logical);
static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
int index)
{
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bad7b16271..1a052c0eee8 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -564,6 +564,11 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
dentry = ERR_PTR(-ENOENT);
break;
}
+ if (!S_ISDIR(dir->i_mode)) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOTDIR);
+ break;
+ }
/* skip separators */
while (*s == sep)
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 00e12f2d626..7353bc5d73d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1909,8 +1909,11 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
} while (rc == -EAGAIN);
for (i = 0; i < wdata->nr_pages; i++) {
- if (rc != 0)
+ if (rc != 0) {
SetPageError(wdata->pages[i]);
+ end_page_writeback(wdata->pages[i]);
+ page_cache_release(wdata->pages[i]);
+ }
unlock_page(wdata->pages[i]);
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4474a57f30a..54125e04fd0 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1031,7 +1031,7 @@ static int cifs_parse_security_flavors(char *value,
switch (match_token(value, cifs_secflavor_tokens, args)) {
case Opt_sec_krb5:
- vol->secFlg |= CIFSSEC_MAY_KRB5;
+ vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_SIGN;
break;
case Opt_sec_krb5i:
vol->secFlg |= CIFSSEC_MAY_KRB5 | CIFSSEC_MUST_SIGN;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c16d2a018ab..8c0d8557731 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -43,6 +43,7 @@
#include "cifs_fs_sb.h"
#include "fscache.h"
+
static inline int cifs_convert_flags(unsigned int flags)
{
if ((flags & O_ACCMODE) == O_RDONLY)
@@ -72,10 +73,15 @@ static u32 cifs_posix_convert_flags(unsigned int flags)
else if ((flags & O_ACCMODE) == O_RDWR)
posix_flags = SMB_O_RDWR;
- if (flags & O_CREAT)
+ if (flags & O_CREAT) {
posix_flags |= SMB_O_CREAT;
- if (flags & O_EXCL)
- posix_flags |= SMB_O_EXCL;
+ if (flags & O_EXCL)
+ posix_flags |= SMB_O_EXCL;
+ } else if (flags & O_EXCL)
+ cFYI(1, "Application %s pid %d has incorrectly set O_EXCL flag"
+ "but not O_CREAT on file open. Ignoring O_EXCL",
+ current->comm, current->tgid);
+
if (flags & O_TRUNC)
posix_flags |= SMB_O_TRUNC;
/* be safe and imply O_SYNC for O_DSYNC */
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 2f2e0da1a6b..92e68b33fff 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -635,7 +635,7 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
brelse(bitmap_bh);
printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
", computed = %llu, %llu\n",
- EXT4_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
+ EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
desc_count, bitmap_count);
return bitmap_count;
#else
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 6dda04f05ef..d8cd1f0f466 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -334,7 +334,7 @@ static inline loff_t ext4_get_htree_eof(struct file *filp)
*
* For non-htree, ext4_llseek already chooses the proper max offset.
*/
-loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
+static loff_t ext4_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
int dx_dir = is_dx_dir(inode);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 6e16c186795..4a01ba31526 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1309,6 +1309,7 @@ struct ext4_sb_info {
/* Reclaim extents from extent status tree */
struct shrinker s_es_shrinker;
struct list_head s_es_lru;
+ struct percpu_counter s_extent_cache_cnt;
spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
};
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index f768f4a98a2..95796a1b752 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -147,11 +147,12 @@ static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t end);
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
int nr_to_scan);
-static int ext4_es_reclaim_extents_count(struct super_block *sb);
int __init ext4_init_es(void)
{
- ext4_es_cachep = KMEM_CACHE(extent_status, SLAB_RECLAIM_ACCOUNT);
+ ext4_es_cachep = kmem_cache_create("ext4_extent_status",
+ sizeof(struct extent_status),
+ 0, (SLAB_RECLAIM_ACCOUNT), NULL);
if (ext4_es_cachep == NULL)
return -ENOMEM;
return 0;
@@ -302,8 +303,10 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
/*
* We don't count delayed extent because we never try to reclaim them
*/
- if (!ext4_es_is_delayed(es))
+ if (!ext4_es_is_delayed(es)) {
EXT4_I(inode)->i_es_lru_nr++;
+ percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
+ }
return es;
}
@@ -314,6 +317,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
if (!ext4_es_is_delayed(es)) {
BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
EXT4_I(inode)->i_es_lru_nr--;
+ percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt);
}
kmem_cache_free(ext4_es_cachep, es);
@@ -674,10 +678,11 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
int nr_to_scan = sc->nr_to_scan;
int ret, nr_shrunk = 0;
- trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan);
+ ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+ trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret);
if (!nr_to_scan)
- return ext4_es_reclaim_extents_count(sbi->s_sb);
+ return ret;
INIT_LIST_HEAD(&scanned);
@@ -705,9 +710,10 @@ static int ext4_es_shrink(struct shrinker *shrink, struct shrink_control *sc)
}
list_splice_tail(&scanned, &sbi->s_es_lru);
spin_unlock(&sbi->s_es_lru_lock);
- trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk);
- return ext4_es_reclaim_extents_count(sbi->s_sb);
+ ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt);
+ trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret);
+ return ret;
}
void ext4_es_register_shrinker(struct super_block *sb)
@@ -751,25 +757,6 @@ void ext4_es_lru_del(struct inode *inode)
spin_unlock(&sbi->s_es_lru_lock);
}
-static int ext4_es_reclaim_extents_count(struct super_block *sb)
-{
- struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct ext4_inode_info *ei;
- struct list_head *cur;
- int nr_cached = 0;
-
- spin_lock(&sbi->s_es_lru_lock);
- list_for_each(cur, &sbi->s_es_lru) {
- ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
- read_lock(&ei->i_es_lock);
- nr_cached += ei->i_es_lru_nr;
- read_unlock(&ei->i_es_lock);
- }
- spin_unlock(&sbi->s_es_lru_lock);
- trace_ext4_es_reclaim_extents_count(sb, nr_cached);
- return nr_cached;
-}
-
static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
int nr_to_scan)
{
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 6540ebe058e..7bb713a46fe 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3419,7 +3419,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
win = offs;
ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical -
- EXT4_B2C(sbi, win);
+ EXT4_NUM_B2C(sbi, win);
BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
}
@@ -4565,7 +4565,7 @@ do_more:
EXT4_BLOCKS_PER_GROUP(sb);
count -= overflow;
}
- count_clusters = EXT4_B2C(sbi, count);
+ count_clusters = EXT4_NUM_B2C(sbi, count);
bitmap_bh = ext4_read_block_bitmap(sb, block_group);
if (!bitmap_bh) {
err = -EIO;
@@ -4807,11 +4807,11 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
ext4_group_desc_csum_set(sb, block_group, desc);
ext4_unlock_group(sb, block_group);
percpu_counter_add(&sbi->s_freeclusters_counter,
- EXT4_B2C(sbi, blocks_freed));
+ EXT4_NUM_B2C(sbi, blocks_freed));
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
- atomic_add(EXT4_B2C(sbi, blocks_freed),
+ atomic_add(EXT4_NUM_B2C(sbi, blocks_freed),
&sbi->s_flex_groups[flex_group].free_clusters);
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c7f4d758466..b2c8ee56eb9 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1247,7 +1247,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
ext4_inode_table_set(sb, gdp, group_data->inode_table);
ext4_free_group_clusters_set(sb, gdp,
- EXT4_B2C(sbi, group_data->free_blocks_count));
+ EXT4_NUM_B2C(sbi, group_data->free_blocks_count));
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
if (ext4_has_group_desc_csum(sb))
ext4_itable_unused_set(sb, gdp,
@@ -1349,7 +1349,7 @@ static void ext4_update_super(struct super_block *sb,
/* Update the free space counts */
percpu_counter_add(&sbi->s_freeclusters_counter,
- EXT4_B2C(sbi, free_blocks));
+ EXT4_NUM_B2C(sbi, free_blocks));
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb) * flex_gd->count);
@@ -1360,7 +1360,7 @@ static void ext4_update_super(struct super_block *sb,
sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, group_data[0].group);
- atomic_add(EXT4_B2C(sbi, free_blocks),
+ atomic_add(EXT4_NUM_B2C(sbi, free_blocks),
&sbi->s_flex_groups[flex_group].free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
&sbi->s_flex_groups[flex_group].free_inodes);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 620cf5615ba..5e6c8783619 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -783,6 +783,7 @@ static void ext4_put_super(struct super_block *sb)
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_extent_cache_cnt);
brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
for (i = 0; i < MAXQUOTAS; i++)
@@ -1247,6 +1248,11 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
"quota options when quota turned on");
return -1;
}
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+ ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options "
+ "when QUOTA feature is enabled");
+ return -1;
+ }
qname = match_strdup(args);
if (!qname) {
ext4_msg(sb, KERN_ERR,
@@ -1544,6 +1550,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
"quota options when quota turned on");
return -1;
}
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+ EXT4_FEATURE_RO_COMPAT_QUOTA)) {
+ ext4_msg(sb, KERN_ERR,
+ "Cannot set journaled quota options "
+ "when QUOTA feature is enabled");
+ return -1;
+ }
sbi->s_jquota_fmt = m->mount_opt;
#endif
} else {
@@ -1592,6 +1605,12 @@ static int parse_options(char *options, struct super_block *sb,
return 0;
}
#ifdef CONFIG_QUOTA
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) {
+ ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA "
+ "feature is enabled");
+ return 0;
+ }
if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
clear_opt(sb, USRQUOTA);
@@ -3161,7 +3180,7 @@ int ext4_calculate_overhead(struct super_block *sb)
}
/* Add the journal blocks as well */
if (sbi->s_journal)
- overhead += EXT4_B2C(sbi, sbi->s_journal->j_maxlen);
+ overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
sbi->s_overhead = overhead;
smp_wmb();
@@ -3688,6 +3707,9 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
}
+ if (!err) {
+ err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
+ }
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount3;
@@ -3711,13 +3733,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
#ifdef CONFIG_QUOTA
- sb->s_qcop = &ext4_qctl_operations;
sb->dq_op = &ext4_quota_operations;
-
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA)) {
- /* Use qctl operations for hidden quota files. */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA))
sb->s_qcop = &ext4_qctl_sysfile_operations;
- }
+ else
+ sb->s_qcop = &ext4_qctl_operations;
#endif
memcpy(sb->s_uuid, es->s_uuid, sizeof(es->s_uuid));
@@ -3913,6 +3933,16 @@ no_journal:
if (err)
goto failed_mount7;
+#ifdef CONFIG_QUOTA
+ /* Enable quota usage during mount. */
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
+ !(sb->s_flags & MS_RDONLY)) {
+ err = ext4_enable_quotas(sb);
+ if (err)
+ goto failed_mount8;
+ }
+#endif /* CONFIG_QUOTA */
+
EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
ext4_orphan_cleanup(sb, es);
EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
@@ -3930,16 +3960,6 @@ no_journal:
} else
descr = "out journal";
-#ifdef CONFIG_QUOTA
- /* Enable quota usage during mount. */
- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) &&
- !(sb->s_flags & MS_RDONLY)) {
- err = ext4_enable_quotas(sb);
- if (err)
- goto failed_mount8;
- }
-#endif /* CONFIG_QUOTA */
-
if (test_opt(sb, DISCARD)) {
struct request_queue *q = bdev_get_queue(sb->s_bdev);
if (!blk_queue_discard(q))
@@ -3993,6 +4013,7 @@ failed_mount3:
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_counter_destroy(&sbi->s_extent_cache_cnt);
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
@@ -4538,6 +4559,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (!old_opts.s_qf_names[i]) {
for (j = 0; j < i; j++)
kfree(old_opts.s_qf_names[j]);
+ kfree(orig_data);
return -ENOMEM;
}
} else
@@ -4816,9 +4838,12 @@ static int ext4_release_dquot(struct dquot *dquot)
static int ext4_mark_dquot_dirty(struct dquot *dquot)
{
+ struct super_block *sb = dquot->dq_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+
/* Are we journaling quotas? */
- if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
- EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
+ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_QUOTA) ||
+ sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
dquot_mark_dquot_dirty(dquot);
return ext4_write_dquot(dquot);
} else {
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index b7e2385c6e9..d6ee5aed56b 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -382,7 +382,7 @@ handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask,
if (err < 0) {
jbd2_free_handle(handle);
current->journal_info = NULL;
- handle = ERR_PTR(err);
+ return ERR_PTR(err);
}
handle->h_type = type;
handle->h_line_no = line_no;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b586fe9af47..1f941674b08 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -237,6 +237,8 @@ nfs_find_actor(struct inode *inode, void *opaque)
if (NFS_FILEID(inode) != fattr->fileid)
return 0;
+ if ((S_IFMT & inode->i_mode) != (S_IFMT & fattr->mode))
+ return 0;
if (nfs_compare_fh(NFS_FH(inode), fh))
return 0;
if (is_bad_inode(inode) || NFS_STALE(inode))
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 194c4841033..49eeb044c10 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -99,7 +99,8 @@ static void filelayout_reset_write(struct nfs_write_data *data)
task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
}
@@ -119,7 +120,8 @@ static void filelayout_reset_read(struct nfs_read_data *data)
task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
}
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 8c07241fe52..b8da95548d3 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -36,7 +36,7 @@
* Default data server connection timeout and retrans values.
* Set by module parameters dataserver_timeo and dataserver_retrans.
*/
-#define NFS4_DEF_DS_TIMEO 60
+#define NFS4_DEF_DS_TIMEO 600 /* in tenths of a second */
#define NFS4_DEF_DS_RETRANS 5
/*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index eae83bf96c6..b2671cb0f90 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -93,6 +93,8 @@ static int nfs4_map_errors(int err)
return err;
switch (err) {
case -NFS4ERR_RESOURCE:
+ case -NFS4ERR_LAYOUTTRYLATER:
+ case -NFS4ERR_RECALLCONFLICT:
return -EREMOTEIO;
case -NFS4ERR_WRONGSEC:
return -EPERM;
@@ -1158,6 +1160,7 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
data->o_arg.fmode);
iput(inode);
out:
+ nfs_release_seqid(data->o_arg.seqid);
return state;
err_put_inode:
iput(inode);
@@ -6045,6 +6048,7 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
struct nfs_server *server = NFS_SERVER(inode);
struct pnfs_layout_hdr *lo;
struct nfs4_state *state = NULL;
+ unsigned long timeo, giveup;
dprintk("--> %s\n", __func__);
@@ -6056,7 +6060,10 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata)
goto out;
case -NFS4ERR_LAYOUTTRYLATER:
case -NFS4ERR_RECALLCONFLICT:
- task->tk_status = -NFS4ERR_DELAY;
+ timeo = rpc_get_timeout(task->tk_client);
+ giveup = lgp->args.timestamp + timeo;
+ if (time_after(giveup, jiffies))
+ task->tk_status = -NFS4ERR_DELAY;
break;
case -NFS4ERR_EXPIRED:
case -NFS4ERR_BAD_STATEID:
@@ -6129,11 +6136,13 @@ static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags)
static void nfs4_layoutget_release(void *calldata)
{
struct nfs4_layoutget *lgp = calldata;
- struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct inode *inode = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(inode);
size_t max_pages = max_response_pages(server);
dprintk("--> %s\n", __func__);
nfs4_free_pages(lgp->args.layout.pages, max_pages);
+ pnfs_put_layout_hdr(NFS_I(inode)->layout);
put_nfs_open_context(lgp->args.ctx);
kfree(calldata);
dprintk("<-- %s\n", __func__);
@@ -6148,7 +6157,8 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = {
struct pnfs_layout_segment *
nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
{
- struct nfs_server *server = NFS_SERVER(lgp->args.inode);
+ struct inode *inode = lgp->args.inode;
+ struct nfs_server *server = NFS_SERVER(inode);
size_t max_pages = max_response_pages(server);
struct rpc_task *task;
struct rpc_message msg = {
@@ -6174,10 +6184,15 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
return ERR_PTR(-ENOMEM);
}
lgp->args.layout.pglen = max_pages * PAGE_SIZE;
+ lgp->args.timestamp = jiffies;
lgp->res.layoutp = &lgp->args.layout;
lgp->res.seq_res.sr_slot = NULL;
nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
+
+ /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
+ pnfs_get_layout_hdr(NFS_I(inode)->layout);
+
task = rpc_run_task(&task_setup_data);
if (IS_ERR(task))
return ERR_CAST(task);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 6be70f622b6..48ac5aad625 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1181,7 +1181,7 @@ pnfs_update_layout(struct inode *ino,
struct nfs_client *clp = server->nfs_client;
struct pnfs_layout_hdr *lo;
struct pnfs_layout_segment *lseg = NULL;
- bool first = false;
+ bool first;
if (!pnfs_enabled_sb(NFS_SERVER(ino)))
goto out;
@@ -1215,10 +1215,9 @@ pnfs_update_layout(struct inode *ino,
goto out_unlock;
atomic_inc(&lo->plh_outstanding);
- if (list_empty(&lo->plh_segs))
- first = true;
-
+ first = list_empty(&lo->plh_layouts) ? true : false;
spin_unlock(&ino->i_lock);
+
if (first) {
/* The lo must be on the clp list if there is any
* chance of a CB_LAYOUTRECALL(FILE) coming in.
@@ -1422,13 +1421,15 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
int pnfs_write_done_resend_to_mds(struct inode *inode,
struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops)
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
+ pgio.pg_dreq = dreq;
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1463,7 +1464,8 @@ static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
/*
@@ -1578,13 +1580,15 @@ EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct inode *inode,
struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops)
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq)
{
struct nfs_pageio_descriptor pgio;
LIST_HEAD(failed);
/* Resend all requests through the MDS */
nfs_pageio_init_read(&pgio, inode, compl_ops);
+ pgio.pg_dreq = dreq;
while (!list_empty(head)) {
struct nfs_page *req = nfs_list_entry(head->next);
@@ -1615,7 +1619,8 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
&hdr->pages,
- hdr->completion_ops);
+ hdr->completion_ops,
+ hdr->dreq);
}
/*
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 97cb358bb88..94ba8041774 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -230,9 +230,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops);
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq);
int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
- const struct nfs_pgio_completion_ops *compl_ops);
+ const struct nfs_pgio_completion_ops *compl_ops,
+ struct nfs_direct_req *dreq);
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
/* nfs4_deviceid_flags */
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index d26a32f5b53..1f1f38f0c5d 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -335,20 +335,14 @@ static void nfs_async_rename_done(struct rpc_task *task, void *calldata)
struct inode *old_dir = data->old_dir;
struct inode *new_dir = data->new_dir;
struct dentry *old_dentry = data->old_dentry;
- struct dentry *new_dentry = data->new_dentry;
if (!NFS_PROTO(old_dir)->rename_done(task, old_dir, new_dir)) {
rpc_restart_call_prepare(task);
return;
}
- if (task->tk_status != 0) {
+ if (task->tk_status != 0)
nfs_cancel_async_unlink(old_dentry);
- return;
- }
-
- d_drop(old_dentry);
- d_drop(new_dentry);
}
/**
@@ -549,6 +543,18 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry)
error = rpc_wait_for_completion_task(task);
if (error == 0)
error = task->tk_status;
+ switch (error) {
+ case 0:
+ /* The rename succeeded */
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ d_move(dentry, sdentry);
+ break;
+ case -ERESTARTSYS:
+ /* The result of the rename is unknown. Play it safe by
+ * forcing a new lookup */
+ d_drop(dentry);
+ d_drop(sdentry);
+ }
rpc_put_task(task);
out_dput:
dput(sdentry);
diff --git a/fs/open.c b/fs/open.c
index 62f907e3bc3..e3441f58d2e 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -30,6 +30,7 @@
#include <linux/fs_struct.h>
#include <linux/ima.h>
#include <linux/dnotify.h>
+#include <linux/compat.h>
#include "internal.h"
@@ -140,6 +141,13 @@ SYSCALL_DEFINE2(truncate, const char __user *, path, long, length)
return do_sys_truncate(path, length);
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length)
+{
+ return do_sys_truncate(path, length);
+}
+#endif
+
static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
{
struct inode *inode;
@@ -195,6 +203,13 @@ SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length)
return ret;
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length)
+{
+ return do_sys_ftruncate(fd, length, 1);
+}
+#endif
+
/* LFS versions of truncate are only needed on 32 bit machines */
#if BITS_PER_LONG == 32
SYSCALL_DEFINE(truncate64)(const char __user * path, loff_t length)
diff --git a/fs/read_write.c b/fs/read_write.c
index 3ae6dbe828b..a698eff457f 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -15,6 +15,7 @@
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
+#include <linux/compat.h>
#include "read_write.h"
#include <asm/uaccess.h>
@@ -247,6 +248,13 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
return retval;
}
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE3(lseek, unsigned int, fd, compat_off_t, offset, unsigned int, whence)
+{
+ return sys_lseek(fd, offset, whence);
+}
+#endif
+
#ifdef __ARCH_WANT_SYS_LLSEEK
SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned long, offset_low, loff_t __user *, result,
@@ -278,7 +286,6 @@ out_putf:
}
#endif
-
/*
* rw_verify_area doesn't like huge counts. We limit
* them to something that fits in "int" so that others
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 0e606b12a59..32b644f0369 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -383,10 +383,10 @@ SYSCALL_DEFINE2(timerfd_gettime, int, ufd, struct itimerspec __user *, otmr)
return copy_to_user(otmr, &kotmr, sizeof(kotmr)) ? -EFAULT: 0;
}
-#ifdef COMPAT
+#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
- const struct itimerspec __user *, utmr,
- struct itimerspec __user *, otmr)
+ const struct compat_itimerspec __user *, utmr,
+ struct compat_itimerspec __user *, otmr)
{
struct itimerspec new, old;
int ret;
@@ -402,12 +402,12 @@ COMPAT_SYSCALL_DEFINE4(timerfd_settime, int, ufd, int, flags,
}
COMPAT_SYSCALL_DEFINE2(timerfd_gettime, int, ufd,
- struct itimerspec __user *, otmr)
+ struct compat_itimerspec __user *, otmr)
{
struct itimerspec kotmr;
int ret = do_timerfd_gettime(ufd, &kotmr);
if (ret)
return ret;
- return put_compat_itimerspec(otmr, &t) ? -EFAULT: 0;
+ return put_compat_itimerspec(otmr, &kotmr) ? -EFAULT: 0;
}
#endif
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
index c084767c88b..59811df58c5 100644
--- a/include/asm-generic/checksum.h
+++ b/include/asm-generic/checksum.h
@@ -38,12 +38,15 @@ extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
csum_partial_copy((src), (dst), (len), (sum))
#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
*/
extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+#endif
+#ifndef csum_fold
/*
* Fold a partial checksum
*/
@@ -54,6 +57,7 @@ static inline __sum16 csum_fold(__wsum csum)
sum = (sum & 0xffff) + (sum >> 16);
return (__force __sum16)~sum;
}
+#endif
#ifndef csum_tcpudp_nofold
/*
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 9788568f797..c184aa8ec8c 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -7,7 +7,6 @@
* address space, e.g. all NOMMU machines.
*/
#include <linux/sched.h>
-#include <linux/mm.h>
#include <linux/string.h>
#include <asm/segment.h>
@@ -32,7 +31,9 @@ static inline void set_fs(mm_segment_t fs)
}
#endif
+#ifndef segment_eq
#define segment_eq(a, b) ((a).seg == (b).seg)
+#endif
#define VERIFY_READ 0
#define VERIFY_WRITE 1
@@ -168,12 +169,18 @@ static inline __must_check long __copy_to_user(void __user *to,
-EFAULT; \
})
+#ifndef __put_user_fn
+
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
size = __copy_to_user(ptr, x, size);
return size ? -EFAULT : size;
}
+#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
+
+#endif
+
extern int __put_user_bad(void) __attribute__((noreturn));
#define __get_user(x, ptr) \
@@ -224,12 +231,17 @@ extern int __put_user_bad(void) __attribute__((noreturn));
-EFAULT; \
})
+#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
size = __copy_from_user(x, ptr, size);
return size ? -EFAULT : size;
}
+#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
+
+#endif
+
extern int __get_user_bad(void) __attribute__((noreturn));
#ifndef __copy_from_user_inatomic
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index f46cfd73a55..bcbdd7484e5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -485,14 +485,6 @@ static inline bool acpi_driver_match_device(struct device *dev,
#endif /* !CONFIG_ACPI */
-#ifdef CONFIG_ACPI_NUMA
-void __init early_parse_srat(void);
-#else
-static inline void early_parse_srat(void)
-{
-}
-#endif
-
#ifdef CONFIG_ACPI
void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
u32 pm1a_ctrl, u32 pm1b_ctrl));
diff --git a/include/linux/bcma/bcma_driver_chipcommon.h b/include/linux/bcma/bcma_driver_chipcommon.h
index 1d002b58b60..8390c474f69 100644
--- a/include/linux/bcma/bcma_driver_chipcommon.h
+++ b/include/linux/bcma/bcma_driver_chipcommon.h
@@ -528,6 +528,7 @@ struct bcma_sflash {
u32 size;
struct mtd_info *mtd;
+ void *priv;
};
#endif
diff --git a/include/linux/btrfs.h b/include/linux/btrfs.h
new file mode 100644
index 00000000000..22d799147db
--- /dev/null
+++ b/include/linux/btrfs.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_BTRFS_H
+#define _LINUX_BTRFS_H
+
+#include <uapi/linux/btrfs.h>
+
+#endif /* _LINUX_BTRFS_H */
diff --git a/include/linux/compat.h b/include/linux/compat.h
index de095b0462a..76a87fb57ac 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -359,6 +359,7 @@ asmlinkage ssize_t compat_sys_preadv(unsigned long fd,
asmlinkage ssize_t compat_sys_pwritev(unsigned long fd,
const struct compat_iovec __user *vec,
unsigned long vlen, u32 pos_low, u32 pos_high);
+asmlinkage long compat_sys_lseek(unsigned int, compat_off_t, unsigned int);
asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr_t __user *argv,
const compat_uptr_t __user *envp);
@@ -535,6 +536,8 @@ asmlinkage long compat_sys_openat(int dfd, const char __user *filename,
asmlinkage long compat_sys_open_by_handle_at(int mountdirfd,
struct file_handle __user *handle,
int flags);
+asmlinkage long compat_sys_truncate(const char __user *, compat_off_t);
+asmlinkage long compat_sys_ftruncate(unsigned int, compat_ulong_t);
asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp,
compat_ulong_t __user *outp,
compat_ulong_t __user *exp,
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index bf6afa2fc43..1e483fa7afb 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -68,8 +68,8 @@ typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);
-typedef int (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
- unsigned status_flags, char *result, unsigned maxlen);
+typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
+ unsigned status_flags, char *result, unsigned maxlen);
typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
@@ -175,6 +175,14 @@ struct target_type {
#define DM_TARGET_IMMUTABLE 0x00000004
#define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
+/*
+ * Some targets need to be sent the same WRITE bio several times so
+ * that they can send copies of it to different devices. This function
+ * examines any supplied bio and returns the number of copies of it the
+ * target requires.
+ */
+typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -187,26 +195,26 @@ struct dm_target {
uint32_t max_io_len;
/*
- * A number of zero-length barrier requests that will be submitted
+ * A number of zero-length barrier bios that will be submitted
* to the target for the purpose of flushing cache.
*
- * The request number can be accessed with dm_bio_get_target_request_nr.
- * It is a responsibility of the target driver to remap these requests
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ * It is a responsibility of the target driver to remap these bios
* to the real underlying devices.
*/
- unsigned num_flush_requests;
+ unsigned num_flush_bios;
/*
- * The number of discard requests that will be submitted to the target.
- * The request number can be accessed with dm_bio_get_target_request_nr.
+ * The number of discard bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_discard_requests;
+ unsigned num_discard_bios;
/*
- * The number of WRITE SAME requests that will be submitted to the target.
- * The request number can be accessed with dm_bio_get_target_request_nr.
+ * The number of WRITE SAME bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
*/
- unsigned num_write_same_requests;
+ unsigned num_write_same_bios;
/*
* The minimum number of extra bytes allocated in each bio for the
@@ -214,6 +222,13 @@ struct dm_target {
*/
unsigned per_bio_data_size;
+ /*
+ * If defined, this function is called to find out how many
+ * duplicate bios should be sent to the target when writing
+ * data.
+ */
+ dm_num_write_bios_fn num_write_bios;
+
/* target specific data */
void *private;
@@ -233,10 +248,10 @@ struct dm_target {
bool discards_supported:1;
/*
- * Set if the target required discard request to be split
+ * Set if the target required discard bios to be split
* on max_io_len boundary.
*/
- bool split_discard_requests:1;
+ bool split_discard_bios:1;
/*
* Set if this target does not return zeroes on discarded blocks.
@@ -261,7 +276,7 @@ struct dm_target_io {
struct dm_io *io;
struct dm_target *ti;
union map_info info;
- unsigned target_request_nr;
+ unsigned target_bio_nr;
struct bio clone;
};
@@ -275,9 +290,9 @@ static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}
-static inline unsigned dm_bio_get_target_request_nr(const struct bio *bio)
+static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
- return container_of(bio, struct dm_target_io, clone)->target_request_nr;
+ return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
int dm_register_target(struct target_type *t);
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index 47d9d376e4e..f486d636b82 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -21,11 +21,34 @@
#define DM_KCOPYD_IGNORE_ERROR 1
+struct dm_kcopyd_throttle {
+ unsigned throttle;
+ unsigned num_io_jobs;
+ unsigned io_period;
+ unsigned total_period;
+ unsigned last_jiffies;
+};
+
+/*
+ * kcopyd clients that want to support throttling must pass an initialised
+ * dm_kcopyd_throttle struct into dm_kcopyd_client_create().
+ * Two or more clients may share the same instance of this struct between
+ * them if they wish to be throttled as a group.
+ *
+ * This macro also creates a corresponding module parameter to configure
+ * the amount of throttling.
+ */
+#define DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(name, description) \
+static struct dm_kcopyd_throttle dm_kcopyd_throttle = { 100, 0, 0, 0, 0 }; \
+module_param_named(name, dm_kcopyd_throttle.throttle, uint, 0644); \
+MODULE_PARM_DESC(name, description)
+
/*
* To use kcopyd you must first create a dm_kcopyd_client object.
+ * throttle can be NULL if you don't want any throttling.
*/
struct dm_kcopyd_client;
-struct dm_kcopyd_client *dm_kcopyd_client_create(void);
+struct dm_kcopyd_client *dm_kcopyd_client_create(struct dm_kcopyd_throttle *throttle);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
/*
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f5939999cb6..91ac8da2502 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -1001,6 +1001,22 @@ void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
struct dma_chan *net_dma_find_channel(void);
#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
+#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
+ __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
+
+static inline struct dma_chan
+*__dma_request_slave_channel_compat(dma_cap_mask_t *mask, dma_filter_fn fn,
+ void *fn_param, struct device *dev,
+ char *name)
+{
+ struct dma_chan *chan;
+
+ chan = dma_request_slave_channel(dev, name);
+ if (chan)
+ return chan;
+
+ return __dma_request_channel(mask, fn, fn_param);
+}
/* --- Helper iov-locking functions --- */
diff --git a/include/linux/dw_dmac.h b/include/linux/dw_dmac.h
index 41766de66e3..481ab2345d6 100644
--- a/include/linux/dw_dmac.h
+++ b/include/linux/dw_dmac.h
@@ -27,7 +27,6 @@
*/
struct dw_dma_slave {
struct device *dma_dev;
- const char *bus_id;
u32 cfg_hi;
u32 cfg_lo;
u8 src_master;
@@ -60,9 +59,6 @@ struct dw_dma_platform_data {
unsigned short block_size;
unsigned char nr_masters;
unsigned char data_width[4];
-
- struct dw_dma_slave *sd;
- unsigned int sd_count;
};
/* bursts size */
@@ -114,6 +110,5 @@ void dw_dma_cyclic_stop(struct dma_chan *chan);
dma_addr_t dw_dma_get_src_addr(struct dma_chan *chan);
dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan);
-bool dw_dma_generic_filter(struct dma_chan *chan, void *param);
#endif /* DW_DMAC_H */
diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h
index 56fae865e27..0dca785288c 100644
--- a/include/linux/hsi/hsi.h
+++ b/include/linux/hsi/hsi.h
@@ -121,9 +121,9 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info,
* @device: Driver model representation of the device
* @tx_cfg: HSI TX configuration
* @rx_cfg: HSI RX configuration
- * @e_handler: Callback for handling port events (RX Wake High/Low)
- * @pclaimed: Keeps tracks if the clients claimed its associated HSI port
- * @nb: Notifier block for port events
+ * e_handler: Callback for handling port events (RX Wake High/Low)
+ * pclaimed: Keeps track of whether the client claimed its associated HSI port
+ * nb: Notifier block for port events
*/
struct hsi_client {
struct device device;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 3e5ecb2d790..f388203db7e 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -42,7 +42,6 @@ struct memblock {
extern struct memblock memblock;
extern int memblock_debug;
-extern struct movablemem_map movablemem_map;
#define memblock_dbg(fmt, ...) \
if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
@@ -61,7 +60,6 @@ int memblock_reserve(phys_addr_t base, phys_addr_t size);
void memblock_trim_memory(phys_addr_t align);
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-
void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
unsigned long *out_end_pfn, int *out_nid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e7c3f9a0111..1ede55f292c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1333,24 +1333,6 @@ extern void free_bootmem_with_active_regions(int nid,
unsigned long max_low_pfn);
extern void sparse_memory_present_with_active_regions(int nid);
-#define MOVABLEMEM_MAP_MAX MAX_NUMNODES
-struct movablemem_entry {
- unsigned long start_pfn; /* start pfn of memory segment */
- unsigned long end_pfn; /* end pfn of memory segment (exclusive) */
-};
-
-struct movablemem_map {
- bool acpi; /* true if using SRAT info */
- int nr_map;
- struct movablemem_entry map[MOVABLEMEM_MAP_MAX];
- nodemask_t numa_nodes_hotplug; /* on which nodes we specify memory */
- nodemask_t numa_nodes_kernel; /* on which nodes kernel resides in */
-};
-
-extern void __init insert_movablemem_map(unsigned long start_pfn,
- unsigned long end_pfn);
-extern int __init movablemem_map_overlap(unsigned long start_pfn,
- unsigned long end_pfn);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index f6eb4332ac9..4b02512e421 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -245,6 +245,7 @@ struct map_info {
unsigned long pfow_base;
unsigned long map_priv_1;
unsigned long map_priv_2;
+ struct device_node *device_node;
void *fldrv_priv;
struct mtd_chip_driver *fldrv;
};
@@ -328,7 +329,7 @@ static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word
static inline map_word map_word_load(struct map_info *map, const void *ptr)
{
- map_word r = {{0} };
+ map_word r;
if (map_bankwidth_is_1(map))
r.x[0] = *(unsigned char *)ptr;
@@ -342,6 +343,8 @@ static inline map_word map_word_load(struct map_info *map, const void *ptr)
#endif
else if (map_bankwidth_is_large(map))
memcpy(r.x, ptr, map->bankwidth);
+ else
+ BUG();
return r;
}
@@ -391,7 +394,7 @@ static inline map_word map_word_ff(struct map_info *map)
static inline map_word inline_map_read(struct map_info *map, unsigned long ofs)
{
- map_word uninitialized_var(r);
+ map_word r;
if (map_bankwidth_is_1(map))
r.x[0] = __raw_readb(map->virt + ofs);
@@ -425,6 +428,8 @@ static inline void inline_map_write(struct map_info *map, const map_word datum,
#endif
else if (map_bankwidth_is_large(map))
memcpy_toio(map->virt+ofs, datum.x, map->bankwidth);
+ else
+ BUG();
mb();
}
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 13441ddac33..4b993d358da 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -233,6 +233,7 @@ struct nfs4_layoutget_args {
struct inode *inode;
struct nfs_open_context *ctx;
nfs4_stateid stateid;
+ unsigned long timestamp;
struct nfs4_layoutdriver_data layout;
};
diff --git a/include/linux/platform_data/elm.h b/include/linux/platform_data/elm.h
new file mode 100644
index 00000000000..1bd5244d1dc
--- /dev/null
+++ b/include/linux/platform_data/elm.h
@@ -0,0 +1,54 @@
+/*
+ * BCH Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __ELM_H
+#define __ELM_H
+
+enum bch_ecc {
+ BCH4_ECC = 0,
+ BCH8_ECC,
+};
+
+/* ELM supports 8 error syndrome processes */
+#define ERROR_VECTOR_MAX 8
+
+#define BCH8_ECC_OOB_BYTES 13
+#define BCH4_ECC_OOB_BYTES 7
+/* RBL requires 14 bytes even though BCH8 uses only 13 bytes */
+#define BCH8_SIZE (BCH8_ECC_OOB_BYTES + 1)
+/* Uses 1 extra byte to handle erased pages */
+#define BCH4_SIZE (BCH4_ECC_OOB_BYTES + 1)
+
+/**
+ * struct elm_errorvec - error vector for elm
+ * @error_reported: set true for vectors where an error is reported
+ * @error_uncorrectable: number of uncorrectable errors
+ * @error_count: number of correctable errors in the sector
+ * @error_loc: buffer for error location
+ *
+ */
+struct elm_errorvec {
+ bool error_reported;
+ bool error_uncorrectable;
+ int error_count;
+ int error_loc[ERROR_VECTOR_MAX];
+};
+
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+ struct elm_errorvec *err_vec);
+void elm_config(struct device *dev, enum bch_ecc bch_type);
+#endif /* __ELM_H */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 4a4abde000c..2cf4ffaa3cd 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -160,6 +160,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
int rpc_protocol(struct rpc_clnt *);
struct net * rpc_net_ns(struct rpc_clnt *);
size_t rpc_max_payload(struct rpc_clnt *);
+unsigned long rpc_get_timeout(struct rpc_clnt *clnt);
void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
diff --git a/include/scsi/Kbuild b/include/scsi/Kbuild
index f2b94918994..562ff9d591b 100644
--- a/include/scsi/Kbuild
+++ b/include/scsi/Kbuild
@@ -1,4 +1 @@
-header-y += scsi_netlink.h
-header-y += scsi_netlink_fc.h
-header-y += scsi_bsg_fc.h
header-y += fc/
diff --git a/include/scsi/fc/Kbuild b/include/scsi/fc/Kbuild
index 56603813c6c..e69de29bb2d 100644
--- a/include/scsi/fc/Kbuild
+++ b/include/scsi/fc/Kbuild
@@ -1,4 +0,0 @@
-header-y += fc_els.h
-header-y += fc_fs.h
-header-y += fc_gs.h
-header-y += fc_ns.h
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
index 604cb9bb3e7..7e231487034 100644
--- a/include/scsi/fcoe_sysfs.h
+++ b/include/scsi/fcoe_sysfs.h
@@ -34,7 +34,8 @@ struct fcoe_sysfs_function_template {
void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
- void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+ void (*set_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
+ int (*set_fcoe_ctlr_enabled)(struct fcoe_ctlr_device *);
void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
};
@@ -48,6 +49,12 @@ enum fip_conn_type {
FIP_CONN_TYPE_VN2VN,
};
+enum ctlr_enabled_state {
+ FCOE_CTLR_ENABLED,
+ FCOE_CTLR_DISABLED,
+ FCOE_CTLR_UNUSED,
+};
+
struct fcoe_ctlr_device {
u32 id;
@@ -64,6 +71,8 @@ struct fcoe_ctlr_device {
int fcf_dev_loss_tmo;
enum fip_conn_type mode;
+ enum ctlr_enabled_state enabled;
+
/* expected in host order for displaying */
struct fcoe_fc_els_lesb lesb;
};
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index 8742d853a3b..4427393115e 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -260,6 +260,9 @@ void __fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb,
struct net_device *netdev);
void fcoe_wwn_to_str(u64 wwn, char *buf, int len);
int fcoe_validate_vport_create(struct fc_vport *vport);
+int fcoe_link_speed_update(struct fc_lport *);
+void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
+void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
/**
* is_fip_mode() - returns true if FIP mode selected.
@@ -289,8 +292,11 @@ static inline bool is_fip_mode(struct fcoe_ctlr *fip)
* @attached: whether this transport is already attached
* @list: list linkage to all attached transports
* @match: handler to allow the transport driver to match up a given netdev
+ * @alloc: handler to allocate per-instance FCoE structures
+ * (no discovery or login)
* @create: handler to sysfs entry of create for FCoE instances
- * @destroy: handler to sysfs entry of destroy for FCoE instances
+ * @destroy: handler to delete per-instance FCoE structures
+ * (frees all memory)
* @enable: handler to sysfs entry of enable for FCoE instances
* @disable: handler to sysfs entry of disable for FCoE instances
*/
@@ -299,6 +305,7 @@ struct fcoe_transport {
bool attached;
struct list_head list;
bool (*match) (struct net_device *device);
+ int (*alloc) (struct net_device *device);
int (*create) (struct net_device *device, enum fip_state fip_mode);
int (*destroy) (struct net_device *device);
int (*enable) (struct net_device *device);
@@ -347,7 +354,20 @@ struct fcoe_port {
struct timer_list timer;
struct work_struct destroy_work;
u8 data_src_addr[ETH_ALEN];
+ struct net_device * (*get_netdev)(const struct fc_lport *lport);
};
+
+/**
+ * fcoe_get_netdev() - Return the net device associated with a local port
+ * @lport: The local port to get the net device from
+ */
+static inline struct net_device *fcoe_get_netdev(const struct fc_lport *lport)
+{
+ struct fcoe_port *port = ((struct fcoe_port *)lport_priv(lport));
+
+ return (port->get_netdev) ? port->get_netdev(lport) : NULL;
+}
+
void fcoe_clean_pending_queue(struct fc_lport *);
void fcoe_check_wait_queue(struct fc_lport *lport, struct sk_buff *skb);
void fcoe_queue_timer(ulong lport);
@@ -356,7 +376,7 @@ int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
/* FCoE Sysfs helpers */
void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
-void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
+void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *);
/**
* struct netdev_list
@@ -372,4 +392,12 @@ struct fcoe_netdev_mapping {
int fcoe_transport_attach(struct fcoe_transport *ft);
int fcoe_transport_detach(struct fcoe_transport *ft);
+/* sysfs store handler for ctrl_control interface */
+ssize_t fcoe_ctlr_create_store(struct bus_type *bus,
+ const char *buf, size_t count);
+ssize_t fcoe_ctlr_destroy_store(struct bus_type *bus,
+ const char *buf, size_t count);
+
#endif /* _LIBFCOE_H */
+
+
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 49084807eb6..2b6956e9853 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -873,7 +873,7 @@ static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsign
SHOST_DIF_TYPE2_PROTECTION,
SHOST_DIF_TYPE3_PROTECTION };
- if (target_type > SHOST_DIF_TYPE3_PROTECTION)
+ if (target_type >= ARRAY_SIZE(cap))
return 0;
return shost->prot_capabilities & cap[target_type] ? target_type : 0;
@@ -887,7 +887,7 @@ static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsign
SHOST_DIX_TYPE2_PROTECTION,
SHOST_DIX_TYPE3_PROTECTION };
- if (target_type > SHOST_DIX_TYPE3_PROTECTION)
+ if (target_type >= ARRAY_SIZE(cap))
return 0;
return shost->prot_capabilities & cap[target_type];
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index c0457c0d1a6..4ee47100385 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -2255,64 +2255,48 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->found ? __entry->status : 0)
);
-TRACE_EVENT(ext4_es_reclaim_extents_count,
- TP_PROTO(struct super_block *sb, int nr_cached),
-
- TP_ARGS(sb, nr_cached),
-
- TP_STRUCT__entry(
- __field( dev_t, dev )
- __field( int, nr_cached )
- ),
-
- TP_fast_assign(
- __entry->dev = sb->s_dev;
- __entry->nr_cached = nr_cached;
- ),
-
- TP_printk("dev %d,%d cached objects nr %d",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->nr_cached)
-);
-
TRACE_EVENT(ext4_es_shrink_enter,
- TP_PROTO(struct super_block *sb, int nr_to_scan),
+ TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
- TP_ARGS(sb, nr_to_scan),
+ TP_ARGS(sb, nr_to_scan, cache_cnt),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, nr_to_scan )
+ __field( int, cache_cnt )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->nr_to_scan = nr_to_scan;
+ __entry->cache_cnt = cache_cnt;
),
- TP_printk("dev %d,%d nr to scan %d",
+ TP_printk("dev %d,%d nr_to_scan %d cache_cnt %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->nr_to_scan)
+ __entry->nr_to_scan, __entry->cache_cnt)
);
TRACE_EVENT(ext4_es_shrink_exit,
- TP_PROTO(struct super_block *sb, int shrunk_nr),
+ TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt),
- TP_ARGS(sb, shrunk_nr),
+ TP_ARGS(sb, shrunk_nr, cache_cnt),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( int, shrunk_nr )
+ __field( int, cache_cnt )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->shrunk_nr = shrunk_nr;
+ __entry->cache_cnt = cache_cnt;
),
- TP_printk("dev %d,%d nr to scan %d",
+ TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->shrunk_nr)
+ __entry->shrunk_nr, __entry->cache_cnt)
);
#endif /* _TRACE_EXT4_H */
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4e67194fd2c..5c8a1d25e21 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -68,6 +68,7 @@ header-y += blkpg.h
header-y += blktrace_api.h
header-y += bpqether.h
header-y += bsg.h
+header-y += btrfs.h
header-y += can.h
header-y += capability.h
header-y += capi.h
diff --git a/fs/btrfs/ioctl.h b/include/uapi/linux/btrfs.h
index dabca9cc8c2..fa3a5f9338f 100644
--- a/fs/btrfs/ioctl.h
+++ b/include/uapi/linux/btrfs.h
@@ -16,8 +16,9 @@
* Boston, MA 021110-1307, USA.
*/
-#ifndef __IOCTL_
-#define __IOCTL_
+#ifndef _UAPI_LINUX_BTRFS_H
+#define _UAPI_LINUX_BTRFS_H
+#include <linux/types.h>
#include <linux/ioctl.h>
#define BTRFS_IOCTL_MAGIC 0x94
@@ -406,6 +407,13 @@ struct btrfs_ioctl_received_subvol_args {
__u64 reserved[16]; /* in */
};
+/*
+ * Caller doesn't want file data in the send stream, even if the
+ * search of clone sources doesn't find an extent. UPDATE_EXTENT
+ * commands will be sent instead of WRITE commands.
+ */
+#define BTRFS_SEND_FLAG_NO_FILE_DATA 0x1
+
struct btrfs_ioctl_send_args {
__s64 send_fd; /* in */
__u64 clone_sources_count; /* in */
@@ -494,9 +502,13 @@ struct btrfs_ioctl_send_args {
struct btrfs_ioctl_qgroup_create_args)
#define BTRFS_IOC_QGROUP_LIMIT _IOR(BTRFS_IOCTL_MAGIC, 43, \
struct btrfs_ioctl_qgroup_limit_args)
+#define BTRFS_IOC_GET_FSLABEL _IOR(BTRFS_IOCTL_MAGIC, 49, \
+ char[BTRFS_LABEL_SIZE])
+#define BTRFS_IOC_SET_FSLABEL _IOW(BTRFS_IOCTL_MAGIC, 50, \
+ char[BTRFS_LABEL_SIZE])
#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
struct btrfs_ioctl_get_dev_stats)
#define BTRFS_IOC_DEV_REPLACE _IOWR(BTRFS_IOCTL_MAGIC, 53, \
struct btrfs_ioctl_dev_replace_args)
-#endif
+#endif /* _UAPI_LINUX_BTRFS_H */
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 539b179b349..7e75b6fd8d4 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -267,9 +267,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 23
-#define DM_VERSION_PATCHLEVEL 1
-#define DM_VERSION_EXTRA "-ioctl (2012-12-18)"
+#define DM_VERSION_MINOR 24
+#define DM_VERSION_PATCHLEVEL 0
+#define DM_VERSION_EXTRA "-ioctl (2013-01-15)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
@@ -336,4 +336,9 @@ enum {
*/
#define DM_SECURE_DATA_FLAG (1 << 15) /* In */
+/*
+ * If set, a message generated output data.
+ */
+#define DM_DATA_OUT_FLAG (1 << 16) /* Out */
+
#endif /* _LINUX_DM_IOCTL_H */
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index 29a87dd26cf..75746d52f20 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -1,2 +1,5 @@
# UAPI Header export list
header-y += fc/
+header-y += scsi_bsg_fc.h
+header-y += scsi_netlink.h
+header-y += scsi_netlink_fc.h
diff --git a/include/uapi/scsi/fc/Kbuild b/include/uapi/scsi/fc/Kbuild
index aafaa5aa54d..5ead9fac265 100644
--- a/include/uapi/scsi/fc/Kbuild
+++ b/include/uapi/scsi/fc/Kbuild
@@ -1 +1,5 @@
# UAPI Header export list
+header-y += fc_els.h
+header-y += fc_fs.h
+header-y += fc_gs.h
+header-y += fc_ns.h
diff --git a/include/scsi/fc/fc_els.h b/include/uapi/scsi/fc/fc_els.h
index 481abbd48e3..481abbd48e3 100644
--- a/include/scsi/fc/fc_els.h
+++ b/include/uapi/scsi/fc/fc_els.h
diff --git a/include/scsi/fc/fc_fs.h b/include/uapi/scsi/fc/fc_fs.h
index 50f28b14345..50f28b14345 100644
--- a/include/scsi/fc/fc_fs.h
+++ b/include/uapi/scsi/fc/fc_fs.h
diff --git a/include/scsi/fc/fc_gs.h b/include/uapi/scsi/fc/fc_gs.h
index a37346d47eb..a37346d47eb 100644
--- a/include/scsi/fc/fc_gs.h
+++ b/include/uapi/scsi/fc/fc_gs.h
diff --git a/include/scsi/fc/fc_ns.h b/include/uapi/scsi/fc/fc_ns.h
index f7751d53f1d..f7751d53f1d 100644
--- a/include/scsi/fc/fc_ns.h
+++ b/include/uapi/scsi/fc/fc_ns.h
diff --git a/include/scsi/scsi_bsg_fc.h b/include/uapi/scsi/scsi_bsg_fc.h
index 3031b900b08..3031b900b08 100644
--- a/include/scsi/scsi_bsg_fc.h
+++ b/include/uapi/scsi/scsi_bsg_fc.h
diff --git a/include/scsi/scsi_netlink.h b/include/uapi/scsi/scsi_netlink.h
index 62b4edab15d..62b4edab15d 100644
--- a/include/scsi/scsi_netlink.h
+++ b/include/uapi/scsi/scsi_netlink.h
diff --git a/include/scsi/scsi_netlink_fc.h b/include/uapi/scsi/scsi_netlink_fc.h
index cbf76e47976..cbf76e47976 100644
--- a/include/scsi/scsi_netlink_fc.h
+++ b/include/uapi/scsi/scsi_netlink_fc.h
diff --git a/init/Kconfig b/init/Kconfig
index 0a5e80fb9ba..22616cd434b 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1230,6 +1230,14 @@ config SYSCTL_ARCH_UNALIGN_NO_WARN
Allows arch to define/use @no_unaligned_warning to possibly warn
about unaligned access emulation going on under the hood.
+config SYSCTL_ARCH_UNALIGN_ALLOW
+ bool
+ help
+ Enable support for /proc/sys/kernel/unaligned-trap
+ Allows arches to define/use @unaligned_enabled to runtime toggle
+ the unaligned access emulation.
+ see arch/parisc/kernel/unaligned.c for reference
+
config KALLSYMS
bool "Load all symbols for debugging/ksymoops" if EXPERT
default y
diff --git a/kernel/debug/debug_core.h b/kernel/debug/debug_core.h
index 3494c28a7e7..2235967e78b 100644
--- a/kernel/debug/debug_core.h
+++ b/kernel/debug/debug_core.h
@@ -72,6 +72,8 @@ extern int dbg_kdb_mode;
#ifdef CONFIG_KGDB_KDB
extern int kdb_stub(struct kgdb_state *ks);
extern int kdb_parse(const char *cmdstr);
+extern int kdb_common_init_state(struct kgdb_state *ks);
+extern int kdb_common_deinit_state(void);
#else /* ! CONFIG_KGDB_KDB */
static inline int kdb_stub(struct kgdb_state *ks)
{
diff --git a/kernel/debug/gdbstub.c b/kernel/debug/gdbstub.c
index 38573f35a5a..19d9a578c75 100644
--- a/kernel/debug/gdbstub.c
+++ b/kernel/debug/gdbstub.c
@@ -783,7 +783,10 @@ static void gdb_cmd_query(struct kgdb_state *ks)
len = len / 2;
remcom_out_buffer[len++] = 0;
+ kdb_common_init_state(ks);
kdb_parse(remcom_out_buffer);
+ kdb_common_deinit_state();
+
strcpy(remcom_out_buffer, "OK");
}
break;
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 8418c2f8ec5..70a504601dc 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -486,11 +486,9 @@ static int kdb_bc(int argc, const char **argv)
/*
* kdb_ss
*
- * Process the 'ss' (Single Step) and 'ssb' (Single Step to Branch)
- * commands.
+ * Process the 'ss' (Single Step) command.
*
* ss
- * ssb
*
* Parameters:
* argc Argument count
@@ -498,35 +496,23 @@ static int kdb_bc(int argc, const char **argv)
* Outputs:
* None.
* Returns:
- * KDB_CMD_SS[B] for success, a kdb error if failure.
+ * KDB_CMD_SS for success, a kdb error if failure.
* Locking:
* None.
* Remarks:
*
* Set the arch specific option to trigger a debug trap after the next
* instruction.
- *
- * For 'ssb', set the trace flag in the debug trap handler
- * after printing the current insn and return directly without
- * invoking the kdb command processor, until a branch instruction
- * is encountered.
*/
static int kdb_ss(int argc, const char **argv)
{
- int ssb = 0;
-
- ssb = (strcmp(argv[0], "ssb") == 0);
if (argc != 0)
return KDB_ARGCOUNT;
/*
* Set trace flag and go.
*/
KDB_STATE_SET(DOING_SS);
- if (ssb) {
- KDB_STATE_SET(DOING_SSB);
- return KDB_CMD_SSB;
- }
return KDB_CMD_SS;
}
@@ -561,8 +547,6 @@ void __init kdb_initbptab(void)
kdb_register_repeat("ss", kdb_ss, "",
"Single Step", 1, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("ssb", kdb_ss, "",
- "Single step to branch/call", 0, KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index be7b33b73d3..328d18ef31e 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -34,6 +34,22 @@ EXPORT_SYMBOL_GPL(kdb_poll_idx);
static struct kgdb_state *kdb_ks;
+int kdb_common_init_state(struct kgdb_state *ks)
+{
+ kdb_initial_cpu = atomic_read(&kgdb_active);
+ kdb_current_task = kgdb_info[ks->cpu].task;
+ kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+ return 0;
+}
+
+int kdb_common_deinit_state(void)
+{
+ kdb_initial_cpu = -1;
+ kdb_current_task = NULL;
+ kdb_current_regs = NULL;
+ return 0;
+}
+
int kdb_stub(struct kgdb_state *ks)
{
int error = 0;
@@ -94,13 +110,10 @@ int kdb_stub(struct kgdb_state *ks)
}
/* Set initial kdb state variables */
KDB_STATE_CLEAR(KGDB_TRANS);
- kdb_initial_cpu = atomic_read(&kgdb_active);
- kdb_current_task = kgdb_info[ks->cpu].task;
- kdb_current_regs = kgdb_info[ks->cpu].debuggerinfo;
+ kdb_common_init_state(ks);
/* Remove any breakpoints as needed by kdb and clear single step */
kdb_bp_remove();
KDB_STATE_CLEAR(DOING_SS);
- KDB_STATE_CLEAR(DOING_SSB);
KDB_STATE_SET(PAGER);
/* zero out any offline cpu data */
for_each_present_cpu(i) {
@@ -125,9 +138,7 @@ int kdb_stub(struct kgdb_state *ks)
* Upon exit from the kdb main loop setup break points and restart
* the system based on the requested continue state
*/
- kdb_initial_cpu = -1;
- kdb_current_task = NULL;
- kdb_current_regs = NULL;
+ kdb_common_deinit_state();
KDB_STATE_CLEAR(PAGER);
kdbnearsym_cleanup();
if (error == KDB_CMD_KGDB) {
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 8875254120b..00eb8f7fbf4 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -124,7 +124,7 @@ static kdbmsg_t kdbmsgs[] = {
};
#undef KDBMSG
-static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t);
+static const int __nkdb_err = ARRAY_SIZE(kdbmsgs);
/*
@@ -175,7 +175,7 @@ static char *__env[] = {
(char *)0,
};
-static const int __nenv = (sizeof(__env) / sizeof(char *));
+static const int __nenv = ARRAY_SIZE(__env);
struct task_struct *kdb_curr_task(int cpu)
{
@@ -681,34 +681,50 @@ static int kdb_defcmd(int argc, const char **argv)
}
if (argc != 3)
return KDB_ARGCOUNT;
- defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
- GFP_KDB);
- if (!defcmd_set) {
- kdb_printf("Could not allocate new defcmd_set entry for %s\n",
- argv[1]);
- defcmd_set = save_defcmd_set;
+ if (in_dbg_master()) {
+ kdb_printf("Command only available during kdb_init()\n");
return KDB_NOTIMP;
}
+ defcmd_set = kmalloc((defcmd_set_count + 1) * sizeof(*defcmd_set),
+ GFP_KDB);
+ if (!defcmd_set)
+ goto fail_defcmd;
memcpy(defcmd_set, save_defcmd_set,
defcmd_set_count * sizeof(*defcmd_set));
- kfree(save_defcmd_set);
s = defcmd_set + defcmd_set_count;
memset(s, 0, sizeof(*s));
s->usable = 1;
s->name = kdb_strdup(argv[1], GFP_KDB);
+ if (!s->name)
+ goto fail_name;
s->usage = kdb_strdup(argv[2], GFP_KDB);
+ if (!s->usage)
+ goto fail_usage;
s->help = kdb_strdup(argv[3], GFP_KDB);
+ if (!s->help)
+ goto fail_help;
if (s->usage[0] == '"') {
- strcpy(s->usage, s->usage+1);
+ strcpy(s->usage, argv[2]+1);
s->usage[strlen(s->usage)-1] = '\0';
}
if (s->help[0] == '"') {
- strcpy(s->help, s->help+1);
+ strcpy(s->help, argv[3]+1);
s->help[strlen(s->help)-1] = '\0';
}
++defcmd_set_count;
defcmd_in_progress = 1;
+ kfree(save_defcmd_set);
return 0;
+fail_help:
+ kfree(s->usage);
+fail_usage:
+ kfree(s->name);
+fail_name:
+ kfree(defcmd_set);
+fail_defcmd:
+ kdb_printf("Could not allocate new defcmd_set entry for %s\n", argv[1]);
+ defcmd_set = save_defcmd_set;
+ return KDB_NOTIMP;
}
/*
@@ -1112,7 +1128,6 @@ void kdb_set_current_task(struct task_struct *p)
* KDB_CMD_GO User typed 'go'.
* KDB_CMD_CPU User switched to another cpu.
* KDB_CMD_SS Single step.
- * KDB_CMD_SSB Single step until branch.
*/
static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_dbtrap_t db_result)
@@ -1151,14 +1166,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
kdb_printf("due to Debug @ " kdb_machreg_fmt "\n",
instruction_pointer(regs));
break;
- case KDB_DB_SSB:
- /*
- * In the midst of ssb command. Just return.
- */
- KDB_DEBUG_STATE("kdb_local 3", reason);
- return KDB_CMD_SSB; /* Continue with SSB command */
-
- break;
case KDB_DB_SS:
break;
case KDB_DB_SSBPT:
@@ -1281,7 +1288,6 @@ do_full_getstr:
if (diag == KDB_CMD_GO
|| diag == KDB_CMD_CPU
|| diag == KDB_CMD_SS
- || diag == KDB_CMD_SSB
|| diag == KDB_CMD_KGDB)
break;
@@ -1368,12 +1374,6 @@ int kdb_main_loop(kdb_reason_t reason, kdb_reason_t reason2, int error,
break;
}
- if (result == KDB_CMD_SSB) {
- KDB_STATE_SET(DOING_SS);
- KDB_STATE_SET(DOING_SSB);
- break;
- }
-
if (result == KDB_CMD_KGDB) {
if (!KDB_STATE(DOING_KGDB))
kdb_printf("Entering please attach debugger "
@@ -2350,69 +2350,6 @@ static int kdb_pid(int argc, const char **argv)
return 0;
}
-/*
- * kdb_ll - This function implements the 'll' command which follows a
- * linked list and executes an arbitrary command for each
- * element.
- */
-static int kdb_ll(int argc, const char **argv)
-{
- int diag = 0;
- unsigned long addr;
- long offset = 0;
- unsigned long va;
- unsigned long linkoffset;
- int nextarg;
- const char *command;
-
- if (argc != 3)
- return KDB_ARGCOUNT;
-
- nextarg = 1;
- diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
- if (diag)
- return diag;
-
- diag = kdbgetularg(argv[2], &linkoffset);
- if (diag)
- return diag;
-
- /*
- * Using the starting address as
- * the first element in the list, and assuming that
- * the list ends with a null pointer.
- */
-
- va = addr;
- command = kdb_strdup(argv[3], GFP_KDB);
- if (!command) {
- kdb_printf("%s: cannot duplicate command\n", __func__);
- return 0;
- }
- /* Recursive use of kdb_parse, do not use argv after this point */
- argv = NULL;
-
- while (va) {
- char buf[80];
-
- if (KDB_FLAG(CMD_INTERRUPT))
- goto out;
-
- sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
- diag = kdb_parse(buf);
- if (diag)
- goto out;
-
- addr = va + linkoffset;
- if (kdb_getword(&va, addr, sizeof(va)))
- goto out;
- }
-
-out:
- kfree(command);
- return diag;
-}
-
static int kdb_kgdb(int argc, const char **argv)
{
return KDB_CMD_KGDB;
@@ -2430,11 +2367,15 @@ static int kdb_help(int argc, const char **argv)
kdb_printf("-----------------------------"
"-----------------------------\n");
for_each_kdbcmd(kt, i) {
- if (kt->cmd_name)
- kdb_printf("%-15.15s %-20.20s %s\n", kt->cmd_name,
- kt->cmd_usage, kt->cmd_help);
+ char *space = "";
if (KDB_FLAG(CMD_INTERRUPT))
return 0;
+ if (!kt->cmd_name)
+ continue;
+ if (strlen(kt->cmd_usage) > 20)
+ space = "\n ";
+ kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
+ kt->cmd_usage, space, kt->cmd_help);
}
return 0;
}
@@ -2739,7 +2680,7 @@ int kdb_register_repeat(char *cmd,
(kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
kfree(kdb_commands);
}
- memset(new + kdb_max_commands, 0,
+ memset(new + kdb_max_commands - KDB_BASE_CMD_MAX, 0,
kdb_command_extend * sizeof(*new));
kdb_commands = new;
kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
@@ -2843,15 +2784,13 @@ static void __init kdb_inittab(void)
"Stack traceback", 1, KDB_REPEAT_NONE);
kdb_register_repeat("btp", kdb_bt, "<pid>",
"Display stack for process <pid>", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bta", kdb_bt, "[DRSTCZEUIMA]",
- "Display stack all processes", 0, KDB_REPEAT_NONE);
+ kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+ "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btc", kdb_bt, "",
"Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
kdb_register_repeat("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
KDB_REPEAT_NONE);
- kdb_register_repeat("ll", kdb_ll, "<first-element> <linkoffset> <cmd>",
- "Execute cmd for each element in linked list", 0, KDB_REPEAT_NONE);
kdb_register_repeat("env", kdb_env, "",
"Show environment variables", 0, KDB_REPEAT_NONE);
kdb_register_repeat("set", kdb_set, "",
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 392ec6a2584..7afd3c8c41d 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -19,7 +19,6 @@
#define KDB_CMD_GO (-1001)
#define KDB_CMD_CPU (-1002)
#define KDB_CMD_SS (-1003)
-#define KDB_CMD_SSB (-1004)
#define KDB_CMD_KGDB (-1005)
/* Internal debug flags */
@@ -125,8 +124,6 @@ extern int kdb_state;
* kdb control */
#define KDB_STATE_HOLD_CPU 0x00000010 /* Hold this cpu inside kdb */
#define KDB_STATE_DOING_SS 0x00000020 /* Doing ss command */
-#define KDB_STATE_DOING_SSB 0x00000040 /* Doing ssb command,
- * DOING_SS is also set */
#define KDB_STATE_SSBPT 0x00000080 /* Install breakpoint
* after one ss, independent of
* DOING_SS */
@@ -191,7 +188,6 @@ extern void kdb_bp_remove(void);
typedef enum {
KDB_DB_BPT, /* Breakpoint */
KDB_DB_SS, /* Single-step trap */
- KDB_DB_SSB, /* Single step to branch */
KDB_DB_SSBPT, /* Single step over breakpoint */
KDB_DB_NOBPT /* Spurious breakpoint */
} kdb_dbtrap_t;
diff --git a/kernel/signal.c b/kernel/signal.c
index 2676aac4103..2ec870a4c3c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2653,7 +2653,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
if (oset) {
compat_sigset_t old32;
sigset_to_compat(&old32, &old_set);
- if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
+ if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
return -EFAULT;
}
return 0;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d1b4ee67d2d..afc1dc60f3f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -157,6 +157,9 @@ extern int sysctl_tsb_ratio;
#ifdef __hppa__
extern int pwrsw_enabled;
+#endif
+
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
extern int unaligned_enabled;
#endif
@@ -555,6 +558,8 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#endif
+#ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW
{
.procname = "unaligned-trap",
.data = &unaligned_enabled,
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index dbb58ae1b8e..140e8782417 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -80,4 +80,22 @@ config KDB_KEYBOARD
help
KDB can use a PS/2 type keyboard for an input device
+config KDB_CONTINUE_CATASTROPHIC
+ int "KDB: continue after catastrophic errors"
+ depends on KGDB_KDB
+ default "0"
+ help
+ This integer controls the behaviour of kdb when the kernel gets a
+ catastrophic error, i.e. for a panic or oops.
+ When KDB is active and a catastrophic error occurs, nothing extra
+ will happen until you type 'go'.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 0 (default). The first time
+ you type 'go', you will be warned by kdb. The second time you type
+ 'go', KDB tries to continue. No guarantees that the
+ kernel is still usable in this situation.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 1. KDB tries to continue.
+ No guarantees that the kernel is still usable in this situation.
+ CONFIG_KDB_CONTINUE_CATASTROPHIC == 2. KDB forces a reboot.
+ If you are not sure, say 0.
+
endif # KGDB
diff --git a/lib/checksum.c b/lib/checksum.c
index 12dceb27ff2..129775eb6de 100644
--- a/lib/checksum.c
+++ b/lib/checksum.c
@@ -102,6 +102,7 @@ out:
}
#endif
+#ifndef ip_fast_csum
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
@@ -111,6 +112,7 @@ __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
return (__force __sum16)~do_csum(iph, ihl*4);
}
EXPORT_SYMBOL(ip_fast_csum);
+#endif
/*
* computes the checksum of a memory block at buff, length len,
diff --git a/mm/memblock.c b/mm/memblock.c
index 1bcd9b97056..b8d9147e5c0 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -92,58 +92,9 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
*
* Find @size free area aligned to @align in the specified range and node.
*
- * If we have CONFIG_HAVE_MEMBLOCK_NODE_MAP defined, we need to check if the
- * memory we found if not in hotpluggable ranges.
- *
* RETURNS:
* Found address on success, %0 on failure.
*/
-#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
- phys_addr_t end, phys_addr_t size,
- phys_addr_t align, int nid)
-{
- phys_addr_t this_start, this_end, cand;
- u64 i;
- int curr = movablemem_map.nr_map - 1;
-
- /* pump up @end */
- if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
- end = memblock.current_limit;
-
- /* avoid allocating the first page */
- start = max_t(phys_addr_t, start, PAGE_SIZE);
- end = max(start, end);
-
- for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
- this_start = clamp(this_start, start, end);
- this_end = clamp(this_end, start, end);
-
-restart:
- if (this_end <= this_start || this_end < size)
- continue;
-
- for (; curr >= 0; curr--) {
- if ((movablemem_map.map[curr].start_pfn << PAGE_SHIFT)
- < this_end)
- break;
- }
-
- cand = round_down(this_end - size, align);
- if (curr >= 0 &&
- cand < movablemem_map.map[curr].end_pfn << PAGE_SHIFT) {
- this_end = movablemem_map.map[curr].start_pfn
- << PAGE_SHIFT;
- goto restart;
- }
-
- if (cand >= this_start)
- return cand;
- }
-
- return 0;
-}
-#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
phys_addr_t end, phys_addr_t size,
phys_addr_t align, int nid)
@@ -172,7 +123,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
}
return 0;
}
-#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
* memblock_find_in_range - find free area in given range
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0dade3f18f7..8fcced7823f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -202,18 +202,11 @@ static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
-/* Movable memory ranges, will also be used by memblock subsystem. */
-struct movablemem_map movablemem_map = {
- .acpi = false,
- .nr_map = 0,
-};
-
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
-static unsigned long __meminitdata zone_movable_limit[MAX_NUMNODES];
/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
@@ -4412,77 +4405,6 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
-/**
- * sanitize_zone_movable_limit - Sanitize the zone_movable_limit array.
- *
- * zone_movable_limit is initialized as 0. This function will try to get
- * the first ZONE_MOVABLE pfn of each node from movablemem_map, and
- * assigne them to zone_movable_limit.
- * zone_movable_limit[nid] == 0 means no limit for the node.
- *
- * Note: Each range is represented as [start_pfn, end_pfn)
- */
-static void __meminit sanitize_zone_movable_limit(void)
-{
- int map_pos = 0, i, nid;
- unsigned long start_pfn, end_pfn;
-
- if (!movablemem_map.nr_map)
- return;
-
- /* Iterate all ranges from minimum to maximum */
- for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
- /*
- * If we have found lowest pfn of ZONE_MOVABLE of the node
- * specified by user, just go on to check next range.
- */
- if (zone_movable_limit[nid])
- continue;
-
-#ifdef CONFIG_ZONE_DMA
- /* Skip DMA memory. */
- if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA])
- start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA];
-#endif
-
-#ifdef CONFIG_ZONE_DMA32
- /* Skip DMA32 memory. */
- if (start_pfn < arch_zone_highest_possible_pfn[ZONE_DMA32])
- start_pfn = arch_zone_highest_possible_pfn[ZONE_DMA32];
-#endif
-
-#ifdef CONFIG_HIGHMEM
- /* Skip lowmem if ZONE_MOVABLE is highmem. */
- if (zone_movable_is_highmem() &&
- start_pfn < arch_zone_lowest_possible_pfn[ZONE_HIGHMEM])
- start_pfn = arch_zone_lowest_possible_pfn[ZONE_HIGHMEM];
-#endif
-
- if (start_pfn >= end_pfn)
- continue;
-
- while (map_pos < movablemem_map.nr_map) {
- if (end_pfn <= movablemem_map.map[map_pos].start_pfn)
- break;
-
- if (start_pfn >= movablemem_map.map[map_pos].end_pfn) {
- map_pos++;
- continue;
- }
-
- /*
- * The start_pfn of ZONE_MOVABLE is either the minimum
- * pfn specified by movablemem_map, or 0, which means
- * the node has no ZONE_MOVABLE.
- */
- zone_movable_limit[nid] = max(start_pfn,
- movablemem_map.map[map_pos].start_pfn);
-
- break;
- }
- }
-}
-
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
@@ -4500,6 +4422,7 @@ static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
+
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
@@ -4941,19 +4864,12 @@ static void __init find_zone_movable_pfns_for_nodes(void)
required_kernelcore = max(required_kernelcore, corepages);
}
- /*
- * If neither kernelcore/movablecore nor movablemem_map is specified,
- * there is no ZONE_MOVABLE. But if movablemem_map is specified, the
- * start pfn of ZONE_MOVABLE has been stored in zone_movable_limit[].
- */
- if (!required_kernelcore) {
- if (movablemem_map.nr_map)
- memcpy(zone_movable_pfn, zone_movable_limit,
- sizeof(zone_movable_pfn));
+ /* If kernelcore was not specified, there is no ZONE_MOVABLE */
+ if (!required_kernelcore)
goto out;
- }
/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
+ find_usable_zone_for_movable();
usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
restart:
@@ -4981,24 +4897,10 @@ restart:
for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
unsigned long size_pages;
- /*
- * Find more memory for kernelcore in
- * [zone_movable_pfn[nid], zone_movable_limit[nid]).
- */
start_pfn = max(start_pfn, zone_movable_pfn[nid]);
if (start_pfn >= end_pfn)
continue;
- if (zone_movable_limit[nid]) {
- end_pfn = min(end_pfn, zone_movable_limit[nid]);
- /* No range left for kernelcore in this node */
- if (start_pfn >= end_pfn) {
- zone_movable_pfn[nid] =
- zone_movable_limit[nid];
- break;
- }
- }
-
/* Account for what is only usable for kernelcore */
if (start_pfn < usable_startpfn) {
unsigned long kernel_pages;
@@ -5058,12 +4960,12 @@ restart:
if (usable_nodes && required_kernelcore > usable_nodes)
goto restart;
-out:
/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
for (nid = 0; nid < MAX_NUMNODES; nid++)
zone_movable_pfn[nid] =
roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+out:
/* restore the node_state */
node_states[N_MEMORY] = saved_node_state;
}
@@ -5126,8 +5028,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
/* Find the PFNs that ZONE_MOVABLE begins at in each node */
memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
- find_usable_zone_for_movable();
- sanitize_zone_movable_limit();
find_zone_movable_pfns_for_nodes();
/* Print out the zone ranges */
@@ -5211,181 +5111,6 @@ static int __init cmdline_parse_movablecore(char *p)
early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);
-/**
- * movablemem_map_overlap() - Check if a range overlaps movablemem_map.map[].
- * @start_pfn: start pfn of the range to be checked
- * @end_pfn: end pfn of the range to be checked (exclusive)
- *
- * This function checks if a given memory range [start_pfn, end_pfn) overlaps
- * the movablemem_map.map[] array.
- *
- * Return: index of the first overlapped element in movablemem_map.map[]
- * or -1 if they don't overlap each other.
- */
-int __init movablemem_map_overlap(unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int overlap;
-
- if (!movablemem_map.nr_map)
- return -1;
-
- for (overlap = 0; overlap < movablemem_map.nr_map; overlap++)
- if (start_pfn < movablemem_map.map[overlap].end_pfn)
- break;
-
- if (overlap == movablemem_map.nr_map ||
- end_pfn <= movablemem_map.map[overlap].start_pfn)
- return -1;
-
- return overlap;
-}
-
-/**
- * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
- * @start_pfn: start pfn of the range
- * @end_pfn: end pfn of the range
- *
- * This function will also merge the overlapped ranges, and sort the array
- * by start_pfn in monotonic increasing order.
- */
-void __init insert_movablemem_map(unsigned long start_pfn,
- unsigned long end_pfn)
-{
- int pos, overlap;
-
- /*
- * pos will be at the 1st overlapped range, or the position
- * where the element should be inserted.
- */
- for (pos = 0; pos < movablemem_map.nr_map; pos++)
- if (start_pfn <= movablemem_map.map[pos].end_pfn)
- break;
-
- /* If there is no overlapped range, just insert the element. */
- if (pos == movablemem_map.nr_map ||
- end_pfn < movablemem_map.map[pos].start_pfn) {
- /*
- * If pos is not the end of array, we need to move all
- * the rest elements backward.
- */
- if (pos < movablemem_map.nr_map)
- memmove(&movablemem_map.map[pos+1],
- &movablemem_map.map[pos],
- sizeof(struct movablemem_entry) *
- (movablemem_map.nr_map - pos));
- movablemem_map.map[pos].start_pfn = start_pfn;
- movablemem_map.map[pos].end_pfn = end_pfn;
- movablemem_map.nr_map++;
- return;
- }
-
- /* overlap will be at the last overlapped range */
- for (overlap = pos + 1; overlap < movablemem_map.nr_map; overlap++)
- if (end_pfn < movablemem_map.map[overlap].start_pfn)
- break;
-
- /*
- * If there are more ranges overlapped, we need to merge them,
- * and move the rest elements forward.
- */
- overlap--;
- movablemem_map.map[pos].start_pfn = min(start_pfn,
- movablemem_map.map[pos].start_pfn);
- movablemem_map.map[pos].end_pfn = max(end_pfn,
- movablemem_map.map[overlap].end_pfn);
-
- if (pos != overlap && overlap + 1 != movablemem_map.nr_map)
- memmove(&movablemem_map.map[pos+1],
- &movablemem_map.map[overlap+1],
- sizeof(struct movablemem_entry) *
- (movablemem_map.nr_map - overlap - 1));
-
- movablemem_map.nr_map -= overlap - pos;
-}
-
-/**
- * movablemem_map_add_region - Add a memory range into movablemem_map.
- * @start: physical start address of range
- * @end: physical end address of range
- *
- * This function transform the physical address into pfn, and then add the
- * range into movablemem_map by calling insert_movablemem_map().
- */
-static void __init movablemem_map_add_region(u64 start, u64 size)
-{
- unsigned long start_pfn, end_pfn;
-
- /* In case size == 0 or start + size overflows */
- if (start + size <= start)
- return;
-
- if (movablemem_map.nr_map >= ARRAY_SIZE(movablemem_map.map)) {
- pr_err("movablemem_map: too many entries;"
- " ignoring [mem %#010llx-%#010llx]\n",
- (unsigned long long) start,
- (unsigned long long) (start + size - 1));
- return;
- }
-
- start_pfn = PFN_DOWN(start);
- end_pfn = PFN_UP(start + size);
- insert_movablemem_map(start_pfn, end_pfn);
-}
-
-/*
- * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
- * @p: The boot option of the following format:
- * movablemem_map=nn[KMG]@ss[KMG]
- *
- * This option sets the memory range [ss, ss+nn) to be used as movable memory.
- *
- * Return: 0 on success or -EINVAL on failure.
- */
-static int __init cmdline_parse_movablemem_map(char *p)
-{
- char *oldp;
- u64 start_at, mem_size;
-
- if (!p)
- goto err;
-
- if (!strcmp(p, "acpi"))
- movablemem_map.acpi = true;
-
- /*
- * If user decide to use info from BIOS, all the other user specified
- * ranges will be ingored.
- */
- if (movablemem_map.acpi) {
- if (movablemem_map.nr_map) {
- memset(movablemem_map.map, 0,
- sizeof(struct movablemem_entry)
- * movablemem_map.nr_map);
- movablemem_map.nr_map = 0;
- }
- return 0;
- }
-
- oldp = p;
- mem_size = memparse(p, &p);
- if (p == oldp)
- goto err;
-
- if (*p == '@') {
- oldp = ++p;
- start_at = memparse(p, &p);
- if (p == oldp || *p != '\0')
- goto err;
-
- movablemem_map_add_region(start_at, mem_size);
- return 0;
- }
-err:
- return -EINVAL;
-}
-early_param("movablemem_map", cmdline_parse_movablemem_map);
-
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
/**
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d7a369e6108..dcc446e7fbf 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1197,6 +1197,21 @@ size_t rpc_max_payload(struct rpc_clnt *clnt)
EXPORT_SYMBOL_GPL(rpc_max_payload);
/**
+ * rpc_get_timeout - Get timeout for transport in units of HZ
+ * @clnt: RPC client to query
+ */
+unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
+{
+ unsigned long ret;
+
+ rcu_read_lock();
+ ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
+ rcu_read_unlock();
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rpc_get_timeout);
+
+/**
* rpc_force_rebind - force transport to check that remote port is unchanged
* @clnt: client to rebind
*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 846c34fdee9..b7478d5e7ff 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -487,13 +487,17 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
* xprt_wait_for_buffer_space - wait for transport output buffer to clear
* @task: task to be put to sleep
* @action: function pointer to be executed after wait
+ *
+ * Note that we only set the timer for the case of RPC_IS_SOFT(), since
+ * we don't in general want to force a socket disconnection due to
+ * an incomplete RPC call transmission.
*/
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
- task->tk_timeout = req->rq_timeout;
+ task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0;
rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index c2206c87fc9..d5818c98d05 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -94,6 +94,12 @@
#define CPUINFO_PROC "cpu model"
#endif
+#ifdef __arc__
+#define rmb() asm volatile("" ::: "memory")
+#define cpu_relax() rmb()
+#define CPUINFO_PROC "Processor"
+#endif
+
#include <time.h>
#include <unistd.h>
#include <sys/types.h>